Merge branch 'for-linus' of git://git.kernel.dk/linux-block
author Linus Torvalds <torvalds@linux-foundation.org>
Fri, 24 Mar 2017 21:37:12 +0000 (14:37 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Fri, 24 Mar 2017 21:37:12 +0000 (14:37 -0700)
Pull block fixes from Jens Axboe:
 "A few fixes for the current series that should go into -rc4. This
  contains:

   - a fix for a potential corruption of un-started requests from Ming.

   - a blk-stat fix from Omar, ensuring we flush the stat batch before
     checking nr_samples.

   - a set of fixes from Sagi for the nvmeof family"

* 'for-linus' of git://git.kernel.dk/linux-block:
  blk-mq: don't complete un-started request in timeout handler
  nvme-loop: handle cpu unplug when re-establishing the controller
  nvme-rdma: handle cpu unplug when re-establishing the controller
  nvmet-rdma: Fix a possible uninitialized variable dereference
  nvmet: confirm sq percpu has scheduled and switched to atomic
  nvme-loop: fix a possible use-after-free when destroying the admin queue
  blk-stat: fix blk_stat_sum() if all samples are batched
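
Two of the block fixes above reduce to small invariants that can be
modeled outside the kernel. Ming's blk-mq fix enforces the first one: a
timeout handler may only complete a request the driver actually started,
because completing an un-started request can corrupt or free memory the
submission path is still initializing. Below is a minimal userspace
sketch of that rule; the names (struct request, start_request,
timeout_handler) are illustrative stand-ins, not the kernel's blk-mq
structures:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct request {
        atomic_bool started;    /* set once the driver issues the I/O */
        bool completed;
};

static void start_request(struct request *rq)
{
        atomic_store(&rq->started, true);
}

static void timeout_handler(struct request *rq)
{
        /* The invariant: ignore requests that were never started. */
        if (!atomic_load(&rq->started))
                return;
        rq->completed = true;   /* stands in for completing the request */
}

int main(void)
{
        struct request rq = { .completed = false };

        atomic_init(&rq.started, false);
        timeout_handler(&rq);           /* un-started: must be a no-op */
        printf("after premature timeout: completed=%d\n", rq.completed);

        start_request(&rq);
        timeout_handler(&rq);           /* started: may complete */
        printf("after real timeout: completed=%d\n", rq.completed);
        return 0;
}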

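Omar's blk-stat fix is the second: samples are first accumulated in a
batch and only later folded into the running totals, so a reader must
flush the batch before testing nr_samples, or a window whose samples are
all still batched reads back as empty. Again a hedged sketch with
hypothetical names, not the actual blk-stat implementation:

#include <stdint.h>
#include <stdio.h>

struct stat_bucket {
        uint64_t batch;         /* samples not yet folded in */
        uint64_t batch_sum;
        uint64_t nr_samples;    /* folded-in sample count */
        uint64_t sum;
};

static void stat_add(struct stat_bucket *s, uint64_t value)
{
        s->batch++;
        s->batch_sum += value;
}

static void stat_flush_batch(struct stat_bucket *s)
{
        s->nr_samples += s->batch;
        s->sum += s->batch_sum;
        s->batch = 0;
        s->batch_sum = 0;
}

static uint64_t stat_mean(struct stat_bucket *s)
{
        stat_flush_batch(s);    /* the fix: flush before the check */
        if (!s->nr_samples)
                return 0;
        return s->sum / s->nr_samples;
}

int main(void)
{
        struct stat_bucket s = { 0 };

        stat_add(&s, 100);
        stat_add(&s, 300);
        /* Without the flush above, nr_samples would still read 0 here. */
        printf("mean=%llu\n", (unsigned long long)stat_mean(&s));
        return 0;
}
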
419 files changed:
Documentation/devicetree/bindings/display/exynos/exynos_dsim.txt
Documentation/devicetree/bindings/display/exynos/samsung-fimd.txt
Documentation/devicetree/bindings/mmc/rockchip-dw-mshc.txt
MAINTAINERS
Makefile
arch/arm/boot/dts/am335x-pcm-953.dtsi
arch/arm/boot/dts/am57xx-idk-common.dtsi
arch/arm/boot/dts/bcm5301x.dtsi
arch/arm/boot/dts/bcm953012k.dts
arch/arm/boot/dts/bcm958522er.dts
arch/arm/boot/dts/bcm958525er.dts
arch/arm/boot/dts/bcm958525xmc.dts
arch/arm/boot/dts/bcm958622hr.dts
arch/arm/boot/dts/bcm958623hr.dts
arch/arm/boot/dts/bcm958625hr.dts
arch/arm/boot/dts/bcm988312hr.dts
arch/arm/boot/dts/imx6sx-udoo-neo.dtsi
arch/arm/boot/dts/sama5d2.dtsi
arch/arm/boot/dts/ste-dbx5x0.dtsi
arch/arm/boot/dts/ste-href.dtsi
arch/arm/boot/dts/ste-snowball.dts
arch/arm/boot/dts/sun7i-a20-lamobo-r1.dts
arch/arm/boot/dts/sun8i-a23-a33.dtsi
arch/arm/boot/dts/sun8i-reference-design-tablet.dtsi
arch/arm/configs/omap2plus_defconfig
arch/arm/mach-at91/pm.c
arch/arm/mach-omap2/Makefile
arch/arm/mach-omap2/gpmc-nand.c [deleted file]
arch/arm/mach-omap2/gpmc-onenand.c
arch/arm/mach-omap2/omap-headsmp.S
arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
arch/arm/tools/syscall.tbl
arch/arm64/Kconfig
arch/arm64/boot/dts/broadcom/ns2.dtsi
arch/arm64/include/asm/cpufeature.h
arch/arm64/include/asm/unistd.h
arch/arm64/include/asm/unistd32.h
arch/arm64/kernel/cpuidle.c
arch/arm64/kernel/kaslr.c
arch/arm64/kernel/probes/kprobes.c
arch/arm64/mm/kasan_init.c
arch/openrisc/include/asm/cmpxchg.h
arch/openrisc/include/asm/uaccess.h
arch/openrisc/kernel/or32_ksyms.c
arch/openrisc/kernel/process.c
arch/parisc/include/asm/cacheflush.h
arch/parisc/include/asm/uaccess.h
arch/parisc/include/uapi/asm/unistd.h
arch/parisc/kernel/cache.c
arch/parisc/kernel/module.c
arch/parisc/kernel/perf.c
arch/parisc/kernel/process.c
arch/parisc/kernel/syscall_table.S
arch/powerpc/include/asm/systbl.h
arch/powerpc/include/asm/unistd.h
arch/powerpc/include/uapi/asm/unistd.h
arch/powerpc/platforms/pseries/lpar.c
arch/x86/events/core.c
arch/x86/include/asm/pgtable-3level.h
arch/x86/include/asm/pgtable.h
arch/x86/kernel/acpi/boot.c
arch/x86/kernel/apic/apic.c
arch/x86/kernel/cpu/intel_rdt_rdtgroup.c
arch/x86/kernel/head64.c
arch/x86/kernel/nmi.c
arch/x86/kernel/tsc.c
arch/x86/kernel/unwind_frame.c
arch/x86/mm/kasan_init_64.c
arch/x86/mm/mpx.c
arch/x86/platform/intel-mid/device_libs/Makefile
arch/x86/platform/intel-mid/device_libs/platform_mrfld_power_btn.c [new file with mode: 0644]
arch/x86/platform/intel-mid/device_libs/platform_mrfld_wdt.c
arch/x86/platform/intel-mid/mfld.c
drivers/acpi/acpi_processor.c
drivers/acpi/bus.c
drivers/acpi/processor_core.c
drivers/base/core.c
drivers/bluetooth/Kconfig
drivers/char/hw_random/amd-rng.c
drivers/char/hw_random/geode-rng.c
drivers/clocksource/tcb_clksrc.c
drivers/cpufreq/cpufreq.c
drivers/cpufreq/intel_pstate.c
drivers/cpuidle/sysfs.c
drivers/crypto/ccp/ccp-dev.c
drivers/crypto/ccp/ccp-dmaengine.c
drivers/dax/dax.c
drivers/gpio/gpio-altera-a10sr.c
drivers/gpio/gpio-altera.c
drivers/gpio/gpio-mcp23s08.c
drivers/gpio/gpio-mockup.c
drivers/gpio/gpio-xgene.c
drivers/gpu/drm/amd/acp/Makefile
drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
drivers/gpu/drm/amd/amdgpu/si_dpm.c
drivers/gpu/drm/amd/amdgpu/vi.c
drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c
drivers/gpu/drm/arm/malidp_crtc.c
drivers/gpu/drm/arm/malidp_hw.c
drivers/gpu/drm/arm/malidp_planes.c
drivers/gpu/drm/arm/malidp_regs.h
drivers/gpu/drm/drm_fb_helper.c
drivers/gpu/drm/exynos/exynos5433_drm_decon.c
drivers/gpu/drm/exynos/exynos7_drm_decon.c
drivers/gpu/drm/exynos/exynos_drm_crtc.c
drivers/gpu/drm/exynos/exynos_drm_crtc.h
drivers/gpu/drm/exynos/exynos_drm_dsi.c
drivers/gpu/drm/exynos/exynos_drm_fimc.c
drivers/gpu/drm/exynos/exynos_drm_fimd.c
drivers/gpu/drm/exynos/exynos_drm_gem.c
drivers/gpu/drm/exynos/exynos_drm_gsc.c
drivers/gpu/drm/exynos/exynos_drm_ipp.c
drivers/gpu/drm/exynos/exynos_drm_rotator.c
drivers/gpu/drm/exynos/exynos_drm_vidi.c
drivers/gpu/drm/exynos/exynos_mixer.c
drivers/gpu/drm/i915/gvt/aperture_gm.c
drivers/gpu/drm/i915/gvt/cmd_parser.c
drivers/gpu/drm/i915/gvt/debug.h
drivers/gpu/drm/i915/gvt/edid.c
drivers/gpu/drm/i915/gvt/execlist.c
drivers/gpu/drm/i915/gvt/gtt.c
drivers/gpu/drm/i915/gvt/gvt.h
drivers/gpu/drm/i915/gvt/handlers.c
drivers/gpu/drm/i915/gvt/kvmgt.c
drivers/gpu/drm/i915/gvt/mmio.c
drivers/gpu/drm/i915/gvt/mmio.h
drivers/gpu/drm/i915/gvt/opregion.c
drivers/gpu/drm/i915/gvt/render.c
drivers/gpu/drm/i915/gvt/sched_policy.c
drivers/gpu/drm/i915/gvt/scheduler.c
drivers/gpu/drm/i915/i915_drv.c
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/i915_gem_context.c
drivers/gpu/drm/i915/i915_gem_context.h
drivers/gpu/drm/i915/i915_gem_evict.c
drivers/gpu/drm/i915/i915_gem_execbuffer.c
drivers/gpu/drm/i915/i915_gem_object.h
drivers/gpu/drm/i915/i915_gem_shrinker.c
drivers/gpu/drm/i915/i915_irq.c
drivers/gpu/drm/i915/i915_vma.c
drivers/gpu/drm/i915/intel_csr.c
drivers/gpu/drm/i915/intel_display.c
drivers/gpu/drm/i915/intel_engine_cs.c
drivers/gpu/drm/i915/intel_fbdev.c
drivers/gpu/drm/i915/intel_gvt.c
drivers/gpu/drm/i915/intel_hdmi.c
drivers/gpu/drm/i915/intel_hotplug.c
drivers/gpu/drm/i915/intel_lrc.c
drivers/gpu/drm/i915/intel_pm.c
drivers/gpu/drm/i915/intel_ringbuffer.h
drivers/gpu/drm/i915/intel_sprite.c
drivers/gpu/drm/i915/intel_uncore.c
drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c
drivers/gpu/drm/radeon/si_dpm.c
drivers/gpu/drm/tilcdc/tilcdc_crtc.c
drivers/hid/Kconfig
drivers/hid/hid-chicony.c
drivers/hid/hid-core.c
drivers/hid/hid-corsair.c
drivers/hid/hid-ids.h
drivers/hid/hid-sony.c
drivers/hid/usbhid/hid-quirks.c
drivers/hid/wacom_sys.c
drivers/hid/wacom_wac.c
drivers/input/joystick/iforce/iforce-usb.c
drivers/input/misc/cm109.c
drivers/input/misc/ims-pcu.c
drivers/input/misc/yealink.c
drivers/input/mouse/alps.c
drivers/input/mouse/alps.h
drivers/input/mouse/elan_i2c_core.c
drivers/input/rmi4/rmi_f30.c
drivers/input/serio/i8042-x86ia64io.h
drivers/input/tablet/hanwang.c
drivers/input/tablet/kbtab.c
drivers/input/touchscreen/sur40.c
drivers/iommu/amd_iommu.c
drivers/iommu/arm-smmu-v3.c
drivers/iommu/arm-smmu.c
drivers/iommu/exynos-iommu.c
drivers/iommu/intel-iommu.c
drivers/iommu/io-pgtable-arm-v7s.c
drivers/iommu/io-pgtable-arm.c
drivers/iommu/iommu.c
drivers/md/md-cluster.c
drivers/md/md.c
drivers/md/md.h
drivers/md/raid1.c
drivers/md/raid10.c
drivers/md/raid5.c
drivers/media/platform/coda/imx-vdoa.c
drivers/media/platform/exynos-gsc/gsc-core.c
drivers/media/platform/sti/bdisp/bdisp-v4l2.c
drivers/media/usb/dvb-usb/dvb-usb-firmware.c
drivers/memory/omap-gpmc.c
drivers/mmc/core/block.c
drivers/mmc/core/mmc.c
drivers/mmc/host/mtk-sd.c
drivers/mmc/host/sdhci-of-arasan.c
drivers/mmc/host/sdhci-of-at91.c
drivers/mmc/host/sdhci-pci-core.c
drivers/mmc/host/sdhci.c
drivers/mmc/host/ushc.c
drivers/net/ethernet/amd/xgbe/xgbe-common.h
drivers/net/ethernet/amd/xgbe/xgbe-dev.c
drivers/net/ethernet/amd/xgbe/xgbe-drv.c
drivers/net/ethernet/aquantia/atlantic/aq_main.c
drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0_internal.h
drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h
drivers/net/ethernet/broadcom/genet/bcmgenet.c
drivers/net/ethernet/broadcom/genet/bcmmii.c
drivers/net/ethernet/brocade/bna/bnad_debugfs.c
drivers/net/ethernet/ibm/ibmvnic.c
drivers/net/ethernet/mellanox/mlx4/cmd.c
drivers/net/ethernet/mellanox/mlx4/main.c
drivers/net/ethernet/mellanox/mlx5/core/cmd.c
drivers/net/ethernet/mellanox/mlx5/core/en.h
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
drivers/net/ethernet/mellanox/mlx5/core/main.c
drivers/net/ethernet/sfc/efx.c
drivers/net/ethernet/ti/Kconfig
drivers/net/ethernet/ti/Makefile
drivers/net/fjes/fjes_main.c
drivers/net/hyperv/netvsc.c
drivers/net/tun.c
drivers/net/usb/qmi_wwan.c
drivers/net/usb/r8152.c
drivers/net/vrf.c
drivers/net/wireless/ath/ath10k/hw.c
drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
drivers/net/wireless/intel/iwlwifi/mvm/sta.c
drivers/net/wireless/intel/iwlwifi/mvm/sta.h
drivers/net/wireless/intel/iwlwifi/mvm/tx.c
drivers/net/wireless/marvell/mwifiex/main.c
drivers/net/wireless/marvell/mwifiex/pcie.c
drivers/remoteproc/Kconfig
drivers/scsi/Kconfig
drivers/scsi/hpsa.c
drivers/scsi/hpsa.h
drivers/scsi/hpsa_cmd.h
drivers/scsi/lpfc/lpfc_attr.c
drivers/scsi/lpfc/lpfc_init.c
drivers/scsi/lpfc/lpfc_nvme.c
drivers/scsi/lpfc/lpfc_nvmet.c
drivers/scsi/megaraid/megaraid_sas.h
drivers/scsi/megaraid/megaraid_sas_base.c
drivers/scsi/megaraid/megaraid_sas_fusion.c
drivers/scsi/qla2xxx/Kconfig
drivers/scsi/qla2xxx/qla_attr.c
drivers/scsi/qla2xxx/qla_dbg.h
drivers/scsi/qla2xxx/qla_def.h
drivers/scsi/qla2xxx/qla_dfs.c
drivers/scsi/qla2xxx/qla_gbl.h
drivers/scsi/qla2xxx/qla_init.c
drivers/scsi/qla2xxx/qla_iocb.c
drivers/scsi/qla2xxx/qla_isr.c
drivers/scsi/qla2xxx/qla_mbx.c
drivers/scsi/qla2xxx/qla_mid.c
drivers/scsi/qla2xxx/qla_os.c
drivers/scsi/qla2xxx/qla_target.c
drivers/scsi/qla2xxx/qla_target.h
drivers/scsi/qla2xxx/qla_version.h
drivers/scsi/qla2xxx/tcm_qla2xxx.c
drivers/scsi/ufs/ufshcd.c
drivers/target/target_core_alua.c
drivers/target/target_core_configfs.c
drivers/target/target_core_pscsi.c
drivers/target/target_core_sbc.c
drivers/target/target_core_tpg.c
drivers/target/target_core_transport.c
drivers/target/target_core_user.c
drivers/tty/serial/st-asc.c
drivers/vfio/vfio_iommu_type1.c
drivers/vhost/vsock.c
drivers/xen/gntdev.c
drivers/xen/xen-acpi-processor.c
fs/afs/callback.c
fs/afs/cmservice.c
fs/afs/file.c
fs/afs/fsclient.c
fs/afs/inode.c
fs/afs/internal.h
fs/afs/misc.c
fs/afs/mntpt.c
fs/afs/rxrpc.c
fs/afs/security.c
fs/afs/server.c
fs/afs/vlocation.c
fs/afs/write.c
fs/btrfs/extent_io.c
fs/btrfs/inode.c
fs/f2fs/debug.c
fs/f2fs/dir.c
fs/f2fs/f2fs.h
fs/f2fs/node.c
fs/f2fs/segment.c
fs/nfs/callback.c
fs/nfs/client.c
fs/nfs/filelayout/filelayoutdev.c
fs/nfs/flexfilelayout/flexfilelayout.h
fs/nfs/flexfilelayout/flexfilelayoutdev.c
fs/nfs/internal.h
fs/nfs/nfs4client.c
fs/nfs/nfs4proc.c
fs/nfs/nfs4xdr.c
fs/nfs/pnfs.h
fs/nfs/pnfs_nfs.c
fs/nfs/write.c
fs/xfs/libxfs/xfs_dir2_priv.h
fs/xfs/libxfs/xfs_dir2_sf.c
fs/xfs/libxfs/xfs_inode_fork.c
fs/xfs/libxfs/xfs_inode_fork.h
fs/xfs/xfs_dir2_readdir.c
fs/xfs/xfs_inode.c
include/linux/acpi.h
include/linux/ccp.h
include/linux/device.h
include/linux/errqueue.h
include/linux/gpio/consumer.h
include/linux/iommu.h
include/linux/kasan.h
include/linux/mlx4/device.h
include/linux/omap-gpmc.h
include/linux/reset.h
include/linux/virtio_vsock.h
include/net/af_vsock.h
include/net/netfilter/nf_conntrack.h
include/net/netfilter/nf_tables.h
include/net/netfilter/nf_tables_ipv6.h
include/net/sctp/structs.h
include/target/target_core_backend.h
include/target/target_core_base.h
include/uapi/asm-generic/unistd.h
include/uapi/drm/omap_drm.h
include/uapi/linux/btrfs.h
include/video/exynos5433_decon.h
kernel/bpf/hashtab.c
kernel/cpu.c
kernel/events/core.c
kernel/futex.c
kernel/locking/rwsem-spinlock.c
kernel/memremap.c
kernel/sched/cpufreq_schedutil.c
kernel/sched/deadline.c
kernel/sched/loadavg.c
mm/memory_hotplug.c
mm/swap_slots.c
mm/vmalloc.c
mm/z3fold.c
net/batman-adv/bat_iv_ogm.c
net/batman-adv/bat_v.c
net/batman-adv/fragmentation.c
net/batman-adv/gateway_common.c
net/batman-adv/soft-interface.c
net/batman-adv/types.h
net/bridge/br_fdb.c
net/bridge/br_netfilter_hooks.c
net/bridge/br_private.h
net/ceph/messenger.c
net/core/netclassid_cgroup.c
net/core/skbuff.c
net/core/sock.c
net/ipv4/fib_frontend.c
net/ipv4/ip_fragment.c
net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
net/ipv4/netfilter/nf_nat_l3proto_ipv4.c
net/ipv4/netfilter/nft_masq_ipv4.c
net/ipv4/netfilter/nft_redir_ipv4.c
net/ipv4/tcp.c
net/ipv4/tcp_input.c
net/ipv4/tcp_minisocks.c
net/ipv6/netfilter/nft_masq_ipv6.c
net/ipv6/netfilter/nft_redir_ipv6.c
net/ipv6/route.c
net/ipv6/udp.c
net/mpls/af_mpls.c
net/netfilter/nf_conntrack_core.c
net/netfilter/nf_nat_proto_sctp.c
net/netfilter/nf_tables_api.c
net/netfilter/nft_ct.c
net/netfilter/nft_meta.c
net/netfilter/nft_nat.c
net/netfilter/nft_set_bitmap.c
net/netlink/af_netlink.c
net/netlink/genetlink.c
net/openvswitch/flow_netlink.c
net/rxrpc/conn_event.c
net/sched/sch_dsmark.c
net/sctp/associola.c
net/sctp/output.c
net/sctp/outqueue.c
net/socket.c
net/sunrpc/xprtrdma/verbs.c
net/tipc/subscr.c
net/unix/garbage.c
net/vmw_vsock/af_vsock.c
net/vmw_vsock/virtio_transport.c
net/vmw_vsock/virtio_transport_common.c
net/wireless/nl80211.c
sound/core/seq/seq_clientmgr.c
sound/core/seq/seq_fifo.c
sound/core/seq/seq_memory.c
sound/core/seq/seq_memory.h
sound/pci/ctxfi/cthw20k1.c
sound/pci/hda/patch_conexant.c
sound/pci/hda/patch_realtek.c
sound/x86/Kconfig
tools/perf/util/symbol.c
tools/testing/selftests/bpf/Makefile
tools/testing/selftests/bpf/test_maps.c

diff --git a/Documentation/devicetree/bindings/display/exynos/exynos_dsim.txt b/Documentation/devicetree/bindings/display/exynos/exynos_dsim.txt
index a782659..ca5204b 100644 (file)
@@ -4,7 +4,6 @@ Required properties:
   - compatible: value should be one of the following
                "samsung,exynos3250-mipi-dsi" /* for Exynos3250/3472 SoCs */
                "samsung,exynos4210-mipi-dsi" /* for Exynos4 SoCs */
-               "samsung,exynos4415-mipi-dsi" /* for Exynos4415 SoC */
                "samsung,exynos5410-mipi-dsi" /* for Exynos5410/5420/5440 SoCs */
                "samsung,exynos5422-mipi-dsi" /* for Exynos5422/5800 SoCs */
                "samsung,exynos5433-mipi-dsi" /* for Exynos5433 SoCs */
diff --git a/Documentation/devicetree/bindings/display/exynos/samsung-fimd.txt b/Documentation/devicetree/bindings/display/exynos/samsung-fimd.txt
index 18645e0..5837402 100644 (file)
@@ -11,7 +11,6 @@ Required properties:
                "samsung,s5pv210-fimd"; /* for S5PV210 SoC */
                "samsung,exynos3250-fimd"; /* for Exynos3250/3472 SoCs */
                "samsung,exynos4210-fimd"; /* for Exynos4 SoCs */
-               "samsung,exynos4415-fimd"; /* for Exynos4415 SoC */
                "samsung,exynos5250-fimd"; /* for Exynos5250 SoCs */
                "samsung,exynos5420-fimd"; /* for Exynos5420/5422/5800 SoCs */
 
diff --git a/Documentation/devicetree/bindings/mmc/rockchip-dw-mshc.txt b/Documentation/devicetree/bindings/mmc/rockchip-dw-mshc.txt
index ea9c1c9..520d61d 100644 (file)
@@ -13,7 +13,7 @@ Required Properties:
        - "rockchip,rk2928-dw-mshc": for Rockchip RK2928 and following,
                                                        before RK3288
        - "rockchip,rk3288-dw-mshc": for Rockchip RK3288
-       - "rockchip,rk1108-dw-mshc", "rockchip,rk3288-dw-mshc": for Rockchip RK1108
+       - "rockchip,rv1108-dw-mshc", "rockchip,rk3288-dw-mshc": for Rockchip RV1108
        - "rockchip,rk3036-dw-mshc", "rockchip,rk3288-dw-mshc": for Rockchip RK3036
        - "rockchip,rk3368-dw-mshc", "rockchip,rk3288-dw-mshc": for Rockchip RK3368
        - "rockchip,rk3399-dw-mshc", "rockchip,rk3288-dw-mshc": for Rockchip RK3399
diff --git a/MAINTAINERS b/MAINTAINERS
index c776906..c45c02b 100644 (file)
@@ -3216,7 +3216,6 @@ F:        drivers/platform/chrome/
 
 CISCO VIC ETHERNET NIC DRIVER
 M:     Christian Benvenuti <benve@cisco.com>
-M:     Sujith Sankar <ssujith@cisco.com>
 M:     Govindarajulu Varadarajan <_govind@gmx.com>
 M:     Neel Patel <neepatel@cisco.com>
 S:     Supported
@@ -7774,13 +7773,6 @@ F:       include/net/mac80211.h
 F:     net/mac80211/
 F:     drivers/net/wireless/mac80211_hwsim.[ch]
 
-MACVLAN DRIVER
-M:     Patrick McHardy <kaber@trash.net>
-L:     netdev@vger.kernel.org
-S:     Maintained
-F:     drivers/net/macvlan.c
-F:     include/linux/if_macvlan.h
-
 MAILBOX API
 M:     Jassi Brar <jassisinghbrar@gmail.com>
 L:     linux-kernel@vger.kernel.org
@@ -7853,6 +7845,8 @@ F:        drivers/net/ethernet/marvell/mvneta.*
 MARVELL MWIFIEX WIRELESS DRIVER
 M:     Amitkumar Karwar <akarwar@marvell.com>
 M:     Nishant Sarmukadam <nishants@marvell.com>
+M:     Ganapathi Bhat <gbhat@marvell.com>
+M:     Xinming Hu <huxm@marvell.com>
 L:     linux-wireless@vger.kernel.org
 S:     Maintained
 F:     drivers/net/wireless/marvell/mwifiex/
@@ -13383,14 +13377,6 @@ W:     https://linuxtv.org
 S:     Maintained
 F:     drivers/media/platform/vivid/*
 
-VLAN (802.1Q)
-M:     Patrick McHardy <kaber@trash.net>
-L:     netdev@vger.kernel.org
-S:     Maintained
-F:     drivers/net/macvlan.c
-F:     include/linux/if_*vlan.h
-F:     net/8021q/
-
 VLYNQ BUS
 M:     Florian Fainelli <f.fainelli@gmail.com>
 L:     openwrt-devel@lists.openwrt.org (subscribers-only)
diff --git a/Makefile b/Makefile
index b841fb3..b2faa93 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 4
 PATCHLEVEL = 11
 SUBLEVEL = 0
-EXTRAVERSION = -rc2
+EXTRAVERSION = -rc3
 NAME = Fearless Coyote
 
 # *DOCUMENTATION*
diff --git a/arch/arm/boot/dts/am335x-pcm-953.dtsi b/arch/arm/boot/dts/am335x-pcm-953.dtsi
index 02981ea..1ec8e0d 100644 (file)
                        label = "home";
                        linux,code = <KEY_HOME>;
                        gpios = <&gpio3 7 GPIO_ACTIVE_HIGH>;
-                       gpio-key,wakeup;
+                       wakeup-source;
                };
 
                button@1 {
                        label = "menu";
                        linux,code = <KEY_MENU>;
                        gpios = <&gpio3 8 GPIO_ACTIVE_HIGH>;
-                       gpio-key,wakeup;
+                       wakeup-source;
                };
 
        };
diff --git a/arch/arm/boot/dts/am57xx-idk-common.dtsi b/arch/arm/boot/dts/am57xx-idk-common.dtsi
index 0d341c5..e5ac1d8 100644 (file)
                        /* ID & VBUS GPIOs provided in board dts */
                };
        };
+
+       tpic2810: tpic2810@60 {
+               compatible = "ti,tpic2810";
+               reg = <0x60>;
+               gpio-controller;
+               #gpio-cells = <2>;
+       };
 };
 
 &mcspi3 {
                spi-max-frequency = <1000000>;
                spi-cpol;
        };
-
-       tpic2810: tpic2810@60 {
-               compatible = "ti,tpic2810";
-               reg = <0x60>;
-               gpio-controller;
-               #gpio-cells = <2>;
-       };
 };
 
 &uart3 {
diff --git a/arch/arm/boot/dts/bcm5301x.dtsi b/arch/arm/boot/dts/bcm5301x.dtsi
index 4fbb089..00de62d 100644 (file)
                timer@20200 {
                        compatible = "arm,cortex-a9-global-timer";
                        reg = <0x20200 0x100>;
-                       interrupts = <GIC_PPI 11 IRQ_TYPE_LEVEL_HIGH>;
+                       interrupts = <GIC_PPI 11 IRQ_TYPE_EDGE_RISING>;
                        clocks = <&periph_clk>;
                };
 
                local-timer@20600 {
                        compatible = "arm,cortex-a9-twd-timer";
                        reg = <0x20600 0x100>;
-                       interrupts = <GIC_PPI 13 IRQ_TYPE_LEVEL_HIGH>;
+                       interrupts = <GIC_PPI 13 IRQ_TYPE_EDGE_RISING>;
                        clocks = <&periph_clk>;
                };
 
diff --git a/arch/arm/boot/dts/bcm953012k.dts b/arch/arm/boot/dts/bcm953012k.dts
index bfd9230..ae31a58 100644 (file)
        };
 
        memory {
-               reg = <0x00000000 0x10000000>;
+               reg = <0x80000000 0x10000000>;
        };
 };
 
 &uart0 {
-       clock-frequency = <62499840>;
+       status = "okay";
 };
 
 &uart1 {
-       clock-frequency = <62499840>;
        status = "okay";
 };
diff --git a/arch/arm/boot/dts/bcm958522er.dts b/arch/arm/boot/dts/bcm958522er.dts
index 3f04a40..df05e7f 100644 (file)
@@ -55,6 +55,7 @@
        gpio-restart {
                compatible = "gpio-restart";
                gpios = <&gpioa 15 GPIO_ACTIVE_LOW>;
+               open-source;
                priority = <200>;
        };
 };
diff --git a/arch/arm/boot/dts/bcm958525er.dts b/arch/arm/boot/dts/bcm958525er.dts
index 9fd5422..4a3ab19 100644 (file)
@@ -55,6 +55,7 @@
        gpio-restart {
                compatible = "gpio-restart";
                gpios = <&gpioa 15 GPIO_ACTIVE_LOW>;
+               open-source;
                priority = <200>;
        };
 };
diff --git a/arch/arm/boot/dts/bcm958525xmc.dts b/arch/arm/boot/dts/bcm958525xmc.dts
index 41e7fd3..81f7843 100644 (file)
@@ -55,6 +55,7 @@
        gpio-restart {
                compatible = "gpio-restart";
                gpios = <&gpioa 31 GPIO_ACTIVE_LOW>;
+               open-source;
                priority = <200>;
        };
 };
diff --git a/arch/arm/boot/dts/bcm958622hr.dts b/arch/arm/boot/dts/bcm958622hr.dts
index 477c486..c88b8fe 100644 (file)
@@ -55,6 +55,7 @@
        gpio-restart {
                compatible = "gpio-restart";
                gpios = <&gpioa 15 GPIO_ACTIVE_LOW>;
+               open-source;
                priority = <200>;
        };
 };
diff --git a/arch/arm/boot/dts/bcm958623hr.dts b/arch/arm/boot/dts/bcm958623hr.dts
index c0a499d..d503fa0 100644 (file)
@@ -55,6 +55,7 @@
        gpio-restart {
                compatible = "gpio-restart";
                gpios = <&gpioa 15 GPIO_ACTIVE_LOW>;
+               open-source;
                priority = <200>;
        };
 };
diff --git a/arch/arm/boot/dts/bcm958625hr.dts b/arch/arm/boot/dts/bcm958625hr.dts
index f7eb585..cc0363b 100644 (file)
@@ -55,6 +55,7 @@
        gpio-restart {
                compatible = "gpio-restart";
                gpios = <&gpioa 15 GPIO_ACTIVE_LOW>;
+               open-source;
                priority = <200>;
        };
 };
diff --git a/arch/arm/boot/dts/bcm988312hr.dts b/arch/arm/boot/dts/bcm988312hr.dts
index 1666632..74e15a3 100644 (file)
@@ -55,6 +55,7 @@
        gpio-restart {
                compatible = "gpio-restart";
                gpios = <&gpioa 15 GPIO_ACTIVE_LOW>;
+               open-source;
                priority = <200>;
        };
 };
diff --git a/arch/arm/boot/dts/imx6sx-udoo-neo.dtsi b/arch/arm/boot/dts/imx6sx-udoo-neo.dtsi
index 49f466f..dcfc975 100644 (file)
        };
 };
 
-&cpu0 {
-       arm-supply = <&sw1a_reg>;
-       soc-supply = <&sw1c_reg>;
-};
-
 &fec1 {
        pinctrl-names = "default";
        pinctrl-0 = <&pinctrl_enet1>;
diff --git a/arch/arm/boot/dts/sama5d2.dtsi b/arch/arm/boot/dts/sama5d2.dtsi
index 22332be..528b4e9 100644 (file)
                };
 
                usb1: ohci@00400000 {
-                       compatible = "atmel,sama5d2-ohci", "usb-ohci";
+                       compatible = "atmel,at91rm9200-ohci", "usb-ohci";
                        reg = <0x00400000 0x100000>;
                        interrupts = <41 IRQ_TYPE_LEVEL_HIGH 2>;
                        clocks = <&uhphs_clk>, <&uhphs_clk>, <&uhpck>;
diff --git a/arch/arm/boot/dts/ste-dbx5x0.dtsi b/arch/arm/boot/dts/ste-dbx5x0.dtsi
index 82d8c47..162e1eb 100644 (file)
@@ -14,6 +14,7 @@
 #include <dt-bindings/mfd/dbx500-prcmu.h>
 #include <dt-bindings/arm/ux500_pm_domains.h>
 #include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/clock/ste-ab8500.h>
 #include "skeleton.dtsi"
 
 / {
                                interrupt-controller;
                                #interrupt-cells = <2>;
 
+                               ab8500_clock: clock-controller {
+                                       compatible = "stericsson,ab8500-clk";
+                                       #clock-cells = <1>;
+                               };
+
                                ab8500_gpio: ab8500-gpio {
                                        compatible = "stericsson,ab8500-gpio";
                                        gpio-controller;
 
                                ab8500-pwm {
                                        compatible = "stericsson,ab8500-pwm";
+                                       clocks = <&ab8500_clock AB8500_SYSCLK_INT>;
+                                       clock-names = "intclk";
                                };
 
                                ab8500-debugfs {
                                        V-AMIC2-supply = <&ab8500_ldo_anamic2_reg>;
                                        V-DMIC-supply = <&ab8500_ldo_dmic_reg>;
 
+                                       clocks = <&ab8500_clock AB8500_SYSCLK_AUDIO>;
+                                       clock-names = "audioclk";
+
                                        stericsson,earpeice-cmv = <950>; /* Units in mV. */
                                };
 
                        status = "disabled";
                };
 
+               sound {
+                       compatible = "stericsson,snd-soc-mop500";
+                       stericsson,cpu-dai = <&msp1 &msp3>;
+                       stericsson,audio-codec = <&codec>;
+                       clocks = <&prcmu_clk PRCMU_SYSCLK>, <&ab8500_clock AB8500_SYSCLK_ULP>, <&ab8500_clock AB8500_SYSCLK_INT>;
+                       clock-names = "sysclk", "ulpclk", "intclk";
+               };
+
                msp0: msp@80123000 {
                        compatible = "stericsson,ux500-msp-i2s";
                        reg = <0x80123000 0x1000>;
diff --git a/arch/arm/boot/dts/ste-href.dtsi b/arch/arm/boot/dts/ste-href.dtsi
index f37f9e1..9e359e4 100644 (file)
                        status = "okay";
                };
 
-               sound {
-                       compatible = "stericsson,snd-soc-mop500";
-
-                       stericsson,cpu-dai = <&msp1 &msp3>;
-                       stericsson,audio-codec = <&codec>;
-                       clocks = <&prcmu_clk PRCMU_SYSCLK>;
-                       clock-names = "sysclk";
-               };
-
                msp0: msp@80123000 {
                        pinctrl-names = "default";
                        pinctrl-0 = <&msp0_default_mode>;
diff --git a/arch/arm/boot/dts/ste-snowball.dts b/arch/arm/boot/dts/ste-snowball.dts
index dd5514d..ade1d0d 100644 (file)
                                     "", "", "", "", "", "", "", "";
                };
 
-               sound {
-                       compatible = "stericsson,snd-soc-mop500";
-
-                       stericsson,cpu-dai = <&msp1 &msp3>;
-                       stericsson,audio-codec = <&codec>;
-                       clocks = <&prcmu_clk PRCMU_SYSCLK>;
-                       clock-names = "sysclk";
-               };
-
                msp0: msp@80123000 {
                        pinctrl-names = "default";
                        pinctrl-0 = <&msp0_default_mode>;
diff --git a/arch/arm/boot/dts/sun7i-a20-lamobo-r1.dts b/arch/arm/boot/dts/sun7i-a20-lamobo-r1.dts
index 72ec0d5..bbf1c8c 100644 (file)
                                        reg = <8>;
                                        label = "cpu";
                                        ethernet = <&gmac>;
-                                       phy-mode = "rgmii";
+                                       phy-mode = "rgmii-txid";
                                        fixed-link {
                                                speed = <1000>;
                                                full-duplex;
diff --git a/arch/arm/boot/dts/sun8i-a23-a33.dtsi b/arch/arm/boot/dts/sun8i-a23-a33.dtsi
index a952cc0..8a3ed21 100644 (file)
                        resets = <&ccu RST_BUS_GPU>;
 
                        assigned-clocks = <&ccu CLK_GPU>;
-                       assigned-clock-rates = <408000000>;
+                       assigned-clock-rates = <384000000>;
                };
 
                gic: interrupt-controller@01c81000 {
diff --git a/arch/arm/boot/dts/sun8i-reference-design-tablet.dtsi b/arch/arm/boot/dts/sun8i-reference-design-tablet.dtsi
index 7097c18..d6bd158 100644 (file)
@@ -50,8 +50,6 @@
 
        backlight: backlight {
                compatible = "pwm-backlight";
-               pinctrl-names = "default";
-               pinctrl-0 = <&bl_en_pin>;
                pwms = <&pwm 0 50000 PWM_POLARITY_INVERTED>;
                brightness-levels = <0 10 20 30 40 50 60 70 80 90 100>;
                default-brightness-level = <8>;
 };
 
 &pio {
-       bl_en_pin: bl_en_pin@0 {
-               pins = "PH6";
-               function = "gpio_in";
-       };
-
        mmc0_cd_pin: mmc0_cd_pin@0 {
                pins = "PB4";
                function = "gpio_in";
diff --git a/arch/arm/configs/omap2plus_defconfig b/arch/arm/configs/omap2plus_defconfig
index f2462a6..decd388 100644 (file)
@@ -188,6 +188,7 @@ CONFIG_WL12XX=m
 CONFIG_WL18XX=m
 CONFIG_WLCORE_SPI=m
 CONFIG_WLCORE_SDIO=m
+CONFIG_INPUT_MOUSEDEV=m
 CONFIG_INPUT_JOYDEV=m
 CONFIG_INPUT_EVDEV=m
 CONFIG_KEYBOARD_ATKBD=m
diff --git a/arch/arm/mach-at91/pm.c b/arch/arm/mach-at91/pm.c
index 3d89b79..a277981 100644 (file)
@@ -289,6 +289,22 @@ static void at91_ddr_standby(void)
                at91_ramc_write(1, AT91_DDRSDRC_LPR, saved_lpr1);
 }
 
+static void sama5d3_ddr_standby(void)
+{
+       u32 lpr0;
+       u32 saved_lpr0;
+
+       saved_lpr0 = at91_ramc_read(0, AT91_DDRSDRC_LPR);
+       lpr0 = saved_lpr0 & ~AT91_DDRSDRC_LPCB;
+       lpr0 |= AT91_DDRSDRC_LPCB_POWER_DOWN;
+
+       at91_ramc_write(0, AT91_DDRSDRC_LPR, lpr0);
+
+       cpu_do_idle();
+
+       at91_ramc_write(0, AT91_DDRSDRC_LPR, saved_lpr0);
+}
+
 /* We manage both DDRAM/SDRAM controllers, we need more than one value to
  * remember.
  */
@@ -323,7 +339,7 @@ static const struct of_device_id const ramc_ids[] __initconst = {
        { .compatible = "atmel,at91rm9200-sdramc", .data = at91rm9200_standby },
        { .compatible = "atmel,at91sam9260-sdramc", .data = at91sam9_sdram_standby },
        { .compatible = "atmel,at91sam9g45-ddramc", .data = at91_ddr_standby },
-       { .compatible = "atmel,sama5d3-ddramc", .data = at91_ddr_standby },
+       { .compatible = "atmel,sama5d3-ddramc", .data = sama5d3_ddr_standby },
        { /*sentinel*/ }
 };
 
diff --git a/arch/arm/mach-omap2/Makefile b/arch/arm/mach-omap2/Makefile
index 093458b..c89757a 100644 (file)
@@ -241,6 +241,3 @@ obj-$(CONFIG_MACH_OMAP2_TUSB6010)   += usb-tusb6010.o
 
 onenand-$(CONFIG_MTD_ONENAND_OMAP2)    := gpmc-onenand.o
 obj-y                                  += $(onenand-m) $(onenand-y)
-
-nand-$(CONFIG_MTD_NAND_OMAP2)          := gpmc-nand.o
-obj-y                                  += $(nand-m) $(nand-y)
diff --git a/arch/arm/mach-omap2/gpmc-nand.c b/arch/arm/mach-omap2/gpmc-nand.c
deleted file mode 100644 (file)
index f6ac027..0000000
+++ /dev/null
@@ -1,154 +0,0 @@
-/*
- * gpmc-nand.c
- *
- * Copyright (C) 2009 Texas Instruments
- * Vimal Singh <vimalsingh@ti.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/kernel.h>
-#include <linux/platform_device.h>
-#include <linux/io.h>
-#include <linux/omap-gpmc.h>
-#include <linux/mtd/nand.h>
-#include <linux/platform_data/mtd-nand-omap2.h>
-
-#include <asm/mach/flash.h>
-
-#include "soc.h"
-
-/* minimum size for IO mapping */
-#define        NAND_IO_SIZE    4
-
-static bool gpmc_hwecc_bch_capable(enum omap_ecc ecc_opt)
-{
-       /* platforms which support all ECC schemes */
-       if (soc_is_am33xx() || soc_is_am43xx() || cpu_is_omap44xx() ||
-                soc_is_omap54xx() || soc_is_dra7xx())
-               return 1;
-
-       if (ecc_opt == OMAP_ECC_BCH4_CODE_HW_DETECTION_SW ||
-                ecc_opt == OMAP_ECC_BCH8_CODE_HW_DETECTION_SW) {
-               if (cpu_is_omap24xx())
-                       return 0;
-               else if (cpu_is_omap3630() && (GET_OMAP_REVISION() == 0))
-                       return 0;
-               else
-                       return 1;
-       }
-
-       /* OMAP3xxx do not have ELM engine, so cannot support ECC schemes
-        * which require H/W based ECC error detection */
-       if ((cpu_is_omap34xx() || cpu_is_omap3630()) &&
-           ((ecc_opt == OMAP_ECC_BCH4_CODE_HW) ||
-                (ecc_opt == OMAP_ECC_BCH8_CODE_HW)))
-               return 0;
-
-       /* legacy platforms support only HAM1 (1-bit Hamming) ECC scheme */
-       if (ecc_opt == OMAP_ECC_HAM1_CODE_HW ||
-           ecc_opt == OMAP_ECC_HAM1_CODE_SW)
-               return 1;
-       else
-               return 0;
-}
-
-/* This function will go away once the device-tree convertion is complete */
-static void gpmc_set_legacy(struct omap_nand_platform_data *gpmc_nand_data,
-                           struct gpmc_settings *s)
-{
-       /* Enable RD PIN Monitoring Reg */
-       if (gpmc_nand_data->dev_ready) {
-               s->wait_on_read = true;
-               s->wait_on_write = true;
-       }
-
-       if (gpmc_nand_data->devsize == NAND_BUSWIDTH_16)
-               s->device_width = GPMC_DEVWIDTH_16BIT;
-       else
-               s->device_width = GPMC_DEVWIDTH_8BIT;
-}
-
-int gpmc_nand_init(struct omap_nand_platform_data *gpmc_nand_data,
-                  struct gpmc_timings *gpmc_t)
-{
-       int err = 0;
-       struct gpmc_settings s;
-       struct platform_device *pdev;
-       struct resource gpmc_nand_res[] = {
-               { .flags = IORESOURCE_MEM, },
-               { .flags = IORESOURCE_IRQ, },
-               { .flags = IORESOURCE_IRQ, },
-       };
-
-       BUG_ON(gpmc_nand_data->cs >= GPMC_CS_NUM);
-
-       err = gpmc_cs_request(gpmc_nand_data->cs, NAND_IO_SIZE,
-                             (unsigned long *)&gpmc_nand_res[0].start);
-       if (err < 0) {
-               pr_err("omap2-gpmc: Cannot request GPMC CS %d, error %d\n",
-                      gpmc_nand_data->cs, err);
-               return err;
-       }
-       gpmc_nand_res[0].end = gpmc_nand_res[0].start + NAND_IO_SIZE - 1;
-       gpmc_nand_res[1].start = gpmc_get_client_irq(GPMC_IRQ_FIFOEVENTENABLE);
-       gpmc_nand_res[2].start = gpmc_get_client_irq(GPMC_IRQ_COUNT_EVENT);
-
-       memset(&s, 0, sizeof(struct gpmc_settings));
-       gpmc_set_legacy(gpmc_nand_data, &s);
-
-       s.device_nand = true;
-
-       if (gpmc_t) {
-               err = gpmc_cs_set_timings(gpmc_nand_data->cs, gpmc_t, &s);
-               if (err < 0) {
-                       pr_err("omap2-gpmc: Unable to set gpmc timings: %d\n",
-                              err);
-                       return err;
-               }
-       }
-
-       err = gpmc_cs_program_settings(gpmc_nand_data->cs, &s);
-       if (err < 0)
-               goto out_free_cs;
-
-       err = gpmc_configure(GPMC_CONFIG_WP, 0);
-       if (err < 0)
-               goto out_free_cs;
-
-       if (!gpmc_hwecc_bch_capable(gpmc_nand_data->ecc_opt)) {
-               pr_err("omap2-nand: Unsupported NAND ECC scheme selected\n");
-               err = -EINVAL;
-               goto out_free_cs;
-       }
-
-
-       pdev = platform_device_alloc("omap2-nand", gpmc_nand_data->cs);
-       if (pdev) {
-               err = platform_device_add_resources(pdev, gpmc_nand_res,
-                                                   ARRAY_SIZE(gpmc_nand_res));
-               if (!err)
-                       pdev->dev.platform_data = gpmc_nand_data;
-       } else {
-               err = -ENOMEM;
-       }
-       if (err)
-               goto out_free_pdev;
-
-       err = platform_device_add(pdev);
-       if (err) {
-               dev_err(&pdev->dev, "Unable to register NAND device\n");
-               goto out_free_pdev;
-       }
-
-       return 0;
-
-out_free_pdev:
-       platform_device_put(pdev);
-out_free_cs:
-       gpmc_cs_free(gpmc_nand_data->cs);
-
-       return err;
-}
diff --git a/arch/arm/mach-omap2/gpmc-onenand.c b/arch/arm/mach-omap2/gpmc-onenand.c
index 8633c70..2944af8 100644 (file)
@@ -367,7 +367,7 @@ static int gpmc_onenand_setup(void __iomem *onenand_base, int *freq_ptr)
        return ret;
 }
 
-void gpmc_onenand_init(struct omap_onenand_platform_data *_onenand_data)
+int gpmc_onenand_init(struct omap_onenand_platform_data *_onenand_data)
 {
        int err;
        struct device *dev = &gpmc_onenand_device.dev;
@@ -393,15 +393,17 @@ void gpmc_onenand_init(struct omap_onenand_platform_data *_onenand_data)
        if (err < 0) {
                dev_err(dev, "Cannot request GPMC CS %d, error %d\n",
                        gpmc_onenand_data->cs, err);
-               return;
+               return err;
        }
 
        gpmc_onenand_resource.end = gpmc_onenand_resource.start +
                                                        ONENAND_IO_SIZE - 1;
 
-       if (platform_device_register(&gpmc_onenand_device) < 0) {
+       err = platform_device_register(&gpmc_onenand_device);
+       if (err) {
                dev_err(dev, "Unable to register OneNAND device\n");
                gpmc_cs_free(gpmc_onenand_data->cs);
-               return;
        }
+
+       return err;
 }
diff --git a/arch/arm/mach-omap2/omap-headsmp.S b/arch/arm/mach-omap2/omap-headsmp.S
index fe36ce2..4c6f14c 100644 (file)
@@ -17,6 +17,7 @@
 
 #include <linux/linkage.h>
 #include <linux/init.h>
+#include <asm/assembler.h>
 
 #include "omap44xx.h"
 
@@ -66,7 +67,7 @@ wait_2:       ldr     r2, =AUX_CORE_BOOT0_PA  @ read from AuxCoreBoot0
        cmp     r0, r4
        bne     wait_2
        ldr     r12, =API_HYP_ENTRY
-       adr     r0, hyp_boot
+       badr    r0, hyp_boot
        smc     #0
 hyp_boot:
        b       omap_secondary_startup
diff --git a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
index 56f917e..1435fee 100644 (file)
@@ -2112,11 +2112,20 @@ static struct omap_hwmod_ocp_if omap3_l4_core__i2c3 = {
 };
 
 /* L4 CORE -> SR1 interface */
+static struct omap_hwmod_addr_space omap3_sr1_addr_space[] = {
+       {
+               .pa_start       = OMAP34XX_SR1_BASE,
+               .pa_end         = OMAP34XX_SR1_BASE + SZ_1K - 1,
+               .flags          = ADDR_TYPE_RT,
+       },
+       { },
+};
 
 static struct omap_hwmod_ocp_if omap34xx_l4_core__sr1 = {
        .master         = &omap3xxx_l4_core_hwmod,
        .slave          = &omap34xx_sr1_hwmod,
        .clk            = "sr_l4_ick",
+       .addr           = omap3_sr1_addr_space,
        .user           = OCP_USER_MPU,
 };
 
@@ -2124,15 +2133,25 @@ static struct omap_hwmod_ocp_if omap36xx_l4_core__sr1 = {
        .master         = &omap3xxx_l4_core_hwmod,
        .slave          = &omap36xx_sr1_hwmod,
        .clk            = "sr_l4_ick",
+       .addr           = omap3_sr1_addr_space,
        .user           = OCP_USER_MPU,
 };
 
 /* L4 CORE -> SR1 interface */
+static struct omap_hwmod_addr_space omap3_sr2_addr_space[] = {
+       {
+               .pa_start       = OMAP34XX_SR2_BASE,
+               .pa_end         = OMAP34XX_SR2_BASE + SZ_1K - 1,
+               .flags          = ADDR_TYPE_RT,
+       },
+       { },
+};
 
 static struct omap_hwmod_ocp_if omap34xx_l4_core__sr2 = {
        .master         = &omap3xxx_l4_core_hwmod,
        .slave          = &omap34xx_sr2_hwmod,
        .clk            = "sr_l4_ick",
+       .addr           = omap3_sr2_addr_space,
        .user           = OCP_USER_MPU,
 };
 
@@ -2140,6 +2159,7 @@ static struct omap_hwmod_ocp_if omap36xx_l4_core__sr2 = {
        .master         = &omap3xxx_l4_core_hwmod,
        .slave          = &omap36xx_sr2_hwmod,
        .clk            = "sr_l4_ick",
+       .addr           = omap3_sr2_addr_space,
        .user           = OCP_USER_MPU,
 };
 
@@ -3111,16 +3131,20 @@ static struct omap_hwmod_ocp_if *omap3xxx_dss_hwmod_ocp_ifs[] __initdata = {
  * Return: 0 if device named @dev_name is not likely to be accessible,
  * or 1 if it is likely to be accessible.
  */
-static int __init omap3xxx_hwmod_is_hs_ip_block_usable(struct device_node *bus,
-                                                      const char *dev_name)
+static bool __init omap3xxx_hwmod_is_hs_ip_block_usable(struct device_node *bus,
+                                                       const char *dev_name)
 {
+       struct device_node *node;
+       bool available;
+
        if (!bus)
-               return (omap_type() == OMAP2_DEVICE_TYPE_GP) ? 1 : 0;
+               return omap_type() == OMAP2_DEVICE_TYPE_GP;
 
-       if (of_device_is_available(of_find_node_by_name(bus, dev_name)))
-               return 1;
+       node = of_get_child_by_name(bus, dev_name);
+       available = of_device_is_available(node);
+       of_node_put(node);
 
-       return 0;
+       return available;
 }
 
 int __init omap3xxx_hwmod_init(void)
@@ -3189,15 +3213,20 @@ int __init omap3xxx_hwmod_init(void)
 
        if (h_sham && omap3xxx_hwmod_is_hs_ip_block_usable(bus, "sham")) {
                r = omap_hwmod_register_links(h_sham);
-               if (r < 0)
+               if (r < 0) {
+                       of_node_put(bus);
                        return r;
+               }
        }
 
        if (h_aes && omap3xxx_hwmod_is_hs_ip_block_usable(bus, "aes")) {
                r = omap_hwmod_register_links(h_aes);
-               if (r < 0)
+               if (r < 0) {
+                       of_node_put(bus);
                        return r;
+               }
        }
+       of_node_put(bus);
 
        /*
         * Register hwmod links specific to certain ES levels of a
diff --git a/arch/arm/tools/syscall.tbl b/arch/arm/tools/syscall.tbl
index 3c2cb5d..0bb0e9c 100644 (file)
 394    common  pkey_mprotect           sys_pkey_mprotect
 395    common  pkey_alloc              sys_pkey_alloc
 396    common  pkey_free               sys_pkey_free
+397    common  statx                   sys_statx
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 8c7c244..3741859 100644 (file)
@@ -1073,6 +1073,10 @@ config SYSVIPC_COMPAT
        def_bool y
        depends on COMPAT && SYSVIPC
 
+config KEYS_COMPAT
+       def_bool y
+       depends on COMPAT && KEYS
+
 endmenu
 
 menu "Power management options"
diff --git a/arch/arm64/boot/dts/broadcom/ns2.dtsi b/arch/arm64/boot/dts/broadcom/ns2.dtsi
index 9f9e203..bcb03fc 100644 (file)
        pcie0: pcie@20020000 {
                compatible = "brcm,iproc-pcie";
                reg = <0 0x20020000 0 0x1000>;
+               dma-coherent;
 
                #interrupt-cells = <1>;
                interrupt-map-mask = <0 0 0 0>;
        pcie4: pcie@50020000 {
                compatible = "brcm,iproc-pcie";
                reg = <0 0x50020000 0 0x1000>;
+               dma-coherent;
 
                #interrupt-cells = <1>;
                interrupt-map-mask = <0 0 0 0>;
        pcie8: pcie@60c00000 {
                compatible = "brcm,iproc-pcie-paxc";
                reg = <0 0x60c00000 0 0x1000>;
+               dma-coherent;
                linux,pci-domain = <8>;
 
                bus-range = <0x0 0x1>;
                              <0x61030000 0x100>;
                        reg-names = "amac_base", "idm_base", "nicpm_base";
                        interrupts = <GIC_SPI 341 IRQ_TYPE_LEVEL_HIGH>;
+                       dma-coherent;
                        phy-handle = <&gphy0>;
                        phy-mode = "rgmii";
                        status = "disabled";
                        reg = <0x612c0000 0x445>;  /* PDC FS0 regs */
                        interrupts = <GIC_SPI 187 IRQ_TYPE_LEVEL_HIGH>;
                        #mbox-cells = <1>;
+                       dma-coherent;
                        brcm,rx-status-len = <32>;
                        brcm,use-bcm-hdr;
                };
                        reg = <0x612e0000 0x445>;  /* PDC FS1 regs */
                        interrupts = <GIC_SPI 189 IRQ_TYPE_LEVEL_HIGH>;
                        #mbox-cells = <1>;
+                       dma-coherent;
                        brcm,rx-status-len = <32>;
                        brcm,use-bcm-hdr;
                };
                        reg = <0x61300000 0x445>;  /* PDC FS2 regs */
                        interrupts = <GIC_SPI 191 IRQ_TYPE_LEVEL_HIGH>;
                        #mbox-cells = <1>;
+                       dma-coherent;
                        brcm,rx-status-len = <32>;
                        brcm,use-bcm-hdr;
                };
                        reg = <0x61320000 0x445>;  /* PDC FS3 regs */
                        interrupts = <GIC_SPI 193 IRQ_TYPE_LEVEL_HIGH>;
                        #mbox-cells = <1>;
+                       dma-coherent;
                        brcm,rx-status-len = <32>;
                        brcm,use-bcm-hdr;
                };
                sata: ahci@663f2000 {
                        compatible = "brcm,iproc-ahci", "generic-ahci";
                        reg = <0x663f2000 0x1000>;
+                       dma-coherent;
                        reg-names = "ahci";
                        interrupts = <GIC_SPI 438 IRQ_TYPE_LEVEL_HIGH>;
                        #address-cells = <1>;
                        compatible = "brcm,sdhci-iproc-cygnus";
                        reg = <0x66420000 0x100>;
                        interrupts = <GIC_SPI 421 IRQ_TYPE_LEVEL_HIGH>;
+                       dma-coherent;
                        bus-width = <8>;
                        clocks = <&genpll_sw BCM_NS2_GENPLL_SW_SDIO_CLK>;
                        status = "disabled";
                        compatible = "brcm,sdhci-iproc-cygnus";
                        reg = <0x66430000 0x100>;
                        interrupts = <GIC_SPI 422 IRQ_TYPE_LEVEL_HIGH>;
+                       dma-coherent;
                        bus-width = <8>;
                        clocks = <&genpll_sw BCM_NS2_GENPLL_SW_SDIO_CLK>;
                        status = "disabled";
diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
index 05310ad..f31c48d 100644 (file)
@@ -251,7 +251,7 @@ static inline bool system_supports_fpsimd(void)
 static inline bool system_uses_ttbr0_pan(void)
 {
        return IS_ENABLED(CONFIG_ARM64_SW_TTBR0_PAN) &&
-               !cpus_have_cap(ARM64_HAS_PAN);
+               !cpus_have_const_cap(ARM64_HAS_PAN);
 }
 
 #endif /* __ASSEMBLY__ */
diff --git a/arch/arm64/include/asm/unistd.h b/arch/arm64/include/asm/unistd.h
index e78ac26..bdbeb06 100644 (file)
@@ -44,7 +44,7 @@
 #define __ARM_NR_compat_cacheflush     (__ARM_NR_COMPAT_BASE+2)
 #define __ARM_NR_compat_set_tls                (__ARM_NR_COMPAT_BASE+5)
 
-#define __NR_compat_syscalls           394
+#define __NR_compat_syscalls           398
 #endif
 
 #define __ARCH_WANT_SYS_CLONE
diff --git a/arch/arm64/include/asm/unistd32.h b/arch/arm64/include/asm/unistd32.h
index b7e8ef1..c66b51a 100644 (file)
@@ -809,6 +809,14 @@ __SYSCALL(__NR_copy_file_range, sys_copy_file_range)
 __SYSCALL(__NR_preadv2, compat_sys_preadv2)
 #define __NR_pwritev2 393
 __SYSCALL(__NR_pwritev2, compat_sys_pwritev2)
+#define __NR_pkey_mprotect 394
+__SYSCALL(__NR_pkey_mprotect, sys_pkey_mprotect)
+#define __NR_pkey_alloc 395
+__SYSCALL(__NR_pkey_alloc, sys_pkey_alloc)
+#define __NR_pkey_free 396
+__SYSCALL(__NR_pkey_free, sys_pkey_free)
+#define __NR_statx 397
+__SYSCALL(__NR_statx, sys_statx)
 
 /*
  * Please add new compat syscalls above this comment and update
diff --git a/arch/arm64/kernel/cpuidle.c b/arch/arm64/kernel/cpuidle.c
index 75a0f8a..fd69108 100644 (file)
@@ -30,7 +30,7 @@ int arm_cpuidle_init(unsigned int cpu)
 }
 
 /**
- * cpu_suspend() - function to enter a low-power idle state
+ * arm_cpuidle_suspend() - function to enter a low-power idle state
  * @arg: argument to pass to CPU suspend operations
  *
  * Return: 0 on success, -EOPNOTSUPP if CPU suspend hook not initialized, CPU
diff --git a/arch/arm64/kernel/kaslr.c b/arch/arm64/kernel/kaslr.c
index 769f24e..d7e90d9 100644 (file)
@@ -131,11 +131,15 @@ u64 __init kaslr_early_init(u64 dt_phys, u64 modulo_offset)
        /*
         * The kernel Image should not extend across a 1GB/32MB/512MB alignment
         * boundary (for 4KB/16KB/64KB granule kernels, respectively). If this
-        * happens, increase the KASLR offset by the size of the kernel image.
+        * happens, increase the KASLR offset by the size of the kernel image
+        * rounded up by SWAPPER_BLOCK_SIZE.
         */
        if ((((u64)_text + offset + modulo_offset) >> SWAPPER_TABLE_SHIFT) !=
-           (((u64)_end + offset + modulo_offset) >> SWAPPER_TABLE_SHIFT))
-               offset = (offset + (u64)(_end - _text)) & mask;
+           (((u64)_end + offset + modulo_offset) >> SWAPPER_TABLE_SHIFT)) {
+               u64 kimg_sz = _end - _text;
+               offset = (offset + round_up(kimg_sz, SWAPPER_BLOCK_SIZE))
+                               & mask;
+       }
 
        if (IS_ENABLED(CONFIG_KASAN))
                /*
diff --git a/arch/arm64/kernel/probes/kprobes.c b/arch/arm64/kernel/probes/kprobes.c
index 2a07aae..c5c4594 100644 (file)
@@ -372,12 +372,6 @@ int __kprobes kprobe_fault_handler(struct pt_regs *regs, unsigned int fsr)
        return 0;
 }
 
-int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
-                                      unsigned long val, void *data)
-{
-       return NOTIFY_DONE;
-}
-
 static void __kprobes kprobe_handler(struct pt_regs *regs)
 {
        struct kprobe *p, *cur_kprobe;
diff --git a/arch/arm64/mm/kasan_init.c b/arch/arm64/mm/kasan_init.c
index 55d1e92..687a358 100644 (file)
@@ -162,7 +162,7 @@ void __init kasan_init(void)
        clear_pgds(KASAN_SHADOW_START, KASAN_SHADOW_END);
 
        vmemmap_populate(kimg_shadow_start, kimg_shadow_end,
-                        pfn_to_nid(virt_to_pfn(_text)));
+                        pfn_to_nid(virt_to_pfn(lm_alias(_text))));
 
        /*
         * vmemmap_populate() has populated the shadow region that covers the
diff --git a/arch/openrisc/include/asm/cmpxchg.h b/arch/openrisc/include/asm/cmpxchg.h
index 5fcb9ac..f0a5d8b 100644 (file)
@@ -77,7 +77,11 @@ static inline unsigned long __xchg(unsigned long val, volatile void *ptr,
        return val;
 }
 
-#define xchg(ptr, with) \
-       ((typeof(*(ptr)))__xchg((unsigned long)(with), (ptr), sizeof(*(ptr))))
+#define xchg(ptr, with)                                                \
+       ({                                                              \
+               (__typeof__(*(ptr))) __xchg((unsigned long)(with),      \
+                                           (ptr),                      \
+                                           sizeof(*(ptr)));            \
+       })
 
 #endif /* __ASM_OPENRISC_CMPXCHG_H */
diff --git a/arch/openrisc/include/asm/uaccess.h b/arch/openrisc/include/asm/uaccess.h
index 140faa1..1311e6b 100644 (file)
@@ -211,7 +211,7 @@ do {                                                                        \
        case 1: __get_user_asm(x, ptr, retval, "l.lbz"); break;         \
        case 2: __get_user_asm(x, ptr, retval, "l.lhz"); break;         \
        case 4: __get_user_asm(x, ptr, retval, "l.lwz"); break;         \
-       case 8: __get_user_asm2(x, ptr, retval);                        \
+       case 8: __get_user_asm2(x, ptr, retval); break;                 \
        default: (x) = __get_user_bad();                                \
        }                                                               \
 } while (0)
diff --git a/arch/openrisc/kernel/or32_ksyms.c b/arch/openrisc/kernel/or32_ksyms.c
index 5c4695d..ee3e604 100644 (file)
@@ -30,6 +30,7 @@
 #include <asm/hardirq.h>
 #include <asm/delay.h>
 #include <asm/pgalloc.h>
+#include <asm/pgtable.h>
 
 #define DECLARE_EXPORT(name) extern void name(void); EXPORT_SYMBOL(name)
 
@@ -42,6 +43,9 @@ DECLARE_EXPORT(__muldi3);
 DECLARE_EXPORT(__ashrdi3);
 DECLARE_EXPORT(__ashldi3);
 DECLARE_EXPORT(__lshrdi3);
+DECLARE_EXPORT(__ucmpdi2);
 
+EXPORT_SYMBOL(empty_zero_page);
 EXPORT_SYMBOL(__copy_tofrom_user);
+EXPORT_SYMBOL(__clear_user);
 EXPORT_SYMBOL(memset);
diff --git a/arch/openrisc/kernel/process.c b/arch/openrisc/kernel/process.c
index 828a291..f8da545 100644 (file)
@@ -90,6 +90,7 @@ void arch_cpu_idle(void)
 }
 
 void (*pm_power_off) (void) = machine_power_off;
+EXPORT_SYMBOL(pm_power_off);
 
 /*
  * When a process does an "exec", machine state like FPU and debug
diff --git a/arch/parisc/include/asm/cacheflush.h b/arch/parisc/include/asm/cacheflush.h
index 19c9c3c..c7e15cc 100644 (file)
@@ -43,28 +43,9 @@ static inline void flush_kernel_dcache_page(struct page *page)
 
 #define flush_kernel_dcache_range(start,size) \
        flush_kernel_dcache_range_asm((start), (start)+(size));
-/* vmap range flushes and invalidates.  Architecturally, we don't need
- * the invalidate, because the CPU should refuse to speculate once an
- * area has been flushed, so invalidate is left empty */
-static inline void flush_kernel_vmap_range(void *vaddr, int size)
-{
-       unsigned long start = (unsigned long)vaddr;
-
-       flush_kernel_dcache_range_asm(start, start + size);
-}
-static inline void invalidate_kernel_vmap_range(void *vaddr, int size)
-{
-       unsigned long start = (unsigned long)vaddr;
-       void *cursor = vaddr;
 
-       for ( ; cursor < vaddr + size; cursor += PAGE_SIZE) {
-               struct page *page = vmalloc_to_page(cursor);
-
-               if (test_and_clear_bit(PG_dcache_dirty, &page->flags))
-                       flush_kernel_dcache_page(page);
-       }
-       flush_kernel_dcache_range_asm(start, start + size);
-}
+void flush_kernel_vmap_range(void *vaddr, int size);
+void invalidate_kernel_vmap_range(void *vaddr, int size);
 
 #define flush_cache_vmap(start, end)           flush_cache_all()
 #define flush_cache_vunmap(start, end)         flush_cache_all()
diff --git a/arch/parisc/include/asm/uaccess.h b/arch/parisc/include/asm/uaccess.h
index fb4382c..edfbf9d 100644 (file)
@@ -32,7 +32,8 @@
  * that put_user is the same as __put_user, etc.
  */
 
-#define access_ok(type, uaddr, size) (1)
+#define access_ok(type, uaddr, size)   \
+       ( (uaddr) == (uaddr) )
 
 #define put_user __put_user
 #define get_user __get_user
diff --git a/arch/parisc/include/uapi/asm/unistd.h b/arch/parisc/include/uapi/asm/unistd.h
index 6b0741e..667c994 100644 (file)
 #define __NR_copy_file_range   (__NR_Linux + 346)
 #define __NR_preadv2           (__NR_Linux + 347)
 #define __NR_pwritev2          (__NR_Linux + 348)
+#define __NR_statx             (__NR_Linux + 349)
 
-#define __NR_Linux_syscalls    (__NR_pwritev2 + 1)
+#define __NR_Linux_syscalls    (__NR_statx + 1)
 
 
 #define __IGNORE_select                /* newselect */
diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c
index 0dc72d5..c32a090 100644 (file)
@@ -616,3 +616,25 @@ flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long
                __flush_cache_page(vma, vmaddr, PFN_PHYS(pfn));
        }
 }
+
+void flush_kernel_vmap_range(void *vaddr, int size)
+{
+       unsigned long start = (unsigned long)vaddr;
+
+       if ((unsigned long)size > parisc_cache_flush_threshold)
+               flush_data_cache();
+       else
+               flush_kernel_dcache_range_asm(start, start + size);
+}
+EXPORT_SYMBOL(flush_kernel_vmap_range);
+
+void invalidate_kernel_vmap_range(void *vaddr, int size)
+{
+       unsigned long start = (unsigned long)vaddr;
+
+       if ((unsigned long)size > parisc_cache_flush_threshold)
+               flush_data_cache();
+       else
+               flush_kernel_dcache_range_asm(start, start + size);
+}
+EXPORT_SYMBOL(invalidate_kernel_vmap_range);
diff --git a/arch/parisc/kernel/module.c b/arch/parisc/kernel/module.c
index a0ecdb4..c66c943 100644 (file)
@@ -620,6 +620,10 @@ int apply_relocate_add(Elf_Shdr *sechdrs,
                         */
                        *loc = fsel(val, addend); 
                        break;
+               case R_PARISC_SECREL32:
+                       /* 32-bit section relative address. */
+                       *loc = fsel(val, addend);
+                       break;
                case R_PARISC_DPREL21L:
                        /* left 21 bit of relative address */
                        val = lrsel(val - dp, addend);
@@ -807,6 +811,10 @@ int apply_relocate_add(Elf_Shdr *sechdrs,
                         */
                        *loc = fsel(val, addend); 
                        break;
+               case R_PARISC_SECREL32:
+                       /* 32-bit section relative address. */
+                       *loc = fsel(val, addend);
+                       break;
                case R_PARISC_FPTR64:
                        /* 64-bit function address */
                        if(in_local(me, (void *)(val + addend))) {
diff --git a/arch/parisc/kernel/perf.c b/arch/parisc/kernel/perf.c
index e282a51..6017a5a 100644 (file)
@@ -39,7 +39,7 @@
  *  the PDC INTRIGUE calls.  This is done to eliminate bugs introduced
  *  in various PDC revisions.  The code is much more maintainable
  *  and reliable this way vs having to debug on every version of PDC
- *  on every box. 
+ *  on every box.
  */
 
 #include <linux/capability.h>
@@ -195,8 +195,8 @@ static int perf_config(uint32_t *image_ptr);
 static int perf_release(struct inode *inode, struct file *file);
 static int perf_open(struct inode *inode, struct file *file);
 static ssize_t perf_read(struct file *file, char __user *buf, size_t cnt, loff_t *ppos);
-static ssize_t perf_write(struct file *file, const char __user *buf, size_t count, 
-       loff_t *ppos);
+static ssize_t perf_write(struct file *file, const char __user *buf,
+       size_t count, loff_t *ppos);
 static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
 static void perf_start_counters(void);
 static int perf_stop_counters(uint32_t *raddr);
@@ -222,7 +222,7 @@ extern void perf_intrigue_disable_perf_counters (void);
 /*
  * configure:
  *
- * Configure the cpu with a given data image.  First turn off the counters, 
+ * Configure the cpu with a given data image.  First turn off the counters,
  * then download the image, then turn the counters back on.
  */
 static int perf_config(uint32_t *image_ptr)
@@ -234,7 +234,7 @@ static int perf_config(uint32_t *image_ptr)
        error = perf_stop_counters(raddr);
        if (error != 0) {
                printk("perf_config: perf_stop_counters = %ld\n", error);
-               return -EINVAL; 
+               return -EINVAL;
        }
 
 printk("Preparing to write image\n");
@@ -242,7 +242,7 @@ printk("Preparing to write image\n");
        error = perf_write_image((uint64_t *)image_ptr);
        if (error != 0) {
                printk("perf_config: DOWNLOAD = %ld\n", error);
-               return -EINVAL; 
+               return -EINVAL;
        }
 
 printk("Preparing to start counters\n");
@@ -254,7 +254,7 @@ printk("Preparing to start counters\n");
 }
 
 /*
- * Open the device and initialize all of its memory.  The device is only 
+ * Open the device and initialize all of its memory.  The device is only
  * opened once, but can be "queried" by multiple processes that know its
  * file descriptor.
  */
@@ -298,19 +298,19 @@ static ssize_t perf_read(struct file *file, char __user *buf, size_t cnt, loff_t
  * called on the processor that the download should happen
  * on.
  */
-static ssize_t perf_write(struct file *file, const char __user *buf, size_t count, 
-       loff_t *ppos)
+static ssize_t perf_write(struct file *file, const char __user *buf,
+       size_t count, loff_t *ppos)
 {
        size_t image_size;
        uint32_t image_type;
        uint32_t interface_type;
        uint32_t test;
 
-       if (perf_processor_interface == ONYX_INTF) 
+       if (perf_processor_interface == ONYX_INTF)
                image_size = PCXU_IMAGE_SIZE;
-       else if (perf_processor_interface == CUDA_INTF) 
+       else if (perf_processor_interface == CUDA_INTF)
                image_size = PCXW_IMAGE_SIZE;
-       else 
+       else
                return -EFAULT;
 
        if (!capable(CAP_SYS_ADMIN))
@@ -330,22 +330,22 @@ static ssize_t perf_write(struct file *file, const char __user *buf, size_t coun
 
        /* First check the machine type is correct for
           the requested image */
-        if (((perf_processor_interface == CUDA_INTF) &&
-                      (interface_type != CUDA_INTF)) ||
-           ((perf_processor_interface == ONYX_INTF) &&
-                      (interface_type != ONYX_INTF))) 
+       if (((perf_processor_interface == CUDA_INTF) &&
+                       (interface_type != CUDA_INTF)) ||
+               ((perf_processor_interface == ONYX_INTF) &&
+                       (interface_type != ONYX_INTF)))
                return -EINVAL;
 
        /* Next check to make sure the requested image
           is valid */
-       if (((interface_type == CUDA_INTF) && 
+       if (((interface_type == CUDA_INTF) &&
                       (test >= MAX_CUDA_IMAGES)) ||
-           ((interface_type == ONYX_INTF) && 
-                      (test >= MAX_ONYX_IMAGES))) 
+           ((interface_type == ONYX_INTF) &&
+                      (test >= MAX_ONYX_IMAGES)))
                return -EINVAL;
 
        /* Copy the image into the processor */
-       if (interface_type == CUDA_INTF) 
+       if (interface_type == CUDA_INTF)
                return perf_config(cuda_images[test]);
        else
                return perf_config(onyx_images[test]);
@@ -359,7 +359,7 @@ static ssize_t perf_write(struct file *file, const char __user *buf, size_t coun
 static void perf_patch_images(void)
 {
 #if 0 /* FIXME!! */
-/* 
+/*
  * NOTE:  this routine is VERY specific to the current TLB image.
  * If the image is changed, this routine might also need to be changed.
  */
@@ -367,9 +367,9 @@ static void perf_patch_images(void)
        extern void $i_dtlb_miss_2_0();
        extern void PA2_0_iva();
 
-       /* 
+       /*
         * We can only use the lower 32-bits, the upper 32-bits should be 0
-        * anyway given this is in the kernel 
+        * anyway given this is in the kernel
         */
        uint32_t itlb_addr  = (uint32_t)&($i_itlb_miss_2_0);
        uint32_t dtlb_addr  = (uint32_t)&($i_dtlb_miss_2_0);
@@ -377,21 +377,21 @@ static void perf_patch_images(void)
 
        if (perf_processor_interface == ONYX_INTF) {
                /* clear last 2 bytes */
-               onyx_images[TLBMISS][15] &= 0xffffff00;  
+               onyx_images[TLBMISS][15] &= 0xffffff00;
                /* set 2 bytes */
                onyx_images[TLBMISS][15] |= (0x000000ff&((dtlb_addr) >> 24));
                onyx_images[TLBMISS][16] = (dtlb_addr << 8)&0xffffff00;
                onyx_images[TLBMISS][17] = itlb_addr;
 
                /* clear last 2 bytes */
-               onyx_images[TLBHANDMISS][15] &= 0xffffff00;  
+               onyx_images[TLBHANDMISS][15] &= 0xffffff00;
                /* set 2 bytes */
                onyx_images[TLBHANDMISS][15] |= (0x000000ff&((dtlb_addr) >> 24));
                onyx_images[TLBHANDMISS][16] = (dtlb_addr << 8)&0xffffff00;
                onyx_images[TLBHANDMISS][17] = itlb_addr;
 
                /* clear last 2 bytes */
-               onyx_images[BIG_CPI][15] &= 0xffffff00;  
+               onyx_images[BIG_CPI][15] &= 0xffffff00;
                /* set 2 bytes */
                onyx_images[BIG_CPI][15] |= (0x000000ff&((dtlb_addr) >> 24));
                onyx_images[BIG_CPI][16] = (dtlb_addr << 8)&0xffffff00;
@@ -404,24 +404,24 @@ static void perf_patch_images(void)
 
        } else if (perf_processor_interface == CUDA_INTF) {
                /* Cuda interface */
-               cuda_images[TLBMISS][16] =  
+               cuda_images[TLBMISS][16] =
                        (cuda_images[TLBMISS][16]&0xffff0000) |
                        ((dtlb_addr >> 8)&0x0000ffff);
-               cuda_images[TLBMISS][17] = 
+               cuda_images[TLBMISS][17] =
                        ((dtlb_addr << 24)&0xff000000) | ((itlb_addr >> 16)&0x000000ff);
                cuda_images[TLBMISS][18] = (itlb_addr << 16)&0xffff0000;
 
-               cuda_images[TLBHANDMISS][16] = 
+               cuda_images[TLBHANDMISS][16] =
                        (cuda_images[TLBHANDMISS][16]&0xffff0000) |
                        ((dtlb_addr >> 8)&0x0000ffff);
-               cuda_images[TLBHANDMISS][17] = 
+               cuda_images[TLBHANDMISS][17] =
                        ((dtlb_addr << 24)&0xff000000) | ((itlb_addr >> 16)&0x000000ff);
                cuda_images[TLBHANDMISS][18] = (itlb_addr << 16)&0xffff0000;
 
-               cuda_images[BIG_CPI][16] = 
+               cuda_images[BIG_CPI][16] =
                        (cuda_images[BIG_CPI][16]&0xffff0000) |
                        ((dtlb_addr >> 8)&0x0000ffff);
-               cuda_images[BIG_CPI][17] = 
+               cuda_images[BIG_CPI][17] =
                        ((dtlb_addr << 24)&0xff000000) | ((itlb_addr >> 16)&0x000000ff);
                cuda_images[BIG_CPI][18] = (itlb_addr << 16)&0xffff0000;
        } else {
@@ -433,7 +433,7 @@ static void perf_patch_images(void)
 
 /*
  * ioctl routine
- * All routines effect the processor that they are executed on.  Thus you 
+ * All routines affect the processor that they are executed on.  Thus you
  * must be running on the processor that you wish to change.
  */
 
@@ -459,7 +459,7 @@ static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
                        }
 
                        /* copy out the Counters */
-                       if (copy_to_user((void __user *)arg, raddr, 
+                       if (copy_to_user((void __user *)arg, raddr,
                                        sizeof (raddr)) != 0) {
                                error =  -EFAULT;
                                break;
@@ -487,7 +487,7 @@ static const struct file_operations perf_fops = {
        .open = perf_open,
        .release = perf_release
 };
-       
+
 static struct miscdevice perf_dev = {
        MISC_DYNAMIC_MINOR,
        PA_PERF_DEV,
@@ -595,7 +595,7 @@ static int perf_stop_counters(uint32_t *raddr)
                /* OR sticky2 (bit 1496) to counter2 bit 32 */
                tmp64 |= (userbuf[23] >> 8) & 0x0000000080000000;
                raddr[2] = (uint32_t)tmp64;
-               
+
                /* Counter3 is bits 1497 to 1528 */
                tmp64 =  (userbuf[23] >> 7) & 0x00000000ffffffff;
                /* OR sticky3 (bit 1529) to counter3 bit 32 */
@@ -617,7 +617,7 @@ static int perf_stop_counters(uint32_t *raddr)
                userbuf[22] = 0;
                userbuf[23] = 0;
 
-               /* 
+               /*
                 * Write back the zeroed bytes + the image given
                 * the read was destructive.
                 */
@@ -625,13 +625,13 @@ static int perf_stop_counters(uint32_t *raddr)
        } else {
 
                /*
-                * Read RDR-15 which contains the counters and sticky bits 
+                * Read RDR-15 which contains the counters and sticky bits
                 */
                if (!perf_rdr_read_ubuf(15, userbuf)) {
                        return -13;
                }
 
-               /* 
+               /*
                 * Clear out the counters
                 */
                perf_rdr_clear(15);
@@ -644,7 +644,7 @@ static int perf_stop_counters(uint32_t *raddr)
                raddr[2] = (uint32_t)((userbuf[1] >> 32) & 0x00000000ffffffffUL);
                raddr[3] = (uint32_t)(userbuf[1] & 0x00000000ffffffffUL);
        }
-               
+
        return 0;
 }
 
@@ -682,7 +682,7 @@ static int perf_rdr_read_ubuf(uint32_t      rdr_num, uint64_t *buffer)
        i = tentry->num_words;
        while (i--) {
                buffer[i] = 0;
-       }       
+       }
 
        /* Check for a bit count that is not a multiple of 64 */
        if ((xbits = width & 0x03f) != 0) {
@@ -808,18 +808,22 @@ static int perf_write_image(uint64_t *memaddr)
        }
 
        runway = ioremap_nocache(cpu_device->hpa.start, 4096);
+       if (!runway) {
+               pr_err("perf_write_image: ioremap failed!\n");
+               return -ENOMEM;
+       }
 
        /* Merge intrigue bits into Runway STATUS 0 */
        tmp64 = __raw_readq(runway + RUNWAY_STATUS) & 0xffecfffffffffffful;
-       __raw_writeq(tmp64 | (*memaddr++ & 0x0013000000000000ul), 
+       __raw_writeq(tmp64 | (*memaddr++ & 0x0013000000000000ul),
                     runway + RUNWAY_STATUS);
-       
+
        /* Write RUNWAY DEBUG registers */
        for (i = 0; i < 8; i++) {
                __raw_writeq(*memaddr++, runway + RUNWAY_DEBUG);
        }
 
-       return 0; 
+       return 0;
 }
 
 /*
@@ -843,7 +847,7 @@ printk("perf_rdr_write\n");
                        perf_rdr_shift_out_U(rdr_num, buffer[i]);
                } else {
                        perf_rdr_shift_out_W(rdr_num, buffer[i]);
-               }       
+               }
        }
 printk("perf_rdr_write done\n");
 }
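
The perf_write_image() hunk above adds the missing failure check on ioremap_nocache(). A minimal sketch of the same pattern, with hypothetical names (res_start, res_len, REG_OFF), for any code that maps MMIO before touching it:

void __iomem *base;

base = ioremap_nocache(res_start, res_len);	/* can return NULL */
if (!base)
	return -ENOMEM;		/* never dereference a failed mapping */

__raw_writeq(val, base + REG_OFF);
iounmap(base);
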
index 06f7ca7..b76f503 100644 (file)
@@ -142,6 +142,8 @@ void machine_power_off(void)
 
        printk(KERN_EMERG "System shut down completed.\n"
               "Please power this system off now.");
+
+       for (;;);
 }
 
 void (*pm_power_off)(void) = machine_power_off;
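
The for (;;) added above keeps machine_power_off() from returning into its caller if the firmware never actually removes power. A hedged sketch of the idiom (the loop body is illustrative; some platforms halt or enter a low-power wait instead):

void machine_power_off(void)
{
	/* ... ask firmware to cut power, print the notice ... */

	/* If power stays on, never return: the caller has nothing
	 * sensible left to do. */
	for (;;)
		;
}
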
index 3cfef1d..44aeaa9 100644 (file)
        ENTRY_SAME(copy_file_range)
        ENTRY_COMP(preadv2)
        ENTRY_COMP(pwritev2)
+       ENTRY_SAME(statx)
 
 
 .ifne (. - 90b) - (__NR_Linux_syscalls * (91b - 90b))
index 4b369d8..1c94708 100644 (file)
@@ -387,3 +387,4 @@ SYSCALL(copy_file_range)
 COMPAT_SYS_SPU(preadv2)
 COMPAT_SYS_SPU(pwritev2)
 SYSCALL(kexec_file_load)
+SYSCALL(statx)
index eb1acee..9ba11db 100644 (file)
@@ -12,7 +12,7 @@
 #include <uapi/asm/unistd.h>
 
 
-#define NR_syscalls            383
+#define NR_syscalls            384
 
 #define __NR__exit __NR_exit
 
index 2f26335..b85f142 100644 (file)
 #define __NR_preadv2           380
 #define __NR_pwritev2          381
 #define __NR_kexec_file_load   382
+#define __NR_statx             383
 
 #endif /* _UAPI_ASM_POWERPC_UNISTD_H_ */
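
Wiring up statx takes three coordinated edits per architecture, all visible above: a syscall-table entry (ENTRY_SAME/SYSCALL), a bumped NR_syscalls, and a __NR_statx define in the UAPI header. A hedged user-space sketch, assuming a libc that already exposes SYS_statx and the struct statx layout from linux/stat.h:

#include <fcntl.h>
#include <linux/stat.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	struct statx stx;

	/* 383 is the slot claimed above for powerpc; every
	 * architecture owns its own numbering. */
	if (syscall(SYS_statx, AT_FDCWD, "/etc/hostname", 0,
		    STATX_SIZE, &stx) != 0) {
		perror("statx");
		return 1;
	}
	printf("size: %llu\n", (unsigned long long)stx.stx_size);
	return 0;
}
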
index 251060c..8b1fe89 100644 (file)
@@ -751,7 +751,9 @@ void __init hpte_init_pseries(void)
        mmu_hash_ops.flush_hash_range    = pSeries_lpar_flush_hash_range;
        mmu_hash_ops.hpte_clear_all      = pseries_hpte_clear_all;
        mmu_hash_ops.hugepage_invalidate = pSeries_lpar_hugepage_invalidate;
-       mmu_hash_ops.resize_hpt          = pseries_lpar_resize_hpt;
+
+       if (firmware_has_feature(FW_FEATURE_HPT_RESIZE))
+               mmu_hash_ops.resize_hpt = pseries_lpar_resize_hpt;
 }
 
 void radix_init_pseries(void)
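
The pseries hunk stops installing resize_hpt unless firmware advertises FW_FEATURE_HPT_RESIZE, leaving the op NULL otherwise. A hedged sketch of the caller-side contract this implies (function name illustrative):

/* Callers must treat an unset op as "feature not present". */
static int resize_hpt_if_possible(unsigned long new_shift)
{
	if (!mmu_hash_ops.resize_hpt)
		return -ENODEV;

	return mmu_hash_ops.resize_hpt(new_shift);
}
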
index 349d4d1..2aa1ad1 100644 (file)
@@ -2101,8 +2101,8 @@ static int x86_pmu_event_init(struct perf_event *event)
 
 static void refresh_pce(void *ignored)
 {
-       if (current->mm)
-               load_mm_cr4(current->mm);
+       if (current->active_mm)
+               load_mm_cr4(current->active_mm);
 }
 
 static void x86_pmu_event_mapped(struct perf_event *event)
@@ -2110,6 +2110,18 @@ static void x86_pmu_event_mapped(struct perf_event *event)
        if (!(event->hw.flags & PERF_X86_EVENT_RDPMC_ALLOWED))
                return;
 
+       /*
+        * This function relies on not being called concurrently in two
+        * tasks in the same mm.  Otherwise one task could observe
+        * perf_rdpmc_allowed > 1 and return all the way back to
+        * userspace with CR4.PCE clear while another task is still
+        * doing on_each_cpu_mask() to propagate CR4.PCE.
+        *
+        * For now, this can't happen because all callers hold mmap_sem
+        * for write.  If this changes, we'll need a different solution.
+        */
+       lockdep_assert_held_exclusive(&current->mm->mmap_sem);
+
        if (atomic_inc_return(&current->mm->context.perf_rdpmc_allowed) == 1)
                on_each_cpu_mask(mm_cpumask(current->mm), refresh_pce, NULL, 1);
 }
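
The refresh_pce() fix and the new lockdep assertion both serve one invariant: CR4.PCE is set on a CPU exactly while the mm it is running (active_mm, so kernel threads borrowing a user mm are covered too) has perf_rdpmc_allowed > 0. A hedged sketch of that invariant as a check (illustrative only, not kernel code):

static void assert_pce_matches_refcount(struct mm_struct *mm)
{
	bool pce = !!(__read_cr4() & X86_CR4_PCE);

	/* PCE should mirror the rdpmc refcount at all times. */
	WARN_ON_ONCE(pce !=
		     (atomic_read(&mm->context.perf_rdpmc_allowed) > 0));
}
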
index 72277b1..50d35e3 100644 (file)
@@ -121,12 +121,9 @@ static inline void native_pmd_clear(pmd_t *pmd)
        *(tmp + 1) = 0;
 }
 
-#if !defined(CONFIG_SMP) || (defined(CONFIG_HIGHMEM64G) && \
-               defined(CONFIG_PARAVIRT))
 static inline void native_pud_clear(pud_t *pudp)
 {
 }
-#endif
 
 static inline void pud_clear(pud_t *pudp)
 {
index 1cfb36b..585ee0d 100644 (file)
@@ -62,7 +62,7 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page);
 # define set_pud(pudp, pud)            native_set_pud(pudp, pud)
 #endif
 
-#ifndef __PAGETABLE_PMD_FOLDED
+#ifndef __PAGETABLE_PUD_FOLDED
 #define pud_clear(pud)                 native_pud_clear(pud)
 #endif
 
index ae32838..b2879cc 100644 (file)
@@ -179,10 +179,15 @@ static int acpi_register_lapic(int id, u32 acpiid, u8 enabled)
                return -EINVAL;
        }
 
+       if (!enabled) {
+               ++disabled_cpus;
+               return -EINVAL;
+       }
+
        if (boot_cpu_physical_apicid != -1U)
                ver = boot_cpu_apic_version;
 
-       cpu = __generic_processor_info(id, ver, enabled);
+       cpu = generic_processor_info(id, ver);
        if (cpu >= 0)
                early_per_cpu(x86_cpu_to_acpiid, cpu) = acpiid;
 
@@ -710,7 +715,7 @@ static void __init acpi_set_irq_model_ioapic(void)
 #ifdef CONFIG_ACPI_HOTPLUG_CPU
 #include <acpi/processor.h>
 
-int acpi_map_cpu2node(acpi_handle handle, int cpu, int physid)
+static int acpi_map_cpu2node(acpi_handle handle, int cpu, int physid)
 {
 #ifdef CONFIG_ACPI_NUMA
        int nid;
index aee7ded..8ccb7ef 100644 (file)
@@ -2063,7 +2063,7 @@ static int allocate_logical_cpuid(int apicid)
        return nr_logical_cpuids++;
 }
 
-int __generic_processor_info(int apicid, int version, bool enabled)
+int generic_processor_info(int apicid, int version)
 {
        int cpu, max = nr_cpu_ids;
        bool boot_cpu_detected = physid_isset(boot_cpu_physical_apicid,
@@ -2121,11 +2121,9 @@ int __generic_processor_info(int apicid, int version, bool enabled)
        if (num_processors >= nr_cpu_ids) {
                int thiscpu = max + disabled_cpus;
 
-               if (enabled) {
-                       pr_warning("APIC: NR_CPUS/possible_cpus limit of %i "
-                                  "reached. Processor %d/0x%x ignored.\n",
-                                  max, thiscpu, apicid);
-               }
+               pr_warning("APIC: NR_CPUS/possible_cpus limit of %i "
+                          "reached. Processor %d/0x%x ignored.\n",
+                          max, thiscpu, apicid);
 
                disabled_cpus++;
                return -EINVAL;
@@ -2177,23 +2175,13 @@ int __generic_processor_info(int apicid, int version, bool enabled)
                apic->x86_32_early_logical_apicid(cpu);
 #endif
        set_cpu_possible(cpu, true);
-
-       if (enabled) {
-               num_processors++;
-               physid_set(apicid, phys_cpu_present_map);
-               set_cpu_present(cpu, true);
-       } else {
-               disabled_cpus++;
-       }
+       physid_set(apicid, phys_cpu_present_map);
+       set_cpu_present(cpu, true);
+       num_processors++;
 
        return cpu;
 }
 
-int generic_processor_info(int apicid, int version)
-{
-       return __generic_processor_info(apicid, version, true);
-}
-
 int hard_smp_processor_id(void)
 {
        return read_apic_id();
index c05509d..9ac2a5c 100644 (file)
@@ -727,7 +727,7 @@ void rdtgroup_kn_unlock(struct kernfs_node *kn)
        if (atomic_dec_and_test(&rdtgrp->waitcount) &&
            (rdtgrp->flags & RDT_DELETED)) {
                kernfs_unbreak_active_protection(kn);
-               kernfs_put(kn);
+               kernfs_put(rdtgrp->kn);
                kfree(rdtgrp);
        } else {
                kernfs_unbreak_active_protection(kn);
index 54a2372..b5785c1 100644 (file)
@@ -4,6 +4,7 @@
  *  Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
  */
 
+#define DISABLE_BRANCH_PROFILING
 #include <linux/init.h>
 #include <linux/linkage.h>
 #include <linux/types.h>
index f088ea4..a723ae9 100644 (file)
@@ -166,11 +166,9 @@ int __register_nmi_handler(unsigned int type, struct nmiaction *action)
        spin_lock_irqsave(&desc->lock, flags);
 
        /*
-        * most handlers of type NMI_UNKNOWN never return because
-        * they just assume the NMI is theirs.  Just a sanity check
-        * to manage expectations
+        * Indicate if there are multiple registrations on the
+        * internal NMI handler call chains (SERR and IO_CHECK).
         */
-       WARN_ON_ONCE(type == NMI_UNKNOWN && !list_empty(&desc->head));
        WARN_ON_ONCE(type == NMI_SERR && !list_empty(&desc->head));
        WARN_ON_ONCE(type == NMI_IO_CHECK && !list_empty(&desc->head));
 
index 4f7a983..c73a7f9 100644 (file)
@@ -1333,6 +1333,8 @@ static int __init init_tsc_clocksource(void)
         * the refined calibration and directly register it as a clocksource.
         */
        if (boot_cpu_has(X86_FEATURE_TSC_KNOWN_FREQ)) {
+               if (boot_cpu_has(X86_FEATURE_ART))
+                       art_related_clocksource = &clocksource_tsc;
                clocksource_register_khz(&clocksource_tsc, tsc_khz);
                return 0;
        }
index 478d15d..0833926 100644 (file)
@@ -82,19 +82,43 @@ static size_t regs_size(struct pt_regs *regs)
        return sizeof(*regs);
 }
 
+#ifdef CONFIG_X86_32
+#define GCC_REALIGN_WORDS 3
+#else
+#define GCC_REALIGN_WORDS 1
+#endif
+
 static bool is_last_task_frame(struct unwind_state *state)
 {
-       unsigned long bp = (unsigned long)state->bp;
-       unsigned long regs = (unsigned long)task_pt_regs(state->task);
+       unsigned long *last_bp = (unsigned long *)task_pt_regs(state->task) - 2;
+       unsigned long *aligned_bp = last_bp - GCC_REALIGN_WORDS;
 
        /*
         * We have to check for the last task frame at two different locations
         * because gcc can occasionally decide to realign the stack pointer and
-        * change the offset of the stack frame by a word in the prologue of a
-        * function called by head/entry code.
+        * change the offset of the stack frame in the prologue of a function
+        * called by head/entry code.  Examples:
+        *
+        * <start_secondary>:
+        *      push   %edi
+        *      lea    0x8(%esp),%edi
+        *      and    $0xfffffff8,%esp
+        *      pushl  -0x4(%edi)
+        *      push   %ebp
+        *      mov    %esp,%ebp
+        *
+        * <x86_64_start_kernel>:
+        *      lea    0x8(%rsp),%r10
+        *      and    $0xfffffffffffffff0,%rsp
+        *      pushq  -0x8(%r10)
+        *      push   %rbp
+        *      mov    %rsp,%rbp
+        *
+        * Note that after aligning the stack, it pushes a duplicate copy of
+        * the return address before pushing the frame pointer.
         */
-       return bp == regs - FRAME_HEADER_SIZE ||
-              bp == regs - FRAME_HEADER_SIZE - sizeof(long);
+       return (state->bp == last_bp ||
+               (state->bp == aligned_bp && *(aligned_bp+1) == *(last_bp+1)));
 }
 
 /*
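
is_last_task_frame() now probes two candidate frame-pointer locations because a GCC-realigned prologue (as in the disassembly above) pushes a duplicate of the return address before pushing the frame pointer. A hedged sketch of the assumed stack layout (word-sized cells, stack grows down, slots may coincide for small GCC_REALIGN_WORDS):

/*
 *	[ pt_regs ...               ]	<- task_pt_regs(task)
 *	[ return address            ]	<- last_bp + 1
 *	[ saved bp                  ]	<- last_bp = task_pt_regs(task) - 2
 *	[ GCC_REALIGN_WORDS pushes  ]
 *	[ duplicate return address  ]	<- aligned_bp + 1
 *	[ saved bp (realigned case) ]	<- aligned_bp
 *
 * Matching *(aligned_bp + 1) against *(last_bp + 1) confirms both
 * slots hold the same return address before the realigned bp is
 * trusted.
 */
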
index 8d63d7a..4c90cfd 100644 (file)
@@ -1,3 +1,4 @@
+#define DISABLE_BRANCH_PROFILING
 #define pr_fmt(fmt) "kasan: " fmt
 #include <linux/bootmem.h>
 #include <linux/kasan.h>
index 5126dfd..cd44ae7 100644 (file)
@@ -590,7 +590,7 @@ static unsigned long mpx_bd_entry_to_bt_addr(struct mm_struct *mm,
  * we might run off the end of the bounds table if we are on
  * a 64-bit kernel and try to get 8 bytes.
  */
-int get_user_bd_entry(struct mm_struct *mm, unsigned long *bd_entry_ret,
+static int get_user_bd_entry(struct mm_struct *mm, unsigned long *bd_entry_ret,
                long __user *bd_entry_ptr)
 {
        u32 bd_entry_32;
index a7dbec4..3dbde04 100644 (file)
@@ -26,5 +26,6 @@ obj-$(subst m,y,$(CONFIG_GPIO_PCA953X)) += platform_pcal9555a.o
 obj-$(subst m,y,$(CONFIG_GPIO_PCA953X)) += platform_tca6416.o
 # MISC Devices
 obj-$(subst m,y,$(CONFIG_KEYBOARD_GPIO)) += platform_gpio_keys.o
+obj-$(subst m,y,$(CONFIG_INTEL_MID_POWER_BUTTON)) += platform_mrfld_power_btn.o
 obj-$(subst m,y,$(CONFIG_RTC_DRV_CMOS)) += platform_mrfld_rtc.o
 obj-$(subst m,y,$(CONFIG_INTEL_MID_WATCHDOG)) += platform_mrfld_wdt.o
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_mrfld_power_btn.c b/arch/x86/platform/intel-mid/device_libs/platform_mrfld_power_btn.c
new file mode 100644 (file)
index 0000000..a6c3705
--- /dev/null
@@ -0,0 +1,82 @@
+/*
+ * Intel Merrifield power button support
+ *
+ * (C) Copyright 2017 Intel Corporation
+ *
+ * Author: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+#include <linux/init.h>
+#include <linux/ioport.h>
+#include <linux/platform_device.h>
+#include <linux/sfi.h>
+
+#include <asm/intel-mid.h>
+#include <asm/intel_scu_ipc.h>
+
+static struct resource mrfld_power_btn_resources[] = {
+       {
+               .flags          = IORESOURCE_IRQ,
+       },
+};
+
+static struct platform_device mrfld_power_btn_dev = {
+       .name           = "msic_power_btn",
+       .id             = PLATFORM_DEVID_NONE,
+       .num_resources  = ARRAY_SIZE(mrfld_power_btn_resources),
+       .resource       = mrfld_power_btn_resources,
+};
+
+static int mrfld_power_btn_scu_status_change(struct notifier_block *nb,
+                                            unsigned long code, void *data)
+{
+       if (code == SCU_DOWN) {
+               platform_device_unregister(&mrfld_power_btn_dev);
+               return 0;
+       }
+
+       return platform_device_register(&mrfld_power_btn_dev);
+}
+
+static struct notifier_block mrfld_power_btn_scu_notifier = {
+       .notifier_call  = mrfld_power_btn_scu_status_change,
+};
+
+static int __init register_mrfld_power_btn(void)
+{
+       if (intel_mid_identify_cpu() != INTEL_MID_CPU_CHIP_TANGIER)
+               return -ENODEV;
+
+       /*
+        * We need to be sure that the SCU IPC is ready before
+        * the PMIC power button device can be registered:
+        */
+       intel_scu_notifier_add(&mrfld_power_btn_scu_notifier);
+
+       return 0;
+}
+arch_initcall(register_mrfld_power_btn);
+
+static void __init *mrfld_power_btn_platform_data(void *info)
+{
+       struct resource *res = mrfld_power_btn_resources;
+       struct sfi_device_table_entry *pentry = info;
+
+       res->start = res->end = pentry->irq;
+       return NULL;
+}
+
+static const struct devs_id mrfld_power_btn_dev_id __initconst = {
+       .name                   = "bcove_power_btn",
+       .type                   = SFI_DEV_TYPE_IPC,
+       .delay                  = 1,
+       .msic                   = 1,
+       .get_platform_data      = &mrfld_power_btn_platform_data,
+};
+
+sfi_device(mrfld_power_btn_dev_id);
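
The new file defers platform_device_register() to an SCU notifier so the power button only appears once the SCU IPC link is usable. A hedged sketch of the expected lifecycle, assuming the SCU_AVAILABLE/SCU_DOWN codes from asm/intel_scu_ipc.h:

/* Illustrative flow, not literal call sites:
 *
 *   SCU firmware comes up  -> notifier runs, code != SCU_DOWN
 *     -> platform_device_register(&mrfld_power_btn_dev)
 *   SCU firmware goes down -> notifier runs, code == SCU_DOWN
 *     -> platform_device_unregister(&mrfld_power_btn_dev)
 */
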
index 86edd1e..9e304e2 100644 (file)
@@ -19,7 +19,7 @@
 #include <asm/intel_scu_ipc.h>
 #include <asm/io_apic.h>
 
-#define TANGIER_EXT_TIMER0_MSI 15
+#define TANGIER_EXT_TIMER0_MSI 12
 
 static struct platform_device wdt_dev = {
        .name = "intel_mid_wdt",
index e793fe5..e42978d 100644 (file)
 
 #include "intel_mid_weak_decls.h"
 
-static void penwell_arch_setup(void);
-/* penwell arch ops */
-static struct intel_mid_ops penwell_ops = {
-       .arch_setup = penwell_arch_setup,
-};
-
-static void mfld_power_off(void)
-{
-}
-
 static unsigned long __init mfld_calibrate_tsc(void)
 {
        unsigned long fast_calibrate;
@@ -63,9 +53,12 @@ static unsigned long __init mfld_calibrate_tsc(void)
 static void __init penwell_arch_setup(void)
 {
        x86_platform.calibrate_tsc = mfld_calibrate_tsc;
-       pm_power_off = mfld_power_off;
 }
 
+static struct intel_mid_ops penwell_ops = {
+       .arch_setup = penwell_arch_setup,
+};
+
 void *get_penwell_ops(void)
 {
        return &penwell_ops;
index 4467a80..0143135 100644 (file)
@@ -182,11 +182,6 @@ int __weak arch_register_cpu(int cpu)
 
 void __weak arch_unregister_cpu(int cpu) {}
 
-int __weak acpi_map_cpu2node(acpi_handle handle, int cpu, int physid)
-{
-       return -ENODEV;
-}
-
 static int acpi_processor_hotadd_init(struct acpi_processor *pr)
 {
        unsigned long long sta;
@@ -285,6 +280,13 @@ static int acpi_processor_get_info(struct acpi_device *device)
                pr->acpi_id = value;
        }
 
+       if (acpi_duplicate_processor_id(pr->acpi_id)) {
+               dev_err(&device->dev,
+                       "Failed to get unique processor _UID (0x%x)\n",
+                       pr->acpi_id);
+               return -ENODEV;
+       }
+
        pr->phys_id = acpi_get_phys_id(pr->handle, device_declaration,
                                        pr->acpi_id);
        if (invalid_phys_cpuid(pr->phys_id))
@@ -585,7 +587,7 @@ static struct acpi_scan_handler processor_container_handler = {
 static int nr_unique_ids __initdata;
 
 /* The number of the duplicate processor IDs */
-static int nr_duplicate_ids __initdata;
+static int nr_duplicate_ids;
 
 /* Used to store the unique processor IDs */
 static int unique_processor_ids[] __initdata = {
@@ -593,7 +595,7 @@ static int unique_processor_ids[] __initdata = {
 };
 
 /* Used to store the duplicate processor IDs */
-static int duplicate_processor_ids[] __initdata = {
+static int duplicate_processor_ids[] = {
        [0 ... NR_CPUS - 1] = -1,
 };
 
@@ -638,28 +640,53 @@ static acpi_status __init acpi_processor_ids_walk(acpi_handle handle,
                                                  void **rv)
 {
        acpi_status status;
+       acpi_object_type acpi_type;
+       unsigned long long uid;
        union acpi_object object = { 0 };
        struct acpi_buffer buffer = { sizeof(union acpi_object), &object };
 
-       status = acpi_evaluate_object(handle, NULL, NULL, &buffer);
+       status = acpi_get_type(handle, &acpi_type);
        if (ACPI_FAILURE(status))
-               acpi_handle_info(handle, "Not get the processor object\n");
-       else
-               processor_validated_ids_update(object.processor.proc_id);
+               return false;
+
+       switch (acpi_type) {
+       case ACPI_TYPE_PROCESSOR:
+               status = acpi_evaluate_object(handle, NULL, NULL, &buffer);
+               if (ACPI_FAILURE(status))
+                       goto err;
+               uid = object.processor.proc_id;
+               break;
+
+       case ACPI_TYPE_DEVICE:
+               status = acpi_evaluate_integer(handle, "_UID", NULL, &uid);
+               if (ACPI_FAILURE(status))
+                       goto err;
+               break;
+       default:
+               goto err;
+       }
+
+       processor_validated_ids_update(uid);
+       return true;
+
+err:
+       acpi_handle_info(handle, "Invalid processor object\n");
+       return false;
 
-       return AE_OK;
 }
 
-static void __init acpi_processor_check_duplicates(void)
+void __init acpi_processor_check_duplicates(void)
 {
-       /* Search all processor nodes in ACPI namespace */
+       /* check the correctness for all processors in ACPI namespace */
        acpi_walk_namespace(ACPI_TYPE_PROCESSOR, ACPI_ROOT_OBJECT,
                                                ACPI_UINT32_MAX,
                                                acpi_processor_ids_walk,
                                                NULL, NULL, NULL);
+       acpi_get_devices(ACPI_PROCESSOR_DEVICE_HID, acpi_processor_ids_walk,
+                                               NULL, NULL);
 }
 
-bool __init acpi_processor_validate_proc_id(int proc_id)
+bool acpi_duplicate_processor_id(int proc_id)
 {
        int i;
 
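
With the __initdata annotations dropped above, acpi_duplicate_processor_id() can be called after boot, and its job reduces to a linear scan of the duplicate table filled in by the namespace walk. A hedged sketch of that scan, assuming nr_duplicate_ids counts the populated slots:

bool acpi_duplicate_processor_id(int proc_id)
{
	int i;

	/* duplicate_processor_ids[] is pre-filled with -1; only the
	 * first nr_duplicate_ids entries are meaningful. */
	for (i = 0; i < nr_duplicate_ids; i++)
		if (duplicate_processor_ids[i] == proc_id)
			return true;

	return false;
}
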
index 80cb5eb..34fbe02 100644 (file)
@@ -1249,7 +1249,6 @@ static int __init acpi_init(void)
        acpi_wakeup_device_init();
        acpi_debugger_init();
        acpi_setup_sb_notify_handler();
-       acpi_set_processor_mapping();
        return 0;
 }
 
index 611a558..b933061 100644 (file)
@@ -32,12 +32,12 @@ static struct acpi_table_madt *get_madt_table(void)
 }
 
 static int map_lapic_id(struct acpi_subtable_header *entry,
-                u32 acpi_id, phys_cpuid_t *apic_id, bool ignore_disabled)
+                u32 acpi_id, phys_cpuid_t *apic_id)
 {
        struct acpi_madt_local_apic *lapic =
                container_of(entry, struct acpi_madt_local_apic, header);
 
-       if (ignore_disabled && !(lapic->lapic_flags & ACPI_MADT_ENABLED))
+       if (!(lapic->lapic_flags & ACPI_MADT_ENABLED))
                return -ENODEV;
 
        if (lapic->processor_id != acpi_id)
@@ -48,13 +48,12 @@ static int map_lapic_id(struct acpi_subtable_header *entry,
 }
 
 static int map_x2apic_id(struct acpi_subtable_header *entry,
-               int device_declaration, u32 acpi_id, phys_cpuid_t *apic_id,
-               bool ignore_disabled)
+               int device_declaration, u32 acpi_id, phys_cpuid_t *apic_id)
 {
        struct acpi_madt_local_x2apic *apic =
                container_of(entry, struct acpi_madt_local_x2apic, header);
 
-       if (ignore_disabled && !(apic->lapic_flags & ACPI_MADT_ENABLED))
+       if (!(apic->lapic_flags & ACPI_MADT_ENABLED))
                return -ENODEV;
 
        if (device_declaration && (apic->uid == acpi_id)) {
@@ -66,13 +65,12 @@ static int map_x2apic_id(struct acpi_subtable_header *entry,
 }
 
 static int map_lsapic_id(struct acpi_subtable_header *entry,
-               int device_declaration, u32 acpi_id, phys_cpuid_t *apic_id,
-               bool ignore_disabled)
+               int device_declaration, u32 acpi_id, phys_cpuid_t *apic_id)
 {
        struct acpi_madt_local_sapic *lsapic =
                container_of(entry, struct acpi_madt_local_sapic, header);
 
-       if (ignore_disabled && !(lsapic->lapic_flags & ACPI_MADT_ENABLED))
+       if (!(lsapic->lapic_flags & ACPI_MADT_ENABLED))
                return -ENODEV;
 
        if (device_declaration) {
@@ -89,13 +87,12 @@ static int map_lsapic_id(struct acpi_subtable_header *entry,
  * Retrieve the ARM CPU physical identifier (MPIDR)
  */
 static int map_gicc_mpidr(struct acpi_subtable_header *entry,
-               int device_declaration, u32 acpi_id, phys_cpuid_t *mpidr,
-               bool ignore_disabled)
+               int device_declaration, u32 acpi_id, phys_cpuid_t *mpidr)
 {
        struct acpi_madt_generic_interrupt *gicc =
            container_of(entry, struct acpi_madt_generic_interrupt, header);
 
-       if (ignore_disabled && !(gicc->flags & ACPI_MADT_ENABLED))
+       if (!(gicc->flags & ACPI_MADT_ENABLED))
                return -ENODEV;
 
        /* device_declaration means Device object in DSDT, in the
@@ -112,7 +109,7 @@ static int map_gicc_mpidr(struct acpi_subtable_header *entry,
 }
 
 static phys_cpuid_t map_madt_entry(struct acpi_table_madt *madt,
-                                  int type, u32 acpi_id, bool ignore_disabled)
+                                  int type, u32 acpi_id)
 {
        unsigned long madt_end, entry;
        phys_cpuid_t phys_id = PHYS_CPUID_INVALID;      /* CPU hardware ID */
@@ -130,20 +127,16 @@ static phys_cpuid_t map_madt_entry(struct acpi_table_madt *madt,
                struct acpi_subtable_header *header =
                        (struct acpi_subtable_header *)entry;
                if (header->type == ACPI_MADT_TYPE_LOCAL_APIC) {
-                       if (!map_lapic_id(header, acpi_id, &phys_id,
-                                         ignore_disabled))
+                       if (!map_lapic_id(header, acpi_id, &phys_id))
                                break;
                } else if (header->type == ACPI_MADT_TYPE_LOCAL_X2APIC) {
-                       if (!map_x2apic_id(header, type, acpi_id, &phys_id,
-                                          ignore_disabled))
+                       if (!map_x2apic_id(header, type, acpi_id, &phys_id))
                                break;
                } else if (header->type == ACPI_MADT_TYPE_LOCAL_SAPIC) {
-                       if (!map_lsapic_id(header, type, acpi_id, &phys_id,
-                                          ignore_disabled))
+                       if (!map_lsapic_id(header, type, acpi_id, &phys_id))
                                break;
                } else if (header->type == ACPI_MADT_TYPE_GENERIC_INTERRUPT) {
-                       if (!map_gicc_mpidr(header, type, acpi_id, &phys_id,
-                                           ignore_disabled))
+                       if (!map_gicc_mpidr(header, type, acpi_id, &phys_id))
                                break;
                }
                entry += header->length;
@@ -161,15 +154,14 @@ phys_cpuid_t __init acpi_map_madt_entry(u32 acpi_id)
        if (!madt)
                return PHYS_CPUID_INVALID;
 
-       rv = map_madt_entry(madt, 1, acpi_id, true);
+       rv = map_madt_entry(madt, 1, acpi_id);
 
        acpi_put_table((struct acpi_table_header *)madt);
 
        return rv;
 }
 
-static phys_cpuid_t map_mat_entry(acpi_handle handle, int type, u32 acpi_id,
-                                 bool ignore_disabled)
+static phys_cpuid_t map_mat_entry(acpi_handle handle, int type, u32 acpi_id)
 {
        struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
        union acpi_object *obj;
@@ -190,38 +182,30 @@ static phys_cpuid_t map_mat_entry(acpi_handle handle, int type, u32 acpi_id,
 
        header = (struct acpi_subtable_header *)obj->buffer.pointer;
        if (header->type == ACPI_MADT_TYPE_LOCAL_APIC)
-               map_lapic_id(header, acpi_id, &phys_id, ignore_disabled);
+               map_lapic_id(header, acpi_id, &phys_id);
        else if (header->type == ACPI_MADT_TYPE_LOCAL_SAPIC)
-               map_lsapic_id(header, type, acpi_id, &phys_id, ignore_disabled);
+               map_lsapic_id(header, type, acpi_id, &phys_id);
        else if (header->type == ACPI_MADT_TYPE_LOCAL_X2APIC)
-               map_x2apic_id(header, type, acpi_id, &phys_id, ignore_disabled);
+               map_x2apic_id(header, type, acpi_id, &phys_id);
        else if (header->type == ACPI_MADT_TYPE_GENERIC_INTERRUPT)
-               map_gicc_mpidr(header, type, acpi_id, &phys_id,
-                              ignore_disabled);
+               map_gicc_mpidr(header, type, acpi_id, &phys_id);
 
 exit:
        kfree(buffer.pointer);
        return phys_id;
 }
 
-static phys_cpuid_t __acpi_get_phys_id(acpi_handle handle, int type,
-                                      u32 acpi_id, bool ignore_disabled)
+phys_cpuid_t acpi_get_phys_id(acpi_handle handle, int type, u32 acpi_id)
 {
        phys_cpuid_t phys_id;
 
-       phys_id = map_mat_entry(handle, type, acpi_id, ignore_disabled);
+       phys_id = map_mat_entry(handle, type, acpi_id);
        if (invalid_phys_cpuid(phys_id))
-               phys_id = map_madt_entry(get_madt_table(), type, acpi_id,
-                                          ignore_disabled);
+               phys_id = map_madt_entry(get_madt_table(), type, acpi_id);
 
        return phys_id;
 }
 
-phys_cpuid_t acpi_get_phys_id(acpi_handle handle, int type, u32 acpi_id)
-{
-       return __acpi_get_phys_id(handle, type, acpi_id, true);
-}
-
 int acpi_map_cpuid(phys_cpuid_t phys_id, u32 acpi_id)
 {
 #ifdef CONFIG_SMP
@@ -278,79 +262,6 @@ int acpi_get_cpuid(acpi_handle handle, int type, u32 acpi_id)
 }
 EXPORT_SYMBOL_GPL(acpi_get_cpuid);
 
-#ifdef CONFIG_ACPI_HOTPLUG_CPU
-static bool __init
-map_processor(acpi_handle handle, phys_cpuid_t *phys_id, int *cpuid)
-{
-       int type, id;
-       u32 acpi_id;
-       acpi_status status;
-       acpi_object_type acpi_type;
-       unsigned long long tmp;
-       union acpi_object object = { 0 };
-       struct acpi_buffer buffer = { sizeof(union acpi_object), &object };
-
-       status = acpi_get_type(handle, &acpi_type);
-       if (ACPI_FAILURE(status))
-               return false;
-
-       switch (acpi_type) {
-       case ACPI_TYPE_PROCESSOR:
-               status = acpi_evaluate_object(handle, NULL, NULL, &buffer);
-               if (ACPI_FAILURE(status))
-                       return false;
-               acpi_id = object.processor.proc_id;
-
-               /* validate the acpi_id */
-               if(acpi_processor_validate_proc_id(acpi_id))
-                       return false;
-               break;
-       case ACPI_TYPE_DEVICE:
-               status = acpi_evaluate_integer(handle, "_UID", NULL, &tmp);
-               if (ACPI_FAILURE(status))
-                       return false;
-               acpi_id = tmp;
-               break;
-       default:
-               return false;
-       }
-
-       type = (acpi_type == ACPI_TYPE_DEVICE) ? 1 : 0;
-
-       *phys_id = __acpi_get_phys_id(handle, type, acpi_id, false);
-       id = acpi_map_cpuid(*phys_id, acpi_id);
-
-       if (id < 0)
-               return false;
-       *cpuid = id;
-       return true;
-}
-
-static acpi_status __init
-set_processor_node_mapping(acpi_handle handle, u32 lvl, void *context,
-                          void **rv)
-{
-       phys_cpuid_t phys_id;
-       int cpu_id;
-
-       if (!map_processor(handle, &phys_id, &cpu_id))
-               return AE_ERROR;
-
-       acpi_map_cpu2node(handle, cpu_id, phys_id);
-       return AE_OK;
-}
-
-void __init acpi_set_processor_mapping(void)
-{
-       /* Set persistent cpu <-> node mapping for all processors. */
-       acpi_walk_namespace(ACPI_TYPE_PROCESSOR, ACPI_ROOT_OBJECT,
-                           ACPI_UINT32_MAX, set_processor_node_mapping,
-                           NULL, NULL, NULL);
-}
-#else
-void __init acpi_set_processor_mapping(void) {}
-#endif /* CONFIG_ACPI_HOTPLUG_CPU */
-
 #ifdef CONFIG_ACPI_HOTPLUG_IOAPIC
 static int get_ioapic_id(struct acpi_subtable_header *entry, u32 gsi_base,
                         u64 *phys_addr, int *ioapic_id)
index 684bda4..6bb60fb 100644 (file)
@@ -639,11 +639,6 @@ int lock_device_hotplug_sysfs(void)
        return restart_syscall();
 }
 
-void assert_held_device_hotplug(void)
-{
-       lockdep_assert_held(&device_hotplug_lock);
-}
-
 #ifdef CONFIG_BLOCK
 static inline int device_is_not_partition(struct device *dev)
 {
index c2c14a1..08e0545 100644 (file)
@@ -344,7 +344,8 @@ config BT_WILINK
 
 config BT_QCOMSMD
        tristate "Qualcomm SMD based HCI support"
-       depends on (QCOM_SMD && QCOM_WCNSS_CTRL) || COMPILE_TEST
+       depends on QCOM_SMD || (COMPILE_TEST && QCOM_SMD=n)
+       depends on QCOM_WCNSS_CTRL || (COMPILE_TEST && QCOM_WCNSS_CTRL=n)
        select BT_QCA
        help
          Qualcomm SMD based HCI driver.
index 4a99ac7..9959c76 100644 (file)
@@ -55,6 +55,7 @@ MODULE_DEVICE_TABLE(pci, pci_tbl);
 struct amd768_priv {
        void __iomem *iobase;
        struct pci_dev *pcidev;
+       u32 pmbase;
 };
 
 static int amd_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait)
@@ -148,33 +149,58 @@ found:
        if (pmbase == 0)
                return -EIO;
 
-       priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+       priv = kzalloc(sizeof(*priv), GFP_KERNEL);
        if (!priv)
                return -ENOMEM;
 
-       if (!devm_request_region(&pdev->dev, pmbase + PMBASE_OFFSET,
-                               PMBASE_SIZE, DRV_NAME)) {
+       if (!request_region(pmbase + PMBASE_OFFSET, PMBASE_SIZE, DRV_NAME)) {
                dev_err(&pdev->dev, DRV_NAME " region 0x%x already in use!\n",
                        pmbase + 0xF0);
-               return -EBUSY;
+               err = -EBUSY;
+               goto out;
        }
 
-       priv->iobase = devm_ioport_map(&pdev->dev, pmbase + PMBASE_OFFSET,
-                       PMBASE_SIZE);
+       priv->iobase = ioport_map(pmbase + PMBASE_OFFSET, PMBASE_SIZE);
        if (!priv->iobase) {
                pr_err(DRV_NAME "Cannot map ioport\n");
-               return -ENOMEM;
+               err = -EINVAL;
+               goto err_iomap;
        }
 
        amd_rng.priv = (unsigned long)priv;
+       priv->pmbase = pmbase;
        priv->pcidev = pdev;
 
        pr_info(DRV_NAME " detected\n");
-       return devm_hwrng_register(&pdev->dev, &amd_rng);
+       err = hwrng_register(&amd_rng);
+       if (err) {
+               pr_err(DRV_NAME " registering failed (%d)\n", err);
+               goto err_hwrng;
+       }
+       return 0;
+
+err_hwrng:
+       ioport_unmap(priv->iobase);
+err_iomap:
+       release_region(pmbase + PMBASE_OFFSET, PMBASE_SIZE);
+out:
+       kfree(priv);
+       return err;
 }
 
 static void __exit mod_exit(void)
 {
+       struct amd768_priv *priv;
+
+       priv = (struct amd768_priv *)amd_rng.priv;
+
+       hwrng_unregister(&amd_rng);
+
+       ioport_unmap(priv->iobase);
+
+       release_region(priv->pmbase + PMBASE_OFFSET, PMBASE_SIZE);
+
+       kfree(priv);
 }
 
 module_init(mod_init);
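
The amd-rng conversion away from devm_* is a lifetime fix: devm resources are released with the struct device they were bound to, while amd_rng stays registered until module unload, so the mapping and region must be managed by hand. The new mod_exit() above unwinds in strict reverse order; restated as a hedged sketch with the rationale spelled out:

static void __exit mod_exit(void)
{
	struct amd768_priv *priv = (struct amd768_priv *)amd_rng.priv;

	hwrng_unregister(&amd_rng);	/* stop readers first */
	ioport_unmap(priv->iobase);	/* then drop the mapping they used */
	release_region(priv->pmbase + PMBASE_OFFSET, PMBASE_SIZE);
	kfree(priv);			/* bookkeeping last */
}
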
index e7a2459..e1d421a 100644 (file)
@@ -31,6 +31,9 @@
 #include <linux/module.h>
 #include <linux/pci.h>
 
+
+#define PFX    KBUILD_MODNAME ": "
+
 #define GEODE_RNG_DATA_REG   0x50
 #define GEODE_RNG_STATUS_REG 0x54
 
@@ -82,6 +85,7 @@ static struct hwrng geode_rng = {
 
 static int __init mod_init(void)
 {
+       int err = -ENODEV;
        struct pci_dev *pdev = NULL;
        const struct pci_device_id *ent;
        void __iomem *mem;
@@ -89,27 +93,43 @@ static int __init mod_init(void)
 
        for_each_pci_dev(pdev) {
                ent = pci_match_id(pci_tbl, pdev);
-               if (ent) {
-                       rng_base = pci_resource_start(pdev, 0);
-                       if (rng_base == 0)
-                               return -ENODEV;
-
-                       mem = devm_ioremap(&pdev->dev, rng_base, 0x58);
-                       if (!mem)
-                               return -ENOMEM;
-                       geode_rng.priv = (unsigned long)mem;
-
-                       pr_info("AMD Geode RNG detected\n");
-                       return devm_hwrng_register(&pdev->dev, &geode_rng);
-               }
+               if (ent)
+                       goto found;
        }
-
        /* Device not found. */
-       return -ENODEV;
+       goto out;
+
+found:
+       rng_base = pci_resource_start(pdev, 0);
+       if (rng_base == 0)
+               goto out;
+       err = -ENOMEM;
+       mem = ioremap(rng_base, 0x58);
+       if (!mem)
+               goto out;
+       geode_rng.priv = (unsigned long)mem;
+
+       pr_info("AMD Geode RNG detected\n");
+       err = hwrng_register(&geode_rng);
+       if (err) {
+               pr_err(PFX "RNG registering failed (%d)\n",
+                      err);
+               goto err_unmap;
+       }
+out:
+       return err;
+
+err_unmap:
+       iounmap(mem);
+       goto out;
 }
 
 static void __exit mod_exit(void)
 {
+       void __iomem *mem = (void __iomem *)geode_rng.priv;
+
+       hwrng_unregister(&geode_rng);
+       iounmap(mem);
 }
 
 module_init(mod_init);
index 745844e..d4ca996 100644 (file)
@@ -10,7 +10,6 @@
 #include <linux/io.h>
 #include <linux/platform_device.h>
 #include <linux/atmel_tc.h>
-#include <linux/sched_clock.h>
 
 
 /*
@@ -57,14 +56,9 @@ static u64 tc_get_cycles(struct clocksource *cs)
        return (upper << 16) | lower;
 }
 
-static u32 tc_get_cv32(void)
-{
-       return __raw_readl(tcaddr + ATMEL_TC_REG(0, CV));
-}
-
 static u64 tc_get_cycles32(struct clocksource *cs)
 {
-       return tc_get_cv32();
+       return __raw_readl(tcaddr + ATMEL_TC_REG(0, CV));
 }
 
 static struct clocksource clksrc = {
@@ -75,11 +69,6 @@ static struct clocksource clksrc = {
        .flags          = CLOCK_SOURCE_IS_CONTINUOUS,
 };
 
-static u64 notrace tc_read_sched_clock(void)
-{
-       return tc_get_cv32();
-}
-
 #ifdef CONFIG_GENERIC_CLOCKEVENTS
 
 struct tc_clkevt_device {
@@ -350,9 +339,6 @@ static int __init tcb_clksrc_init(void)
                clksrc.read = tc_get_cycles32;
                /* setup only channel 0 */
                tcb_setup_single_chan(tc, best_divisor_idx);
-
-               /* register sched_clock on chips with single 32 bit counter */
-               sched_clock_register(tc_read_sched_clock, 32, divided_rate);
        } else {
                /* tclib will give us three clocks no matter what the
                 * underlying platform supports.
index 38b9fdf..5dbdd26 100644 (file)
@@ -680,9 +680,11 @@ static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy,
                                        char *buf)
 {
        unsigned int cur_freq = __cpufreq_get(policy);
-       if (!cur_freq)
-               return sprintf(buf, "<unknown>");
-       return sprintf(buf, "%u\n", cur_freq);
+
+       if (cur_freq)
+               return sprintf(buf, "%u\n", cur_freq);
+
+       return sprintf(buf, "<unknown>\n");
 }
 
 /**
@@ -1182,6 +1184,9 @@ static int cpufreq_online(unsigned int cpu)
                for_each_cpu(j, policy->related_cpus)
                        per_cpu(cpufreq_cpu_data, j) = policy;
                write_unlock_irqrestore(&cpufreq_driver_lock, flags);
+       } else {
+               policy->min = policy->user_policy.min;
+               policy->max = policy->user_policy.max;
        }
 
        if (cpufreq_driver->get && !cpufreq_driver->setpolicy) {
index 3d37219..283491f 100644 (file)
@@ -84,6 +84,11 @@ static inline u64 div_ext_fp(u64 x, u64 y)
        return div64_u64(x << EXT_FRAC_BITS, y);
 }
 
+static inline int32_t percent_ext_fp(int percent)
+{
+       return div_ext_fp(percent, 100);
+}
+
 /**
  * struct sample -     Store performance sample
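
percent_ext_fp() converts a 0..100 percentage into the extended fixed-point scale used throughout this file: (percent << EXT_FRAC_BITS) / 100. A hedged worked example (exact at these points; other inputs round down):

/*
 * percent_ext_fp(100) == (100 << EXT_FRAC_BITS) / 100
 *                     == 1 << EXT_FRAC_BITS == int_ext_tofp(1)
 * percent_ext_fp(50)  == 1 << (EXT_FRAC_BITS - 1)
 *
 * so fp_ext_toint(hw_max * percent_ext_fp(50)) == hw_max / 2, which is
 * how the intel_pstate_hwp_set() hunk below scales hw_max.
 */
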
  * @core_avg_perf:     Ratio of APERF/MPERF which is the actual average
@@ -359,9 +364,7 @@ static bool driver_registered __read_mostly;
 static bool acpi_ppc;
 #endif
 
-static struct perf_limits performance_limits;
-static struct perf_limits powersave_limits;
-static struct perf_limits *limits;
+static struct perf_limits global;
 
 static void intel_pstate_init_limits(struct perf_limits *limits)
 {
@@ -372,14 +375,6 @@ static void intel_pstate_init_limits(struct perf_limits *limits)
        limits->max_sysfs_pct = 100;
 }
 
-static void intel_pstate_set_performance_limits(struct perf_limits *limits)
-{
-       intel_pstate_init_limits(limits);
-       limits->min_perf_pct = 100;
-       limits->min_perf = int_ext_tofp(1);
-       limits->min_sysfs_pct = 100;
-}
-
 static DEFINE_MUTEX(intel_pstate_driver_lock);
 static DEFINE_MUTEX(intel_pstate_limits_lock);
 
@@ -502,7 +497,7 @@ static void intel_pstate_init_acpi_perf_limits(struct cpufreq_policy *policy)
         * correct max turbo frequency based on the turbo state.
         * Also need to convert to MHz as _PSS freq is in MHz.
         */
-       if (!limits->turbo_disabled)
+       if (!global.turbo_disabled)
                cpu->acpi_perf_data.states[0].core_frequency =
                                        policy->cpuinfo.max_freq / 1000;
        cpu->valid_pss_table = true;
@@ -621,7 +616,7 @@ static inline void update_turbo_state(void)
 
        cpu = all_cpu_data[0];
        rdmsrl(MSR_IA32_MISC_ENABLE, misc_en);
-       limits->turbo_disabled =
+       global.turbo_disabled =
                (misc_en & MSR_IA32_MISC_ENABLE_TURBO_DISABLE ||
                 cpu->pstate.max_pstate == cpu->pstate.turbo_pstate);
 }
@@ -845,12 +840,11 @@ static struct freq_attr *hwp_cpufreq_attrs[] = {
 
 static void intel_pstate_hwp_set(struct cpufreq_policy *policy)
 {
-       int min, hw_min, max, hw_max, cpu, range, adj_range;
-       struct perf_limits *perf_limits = limits;
+       int min, hw_min, max, hw_max, cpu;
+       struct perf_limits *perf_limits = &global;
        u64 value, cap;
 
        for_each_cpu(cpu, policy->cpus) {
-               int max_perf_pct, min_perf_pct;
                struct cpudata *cpu_data = all_cpu_data[cpu];
                s16 epp;
 
@@ -859,24 +853,22 @@ static void intel_pstate_hwp_set(struct cpufreq_policy *policy)
 
                rdmsrl_on_cpu(cpu, MSR_HWP_CAPABILITIES, &cap);
                hw_min = HWP_LOWEST_PERF(cap);
-               if (limits->no_turbo)
+               if (global.no_turbo)
                        hw_max = HWP_GUARANTEED_PERF(cap);
                else
                        hw_max = HWP_HIGHEST_PERF(cap);
-               range = hw_max - hw_min;
 
-               max_perf_pct = perf_limits->max_perf_pct;
-               min_perf_pct = perf_limits->min_perf_pct;
+               max = fp_ext_toint(hw_max * perf_limits->max_perf);
+               if (cpu_data->policy == CPUFREQ_POLICY_PERFORMANCE)
+                       min = max;
+               else
+                       min = fp_ext_toint(hw_max * perf_limits->min_perf);
 
                rdmsrl_on_cpu(cpu, MSR_HWP_REQUEST, &value);
-               adj_range = min_perf_pct * range / 100;
-               min = hw_min + adj_range;
+
                value &= ~HWP_MIN_PERF(~0L);
                value |= HWP_MIN_PERF(min);
 
-               adj_range = max_perf_pct * range / 100;
-               max = hw_min + adj_range;
-
                value &= ~HWP_MAX_PERF(~0L);
                value |= HWP_MAX_PERF(max);
 
@@ -969,26 +961,18 @@ static int intel_pstate_resume(struct cpufreq_policy *policy)
 }
 
 static void intel_pstate_update_policies(void)
-       __releases(&intel_pstate_limits_lock)
-       __acquires(&intel_pstate_limits_lock)
 {
-       struct perf_limits *saved_limits = limits;
        int cpu;
 
-       mutex_unlock(&intel_pstate_limits_lock);
-
        for_each_possible_cpu(cpu)
                cpufreq_update_policy(cpu);
-
-       mutex_lock(&intel_pstate_limits_lock);
-
-       limits = saved_limits;
 }
 
 /************************** debugfs begin ************************/
 static int pid_param_set(void *data, u64 val)
 {
        *(u32 *)data = val;
+       pid_params.sample_rate_ns = pid_params.sample_rate_ms * NSEC_PER_MSEC;
        intel_pstate_reset_all_pid();
        return 0;
 }
@@ -1060,7 +1044,7 @@ static void intel_pstate_debug_hide_params(void)
        static ssize_t show_##file_name                                 \
        (struct kobject *kobj, struct attribute *attr, char *buf)       \
        {                                                               \
-               return sprintf(buf, "%u\n", limits->object);            \
+               return sprintf(buf, "%u\n", global.object);             \
        }
 
 static ssize_t intel_pstate_show_status(char *buf);
@@ -1151,10 +1135,10 @@ static ssize_t show_no_turbo(struct kobject *kobj,
        }
 
        update_turbo_state();
-       if (limits->turbo_disabled)
-               ret = sprintf(buf, "%u\n", limits->turbo_disabled);
+       if (global.turbo_disabled)
+               ret = sprintf(buf, "%u\n", global.turbo_disabled);
        else
-               ret = sprintf(buf, "%u\n", limits->no_turbo);
+               ret = sprintf(buf, "%u\n", global.no_turbo);
 
        mutex_unlock(&intel_pstate_driver_lock);
 
@@ -1181,19 +1165,19 @@ static ssize_t store_no_turbo(struct kobject *a, struct attribute *b,
        mutex_lock(&intel_pstate_limits_lock);
 
        update_turbo_state();
-       if (limits->turbo_disabled) {
+       if (global.turbo_disabled) {
                pr_warn("Turbo disabled by BIOS or unavailable on processor\n");
                mutex_unlock(&intel_pstate_limits_lock);
                mutex_unlock(&intel_pstate_driver_lock);
                return -EPERM;
        }
 
-       limits->no_turbo = clamp_t(int, input, 0, 1);
-
-       intel_pstate_update_policies();
+       global.no_turbo = clamp_t(int, input, 0, 1);
 
        mutex_unlock(&intel_pstate_limits_lock);
 
+       intel_pstate_update_policies();
+
        mutex_unlock(&intel_pstate_driver_lock);
 
        return count;
@@ -1218,19 +1202,16 @@ static ssize_t store_max_perf_pct(struct kobject *a, struct attribute *b,
 
        mutex_lock(&intel_pstate_limits_lock);
 
-       limits->max_sysfs_pct = clamp_t(int, input, 0 , 100);
-       limits->max_perf_pct = min(limits->max_policy_pct,
-                                  limits->max_sysfs_pct);
-       limits->max_perf_pct = max(limits->min_policy_pct,
-                                  limits->max_perf_pct);
-       limits->max_perf_pct = max(limits->min_perf_pct,
-                                  limits->max_perf_pct);
-       limits->max_perf = div_ext_fp(limits->max_perf_pct, 100);
-
-       intel_pstate_update_policies();
+       global.max_sysfs_pct = clamp_t(int, input, 0 , 100);
+       global.max_perf_pct = min(global.max_policy_pct, global.max_sysfs_pct);
+       global.max_perf_pct = max(global.min_policy_pct, global.max_perf_pct);
+       global.max_perf_pct = max(global.min_perf_pct, global.max_perf_pct);
+       global.max_perf = percent_ext_fp(global.max_perf_pct);
 
        mutex_unlock(&intel_pstate_limits_lock);
 
+       intel_pstate_update_policies();
+
        mutex_unlock(&intel_pstate_driver_lock);
 
        return count;
@@ -1255,19 +1236,16 @@ static ssize_t store_min_perf_pct(struct kobject *a, struct attribute *b,
 
        mutex_lock(&intel_pstate_limits_lock);
 
-       limits->min_sysfs_pct = clamp_t(int, input, 0 , 100);
-       limits->min_perf_pct = max(limits->min_policy_pct,
-                                  limits->min_sysfs_pct);
-       limits->min_perf_pct = min(limits->max_policy_pct,
-                                  limits->min_perf_pct);
-       limits->min_perf_pct = min(limits->max_perf_pct,
-                                  limits->min_perf_pct);
-       limits->min_perf = div_ext_fp(limits->min_perf_pct, 100);
-
-       intel_pstate_update_policies();
+       global.min_sysfs_pct = clamp_t(int, input, 0 , 100);
+       global.min_perf_pct = max(global.min_policy_pct, global.min_sysfs_pct);
+       global.min_perf_pct = min(global.max_policy_pct, global.min_perf_pct);
+       global.min_perf_pct = min(global.max_perf_pct, global.min_perf_pct);
+       global.min_perf = percent_ext_fp(global.min_perf_pct);
 
        mutex_unlock(&intel_pstate_limits_lock);
 
+       intel_pstate_update_policies();
+
        mutex_unlock(&intel_pstate_driver_lock);
 
        return count;
@@ -1387,7 +1365,7 @@ static u64 atom_get_val(struct cpudata *cpudata, int pstate)
        u32 vid;
 
        val = (u64)pstate << 8;
-       if (limits->no_turbo && !limits->turbo_disabled)
+       if (global.no_turbo && !global.turbo_disabled)
                val |= (u64)1 << 32;
 
        vid_fp = cpudata->vid.min + mul_fp(
@@ -1557,7 +1535,7 @@ static u64 core_get_val(struct cpudata *cpudata, int pstate)
        u64 val;
 
        val = (u64)pstate << 8;
-       if (limits->no_turbo && !limits->turbo_disabled)
+       if (global.no_turbo && !global.turbo_disabled)
                val |= (u64)1 << 32;
 
        return val;
@@ -1683,9 +1661,9 @@ static void intel_pstate_get_min_max(struct cpudata *cpu, int *min, int *max)
        int max_perf = cpu->pstate.turbo_pstate;
        int max_perf_adj;
        int min_perf;
-       struct perf_limits *perf_limits = limits;
+       struct perf_limits *perf_limits = &global;
 
-       if (limits->no_turbo || limits->turbo_disabled)
+       if (global.no_turbo || global.turbo_disabled)
                max_perf = cpu->pstate.max_pstate;
 
        if (per_cpu_limits)
@@ -1820,7 +1798,7 @@ static inline int32_t get_target_pstate_use_cpu_load(struct cpudata *cpu)
 
        sample->busy_scaled = busy_frac * 100;
 
-       target = limits->no_turbo || limits->turbo_disabled ?
+       target = global.no_turbo || global.turbo_disabled ?
                        cpu->pstate.max_pstate : cpu->pstate.turbo_pstate;
        target += target >> 2;
        target = mul_fp(target, busy_frac);
@@ -2080,36 +2058,34 @@ static void intel_pstate_clear_update_util_hook(unsigned int cpu)
 static void intel_pstate_update_perf_limits(struct cpufreq_policy *policy,
                                            struct perf_limits *limits)
 {
+       int32_t max_policy_perf, min_policy_perf;
 
-       limits->max_policy_pct = DIV_ROUND_UP(policy->max * 100,
-                                             policy->cpuinfo.max_freq);
-       limits->max_policy_pct = clamp_t(int, limits->max_policy_pct, 0, 100);
+       max_policy_perf = div_ext_fp(policy->max, policy->cpuinfo.max_freq);
+       max_policy_perf = clamp_t(int32_t, max_policy_perf, 0, int_ext_tofp(1));
        if (policy->max == policy->min) {
-               limits->min_policy_pct = limits->max_policy_pct;
+               min_policy_perf = max_policy_perf;
        } else {
-               limits->min_policy_pct = DIV_ROUND_UP(policy->min * 100,
-                                                     policy->cpuinfo.max_freq);
-               limits->min_policy_pct = clamp_t(int, limits->min_policy_pct,
-                                                0, 100);
+               min_policy_perf = div_ext_fp(policy->min,
+                                            policy->cpuinfo.max_freq);
+               min_policy_perf = clamp_t(int32_t, min_policy_perf,
+                                         0, max_policy_perf);
        }
 
-       /* Normalize user input to [min_policy_pct, max_policy_pct] */
-       limits->min_perf_pct = max(limits->min_policy_pct,
-                                  limits->min_sysfs_pct);
-       limits->min_perf_pct = min(limits->max_policy_pct,
-                                  limits->min_perf_pct);
-       limits->max_perf_pct = min(limits->max_policy_pct,
-                                  limits->max_sysfs_pct);
-       limits->max_perf_pct = max(limits->min_policy_pct,
-                                  limits->max_perf_pct);
-
-       /* Make sure min_perf_pct <= max_perf_pct */
-       limits->min_perf_pct = min(limits->max_perf_pct, limits->min_perf_pct);
-
-       limits->min_perf = div_ext_fp(limits->min_perf_pct, 100);
-       limits->max_perf = div_ext_fp(limits->max_perf_pct, 100);
+       /* Normalize user input to [min_perf, max_perf] */
+       limits->min_perf = max(min_policy_perf,
+                              percent_ext_fp(limits->min_sysfs_pct));
+       limits->min_perf = min(limits->min_perf, max_policy_perf);
+       limits->max_perf = min(max_policy_perf,
+                              percent_ext_fp(limits->max_sysfs_pct));
+       limits->max_perf = max(min_policy_perf, limits->max_perf);
+
+       /* Make sure min_perf <= max_perf */
+       limits->min_perf = min(limits->min_perf, limits->max_perf);
+
        limits->max_perf = round_up(limits->max_perf, EXT_FRAC_BITS);
        limits->min_perf = round_up(limits->min_perf, EXT_FRAC_BITS);
+       limits->max_perf_pct = fp_ext_toint(limits->max_perf * 100);
+       limits->min_perf_pct = fp_ext_toint(limits->min_perf * 100);
 
        pr_debug("cpu:%d max_perf_pct:%d min_perf_pct:%d\n", policy->cpu,
                 limits->max_perf_pct, limits->min_perf_pct);
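
Note: the rewritten normalization clamps the sysfs limits into the policy range entirely in extended fixed point and derives the percentages afterwards only for reporting. The ordering matters: min is bounded above by the policy ceiling, max below by the policy floor, then min is forced under max. A standalone sketch:

    #include <stdint.h>

    static int64_t min64(int64_t a, int64_t b) { return a < b ? a : b; }
    static int64_t max64(int64_t a, int64_t b) { return a > b ? a : b; }

    static void normalize_limits(int64_t min_policy, int64_t max_policy,
                                 int64_t min_sysfs, int64_t max_sysfs,
                                 int64_t *min_perf, int64_t *max_perf)
    {
            *min_perf = min64(max64(min_policy, min_sysfs), max_policy);
            *max_perf = max64(min64(max_policy, max_sysfs), min_policy);
            *min_perf = min64(*min_perf, *max_perf);        /* min <= max */
    }
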
@@ -2118,7 +2094,7 @@ static void intel_pstate_update_perf_limits(struct cpufreq_policy *policy,
 static int intel_pstate_set_policy(struct cpufreq_policy *policy)
 {
        struct cpudata *cpu;
-       struct perf_limits *perf_limits = NULL;
+       struct perf_limits *perf_limits = &global;
 
        if (!policy->cpuinfo.max_freq)
                return -ENODEV;
@@ -2141,21 +2117,6 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
 
        mutex_lock(&intel_pstate_limits_lock);
 
-       if (policy->policy == CPUFREQ_POLICY_PERFORMANCE) {
-               pr_debug("set performance\n");
-               if (!perf_limits) {
-                       limits = &performance_limits;
-                       perf_limits = limits;
-               }
-       } else {
-               pr_debug("set powersave\n");
-               if (!perf_limits) {
-                       limits = &powersave_limits;
-                       perf_limits = limits;
-               }
-
-       }
-
        intel_pstate_update_perf_limits(policy, perf_limits);
 
        if (cpu->policy == CPUFREQ_POLICY_PERFORMANCE) {
@@ -2179,16 +2140,9 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
 static int intel_pstate_verify_policy(struct cpufreq_policy *policy)
 {
        struct cpudata *cpu = all_cpu_data[policy->cpu];
-       struct perf_limits *perf_limits;
-
-       if (policy->policy == CPUFREQ_POLICY_PERFORMANCE)
-               perf_limits = &performance_limits;
-       else
-               perf_limits = &powersave_limits;
 
        update_turbo_state();
-       policy->cpuinfo.max_freq = perf_limits->turbo_disabled ||
-                                       perf_limits->no_turbo ?
+       policy->cpuinfo.max_freq = global.turbo_disabled || global.no_turbo ?
                                        cpu->pstate.max_freq :
                                        cpu->pstate.turbo_freq;
 
@@ -2203,9 +2157,9 @@ static int intel_pstate_verify_policy(struct cpufreq_policy *policy)
                unsigned int max_freq, min_freq;
 
                max_freq = policy->cpuinfo.max_freq *
-                                       perf_limits->max_sysfs_pct / 100;
+                                       global.max_sysfs_pct / 100;
                min_freq = policy->cpuinfo.max_freq *
-                                       perf_limits->min_sysfs_pct / 100;
+                                       global.min_sysfs_pct / 100;
                cpufreq_verify_within_limits(policy, min_freq, max_freq);
        }
 
@@ -2257,7 +2211,7 @@ static int __intel_pstate_cpu_init(struct cpufreq_policy *policy)
        /* cpuinfo and default policy values */
        policy->cpuinfo.min_freq = cpu->pstate.min_pstate * cpu->pstate.scaling;
        update_turbo_state();
-       policy->cpuinfo.max_freq = limits->turbo_disabled ?
+       policy->cpuinfo.max_freq = global.turbo_disabled ?
                        cpu->pstate.max_pstate : cpu->pstate.turbo_pstate;
        policy->cpuinfo.max_freq *= cpu->pstate.scaling;
 
@@ -2277,7 +2231,7 @@ static int intel_pstate_cpu_init(struct cpufreq_policy *policy)
                return ret;
 
        policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
-       if (limits->min_perf_pct == 100 && limits->max_perf_pct == 100)
+       if (IS_ENABLED(CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE))
                policy->policy = CPUFREQ_POLICY_PERFORMANCE;
        else
                policy->policy = CPUFREQ_POLICY_POWERSAVE;
@@ -2303,7 +2257,7 @@ static int intel_cpufreq_verify_policy(struct cpufreq_policy *policy)
        struct cpudata *cpu = all_cpu_data[policy->cpu];
 
        update_turbo_state();
-       policy->cpuinfo.max_freq = limits->turbo_disabled ?
+       policy->cpuinfo.max_freq = global.no_turbo || global.turbo_disabled ?
                        cpu->pstate.max_freq : cpu->pstate.turbo_freq;
 
        cpufreq_verify_within_cpu_limits(policy);
@@ -2311,26 +2265,6 @@ static int intel_cpufreq_verify_policy(struct cpufreq_policy *policy)
        return 0;
 }
 
-static unsigned int intel_cpufreq_turbo_update(struct cpudata *cpu,
-                                              struct cpufreq_policy *policy,
-                                              unsigned int target_freq)
-{
-       unsigned int max_freq;
-
-       update_turbo_state();
-
-       max_freq = limits->no_turbo || limits->turbo_disabled ?
-                       cpu->pstate.max_freq : cpu->pstate.turbo_freq;
-       policy->cpuinfo.max_freq = max_freq;
-       if (policy->max > max_freq)
-               policy->max = max_freq;
-
-       if (target_freq > max_freq)
-               target_freq = max_freq;
-
-       return target_freq;
-}
-
 static int intel_cpufreq_target(struct cpufreq_policy *policy,
                                unsigned int target_freq,
                                unsigned int relation)
@@ -2339,8 +2273,10 @@ static int intel_cpufreq_target(struct cpufreq_policy *policy,
        struct cpufreq_freqs freqs;
        int target_pstate;
 
+       update_turbo_state();
+
        freqs.old = policy->cur;
-       freqs.new = intel_cpufreq_turbo_update(cpu, policy, target_freq);
+       freqs.new = target_freq;
 
        cpufreq_freq_transition_begin(policy, &freqs);
        switch (relation) {
@@ -2372,7 +2308,8 @@ static unsigned int intel_cpufreq_fast_switch(struct cpufreq_policy *policy,
        struct cpudata *cpu = all_cpu_data[policy->cpu];
        int target_pstate;
 
-       target_freq = intel_cpufreq_turbo_update(cpu, policy, target_freq);
+       update_turbo_state();
+
        target_pstate = DIV_ROUND_UP(target_freq, cpu->pstate.scaling);
        target_pstate = intel_pstate_prepare_request(cpu, target_pstate);
        intel_pstate_update_pstate(cpu, target_pstate);
@@ -2427,13 +2364,7 @@ static int intel_pstate_register_driver(void)
 {
        int ret;
 
-       intel_pstate_init_limits(&powersave_limits);
-       intel_pstate_set_performance_limits(&performance_limits);
-       if (IS_ENABLED(CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE) &&
-           intel_pstate_driver == &intel_pstate)
-               limits = &performance_limits;
-       else
-               limits = &powersave_limits;
+       intel_pstate_init_limits(&global);
 
        ret = cpufreq_register_driver(intel_pstate_driver);
        if (ret) {
index c5adc8c..ae948b1 100644
@@ -615,6 +615,18 @@ int cpuidle_add_sysfs(struct cpuidle_device *dev)
        struct device *cpu_dev = get_cpu_device((unsigned long)dev->cpu);
        int error;
 
+       /*
+        * Return if cpu_device is not set up for this CPU.
+        *
+        * This could happen if the arch did not set up cpu_device
+        * since this CPU is not in the cpu_present mask and the
+        * driver did not send a correct CPU mask during registration.
+        * Without this check we would end up passing a bogus
+        * value for &cpu_dev->kobj in kobject_init_and_add().
+        */
+       if (!cpu_dev)
+               return -ENODEV;
+
        kdev = kzalloc(sizeof(*kdev), GFP_KERNEL);
        if (!kdev)
                return -ENOMEM;
index 511ab04..92d1c69 100644
@@ -283,11 +283,14 @@ EXPORT_SYMBOL_GPL(ccp_version);
  */
 int ccp_enqueue_cmd(struct ccp_cmd *cmd)
 {
-       struct ccp_device *ccp = ccp_get_device();
+       struct ccp_device *ccp;
        unsigned long flags;
        unsigned int i;
        int ret;
 
+       /* Some commands might need to be sent to a specific device */
+       ccp = cmd->ccp ? cmd->ccp : ccp_get_device();
+
        if (!ccp)
                return -ENODEV;
 
index e5d9278..8d0eeb4 100644
@@ -390,6 +390,7 @@ static struct ccp_dma_desc *ccp_create_desc(struct dma_chan *dma_chan,
                        goto err;
 
                ccp_cmd = &cmd->ccp_cmd;
+               ccp_cmd->ccp = chan->ccp;
                ccp_pt = &ccp_cmd->u.passthru_nomap;
                ccp_cmd->flags = CCP_CMD_MAY_BACKLOG;
                ccp_cmd->flags |= CCP_CMD_PASSTHRU_NO_DMA_MAP;
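
Note: taken together, the two ccp hunks route dmaengine work back to the submitting channel's device: the descriptor records its ccp at creation, and ccp_enqueue_cmd() honors that before falling back to round-robin selection. The pattern in a sketch, with hypothetical names:

    struct ccp_device;                       /* opaque here */
    struct ccp_device *ccp_get_device(void); /* hypothetical fallback lookup */

    /* Pick the device a command should run on: the one recorded on the
     * command if any, otherwise whatever the rotation hands out.
     */
    static struct ccp_device *ccp_pick_device(struct ccp_device *recorded)
    {
            return recorded ? recorded : ccp_get_device();
    }
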
index 8d9829f..80c6db2 100644
@@ -427,6 +427,7 @@ static int __dax_dev_pte_fault(struct dax_dev *dax_dev, struct vm_fault *vmf)
        int rc = VM_FAULT_SIGBUS;
        phys_addr_t phys;
        pfn_t pfn;
+       unsigned int fault_size = PAGE_SIZE;
 
        if (check_vma(dax_dev, vmf->vma, __func__))
                return VM_FAULT_SIGBUS;
@@ -437,9 +438,12 @@ static int __dax_dev_pte_fault(struct dax_dev *dax_dev, struct vm_fault *vmf)
                return VM_FAULT_SIGBUS;
        }
 
+       if (fault_size != dax_region->align)
+               return VM_FAULT_SIGBUS;
+
        phys = pgoff_to_phys(dax_dev, vmf->pgoff, PAGE_SIZE);
        if (phys == -1) {
-               dev_dbg(dev, "%s: phys_to_pgoff(%#lx) failed\n", __func__,
+               dev_dbg(dev, "%s: pgoff_to_phys(%#lx) failed\n", __func__,
                                vmf->pgoff);
                return VM_FAULT_SIGBUS;
        }
@@ -464,6 +468,7 @@ static int __dax_dev_pmd_fault(struct dax_dev *dax_dev, struct vm_fault *vmf)
        phys_addr_t phys;
        pgoff_t pgoff;
        pfn_t pfn;
+       unsigned int fault_size = PMD_SIZE;
 
        if (check_vma(dax_dev, vmf->vma, __func__))
                return VM_FAULT_SIGBUS;
@@ -480,10 +485,20 @@ static int __dax_dev_pmd_fault(struct dax_dev *dax_dev, struct vm_fault *vmf)
                return VM_FAULT_SIGBUS;
        }
 
+       if (fault_size < dax_region->align)
+               return VM_FAULT_SIGBUS;
+       else if (fault_size > dax_region->align)
+               return VM_FAULT_FALLBACK;
+
+       /* if we are outside of the VMA */
+       if (pmd_addr < vmf->vma->vm_start ||
+                       (pmd_addr + PMD_SIZE) > vmf->vma->vm_end)
+               return VM_FAULT_SIGBUS;
+
        pgoff = linear_page_index(vmf->vma, pmd_addr);
        phys = pgoff_to_phys(dax_dev, pgoff, PMD_SIZE);
        if (phys == -1) {
-               dev_dbg(dev, "%s: phys_to_pgoff(%#lx) failed\n", __func__,
+               dev_dbg(dev, "%s: pgoff_to_phys(%#lx) failed\n", __func__,
                                pgoff);
                return VM_FAULT_SIGBUS;
        }
@@ -503,6 +518,8 @@ static int __dax_dev_pud_fault(struct dax_dev *dax_dev, struct vm_fault *vmf)
        phys_addr_t phys;
        pgoff_t pgoff;
        pfn_t pfn;
+       unsigned int fault_size = PUD_SIZE;
+
 
        if (check_vma(dax_dev, vmf->vma, __func__))
                return VM_FAULT_SIGBUS;
@@ -519,10 +536,20 @@ static int __dax_dev_pud_fault(struct dax_dev *dax_dev, struct vm_fault *vmf)
                return VM_FAULT_SIGBUS;
        }
 
+       if (fault_size < dax_region->align)
+               return VM_FAULT_SIGBUS;
+       else if (fault_size > dax_region->align)
+               return VM_FAULT_FALLBACK;
+
+       /* if we are outside of the VMA */
+       if (pud_addr < vmf->vma->vm_start ||
+                       (pud_addr + PUD_SIZE) > vmf->vma->vm_end)
+               return VM_FAULT_SIGBUS;
+
        pgoff = linear_page_index(vmf->vma, pud_addr);
        phys = pgoff_to_phys(dax_dev, pgoff, PUD_SIZE);
        if (phys == -1) {
-               dev_dbg(dev, "%s: phys_to_pgoff(%#lx) failed\n", __func__,
+               dev_dbg(dev, "%s: pgoff_to_phys(%#lx) failed\n", __func__,
                                pgoff);
                return VM_FAULT_SIGBUS;
        }
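
Note: all three dax fault handlers now validate the fault size against the region alignment up front: a fault smaller than the alignment is a hard error, a larger one returns VM_FAULT_FALLBACK so the core retries at a smaller granularity (the PTE handler rejects any mismatch outright, since there is nothing smaller to fall back to). Condensed, the PMD/PUD rule is:

    #define VM_FAULT_SIGBUS_        0x0002          /* illustrative values */
    #define VM_FAULT_FALLBACK_      0x0800

    static int check_fault_size(unsigned long fault_size, unsigned long align)
    {
            if (fault_size < align)
                    return VM_FAULT_SIGBUS_;        /* too small to satisfy */
            if (fault_size > align)
                    return VM_FAULT_FALLBACK_;      /* retry, smaller pages */
            return 0;                               /* exact match: proceed */
    }
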
index 9e1a138..16a8951 100644
@@ -96,7 +96,7 @@ static int altr_a10sr_gpio_probe(struct platform_device *pdev)
        gpio->regmap = a10sr->regmap;
 
        gpio->gp = altr_a10sr_gc;
-
+       gpio->gp.parent = pdev->dev.parent;
        gpio->gp.of_node = pdev->dev.of_node;
 
        ret = devm_gpiochip_add_data(&pdev->dev, &gpio->gp, gpio);
index 5bddbd5..3fe6a21 100644
@@ -90,21 +90,18 @@ static int altera_gpio_irq_set_type(struct irq_data *d,
 
        altera_gc = gpiochip_get_data(irq_data_get_irq_chip_data(d));
 
-       if (type == IRQ_TYPE_NONE)
+       if (type == IRQ_TYPE_NONE) {
+               irq_set_handler_locked(d, handle_bad_irq);
                return 0;
-       if (type == IRQ_TYPE_LEVEL_HIGH &&
-               altera_gc->interrupt_trigger == IRQ_TYPE_LEVEL_HIGH)
-               return 0;
-       if (type == IRQ_TYPE_EDGE_RISING &&
-               altera_gc->interrupt_trigger == IRQ_TYPE_EDGE_RISING)
-               return 0;
-       if (type == IRQ_TYPE_EDGE_FALLING &&
-               altera_gc->interrupt_trigger == IRQ_TYPE_EDGE_FALLING)
-               return 0;
-       if (type == IRQ_TYPE_EDGE_BOTH &&
-               altera_gc->interrupt_trigger == IRQ_TYPE_EDGE_BOTH)
+       }
+       if (type == altera_gc->interrupt_trigger) {
+               if (type == IRQ_TYPE_LEVEL_HIGH)
+                       irq_set_handler_locked(d, handle_level_irq);
+               else
+                       irq_set_handler_locked(d, handle_simple_irq);
                return 0;
-
+       }
+       irq_set_handler_locked(d, handle_bad_irq);
        return -EINVAL;
 }
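
Note: the rework above collapses the per-type early returns into one comparison against the chip's single supported trigger, and makes handle_bad_irq the default so an unconfigured or unsupported line is flagged instead of silently serviced with the wrong flow handler. The resulting decision tree, reduced to a sketch:

    if (type == IRQ_TYPE_NONE) {                    /* nothing requested yet */
            irq_set_handler_locked(d, handle_bad_irq);
            return 0;
    }
    if (type == altera_gc->interrupt_trigger) {     /* the one supported type */
            irq_set_handler_locked(d, type == IRQ_TYPE_LEVEL_HIGH ?
                                   handle_level_irq : handle_simple_irq);
            return 0;
    }
    irq_set_handler_locked(d, handle_bad_irq);      /* unsupported type */
    return -EINVAL;
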
 
@@ -230,7 +227,6 @@ static void altera_gpio_irq_edge_handler(struct irq_desc *desc)
        chained_irq_exit(chip, desc);
 }
 
-
 static void altera_gpio_irq_leveL_high_handler(struct irq_desc *desc)
 {
        struct altera_gpio_chip *altera_gc;
@@ -310,7 +306,7 @@ static int altera_gpio_probe(struct platform_device *pdev)
        altera_gc->interrupt_trigger = reg;
 
        ret = gpiochip_irqchip_add(&altera_gc->mmchip.gc, &altera_irq_chip, 0,
-               handle_simple_irq, IRQ_TYPE_NONE);
+               handle_bad_irq, IRQ_TYPE_NONE);
 
        if (ret) {
                dev_err(&pdev->dev, "could not add irqchip\n");
index bdb6923..2a57d02 100644
@@ -270,8 +270,10 @@ mcp23s08_direction_output(struct gpio_chip *chip, unsigned offset, int value)
 static irqreturn_t mcp23s08_irq(int irq, void *data)
 {
        struct mcp23s08 *mcp = data;
-       int intcap, intf, i;
+       int intcap, intf, i, gpio, gpio_orig, intcap_mask;
        unsigned int child_irq;
+       bool intf_set, intcap_changed, gpio_bit_changed,
+               defval_changed, gpio_set;
 
        mutex_lock(&mcp->lock);
        if (mcp_read(mcp, MCP_INTF, &intf) < 0) {
@@ -287,14 +289,67 @@ static irqreturn_t mcp23s08_irq(int irq, void *data)
        }
 
        mcp->cache[MCP_INTCAP] = intcap;
+
+       /* This clears the interrupt (configurable on S18) */
+       if (mcp_read(mcp, MCP_GPIO, &gpio) < 0) {
+               mutex_unlock(&mcp->lock);
+               return IRQ_HANDLED;
+       }
+       gpio_orig = mcp->cache[MCP_GPIO];
+       mcp->cache[MCP_GPIO] = gpio;
        mutex_unlock(&mcp->lock);
 
+       if (mcp->cache[MCP_INTF] == 0) {
+               /* There is no interrupt pending */
+               return IRQ_HANDLED;
+       }
+
+       dev_dbg(mcp->chip.parent,
+               "intcap 0x%04X intf 0x%04X gpio_orig 0x%04X gpio 0x%04X\n",
+               intcap, intf, gpio_orig, gpio);
 
        for (i = 0; i < mcp->chip.ngpio; i++) {
-               if ((BIT(i) & mcp->cache[MCP_INTF]) &&
-                   ((BIT(i) & intcap & mcp->irq_rise) ||
-                    (mcp->irq_fall & ~intcap & BIT(i)) ||
-                    (BIT(i) & mcp->cache[MCP_INTCON]))) {
+               /* We must check all of the inputs on the chip,
+                * otherwise we may not notice a change on >=2 pins.
+                *
+                * On at least the mcp23s17, INTCAP is only updated
+                * one byte at a time (INTCAPA and INTCAPB are
+                * not written to at the same time - only on a per-bank
+                * basis).
+                *
+                * INTF only contains the single bit that caused the
+                * interrupt per-bank.  On the mcp23s17, there is
+                * INTFA and INTFB.  If two pins are changed on the A
+                * side at the same time, INTF will only have one bit
+                * set.  If one pin on the A side and one pin on the B
+                * side are changed at the same time, INTF will have
+                * two bits set.  Thus, INTF can't be the only check
+                * to see if the input has changed.
+                */
+
+               intf_set = BIT(i) & mcp->cache[MCP_INTF];
+               if (i < 8 && intf_set)
+                       intcap_mask = 0x00FF;
+               else if (i >= 8 && intf_set)
+                       intcap_mask = 0xFF00;
+               else
+                       intcap_mask = 0x00;
+
+               intcap_changed = (intcap_mask &
+                       (BIT(i) & mcp->cache[MCP_INTCAP])) !=
+                       (intcap_mask & (BIT(i) & gpio_orig));
+               gpio_set = BIT(i) & mcp->cache[MCP_GPIO];
+               gpio_bit_changed = (BIT(i) & gpio_orig) !=
+                       (BIT(i) & mcp->cache[MCP_GPIO]);
+               defval_changed = (BIT(i) & mcp->cache[MCP_INTCON]) &&
+                       ((BIT(i) & mcp->cache[MCP_GPIO]) !=
+                       (BIT(i) & mcp->cache[MCP_DEFVAL]));
+
+               if (((gpio_bit_changed || intcap_changed) &&
+                       (BIT(i) & mcp->irq_rise) && gpio_set) ||
+                   ((gpio_bit_changed || intcap_changed) &&
+                       (BIT(i) & mcp->irq_fall) && !gpio_set) ||
+                   defval_changed) {
                        child_irq = irq_find_mapping(mcp->chip.irqdomain, i);
                        handle_nested_irq(child_irq);
                }
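
Note: stripped of the register plumbing, the per-pin decision the comment describes is: did the pin change (per cached vs. fresh GPIO state, or per INTCAP), does the edge direction match the requested rise/fall mask, or does a DEFVAL comparison interrupt apply. A simplified sketch, ignoring the per-bank INTCAP masking:

    #include <stdbool.h>
    #include <stdint.h>

    static bool pin_should_fire(unsigned int i, uint16_t gpio_old,
                                uint16_t gpio_new, uint16_t irq_rise,
                                uint16_t irq_fall, uint16_t intcon,
                                uint16_t defval)
    {
            uint16_t bit = 1u << i;
            bool changed = (gpio_old ^ gpio_new) & bit;     /* level changed */
            bool high    = gpio_new & bit;                  /* current level */
            bool defval_hit = (intcon & bit) &&             /* compare-mode irq */
                              ((gpio_new ^ defval) & bit);

            return (changed && high && (irq_rise & bit)) ||
                   (changed && !high && (irq_fall & bit)) ||
                   defval_hit;
    }
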
index 06dac72..d993386 100644
@@ -197,7 +197,7 @@ static ssize_t gpio_mockup_event_write(struct file *file,
        struct seq_file *sfile;
        struct gpio_desc *desc;
        struct gpio_chip *gc;
-       int status, val;
+       int val;
        char buf;
 
        sfile = file->private_data;
@@ -206,9 +206,8 @@ static ssize_t gpio_mockup_event_write(struct file *file,
        chip = priv->chip;
        gc = &chip->gc;
 
-       status = copy_from_user(&buf, usr_buf, 1);
-       if (status)
-               return status;
+       if (copy_from_user(&buf, usr_buf, 1))
+               return -EFAULT;
 
        if (buf == '0')
                val = 0;
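
Note: the mockup fix rests on copy_from_user()'s contract: it returns the number of bytes left uncopied, not an errno, so returning its value directly hands userspace a positive byte count. The idiom is to collapse any nonzero remainder to -EFAULT:

    if (copy_from_user(&buf, usr_buf, 1))
            return -EFAULT;         /* nonzero = some bytes not copied */
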
index 40a8881..f1c6ec1 100644
@@ -42,9 +42,7 @@ struct xgene_gpio {
        struct gpio_chip        chip;
        void __iomem            *base;
        spinlock_t              lock;
-#ifdef CONFIG_PM
        u32                     set_dr_val[XGENE_MAX_GPIO_BANKS];
-#endif
 };
 
 static int xgene_gpio_get(struct gpio_chip *gc, unsigned int offset)
@@ -138,8 +136,7 @@ static int xgene_gpio_dir_out(struct gpio_chip *gc,
        return 0;
 }
 
-#ifdef CONFIG_PM
-static int xgene_gpio_suspend(struct device *dev)
+static __maybe_unused int xgene_gpio_suspend(struct device *dev)
 {
        struct xgene_gpio *gpio = dev_get_drvdata(dev);
        unsigned long bank_offset;
@@ -152,7 +149,7 @@ static int xgene_gpio_suspend(struct device *dev)
        return 0;
 }
 
-static int xgene_gpio_resume(struct device *dev)
+static __maybe_unused int xgene_gpio_resume(struct device *dev)
 {
        struct xgene_gpio *gpio = dev_get_drvdata(dev);
        unsigned long bank_offset;
@@ -166,10 +163,6 @@ static int xgene_gpio_resume(struct device *dev)
 }
 
 static SIMPLE_DEV_PM_OPS(xgene_gpio_pm, xgene_gpio_suspend, xgene_gpio_resume);
-#define XGENE_GPIO_PM_OPS      (&xgene_gpio_pm)
-#else
-#define XGENE_GPIO_PM_OPS      NULL
-#endif
 
 static int xgene_gpio_probe(struct platform_device *pdev)
 {
@@ -241,7 +234,7 @@ static struct platform_driver xgene_gpio_driver = {
                .name = "xgene-gpio",
                .of_match_table = xgene_gpio_of_match,
                .acpi_match_table = ACPI_PTR(xgene_gpio_acpi_match),
-               .pm     = XGENE_GPIO_PM_OPS,
+               .pm     = &xgene_gpio_pm,
        },
        .probe = xgene_gpio_probe,
 };
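
Note: the xgene conversion is the standard modern pattern: drop the CONFIG_PM #ifdefs, mark the callbacks __maybe_unused so the compiler can discard them when unreferenced, and point .pm at the SIMPLE_DEV_PM_OPS table unconditionally (its sleep hooks compile away when CONFIG_PM_SLEEP is off). Sketch with a hypothetical driver:

    static __maybe_unused int foo_suspend(struct device *dev)
    {
            /* save device state here */
            return 0;
    }

    static __maybe_unused int foo_resume(struct device *dev)
    {
            /* restore device state here */
            return 0;
    }

    static SIMPLE_DEV_PM_OPS(foo_pm, foo_suspend, foo_resume);

    /* in the driver definition: .driver = { ..., .pm = &foo_pm, }, */
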
index 8363cb5..8a08e81 100644
@@ -3,6 +3,4 @@
 # of AMDSOC/AMDGPU drm driver.
 # It provides the HW control for ACP related functionalities.
 
-subdir-ccflags-y += -I$(AMDACPPATH)/ -I$(AMDACPPATH)/include
-
 AMD_ACP_FILES := $(AMDACPPATH)/acp_hw.o
index d2d0f60..99424cb 100644
@@ -240,6 +240,8 @@ free_partial_kdata:
        for (; i >= 0; i--)
                drm_free_large(p->chunks[i].kdata);
        kfree(p->chunks);
+       p->chunks = NULL;
+       p->nchunks = 0;
 put_ctx:
        amdgpu_ctx_put(p->ctx);
 free_chunk:
index 4120b35..de0cf33 100644
@@ -475,7 +475,7 @@ static int amdgpu_wb_init(struct amdgpu_device *adev)
        int r;
 
        if (adev->wb.wb_obj == NULL) {
-               r = amdgpu_bo_create_kernel(adev, AMDGPU_MAX_WB * 4,
+               r = amdgpu_bo_create_kernel(adev, AMDGPU_MAX_WB * sizeof(uint32_t),
                                            PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
                                            &adev->wb.wb_obj, &adev->wb.gpu_addr,
                                            (void **)&adev->wb.wb);
@@ -488,7 +488,7 @@ static int amdgpu_wb_init(struct amdgpu_device *adev)
                memset(&adev->wb.used, 0, sizeof(adev->wb.used));
 
                /* clear wb memory */
-               memset((char *)adev->wb.wb, 0, AMDGPU_GPU_PAGE_SIZE);
+               memset((char *)adev->wb.wb, 0, AMDGPU_MAX_WB * sizeof(uint32_t));
        }
 
        return 0;
@@ -2590,7 +2590,7 @@ static ssize_t amdgpu_debugfs_regs_read(struct file *f, char __user *buf,
                use_bank = 0;
        }
 
-       *pos &= 0x3FFFF;
+       *pos &= (1UL << 22) - 1;
 
        if (use_bank) {
                if ((sh_bank != 0xFFFFFFFF && sh_bank >= adev->gfx.config.max_sh_per_se) ||
@@ -2666,7 +2666,7 @@ static ssize_t amdgpu_debugfs_regs_write(struct file *f, const char __user *buf,
                use_bank = 0;
        }
 
-       *pos &= 0x3FFFF;
+       *pos &= (1UL << 22) - 1;
 
        if (use_bank) {
                if ((sh_bank != 0xFFFFFFFF && sh_bank >= adev->gfx.config.max_sh_per_se) ||
index f7adbac..b76cd69 100644
@@ -421,6 +421,7 @@ static const struct pci_device_id pciidlist[] = {
        {0x1002, 0x6985, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12},
        {0x1002, 0x6986, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12},
        {0x1002, 0x6987, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12},
+       {0x1002, 0x6995, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12},
        {0x1002, 0x699F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12},
 
        {0, 0, 0}
index f55e45b..c5dec21 100644
@@ -3464,6 +3464,16 @@ static void si_apply_state_adjust_rules(struct amdgpu_device *adev,
                    (adev->pdev->device == 0x6667)) {
                        max_sclk = 75000;
                }
+       } else if (adev->asic_type == CHIP_OLAND) {
+               if ((adev->pdev->revision == 0xC7) ||
+                   (adev->pdev->revision == 0x80) ||
+                   (adev->pdev->revision == 0x81) ||
+                   (adev->pdev->revision == 0x83) ||
+                   (adev->pdev->revision == 0x87) ||
+                   (adev->pdev->device == 0x6604) ||
+                   (adev->pdev->device == 0x6605)) {
+                       max_sclk = 75000;
+               }
        }
 
        if (rps->vce_active) {
index 50bdb24..4a785d6 100644
@@ -1051,7 +1051,7 @@ static int vi_common_early_init(void *handle)
                /* rev0 hardware requires workarounds to support PG */
                adev->pg_flags = 0;
                if (adev->rev_id != 0x00) {
-                       adev->pg_flags |= AMD_PG_SUPPORT_GFX_PG |
+                       adev->pg_flags |=
                                AMD_PG_SUPPORT_GFX_SMG |
                                AMD_PG_SUPPORT_GFX_PIPELINE |
                                AMD_PG_SUPPORT_CP |
index 8cf71f3..261b828 100644
@@ -178,7 +178,7 @@ int smu7_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate)
        if (bgate) {
                cgs_set_powergating_state(hwmgr->device,
                                                AMD_IP_BLOCK_TYPE_VCE,
-                                               AMD_PG_STATE_UNGATE);
+                                               AMD_PG_STATE_GATE);
                cgs_set_clockgating_state(hwmgr->device,
                                AMD_IP_BLOCK_TYPE_VCE,
                                AMD_CG_STATE_GATE);
index 08e6a71..294b536 100644
@@ -63,8 +63,7 @@ static void malidp_crtc_enable(struct drm_crtc *crtc)
 
        clk_prepare_enable(hwdev->pxlclk);
 
-       /* mclk needs to be set to the same or higher rate than pxlclk */
-       clk_set_rate(hwdev->mclk, crtc->state->adjusted_mode.crtc_clock * 1000);
+       /* We rely on firmware to set mclk to a sensible level. */
        clk_set_rate(hwdev->pxlclk, crtc->state->adjusted_mode.crtc_clock * 1000);
 
        hwdev->modeset(hwdev, &vm);
index 488aedf..9f55130 100644
@@ -83,7 +83,7 @@ static const struct malidp_layer malidp550_layers[] = {
        { DE_VIDEO1, MALIDP550_DE_LV1_BASE, MALIDP550_DE_LV1_PTR_BASE, MALIDP_DE_LV_STRIDE0 },
        { DE_GRAPHICS1, MALIDP550_DE_LG_BASE, MALIDP550_DE_LG_PTR_BASE, MALIDP_DE_LG_STRIDE },
        { DE_VIDEO2, MALIDP550_DE_LV2_BASE, MALIDP550_DE_LV2_PTR_BASE, MALIDP_DE_LV_STRIDE0 },
-       { DE_SMART, MALIDP550_DE_LS_BASE, MALIDP550_DE_LS_PTR_BASE, 0 },
+       { DE_SMART, MALIDP550_DE_LS_BASE, MALIDP550_DE_LS_PTR_BASE, MALIDP550_DE_LS_R1_STRIDE },
 };
 
 #define MALIDP_DE_DEFAULT_PREFETCH_START       5
index 414aada..d5aec08 100644
@@ -37,6 +37,8 @@
 #define   LAYER_V_VAL(x)               (((x) & 0x1fff) << 16)
 #define MALIDP_LAYER_COMP_SIZE         0x010
 #define MALIDP_LAYER_OFFSET            0x014
+#define MALIDP550_LS_ENABLE            0x01c
+#define MALIDP550_LS_R1_IN_SIZE                0x020
 
 /*
  * This 4-entry look-up-table is used to determine the full 8-bit alpha value
@@ -242,6 +244,11 @@ static void malidp_de_plane_update(struct drm_plane *plane,
                        LAYER_V_VAL(plane->state->crtc_y),
                        mp->layer->base + MALIDP_LAYER_OFFSET);
 
+       if (mp->layer->id == DE_SMART)
+               malidp_hw_write(mp->hwdev,
+                               LAYER_H_VAL(src_w) | LAYER_V_VAL(src_h),
+                               mp->layer->base + MALIDP550_LS_R1_IN_SIZE);
+
        /* first clear the rotation bits */
        val = malidp_hw_read(mp->hwdev, mp->layer->base + MALIDP_LAYER_CONTROL);
        val &= ~LAYER_ROT_MASK;
@@ -330,9 +337,16 @@ int malidp_de_planes_init(struct drm_device *drm)
                plane->hwdev = malidp->dev;
                plane->layer = &map->layers[i];
 
-               /* Skip the features which the SMART layer doesn't have */
-               if (id == DE_SMART)
+               if (id == DE_SMART) {
+                       /*
+                        * Enable the first rectangle in the SMART layer so
+                        * that it can be used as a drm plane.
+                        */
+                       malidp_hw_write(malidp->dev, 1,
+                                       plane->layer->base + MALIDP550_LS_ENABLE);
+                       /* Skip the features which the SMART layer doesn't have. */
                        continue;
+               }
 
                drm_plane_create_rotation_property(&plane->base, DRM_ROTATE_0, flags);
                malidp_hw_write(malidp->dev, MALIDP_ALPHA_LUT,
index aff6d4a..b816067 100644
@@ -84,6 +84,7 @@
 /* Stride register offsets relative to Lx_BASE */
 #define MALIDP_DE_LG_STRIDE            0x18
 #define MALIDP_DE_LV_STRIDE0           0x18
+#define MALIDP550_DE_LS_R1_STRIDE      0x28
 
 /* macros to set values into registers */
 #define MALIDP_DE_H_FRONTPORCH(x)      (((x) & 0xfff) << 0)
index f6d4d97..324a688 100644
@@ -1260,9 +1260,9 @@ int drm_fb_helper_check_var(struct fb_var_screeninfo *var,
         * to KMS, hence fail if different settings are requested.
         */
        if (var->bits_per_pixel != fb->format->cpp[0] * 8 ||
-           var->xres != fb->width || var->yres != fb->height ||
-           var->xres_virtual != fb->width || var->yres_virtual != fb->height) {
-               DRM_DEBUG("fb userspace requested width/height/bpp different than current fb "
+           var->xres > fb->width || var->yres > fb->height ||
+           var->xres_virtual > fb->width || var->yres_virtual > fb->height) {
+               DRM_DEBUG("fb requested width/height/bpp can't fit in current fb "
                          "request %dx%d-%d (virtual %dx%d) > %dx%d-%d\n",
                          var->xres, var->yres, var->bits_per_pixel,
                          var->xres_virtual, var->yres_virtual,
index 0fd6f7a..c0e8d33 100644
@@ -68,6 +68,8 @@ struct decon_context {
        unsigned long                   flags;
        unsigned long                   out_type;
        int                             first_win;
+       spinlock_t                      vblank_lock;
+       u32                             frame_id;
 };
 
 static const uint32_t decon_formats[] = {
@@ -103,7 +105,7 @@ static int decon_enable_vblank(struct exynos_drm_crtc *crtc)
                if (ctx->out_type & IFTYPE_I80)
                        val |= VIDINTCON0_FRAMEDONE;
                else
-                       val |= VIDINTCON0_INTFRMEN;
+                       val |= VIDINTCON0_INTFRMEN | VIDINTCON0_FRAMESEL_FP;
 
                writel(val, ctx->addr + DECON_VIDINTCON0);
        }
@@ -122,14 +124,56 @@ static void decon_disable_vblank(struct exynos_drm_crtc *crtc)
                writel(0, ctx->addr + DECON_VIDINTCON0);
 }
 
+/* return number of starts/ends of frame transmissions since reset */
+static u32 decon_get_frame_count(struct decon_context *ctx, bool end)
+{
+       u32 frm, pfrm, status, cnt = 2;
+
+       /* To get a consistent result, repeat the read until the frame id is
+        * stable. Usually the loop will be executed once; in rare cases, when
+        * the loop runs at frame change time, a 2nd pass will be needed.
+        */
+       frm = readl(ctx->addr + DECON_CRFMID);
+       do {
+               status = readl(ctx->addr + DECON_VIDCON1);
+               pfrm = frm;
+               frm = readl(ctx->addr + DECON_CRFMID);
+       } while (frm != pfrm && --cnt);
+
+       /* CRFMID is incremented on BPORCH in case of I80 and on VSYNC in case
+        * of RGB; this should be taken into account.
+        */
+       if (!frm)
+               return 0;
+
+       switch (status & (VIDCON1_VSTATUS_MASK | VIDCON1_I80_ACTIVE)) {
+       case VIDCON1_VSTATUS_VS:
+               if (!(ctx->out_type & IFTYPE_I80))
+                       --frm;
+               break;
+       case VIDCON1_VSTATUS_BP:
+               --frm;
+               break;
+       case VIDCON1_I80_ACTIVE:
+       case VIDCON1_VSTATUS_AC:
+               if (end)
+                       --frm;
+               break;
+       default:
+               break;
+       }
+
+       return frm;
+}
+
 static void decon_setup_trigger(struct decon_context *ctx)
 {
        if (!(ctx->out_type & (IFTYPE_I80 | I80_HW_TRG)))
                return;
 
        if (!(ctx->out_type & I80_HW_TRG)) {
-               writel(TRIGCON_TE_AUTO_MASK | TRIGCON_SWTRIGEN
-                      TRIGCON_TE_AUTO_MASK | TRIGCON_SWTRIGEN,
+               writel(TRIGCON_TRIGEN_PER_F | TRIGCON_TRIGEN_F |
+                      TRIGCON_TE_AUTO_MASK | TRIGCON_SWTRIGEN,
                       ctx->addr + DECON_TRIGCON);
                return;
        }
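
Note: decon_get_frame_count() pairs the frame id with the status register using a read-until-stable loop: read the counter, read status, re-read the counter, and accept the pair only when both counter reads match, bounded to two passes. The idiom in isolation, with a hypothetical MMIO accessor:

    #include <stdint.h>

    uint32_t mmio_read(uintptr_t addr);     /* hypothetical accessor */

    static uint32_t stable_pair_read(uintptr_t cnt_reg, uintptr_t status_reg,
                                     uint32_t *status)
    {
            uint32_t cnt = mmio_read(cnt_reg), prev;
            int tries = 2;

            do {
                    *status = mmio_read(status_reg);
                    prev = cnt;
                    cnt = mmio_read(cnt_reg);       /* re-read to confirm */
            } while (cnt != prev && --tries);

            return cnt;
    }
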
@@ -365,11 +409,14 @@ static void decon_disable_plane(struct exynos_drm_crtc *crtc,
 static void decon_atomic_flush(struct exynos_drm_crtc *crtc)
 {
        struct decon_context *ctx = crtc->ctx;
+       unsigned long flags;
        int i;
 
        if (test_bit(BIT_SUSPENDED, &ctx->flags))
                return;
 
+       spin_lock_irqsave(&ctx->vblank_lock, flags);
+
        for (i = ctx->first_win; i < WINDOWS_NR; i++)
                decon_shadow_protect_win(ctx, i, false);
 
@@ -378,11 +425,18 @@ static void decon_atomic_flush(struct exynos_drm_crtc *crtc)
 
        if (ctx->out_type & IFTYPE_I80)
                set_bit(BIT_WIN_UPDATED, &ctx->flags);
+
+       ctx->frame_id = decon_get_frame_count(ctx, true);
+
+       exynos_crtc_handle_event(crtc);
+
+       spin_unlock_irqrestore(&ctx->vblank_lock, flags);
 }
 
 static void decon_swreset(struct decon_context *ctx)
 {
        unsigned int tries;
+       unsigned long flags;
 
        writel(0, ctx->addr + DECON_VIDCON0);
        for (tries = 2000; tries; --tries) {
@@ -400,6 +454,10 @@ static void decon_swreset(struct decon_context *ctx)
 
        WARN(tries == 0, "failed to software reset DECON\n");
 
+       spin_lock_irqsave(&ctx->vblank_lock, flags);
+       ctx->frame_id = 0;
+       spin_unlock_irqrestore(&ctx->vblank_lock, flags);
+
        if (!(ctx->out_type & IFTYPE_HDMI))
                return;
 
@@ -578,6 +636,24 @@ static const struct component_ops decon_component_ops = {
        .unbind = decon_unbind,
 };
 
+static void decon_handle_vblank(struct decon_context *ctx)
+{
+       u32 frm;
+
+       spin_lock(&ctx->vblank_lock);
+
+       frm = decon_get_frame_count(ctx, true);
+
+       if (frm != ctx->frame_id) {
+               /* handle only if incremented, take care of wrap-around */
+               if ((s32)(frm - ctx->frame_id) > 0)
+                       drm_crtc_handle_vblank(&ctx->crtc->base);
+               ctx->frame_id = frm;
+       }
+
+       spin_unlock(&ctx->vblank_lock);
+}
+
 static irqreturn_t decon_irq_handler(int irq, void *dev_id)
 {
        struct decon_context *ctx = dev_id;
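
Note: the "(s32)(frm - ctx->frame_id) > 0" test in decon_handle_vblank() is the classic wrap-safe ordering check for free-running counters, the same trick behind time_after(): subtract in unsigned arithmetic and reinterpret the difference as signed. It stays correct across 32-bit wrap as long as the real distance is under 2^31:

    #include <stdbool.h>
    #include <stdint.h>

    static bool counter_after(uint32_t a, uint32_t b)
    {
            return (int32_t)(a - b) > 0;    /* a logically ahead of b */
    }
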
@@ -598,7 +674,7 @@ static irqreturn_t decon_irq_handler(int irq, void *dev_id)
                            (VIDOUT_INTERLACE_EN_F | VIDOUT_INTERLACE_FIELD_F))
                                return IRQ_HANDLED;
                }
-               drm_crtc_handle_vblank(&ctx->crtc->base);
+               decon_handle_vblank(ctx);
        }
 
 out:
@@ -671,6 +747,7 @@ static int exynos5433_decon_probe(struct platform_device *pdev)
        __set_bit(BIT_SUSPENDED, &ctx->flags);
        ctx->dev = dev;
        ctx->out_type = (unsigned long)of_device_get_match_data(dev);
+       spin_lock_init(&ctx->vblank_lock);
 
        if (ctx->out_type & IFTYPE_HDMI) {
                ctx->first_win = 1;
@@ -678,7 +755,7 @@ static int exynos5433_decon_probe(struct platform_device *pdev)
                ctx->out_type |= IFTYPE_I80;
        }
 
-       if (ctx->out_type | I80_HW_TRG) {
+       if (ctx->out_type & I80_HW_TRG) {
                ctx->sysreg = syscon_regmap_lookup_by_phandle(dev->of_node,
                                                        "samsung,disp-sysreg");
                if (IS_ERR(ctx->sysreg)) {
index f9ab19e..4881180 100644
@@ -526,6 +526,7 @@ static void decon_atomic_flush(struct exynos_drm_crtc *crtc)
 
        for (i = 0; i < WINDOWS_NR; i++)
                decon_shadow_protect_win(ctx, i, false);
+       exynos_crtc_handle_event(crtc);
 }
 
 static void decon_init(struct decon_context *ctx)
index 5367b66..c65f450 100644
@@ -85,16 +85,28 @@ static void exynos_crtc_atomic_flush(struct drm_crtc *crtc,
                                     struct drm_crtc_state *old_crtc_state)
 {
        struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
-       struct drm_pending_vblank_event *event;
-       unsigned long flags;
 
        if (exynos_crtc->ops->atomic_flush)
                exynos_crtc->ops->atomic_flush(exynos_crtc);
+}
+
+static const struct drm_crtc_helper_funcs exynos_crtc_helper_funcs = {
+       .enable         = exynos_drm_crtc_enable,
+       .disable        = exynos_drm_crtc_disable,
+       .mode_set_nofb  = exynos_drm_crtc_mode_set_nofb,
+       .atomic_check   = exynos_crtc_atomic_check,
+       .atomic_begin   = exynos_crtc_atomic_begin,
+       .atomic_flush   = exynos_crtc_atomic_flush,
+};
+
+void exynos_crtc_handle_event(struct exynos_drm_crtc *exynos_crtc)
+{
+       struct drm_crtc *crtc = &exynos_crtc->base;
+       struct drm_pending_vblank_event *event = crtc->state->event;
+       unsigned long flags;
 
-       event = crtc->state->event;
        if (event) {
                crtc->state->event = NULL;
-
                spin_lock_irqsave(&crtc->dev->event_lock, flags);
                if (drm_crtc_vblank_get(crtc) == 0)
                        drm_crtc_arm_vblank_event(crtc, event);
@@ -105,15 +117,6 @@ static void exynos_crtc_atomic_flush(struct drm_crtc *crtc,
 
 }
 
-static const struct drm_crtc_helper_funcs exynos_crtc_helper_funcs = {
-       .enable         = exynos_drm_crtc_enable,
-       .disable        = exynos_drm_crtc_disable,
-       .mode_set_nofb  = exynos_drm_crtc_mode_set_nofb,
-       .atomic_check   = exynos_crtc_atomic_check,
-       .atomic_begin   = exynos_crtc_atomic_begin,
-       .atomic_flush   = exynos_crtc_atomic_flush,
-};
-
 static void exynos_drm_crtc_destroy(struct drm_crtc *crtc)
 {
        struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
index 6a581a8..abd5d6c 100644
@@ -40,4 +40,6 @@ int exynos_drm_crtc_get_pipe_from_type(struct drm_device *drm_dev,
  */
 void exynos_drm_crtc_te_handler(struct drm_crtc *crtc);
 
+void exynos_crtc_handle_event(struct exynos_drm_crtc *exynos_crtc);
+
 #endif
index 812e2ec..d7ef263 100644
@@ -86,7 +86,7 @@
 #define DSIM_SYNC_INFORM               (1 << 27)
 #define DSIM_EOT_DISABLE               (1 << 28)
 #define DSIM_MFLUSH_VS                 (1 << 29)
-/* This flag is valid only for exynos3250/3472/4415/5260/5430 */
+/* This flag is valid only for exynos3250/3472/5260/5430 */
 #define DSIM_CLKLANE_STOP              (1 << 30)
 
 /* DSIM_ESCMODE */
@@ -473,17 +473,6 @@ static const struct exynos_dsi_driver_data exynos4_dsi_driver_data = {
        .reg_values = reg_values,
 };
 
-static const struct exynos_dsi_driver_data exynos4415_dsi_driver_data = {
-       .reg_ofs = exynos_reg_ofs,
-       .plltmr_reg = 0x58,
-       .has_clklane_stop = 1,
-       .num_clks = 2,
-       .max_freq = 1000,
-       .wait_for_reset = 1,
-       .num_bits_resol = 11,
-       .reg_values = reg_values,
-};
-
 static const struct exynos_dsi_driver_data exynos5_dsi_driver_data = {
        .reg_ofs = exynos_reg_ofs,
        .plltmr_reg = 0x58,
@@ -521,8 +510,6 @@ static const struct of_device_id exynos_dsi_of_match[] = {
          .data = &exynos3_dsi_driver_data },
        { .compatible = "samsung,exynos4210-mipi-dsi",
          .data = &exynos4_dsi_driver_data },
-       { .compatible = "samsung,exynos4415-mipi-dsi",
-         .data = &exynos4415_dsi_driver_data },
        { .compatible = "samsung,exynos5410-mipi-dsi",
          .data = &exynos5_dsi_driver_data },
        { .compatible = "samsung,exynos5422-mipi-dsi",
@@ -979,7 +966,7 @@ static void exynos_dsi_send_to_fifo(struct exynos_dsi *dsi,
        bool first = !xfer->tx_done;
        u32 reg;
 
-       dev_dbg(dev, "< xfer %p: tx len %u, done %u, rx len %u, done %u\n",
+       dev_dbg(dev, "< xfer %pK: tx len %u, done %u, rx len %u, done %u\n",
                xfer, length, xfer->tx_done, xfer->rx_len, xfer->rx_done);
 
        if (length > DSI_TX_FIFO_SIZE)
@@ -1177,7 +1164,7 @@ static bool exynos_dsi_transfer_finish(struct exynos_dsi *dsi)
        spin_unlock_irqrestore(&dsi->transfer_lock, flags);
 
        dev_dbg(dsi->dev,
-               "> xfer %p, tx_len %zu, tx_done %u, rx_len %u, rx_done %u\n",
+               "> xfer %pK, tx_len %zu, tx_done %u, rx_len %u, rx_done %u\n",
                xfer, xfer->packet.payload_length, xfer->tx_done, xfer->rx_len,
                xfer->rx_done);
 
@@ -1348,9 +1335,12 @@ static int exynos_dsi_register_te_irq(struct exynos_dsi *dsi)
        int te_gpio_irq;
 
        dsi->te_gpio = of_get_named_gpio(dsi->panel_node, "te-gpios", 0);
+       if (dsi->te_gpio == -ENOENT)
+               return 0;
+
        if (!gpio_is_valid(dsi->te_gpio)) {
-               dev_err(dsi->dev, "no te-gpios specified\n");
                ret = dsi->te_gpio;
+               dev_err(dsi->dev, "cannot get te-gpios, %d\n", ret);
                goto out;
        }
 
index 9587157..5b18b5c 100644
@@ -1695,7 +1695,7 @@ static int fimc_probe(struct platform_device *pdev)
                goto err_put_clk;
        }
 
-       DRM_DEBUG_KMS("id[%d]ippdrv[%p]\n", ctx->id, ippdrv);
+       DRM_DEBUG_KMS("id[%d]ippdrv[%pK]\n", ctx->id, ippdrv);
 
        spin_lock_init(&ctx->lock);
        platform_set_drvdata(pdev, ctx);
index a9fa444..3f04d72 100644
 #define TRIGCON                                0x1A4
 #define TRGMODE_ENABLE                 (1 << 0)
 #define SWTRGCMD_ENABLE                        (1 << 1)
-/* Exynos3250, 3472, 4415, 5260 5410, 5420 and 5422 only supported. */
+/* Exynos3250, 3472, 5260, 5410, 5420 and 5422 only supported. */
 #define HWTRGEN_ENABLE                 (1 << 3)
 #define HWTRGMASK_ENABLE               (1 << 4)
-/* Exynos3250, 3472, 4415, 5260, 5420 and 5422 only supported. */
+/* Exynos3250, 3472, 5260, 5420 and 5422 only supported. */
 #define HWTRIGEN_PER_ENABLE            (1 << 31)
 
 /* display mode change control register except exynos4 */
@@ -138,18 +138,6 @@ static struct fimd_driver_data exynos4_fimd_driver_data = {
        .has_vtsel = 1,
 };
 
-static struct fimd_driver_data exynos4415_fimd_driver_data = {
-       .timing_base = 0x20000,
-       .lcdblk_offset = 0x210,
-       .lcdblk_vt_shift = 10,
-       .lcdblk_bypass_shift = 1,
-       .trg_type = I80_HW_TRG,
-       .has_shadowcon = 1,
-       .has_vidoutcon = 1,
-       .has_vtsel = 1,
-       .has_trigger_per_te = 1,
-};
-
 static struct fimd_driver_data exynos5_fimd_driver_data = {
        .timing_base = 0x20000,
        .lcdblk_offset = 0x214,
@@ -210,8 +198,6 @@ static const struct of_device_id fimd_driver_dt_match[] = {
          .data = &exynos3_fimd_driver_data },
        { .compatible = "samsung,exynos4210-fimd",
          .data = &exynos4_fimd_driver_data },
-       { .compatible = "samsung,exynos4415-fimd",
-         .data = &exynos4415_fimd_driver_data },
        { .compatible = "samsung,exynos5250-fimd",
          .data = &exynos5_fimd_driver_data },
        { .compatible = "samsung,exynos5420-fimd",
@@ -257,7 +243,7 @@ static int fimd_enable_vblank(struct exynos_drm_crtc *crtc)
                        val |= VIDINTCON0_INT_FRAME;
 
                        val &= ~VIDINTCON0_FRAMESEL0_MASK;
-                       val |= VIDINTCON0_FRAMESEL0_VSYNC;
+                       val |= VIDINTCON0_FRAMESEL0_FRONTPORCH;
                        val &= ~VIDINTCON0_FRAMESEL1_MASK;
                        val |= VIDINTCON0_FRAMESEL1_NONE;
                }
@@ -723,6 +709,8 @@ static void fimd_atomic_flush(struct exynos_drm_crtc *crtc)
 
        for (i = 0; i < WINDOWS_NR; i++)
                fimd_shadow_protect_win(ctx, i, false);
+
+       exynos_crtc_handle_event(crtc);
 }
 
 static void fimd_update_plane(struct exynos_drm_crtc *crtc,
index 4c28f7f..55a1579 100644
@@ -218,7 +218,7 @@ static struct exynos_drm_gem *exynos_drm_gem_init(struct drm_device *dev,
                return ERR_PTR(ret);
        }
 
-       DRM_DEBUG_KMS("created file object = %p\n", obj->filp);
+       DRM_DEBUG_KMS("created file object = %pK\n", obj->filp);
 
        return exynos_gem;
 }
index bef5798..0506b2b 100644
@@ -1723,7 +1723,7 @@ static int gsc_probe(struct platform_device *pdev)
                return ret;
        }
 
-       DRM_DEBUG_KMS("id[%d]ippdrv[%p]\n", ctx->id, ippdrv);
+       DRM_DEBUG_KMS("id[%d]ippdrv[%pK]\n", ctx->id, ippdrv);
 
        mutex_init(&ctx->lock);
        platform_set_drvdata(pdev, ctx);
index 9c84ee7..3edda18 100644
@@ -208,7 +208,7 @@ static struct exynos_drm_ippdrv *ipp_find_drv_by_handle(u32 prop_id)
         * e.g PAUSE state, queue buf, command control.
         */
        list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
-               DRM_DEBUG_KMS("count[%d]ippdrv[%p]\n", count++, ippdrv);
+               DRM_DEBUG_KMS("count[%d]ippdrv[%pK]\n", count++, ippdrv);
 
                mutex_lock(&ippdrv->cmd_lock);
                list_for_each_entry(c_node, &ippdrv->cmd_list, list) {
@@ -388,7 +388,7 @@ int exynos_drm_ipp_set_property(struct drm_device *drm_dev, void *data,
        }
        property->prop_id = ret;
 
-       DRM_DEBUG_KMS("created prop_id[%d]cmd[%d]ippdrv[%p]\n",
+       DRM_DEBUG_KMS("created prop_id[%d]cmd[%d]ippdrv[%pK]\n",
                property->prop_id, property->cmd, ippdrv);
 
        /* stored property information and ippdrv in private data */
@@ -518,7 +518,7 @@ static int ipp_put_mem_node(struct drm_device *drm_dev,
 {
        int i;
 
-       DRM_DEBUG_KMS("node[%p]\n", m_node);
+       DRM_DEBUG_KMS("node[%pK]\n", m_node);
 
        if (!m_node) {
                DRM_ERROR("invalid dequeue node.\n");
@@ -562,7 +562,7 @@ static struct drm_exynos_ipp_mem_node
        m_node->buf_id = qbuf->buf_id;
        INIT_LIST_HEAD(&m_node->list);
 
-       DRM_DEBUG_KMS("m_node[%p]ops_id[%d]\n", m_node, qbuf->ops_id);
+       DRM_DEBUG_KMS("m_node[%pK]ops_id[%d]\n", m_node, qbuf->ops_id);
        DRM_DEBUG_KMS("prop_id[%d]buf_id[%d]\n", qbuf->prop_id, m_node->buf_id);
 
        for_each_ipp_planar(i) {
@@ -659,7 +659,7 @@ static void ipp_put_event(struct drm_exynos_ipp_cmd_node *c_node,
 
        mutex_lock(&c_node->event_lock);
        list_for_each_entry_safe(e, te, &c_node->event_list, base.link) {
-               DRM_DEBUG_KMS("count[%d]e[%p]\n", count++, e);
+               DRM_DEBUG_KMS("count[%d]e[%pK]\n", count++, e);
 
                /*
                 * qbuf == NULL condition means all event deletion.
@@ -750,7 +750,7 @@ static struct drm_exynos_ipp_mem_node
 
        /* find memory node from memory list */
        list_for_each_entry(m_node, head, list) {
-               DRM_DEBUG_KMS("count[%d]m_node[%p]\n", count++, m_node);
+               DRM_DEBUG_KMS("count[%d]m_node[%pK]\n", count++, m_node);
 
                /* compare buffer id */
                if (m_node->buf_id == qbuf->buf_id)
@@ -767,7 +767,7 @@ static int ipp_set_mem_node(struct exynos_drm_ippdrv *ippdrv,
        struct exynos_drm_ipp_ops *ops = NULL;
        int ret = 0;
 
-       DRM_DEBUG_KMS("node[%p]\n", m_node);
+       DRM_DEBUG_KMS("node[%pK]\n", m_node);
 
        if (!m_node) {
                DRM_ERROR("invalid queue node.\n");
@@ -1232,7 +1232,7 @@ static int ipp_start_property(struct exynos_drm_ippdrv *ippdrv,
                        m_node = list_first_entry(head,
                                struct drm_exynos_ipp_mem_node, list);
 
-                       DRM_DEBUG_KMS("m_node[%p]\n", m_node);
+                       DRM_DEBUG_KMS("m_node[%pK]\n", m_node);
 
                        ret = ipp_set_mem_node(ippdrv, c_node, m_node);
                        if (ret) {
@@ -1601,7 +1601,7 @@ static int ipp_subdrv_probe(struct drm_device *drm_dev, struct device *dev)
                }
                ippdrv->prop_list.ipp_id = ret;
 
-               DRM_DEBUG_KMS("count[%d]ippdrv[%p]ipp_id[%d]\n",
+               DRM_DEBUG_KMS("count[%d]ippdrv[%pK]ipp_id[%d]\n",
                        count++, ippdrv, ret);
 
                /* store parent device for node */
@@ -1659,7 +1659,7 @@ static int ipp_subdrv_open(struct drm_device *drm_dev, struct device *dev,
 
        file_priv->ipp_dev = dev;
 
-       DRM_DEBUG_KMS("done priv[%p]\n", dev);
+       DRM_DEBUG_KMS("done priv[%pK]\n", dev);
 
        return 0;
 }
@@ -1676,7 +1676,7 @@ static void ipp_subdrv_close(struct drm_device *drm_dev, struct device *dev,
                mutex_lock(&ippdrv->cmd_lock);
                list_for_each_entry_safe(c_node, tc_node,
                        &ippdrv->cmd_list, list) {
-                       DRM_DEBUG_KMS("count[%d]ippdrv[%p]\n",
+                       DRM_DEBUG_KMS("count[%d]ippdrv[%pK]\n",
                                count++, ippdrv);
 
                        if (c_node->filp == file) {
index 6591e40..79282a8 100644
@@ -748,7 +748,7 @@ static int rotator_probe(struct platform_device *pdev)
                goto err_ippdrv_register;
        }
 
-       DRM_DEBUG_KMS("ippdrv[%p]\n", ippdrv);
+       DRM_DEBUG_KMS("ippdrv[%pK]\n", ippdrv);
 
        platform_set_drvdata(pdev, rot);
 
index 57fe514..5d9a62a 100644
@@ -170,6 +170,7 @@ static const struct exynos_drm_crtc_ops vidi_crtc_ops = {
        .enable_vblank = vidi_enable_vblank,
        .disable_vblank = vidi_disable_vblank,
        .update_plane = vidi_update_plane,
+       .atomic_flush = exynos_crtc_handle_event,
 };
 
 static void vidi_fake_vblank_timer(unsigned long arg)
index 72143ac..25edb63 100644
@@ -1012,6 +1012,7 @@ static void mixer_atomic_flush(struct exynos_drm_crtc *crtc)
                return;
 
        mixer_vsync_set_update(mixer_ctx, true);
+       exynos_crtc_handle_event(crtc);
 }
 
 static void mixer_enable(struct exynos_drm_crtc *crtc)
index 3b6caac..325618d 100644
@@ -242,7 +242,7 @@ static int alloc_resource(struct intel_vgpu *vgpu,
        const char *item;
 
        if (!param->low_gm_sz || !param->high_gm_sz || !param->fence_sz) {
-               gvt_err("Invalid vGPU creation params\n");
+               gvt_vgpu_err("Invalid vGPU creation params\n");
                return -EINVAL;
        }
 
@@ -285,9 +285,9 @@ static int alloc_resource(struct intel_vgpu *vgpu,
        return 0;
 
 no_enough_resource:
-       gvt_err("vgpu%d: fail to allocate resource %s\n", vgpu->id, item);
-       gvt_err("vgpu%d: request %luMB avail %luMB max %luMB taken %luMB\n",
-               vgpu->id, BYTES_TO_MB(request), BYTES_TO_MB(avail),
+       gvt_vgpu_err("fail to allocate resource %s\n", item);
+       gvt_vgpu_err("request %luMB avail %luMB max %luMB taken %luMB\n",
+               BYTES_TO_MB(request), BYTES_TO_MB(avail),
                BYTES_TO_MB(max), BYTES_TO_MB(taken));
        return -ENOSPC;
 }
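
Note: the gvt hunks in this series replace gvt_err() plus a hand-rolled "vgpu%d:" prefix with gvt_vgpu_err(), which is why several call sites below add a local "struct intel_vgpu *vgpu": the macro is expected to pick the vgpu up from the surrounding scope. A hedged sketch of such a macro (the real definition lives in the gvt debug header):

    /* Illustrative only: relies on a 'vgpu' variable being in scope. */
    #define gvt_vgpu_err(fmt, args...) \
            pr_err("gvt: vgpu %d: " fmt, vgpu->id, ##args)
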
index 7ae6e2b..2b92cc8 100644
@@ -817,6 +817,25 @@ static bool is_shadowed_mmio(unsigned int offset)
        return ret;
 }
 
+static inline bool is_force_nonpriv_mmio(unsigned int offset)
+{
+       return (offset >= 0x24d0 && offset < 0x2500);
+}
+
+static int force_nonpriv_reg_handler(struct parser_exec_state *s,
+                                    unsigned int offset, unsigned int index)
+{
+       struct intel_gvt *gvt = s->vgpu->gvt;
+       unsigned int data = cmd_val(s, index + 1);
+
+       if (!intel_gvt_in_force_nonpriv_whitelist(gvt, data)) {
+               gvt_err("Unexpected forcenonpriv 0x%x LRI write, value=0x%x\n",
+                       offset, data);
+               return -EINVAL;
+       }
+       return 0;
+}
+
 static int cmd_reg_handler(struct parser_exec_state *s,
        unsigned int offset, unsigned int index, char *cmd)
 {
@@ -824,23 +843,26 @@ static int cmd_reg_handler(struct parser_exec_state *s,
        struct intel_gvt *gvt = vgpu->gvt;
 
        if (offset + 4 > gvt->device_info.mmio_size) {
-               gvt_err("%s access to (%x) outside of MMIO range\n",
+               gvt_vgpu_err("%s access to (%x) outside of MMIO range\n",
                                cmd, offset);
                return -EINVAL;
        }
 
        if (!intel_gvt_mmio_is_cmd_access(gvt, offset)) {
-               gvt_err("vgpu%d: %s access to non-render register (%x)\n",
-                               s->vgpu->id, cmd, offset);
+               gvt_vgpu_err("%s access to non-render register (%x)\n",
+                               cmd, offset);
                return 0;
        }
 
        if (is_shadowed_mmio(offset)) {
-               gvt_err("vgpu%d: found access of shadowed MMIO %x\n",
-                               s->vgpu->id, offset);
+               gvt_vgpu_err("found access of shadowed MMIO %x\n", offset);
                return 0;
        }
 
+       if (is_force_nonpriv_mmio(offset) &&
+           force_nonpriv_reg_handler(s, offset, index))
+               return -EINVAL;
+
        if (offset == i915_mmio_reg_offset(DERRMR) ||
                offset == i915_mmio_reg_offset(FORCEWAKE_MT)) {
                /* Writing to HW VGT_PVINFO_PAGE offset will be discarded */
@@ -1008,7 +1030,7 @@ static int cmd_handler_pipe_control(struct parser_exec_state *s)
                        ret = cmd_reg_handler(s, 0x2358, 1, "pipe_ctrl");
                else if (post_sync == 1) {
                        /* check ggtt*/
-                       if ((cmd_val(s, 2) & (1 << 2))) {
+                       if ((cmd_val(s, 1) & PIPE_CONTROL_GLOBAL_GTT_IVB)) {
                                gma = cmd_val(s, 2) & GENMASK(31, 3);
                                if (gmadr_bytes == 8)
                                        gma |= (cmd_gma_hi(s, 3)) << 32;
@@ -1129,6 +1151,7 @@ static int skl_decode_mi_display_flip(struct parser_exec_state *s,
                struct mi_display_flip_command_info *info)
 {
        struct drm_i915_private *dev_priv = s->vgpu->gvt->dev_priv;
+       struct intel_vgpu *vgpu = s->vgpu;
        u32 dword0 = cmd_val(s, 0);
        u32 dword1 = cmd_val(s, 1);
        u32 dword2 = cmd_val(s, 2);
@@ -1167,7 +1190,7 @@ static int skl_decode_mi_display_flip(struct parser_exec_state *s,
                break;
 
        default:
-               gvt_err("unknown plane code %d\n", plane);
+               gvt_vgpu_err("unknown plane code %d\n", plane);
                return -EINVAL;
        }
 
@@ -1274,25 +1297,26 @@ static int update_plane_mmio_from_mi_display_flip(
 static int cmd_handler_mi_display_flip(struct parser_exec_state *s)
 {
        struct mi_display_flip_command_info info;
+       struct intel_vgpu *vgpu = s->vgpu;
        int ret;
        int i;
        int len = cmd_length(s);
 
        ret = decode_mi_display_flip(s, &info);
        if (ret) {
-               gvt_err("fail to decode MI display flip command\n");
+               gvt_vgpu_err("fail to decode MI display flip command\n");
                return ret;
        }
 
        ret = check_mi_display_flip(s, &info);
        if (ret) {
-               gvt_err("invalid MI display flip command\n");
+               gvt_vgpu_err("invalid MI display flip command\n");
                return ret;
        }
 
        ret = update_plane_mmio_from_mi_display_flip(s, &info);
        if (ret) {
-               gvt_err("fail to update plane mmio\n");
+               gvt_vgpu_err("fail to update plane mmio\n");
                return ret;
        }
 
@@ -1350,7 +1374,8 @@ static inline int cmd_address_audit(struct parser_exec_state *s,
        int ret;
 
        if (op_size > max_surface_size) {
-               gvt_err("command address audit fail name %s\n", s->info->name);
+               gvt_vgpu_err("command address audit fail name %s\n",
+                       s->info->name);
                return -EINVAL;
        }
 
@@ -1367,7 +1392,7 @@ static inline int cmd_address_audit(struct parser_exec_state *s,
        }
        return 0;
 err:
-       gvt_err("cmd_parser: Malicious %s detected, addr=0x%lx, len=%d!\n",
+       gvt_vgpu_err("cmd_parser: Malicious %s detected, addr=0x%lx, len=%d!\n",
                        s->info->name, guest_gma, op_size);
 
        pr_err("cmd dump: ");
@@ -1412,8 +1437,10 @@ static int cmd_handler_mi_store_data_imm(struct parser_exec_state *s)
 
 static inline int unexpected_cmd(struct parser_exec_state *s)
 {
-       gvt_err("vgpu%d: Unexpected %s in command buffer!\n",
-                       s->vgpu->id, s->info->name);
+       struct intel_vgpu *vgpu = s->vgpu;
+
+       gvt_vgpu_err("Unexpected %s in command buffer!\n", s->info->name);
+
        return -EINVAL;
 }
 
@@ -1516,7 +1543,7 @@ static int copy_gma_to_hva(struct intel_vgpu *vgpu, struct intel_vgpu_mm *mm,
        while (gma != end_gma) {
                gpa = intel_vgpu_gma_to_gpa(mm, gma);
                if (gpa == INTEL_GVT_INVALID_ADDR) {
-                       gvt_err("invalid gma address: %lx\n", gma);
+                       gvt_vgpu_err("invalid gma address: %lx\n", gma);
                        return -EFAULT;
                }
 
@@ -1557,6 +1584,7 @@ static uint32_t find_bb_size(struct parser_exec_state *s)
        uint32_t bb_size = 0;
        uint32_t cmd_len = 0;
        bool met_bb_end = false;
+       struct intel_vgpu *vgpu = s->vgpu;
        u32 cmd;
 
        /* get the start gm address of the batch buffer */
@@ -1565,7 +1593,7 @@ static uint32_t find_bb_size(struct parser_exec_state *s)
 
        info = get_cmd_info(s->vgpu->gvt, cmd, s->ring_id);
        if (info == NULL) {
-               gvt_err("unknown cmd 0x%x, opcode=0x%x\n",
+               gvt_vgpu_err("unknown cmd 0x%x, opcode=0x%x\n",
                                cmd, get_opcode(cmd, s->ring_id));
                return -EINVAL;
        }
@@ -1574,7 +1602,7 @@ static uint32_t find_bb_size(struct parser_exec_state *s)
                                gma, gma + 4, &cmd);
                info = get_cmd_info(s->vgpu->gvt, cmd, s->ring_id);
                if (info == NULL) {
-                       gvt_err("unknown cmd 0x%x, opcode=0x%x\n",
+                       gvt_vgpu_err("unknown cmd 0x%x, opcode=0x%x\n",
                                cmd, get_opcode(cmd, s->ring_id));
                        return -EINVAL;
                }
@@ -1599,6 +1627,7 @@ static uint32_t find_bb_size(struct parser_exec_state *s)
 static int perform_bb_shadow(struct parser_exec_state *s)
 {
        struct intel_shadow_bb_entry *entry_obj;
+       struct intel_vgpu *vgpu = s->vgpu;
        unsigned long gma = 0;
        uint32_t bb_size;
        void *dst = NULL;
@@ -1633,7 +1662,7 @@ static int perform_bb_shadow(struct parser_exec_state *s)
 
        ret = i915_gem_object_set_to_cpu_domain(entry_obj->obj, false);
        if (ret) {
-               gvt_err("failed to set shadow batch to CPU\n");
+               gvt_vgpu_err("failed to set shadow batch to CPU\n");
                goto unmap_src;
        }
 
@@ -1645,7 +1674,7 @@ static int perform_bb_shadow(struct parser_exec_state *s)
                              gma, gma + bb_size,
                              dst);
        if (ret) {
-               gvt_err("fail to copy guest ring buffer\n");
+               gvt_vgpu_err("fail to copy guest ring buffer\n");
                goto unmap_src;
        }
 
@@ -1676,15 +1705,16 @@ static int cmd_handler_mi_batch_buffer_start(struct parser_exec_state *s)
 {
        bool second_level;
        int ret = 0;
+       struct intel_vgpu *vgpu = s->vgpu;
 
        if (s->buf_type == BATCH_BUFFER_2ND_LEVEL) {
-               gvt_err("Found MI_BATCH_BUFFER_START in 2nd level BB\n");
+               gvt_vgpu_err("Found MI_BATCH_BUFFER_START in 2nd level BB\n");
                return -EINVAL;
        }
 
        second_level = BATCH_BUFFER_2ND_LEVEL_BIT(cmd_val(s, 0)) == 1;
        if (second_level && (s->buf_type != BATCH_BUFFER_INSTRUCTION)) {
-               gvt_err("Jumping to 2nd level BB from RB is not allowed\n");
+               gvt_vgpu_err("Jumping to 2nd level BB from RB is not allowed\n");
                return -EINVAL;
        }
 
@@ -1702,7 +1732,7 @@ static int cmd_handler_mi_batch_buffer_start(struct parser_exec_state *s)
        if (batch_buffer_needs_scan(s)) {
                ret = perform_bb_shadow(s);
                if (ret < 0)
-                       gvt_err("invalid shadow batch buffer\n");
+                       gvt_vgpu_err("invalid shadow batch buffer\n");
        } else {
                /* emulate a batch buffer end to do return right */
                ret = cmd_handler_mi_batch_buffer_end(s);
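
Editor's note: the two checks in this hunk enforce the command-streamer nesting rules for MI_BATCH_BUFFER_START; the rule table below is inferred from the checks themselves:

        /* MI_BATCH_BUFFER_START nesting, as enforced above:
         *   ring buffer      -> batch buffer                ok
         *   batch buffer     -> 2nd-level batch buffer      ok
         *   ring buffer      -> 2nd-level batch buffer      rejected
         *   2nd-level batch  -> any MI_BATCH_BUFFER_START   rejected
         */
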
@@ -2429,6 +2459,7 @@ static int cmd_parser_exec(struct parser_exec_state *s)
        int ret = 0;
        cycles_t t0, t1, t2;
        struct parser_exec_state s_before_advance_custom;
+       struct intel_vgpu *vgpu = s->vgpu;
 
        t0 = get_cycles();
 
@@ -2436,7 +2467,7 @@ static int cmd_parser_exec(struct parser_exec_state *s)
 
        info = get_cmd_info(s->vgpu->gvt, cmd, s->ring_id);
        if (info == NULL) {
-               gvt_err("unknown cmd 0x%x, opcode=0x%x\n",
+               gvt_vgpu_err("unknown cmd 0x%x, opcode=0x%x\n",
                                cmd, get_opcode(cmd, s->ring_id));
                return -EINVAL;
        }
@@ -2452,7 +2483,7 @@ static int cmd_parser_exec(struct parser_exec_state *s)
        if (info->handler) {
                ret = info->handler(s);
                if (ret < 0) {
-                       gvt_err("%s handler error\n", info->name);
+                       gvt_vgpu_err("%s handler error\n", info->name);
                        return ret;
                }
        }
@@ -2463,7 +2494,7 @@ static int cmd_parser_exec(struct parser_exec_state *s)
        if (!(info->flag & F_IP_ADVANCE_CUSTOM)) {
                ret = cmd_advance_default(s);
                if (ret) {
-                       gvt_err("%s IP advance error\n", info->name);
+                       gvt_vgpu_err("%s IP advance error\n", info->name);
                        return ret;
                }
        }
@@ -2486,6 +2517,7 @@ static int command_scan(struct parser_exec_state *s,
 
        unsigned long gma_head, gma_tail, gma_bottom;
        int ret = 0;
+       struct intel_vgpu *vgpu = s->vgpu;
 
        gma_head = rb_start + rb_head;
        gma_tail = rb_start + rb_tail;
@@ -2497,7 +2529,7 @@ static int command_scan(struct parser_exec_state *s,
                if (s->buf_type == RING_BUFFER_INSTRUCTION) {
                        if (!(s->ip_gma >= rb_start) ||
                                !(s->ip_gma < gma_bottom)) {
-                               gvt_err("ip_gma %lx out of ring scope."
+                               gvt_vgpu_err("ip_gma %lx out of ring scope. "
                                        "(base:0x%lx, bottom: 0x%lx)\n",
                                        s->ip_gma, rb_start,
                                        gma_bottom);
@@ -2505,7 +2537,7 @@ static int command_scan(struct parser_exec_state *s,
                                return -EINVAL;
                        }
                        if (gma_out_of_range(s->ip_gma, gma_head, gma_tail)) {
-                               gvt_err("ip_gma %lx out of range."
+                               gvt_vgpu_err("ip_gma %lx out of range. "
                                        "base 0x%lx head 0x%lx tail 0x%lx\n",
                                        s->ip_gma, rb_start,
                                        rb_head, rb_tail);
@@ -2515,7 +2547,7 @@ static int command_scan(struct parser_exec_state *s,
                }
                ret = cmd_parser_exec(s);
                if (ret) {
-                       gvt_err("cmd parser error\n");
+                       gvt_vgpu_err("cmd parser error\n");
                        parser_exec_state_dump(s);
                        break;
                }
@@ -2639,7 +2671,7 @@ static int shadow_workload_ring_buffer(struct intel_vgpu_workload *workload)
                                gma_head, gma_top,
                                workload->shadow_ring_buffer_va);
                if (ret) {
-                       gvt_err("fail to copy guest ring buffer\n");
+                       gvt_vgpu_err("fail to copy guest ring buffer\n");
                        return ret;
                }
                copy_len = gma_top - gma_head;
@@ -2651,7 +2683,7 @@ static int shadow_workload_ring_buffer(struct intel_vgpu_workload *workload)
                        gma_head, gma_tail,
                        workload->shadow_ring_buffer_va + copy_len);
        if (ret) {
-               gvt_err("fail to copy guest ring buffer\n");
+               gvt_vgpu_err("fail to copy guest ring buffer\n");
                return ret;
        }
        ring->tail += workload->rb_len;
@@ -2662,16 +2694,17 @@ static int shadow_workload_ring_buffer(struct intel_vgpu_workload *workload)
 int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
 {
        int ret;
+       struct intel_vgpu *vgpu = workload->vgpu;
 
        ret = shadow_workload_ring_buffer(workload);
        if (ret) {
-               gvt_err("fail to shadow workload ring_buffer\n");
+               gvt_vgpu_err("fail to shadow workload ring_buffer\n");
                return ret;
        }
 
        ret = scan_workload(workload);
        if (ret) {
-               gvt_err("scan workload error\n");
+               gvt_vgpu_err("scan workload error\n");
                return ret;
        }
        return 0;
@@ -2681,6 +2714,7 @@ static int shadow_indirect_ctx(struct intel_shadow_wa_ctx *wa_ctx)
 {
        int ctx_size = wa_ctx->indirect_ctx.size;
        unsigned long guest_gma = wa_ctx->indirect_ctx.guest_gma;
+       struct intel_vgpu *vgpu = wa_ctx->workload->vgpu;
        struct drm_i915_gem_object *obj;
        int ret = 0;
        void *map;
@@ -2694,14 +2728,14 @@ static int shadow_indirect_ctx(struct intel_shadow_wa_ctx *wa_ctx)
        /* get the va of the shadow batch buffer */
        map = i915_gem_object_pin_map(obj, I915_MAP_WB);
        if (IS_ERR(map)) {
-               gvt_err("failed to vmap shadow indirect ctx\n");
+               gvt_vgpu_err("failed to vmap shadow indirect ctx\n");
                ret = PTR_ERR(map);
                goto put_obj;
        }
 
        ret = i915_gem_object_set_to_cpu_domain(obj, false);
        if (ret) {
-               gvt_err("failed to set shadow indirect ctx to CPU\n");
+               gvt_vgpu_err("failed to set shadow indirect ctx to CPU\n");
                goto unmap_src;
        }
 
@@ -2710,7 +2744,7 @@ static int shadow_indirect_ctx(struct intel_shadow_wa_ctx *wa_ctx)
                                guest_gma, guest_gma + ctx_size,
                                map);
        if (ret) {
-               gvt_err("fail to copy guest indirect ctx\n");
+               gvt_vgpu_err("fail to copy guest indirect ctx\n");
                goto unmap_src;
        }
 
@@ -2744,13 +2778,14 @@ static int combine_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
 int intel_gvt_scan_and_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
 {
        int ret;
+       struct intel_vgpu *vgpu = wa_ctx->workload->vgpu;
 
        if (wa_ctx->indirect_ctx.size == 0)
                return 0;
 
        ret = shadow_indirect_ctx(wa_ctx);
        if (ret) {
-               gvt_err("fail to shadow indirect ctx\n");
+               gvt_vgpu_err("fail to shadow indirect ctx\n");
                return ret;
        }
 
@@ -2758,7 +2793,7 @@ int intel_gvt_scan_and_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
 
        ret = scan_wa_ctx(wa_ctx);
        if (ret) {
-               gvt_err("scan wa ctx error\n");
+               gvt_vgpu_err("scan wa ctx error\n");
                return ret;
        }
 
index 68cba7b..b0cff4d 100644 (file)
 #define gvt_err(fmt, args...) \
        DRM_ERROR("gvt: "fmt, ##args)
 
+#define gvt_vgpu_err(fmt, args...)                                     \
+do {                                                                   \
+       if (IS_ERR_OR_NULL(vgpu))                                       \
+               DRM_DEBUG_DRIVER("gvt: "fmt, ##args);                   \
+       else                                                            \
+               DRM_DEBUG_DRIVER("gvt: vgpu %d: "fmt, vgpu->id, ##args);\
+} while (0)
+
 #define gvt_dbg_core(fmt, args...) \
        DRM_DEBUG_DRIVER("gvt: core: "fmt, ##args)
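
Editor's note on the mechanism: gvt_vgpu_err() resolves `vgpu` from the caller's scope rather than taking it as an argument, which is why nearly every converted function in this patch gains a `struct intel_vgpu *vgpu = ...;` local, or initializes one to NULL where no vgpu may exist yet. A minimal sketch of the call-site contract (handle_foo() and some_check() are illustrative, not from this patch):

        static int handle_foo(struct parser_exec_state *s)
        {
                struct intel_vgpu *vgpu = s->vgpu;      /* resolved by the macro */

                if (!some_check(s)) {
                        /* prints "gvt: vgpu N: bad foo" for a usable vgpu, or
                         * falls back to a bare "gvt: " prefix when vgpu is
                         * NULL or an ERR_PTR */
                        gvt_vgpu_err("bad foo\n");
                        return -EINVAL;
                }
                return 0;
        }
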
 
index bda85df..f1648fe 100644 (file)
@@ -52,16 +52,16 @@ static unsigned char edid_get_byte(struct intel_vgpu *vgpu)
        unsigned char chr = 0;
 
        if (edid->state == I2C_NOT_SPECIFIED || !edid->slave_selected) {
-               gvt_err("Driver tries to read EDID without proper sequence!\n");
+               gvt_vgpu_err("Driver tries to read EDID without proper sequence!\n");
                return 0;
        }
        if (edid->current_edid_read >= EDID_SIZE) {
-               gvt_err("edid_get_byte() exceeds the size of EDID!\n");
+               gvt_vgpu_err("edid_get_byte() exceeds the size of EDID!\n");
                return 0;
        }
 
        if (!edid->edid_available) {
-               gvt_err("Reading EDID but EDID is not available!\n");
+               gvt_vgpu_err("Reading EDID but EDID is not available!\n");
                return 0;
        }
 
@@ -72,7 +72,7 @@ static unsigned char edid_get_byte(struct intel_vgpu *vgpu)
                chr = edid_data->edid_block[edid->current_edid_read];
                edid->current_edid_read++;
        } else {
-               gvt_err("No EDID available during the reading?\n");
+               gvt_vgpu_err("No EDID available during the reading?\n");
        }
        return chr;
 }
@@ -223,7 +223,7 @@ static int gmbus1_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
                        vgpu_vreg(vgpu, PCH_GMBUS2) |= GMBUS_ACTIVE;
                        break;
                default:
-                       gvt_err("Unknown/reserved GMBUS cycle detected!\n");
+                       gvt_vgpu_err("Unknown/reserved GMBUS cycle detected!\n");
                        break;
                }
                /*
@@ -292,8 +292,7 @@ static int gmbus3_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
                 */
        } else {
                memcpy(p_data, &vgpu_vreg(vgpu, offset), bytes);
-               gvt_err("vgpu%d: warning: gmbus3 read with nothing returned\n",
-                               vgpu->id);
+               gvt_vgpu_err("warning: gmbus3 read with nothing returned\n");
        }
        return 0;
 }
index 46eb9fd..f1f426a 100644 (file)
@@ -172,6 +172,7 @@ static int emulate_execlist_ctx_schedule_out(
                struct intel_vgpu_execlist *execlist,
                struct execlist_ctx_descriptor_format *ctx)
 {
+       struct intel_vgpu *vgpu = execlist->vgpu;
        struct intel_vgpu_execlist_slot *running = execlist->running_slot;
        struct intel_vgpu_execlist_slot *pending = execlist->pending_slot;
        struct execlist_ctx_descriptor_format *ctx0 = &running->ctx[0];
@@ -183,7 +184,7 @@ static int emulate_execlist_ctx_schedule_out(
        gvt_dbg_el("schedule out context id %x\n", ctx->context_id);
 
        if (WARN_ON(!same_context(ctx, execlist->running_context))) {
-               gvt_err("schedule out context is not running context,"
+               gvt_vgpu_err("schedule out context is not running context, "
                                "ctx id %x running ctx id %x\n",
                                ctx->context_id,
                                execlist->running_context->context_id);
@@ -254,7 +255,7 @@ static struct intel_vgpu_execlist_slot *get_next_execlist_slot(
        status.udw = vgpu_vreg(vgpu, status_reg + 4);
 
        if (status.execlist_queue_full) {
-               gvt_err("virtual execlist slots are full\n");
+               gvt_vgpu_err("virtual execlist slots are full\n");
                return NULL;
        }
 
@@ -270,11 +271,12 @@ static int emulate_execlist_schedule_in(struct intel_vgpu_execlist *execlist,
 
        struct execlist_ctx_descriptor_format *ctx0, *ctx1;
        struct execlist_context_status_format status;
+       struct intel_vgpu *vgpu = execlist->vgpu;
 
        gvt_dbg_el("emulate schedule-in\n");
 
        if (!slot) {
-               gvt_err("no available execlist slot\n");
+               gvt_vgpu_err("no available execlist slot\n");
                return -EINVAL;
        }
 
@@ -375,7 +377,6 @@ static void prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload)
 
                vma = i915_gem_object_ggtt_pin(entry_obj->obj, NULL, 0, 4, 0);
                if (IS_ERR(vma)) {
-                       gvt_err("Cannot pin\n");
                        return;
                }
 
@@ -428,7 +429,6 @@ static void prepare_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
        vma = i915_gem_object_ggtt_pin(wa_ctx->indirect_ctx.obj, NULL,
                                       0, CACHELINE_BYTES, 0);
        if (IS_ERR(vma)) {
-               gvt_err("Cannot pin indirect ctx obj\n");
                return;
        }
 
@@ -561,6 +561,7 @@ static int prepare_mm(struct intel_vgpu_workload *workload)
 {
        struct execlist_ctx_descriptor_format *desc = &workload->ctx_desc;
        struct intel_vgpu_mm *mm;
+       struct intel_vgpu *vgpu = workload->vgpu;
        int page_table_level;
        u32 pdp[8];
 
@@ -569,7 +570,7 @@ static int prepare_mm(struct intel_vgpu_workload *workload)
        } else if (desc->addressing_mode == 3) { /* legacy 64 bit */
                page_table_level = 4;
        } else {
-               gvt_err("Advanced Context mode(SVM) is not supported!\n");
+               gvt_vgpu_err("Advanced Context mode(SVM) is not supported!\n");
                return -EINVAL;
        }
 
@@ -583,7 +584,7 @@ static int prepare_mm(struct intel_vgpu_workload *workload)
                mm = intel_vgpu_create_mm(workload->vgpu, INTEL_GVT_MM_PPGTT,
                                pdp, page_table_level, 0);
                if (IS_ERR(mm)) {
-                       gvt_err("fail to create mm object.\n");
+                       gvt_vgpu_err("fail to create mm object.\n");
                        return PTR_ERR(mm);
                }
        }
@@ -609,7 +610,7 @@ static int submit_context(struct intel_vgpu *vgpu, int ring_id,
        ring_context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
                        (u32)((desc->lrca + 1) << GTT_PAGE_SHIFT));
        if (ring_context_gpa == INTEL_GVT_INVALID_ADDR) {
-               gvt_err("invalid guest context LRCA: %x\n", desc->lrca);
+               gvt_vgpu_err("invalid guest context LRCA: %x\n", desc->lrca);
                return -EINVAL;
        }
 
@@ -724,8 +725,7 @@ int intel_vgpu_submit_execlist(struct intel_vgpu *vgpu, int ring_id)
                        continue;
 
                if (!desc[i]->privilege_access) {
-                       gvt_err("vgpu%d: unexpected GGTT elsp submission\n",
-                                       vgpu->id);
+                       gvt_vgpu_err("unexpected GGTT elsp submission\n");
                        return -EINVAL;
                }
 
@@ -735,15 +735,13 @@ int intel_vgpu_submit_execlist(struct intel_vgpu *vgpu, int ring_id)
        }
 
        if (!valid_desc_bitmap) {
-               gvt_err("vgpu%d: no valid desc in a elsp submission\n",
-                               vgpu->id);
+               gvt_vgpu_err("no valid desc in a elsp submission\n");
                return -EINVAL;
        }
 
        if (!test_bit(0, (void *)&valid_desc_bitmap) &&
                        test_bit(1, (void *)&valid_desc_bitmap)) {
-               gvt_err("vgpu%d: weird elsp submission, desc 0 is not valid\n",
-                               vgpu->id);
+               gvt_vgpu_err("weird elsp submission, desc 0 is not valid\n");
                return -EINVAL;
        }
 
@@ -752,8 +750,7 @@ int intel_vgpu_submit_execlist(struct intel_vgpu *vgpu, int ring_id)
                ret = submit_context(vgpu, ring_id, &valid_desc[i],
                                emulate_schedule_in);
                if (ret) {
-                       gvt_err("vgpu%d: fail to schedule workload\n",
-                                       vgpu->id);
+                       gvt_vgpu_err("fail to schedule workload\n");
                        return ret;
                }
                emulate_schedule_in = false;
index 6a5ff23..da73127 100644 (file)
@@ -49,8 +49,8 @@ bool intel_gvt_ggtt_validate_range(struct intel_vgpu *vgpu, u64 addr, u32 size)
 {
        if ((!vgpu_gmadr_is_valid(vgpu, addr)) || (size
                        && !vgpu_gmadr_is_valid(vgpu, addr + size - 1))) {
-               gvt_err("vgpu%d: invalid range gmadr 0x%llx size 0x%x\n",
-                               vgpu->id, addr, size);
+               gvt_vgpu_err("invalid range gmadr 0x%llx size 0x%x\n",
+                               addr, size);
                return false;
        }
        return true;
@@ -430,7 +430,7 @@ static int gtt_entry_p2m(struct intel_vgpu *vgpu, struct intel_gvt_gtt_entry *p,
 
        mfn = intel_gvt_hypervisor_gfn_to_mfn(vgpu, gfn);
        if (mfn == INTEL_GVT_INVALID_ADDR) {
-               gvt_err("fail to translate gfn: 0x%lx\n", gfn);
+               gvt_vgpu_err("fail to translate gfn: 0x%lx\n", gfn);
                return -ENXIO;
        }
 
@@ -611,7 +611,7 @@ static inline int init_shadow_page(struct intel_vgpu *vgpu,
 
        daddr = dma_map_page(kdev, p->page, 0, 4096, PCI_DMA_BIDIRECTIONAL);
        if (dma_mapping_error(kdev, daddr)) {
-               gvt_err("fail to map dma addr\n");
+               gvt_vgpu_err("fail to map dma addr\n");
                return -EINVAL;
        }
 
@@ -735,7 +735,7 @@ retry:
                if (reclaim_one_mm(vgpu->gvt))
                        goto retry;
 
-               gvt_err("fail to allocate ppgtt shadow page\n");
+               gvt_vgpu_err("fail to allocate ppgtt shadow page\n");
                return ERR_PTR(-ENOMEM);
        }
 
@@ -750,14 +750,14 @@ retry:
         */
        ret = init_shadow_page(vgpu, &spt->shadow_page, type);
        if (ret) {
-               gvt_err("fail to initialize shadow page for spt\n");
+               gvt_vgpu_err("fail to initialize shadow page for spt\n");
                goto err;
        }
 
        ret = intel_vgpu_init_guest_page(vgpu, &spt->guest_page,
                        gfn, ppgtt_write_protection_handler, NULL);
        if (ret) {
-               gvt_err("fail to initialize guest page for spt\n");
+               gvt_vgpu_err("fail to initialize guest page for spt\n");
                goto err;
        }
 
@@ -776,8 +776,7 @@ static struct intel_vgpu_ppgtt_spt *ppgtt_find_shadow_page(
        if (p)
                return shadow_page_to_ppgtt_spt(p);
 
-       gvt_err("vgpu%d: fail to find ppgtt shadow page: 0x%lx\n",
-                       vgpu->id, mfn);
+       gvt_vgpu_err("fail to find ppgtt shadow page: 0x%lx\n", mfn);
        return NULL;
 }
 
@@ -827,8 +826,8 @@ static int ppgtt_invalidate_shadow_page_by_shadow_entry(struct intel_vgpu *vgpu,
        }
        s = ppgtt_find_shadow_page(vgpu, ops->get_pfn(e));
        if (!s) {
-               gvt_err("vgpu%d: fail to find shadow page: mfn: 0x%lx\n",
-                               vgpu->id, ops->get_pfn(e));
+               gvt_vgpu_err("fail to find shadow page: mfn: 0x%lx\n",
+                               ops->get_pfn(e));
                return -ENXIO;
        }
        return ppgtt_invalidate_shadow_page(s);
@@ -836,6 +835,7 @@ static int ppgtt_invalidate_shadow_page_by_shadow_entry(struct intel_vgpu *vgpu,
 
 static int ppgtt_invalidate_shadow_page(struct intel_vgpu_ppgtt_spt *spt)
 {
+       struct intel_vgpu *vgpu = spt->vgpu;
        struct intel_gvt_gtt_entry e;
        unsigned long index;
        int ret;
@@ -854,7 +854,7 @@ static int ppgtt_invalidate_shadow_page(struct intel_vgpu_ppgtt_spt *spt)
 
        for_each_present_shadow_entry(spt, &e, index) {
                if (!gtt_type_is_pt(get_next_pt_type(e.type))) {
-                       gvt_err("GVT doesn't support pse bit for now\n");
+                       gvt_vgpu_err("GVT doesn't support pse bit for now\n");
                        return -EINVAL;
                }
                ret = ppgtt_invalidate_shadow_page_by_shadow_entry(
@@ -868,8 +868,8 @@ release:
        ppgtt_free_shadow_page(spt);
        return 0;
 fail:
-       gvt_err("vgpu%d: fail: shadow page %p shadow entry 0x%llx type %d\n",
-                       spt->vgpu->id, spt, e.val64, e.type);
+       gvt_vgpu_err("fail: shadow page %p shadow entry 0x%llx type %d\n",
+                       spt, e.val64, e.type);
        return ret;
 }
 
@@ -914,8 +914,8 @@ static struct intel_vgpu_ppgtt_spt *ppgtt_populate_shadow_page_by_guest_entry(
        }
        return s;
 fail:
-       gvt_err("vgpu%d: fail: shadow page %p guest entry 0x%llx type %d\n",
-                       vgpu->id, s, we->val64, we->type);
+       gvt_vgpu_err("fail: shadow page %p guest entry 0x%llx type %d\n",
+                       s, we->val64, we->type);
        return ERR_PTR(ret);
 }
 
@@ -953,7 +953,7 @@ static int ppgtt_populate_shadow_page(struct intel_vgpu_ppgtt_spt *spt)
 
        for_each_present_guest_entry(spt, &ge, i) {
                if (!gtt_type_is_pt(get_next_pt_type(ge.type))) {
-                       gvt_err("GVT doesn't support pse bit now\n");
+                       gvt_vgpu_err("GVT doesn't support pse bit for now\n");
                        ret = -EINVAL;
                        goto fail;
                }
@@ -969,8 +969,8 @@ static int ppgtt_populate_shadow_page(struct intel_vgpu_ppgtt_spt *spt)
        }
        return 0;
 fail:
-       gvt_err("vgpu%d: fail: shadow page %p guest entry 0x%llx type %d\n",
-                       vgpu->id, spt, ge.val64, ge.type);
+       gvt_vgpu_err("fail: shadow page %p guest entry 0x%llx type %d\n",
+                       spt, ge.val64, ge.type);
        return ret;
 }
 
@@ -999,7 +999,7 @@ static int ppgtt_handle_guest_entry_removal(struct intel_vgpu_guest_page *gpt,
                struct intel_vgpu_ppgtt_spt *s =
                        ppgtt_find_shadow_page(vgpu, ops->get_pfn(&e));
                if (!s) {
-                       gvt_err("fail to find guest page\n");
+                       gvt_vgpu_err("fail to find guest page\n");
                        ret = -ENXIO;
                        goto fail;
                }
@@ -1011,8 +1011,8 @@ static int ppgtt_handle_guest_entry_removal(struct intel_vgpu_guest_page *gpt,
        ppgtt_set_shadow_entry(spt, &e, index);
        return 0;
 fail:
-       gvt_err("vgpu%d: fail: shadow page %p guest entry 0x%llx type %d\n",
-                       vgpu->id, spt, e.val64, e.type);
+       gvt_vgpu_err("fail: shadow page %p guest entry 0x%llx type %d\n",
+                       spt, e.val64, e.type);
        return ret;
 }
 
@@ -1046,8 +1046,8 @@ static int ppgtt_handle_guest_entry_add(struct intel_vgpu_guest_page *gpt,
        }
        return 0;
 fail:
-       gvt_err("vgpu%d: fail: spt %p guest entry 0x%llx type %d\n", vgpu->id,
-                       spt, we->val64, we->type);
+       gvt_vgpu_err("fail: spt %p guest entry 0x%llx type %d\n",
+               spt, we->val64, we->type);
        return ret;
 }
 
@@ -1250,8 +1250,8 @@ static int ppgtt_handle_guest_write_page_table(
        }
        return 0;
 fail:
-       gvt_err("vgpu%d: fail: shadow page %p guest entry 0x%llx type %d.\n",
-                       vgpu->id, spt, we->val64, we->type);
+       gvt_vgpu_err("fail: shadow page %p guest entry 0x%llx type %d.\n",
+                       spt, we->val64, we->type);
        return ret;
 }
 
@@ -1493,7 +1493,7 @@ static int shadow_mm(struct intel_vgpu_mm *mm)
 
                spt = ppgtt_populate_shadow_page_by_guest_entry(vgpu, &ge);
                if (IS_ERR(spt)) {
-                       gvt_err("fail to populate guest root pointer\n");
+                       gvt_vgpu_err("fail to populate guest root pointer\n");
                        ret = PTR_ERR(spt);
                        goto fail;
                }
@@ -1566,7 +1566,7 @@ struct intel_vgpu_mm *intel_vgpu_create_mm(struct intel_vgpu *vgpu,
 
        ret = gtt->mm_alloc_page_table(mm);
        if (ret) {
-               gvt_err("fail to allocate page table for mm\n");
+               gvt_vgpu_err("fail to allocate page table for mm\n");
                goto fail;
        }
 
@@ -1584,7 +1584,7 @@ struct intel_vgpu_mm *intel_vgpu_create_mm(struct intel_vgpu *vgpu,
        }
        return mm;
 fail:
-       gvt_err("fail to create mm\n");
+       gvt_vgpu_err("fail to create mm\n");
        if (mm)
                intel_gvt_mm_unreference(mm);
        return ERR_PTR(ret);
@@ -1760,7 +1760,7 @@ unsigned long intel_vgpu_gma_to_gpa(struct intel_vgpu_mm *mm, unsigned long gma)
                        mm->page_table_level, gma, gpa);
        return gpa;
 err:
-       gvt_err("invalid mm type: %d gma %lx\n", mm->type, gma);
+       gvt_vgpu_err("invalid mm type: %d gma %lx\n", mm->type, gma);
        return INTEL_GVT_INVALID_ADDR;
 }
 
@@ -1836,8 +1836,7 @@ static int emulate_gtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
        if (ops->test_present(&e)) {
                ret = gtt_entry_p2m(vgpu, &e, &m);
                if (ret) {
-                       gvt_err("vgpu%d: fail to translate guest gtt entry\n",
-                                       vgpu->id);
+                       gvt_vgpu_err("fail to translate guest gtt entry\n");
                        return ret;
                }
        } else {
@@ -1893,14 +1892,14 @@ static int alloc_scratch_pages(struct intel_vgpu *vgpu,
 
        scratch_pt = (void *)get_zeroed_page(GFP_KERNEL);
        if (!scratch_pt) {
-               gvt_err("fail to allocate scratch page\n");
+               gvt_vgpu_err("fail to allocate scratch page\n");
                return -ENOMEM;
        }
 
        daddr = dma_map_page(dev, virt_to_page(scratch_pt), 0,
                        4096, PCI_DMA_BIDIRECTIONAL);
        if (dma_mapping_error(dev, daddr)) {
-               gvt_err("fail to dmamap scratch_pt\n");
+               gvt_vgpu_err("fail to dmamap scratch_pt\n");
                __free_page(virt_to_page(scratch_pt));
                return -ENOMEM;
        }
@@ -2003,7 +2002,7 @@ int intel_vgpu_init_gtt(struct intel_vgpu *vgpu)
        ggtt_mm = intel_vgpu_create_mm(vgpu, INTEL_GVT_MM_GGTT,
                        NULL, 1, 0);
        if (IS_ERR(ggtt_mm)) {
-               gvt_err("fail to create mm for ggtt.\n");
+               gvt_vgpu_err("fail to create mm for ggtt.\n");
                return PTR_ERR(ggtt_mm);
        }
 
@@ -2076,7 +2075,6 @@ static int setup_spt_oos(struct intel_gvt *gvt)
        for (i = 0; i < preallocated_oos_pages; i++) {
                oos_page = kzalloc(sizeof(*oos_page), GFP_KERNEL);
                if (!oos_page) {
-                       gvt_err("fail to pre-allocate oos page\n");
                        ret = -ENOMEM;
                        goto fail;
                }
@@ -2166,7 +2164,7 @@ int intel_vgpu_g2v_create_ppgtt_mm(struct intel_vgpu *vgpu,
                mm = intel_vgpu_create_mm(vgpu, INTEL_GVT_MM_PPGTT,
                                pdp, page_table_level, 0);
                if (IS_ERR(mm)) {
-                       gvt_err("fail to create mm\n");
+                       gvt_vgpu_err("fail to create mm\n");
                        return PTR_ERR(mm);
                }
        }
@@ -2196,7 +2194,7 @@ int intel_vgpu_g2v_destroy_ppgtt_mm(struct intel_vgpu *vgpu,
 
        mm = intel_vgpu_find_ppgtt_mm(vgpu, page_table_level, pdp);
        if (!mm) {
-               gvt_err("fail to find ppgtt instance.\n");
+               gvt_vgpu_err("fail to find ppgtt instance.\n");
                return -EINVAL;
        }
        intel_gvt_mm_unreference(mm);
index 2379192..6dfc48b 100644 (file)
@@ -162,7 +162,6 @@ struct intel_vgpu {
        atomic_t running_workload_num;
        DECLARE_BITMAP(tlb_handle_pending, I915_NUM_ENGINES);
        struct i915_gem_context *shadow_ctx;
-       struct notifier_block shadow_ctx_notifier_block;
 
 #if IS_ENABLED(CONFIG_DRM_I915_GVT_KVMGT)
        struct {
@@ -233,6 +232,7 @@ struct intel_gvt {
        struct intel_gvt_gtt gtt;
        struct intel_gvt_opregion opregion;
        struct intel_gvt_workload_scheduler scheduler;
+       struct notifier_block shadow_ctx_notifier_block[I915_NUM_ENGINES];
        DECLARE_HASHTABLE(cmd_table, GVT_CMD_HASH_BITS);
        struct intel_vgpu_type *types;
        unsigned int num_types;
index 8e43395..eaff45d 100644 (file)
@@ -181,11 +181,9 @@ static int sanitize_fence_mmio_access(struct intel_vgpu *vgpu,
                                        GVT_FAILSAFE_UNSUPPORTED_GUEST);
 
                if (!vgpu->mmio.disable_warn_untrack) {
-                       gvt_err("vgpu%d: found oob fence register access\n",
-                                       vgpu->id);
-                       gvt_err("vgpu%d: total fence %d, access fence %d\n",
-                                       vgpu->id, vgpu_fence_sz(vgpu),
-                                       fence_num);
+                       gvt_vgpu_err("found oob fence register access\n");
+                       gvt_vgpu_err("total fence %d, access fence %d\n",
+                                       vgpu_fence_sz(vgpu), fence_num);
                }
                memset(p_data, 0, bytes);
                return -EINVAL;
@@ -249,7 +247,7 @@ static int mul_force_wake_write(struct intel_vgpu *vgpu,
                        break;
                default:
                        /*should not hit here*/
-                       gvt_err("invalid forcewake offset 0x%x\n", offset);
+                       gvt_vgpu_err("invalid forcewake offset 0x%x\n", offset);
                        return -EINVAL;
                }
        } else {
@@ -530,7 +528,7 @@ static int check_fdi_rx_train_status(struct intel_vgpu *vgpu,
                fdi_tx_train_bits = FDI_LINK_TRAIN_PATTERN_2;
                fdi_iir_check_bits = FDI_RX_SYMBOL_LOCK;
        } else {
-               gvt_err("Invalid train pattern %d\n", train_pattern);
+               gvt_vgpu_err("Invalid train pattern %d\n", train_pattern);
                return -EINVAL;
        }
 
@@ -588,7 +586,7 @@ static int update_fdi_rx_iir_status(struct intel_vgpu *vgpu,
        else if (FDI_RX_IMR_TO_PIPE(offset) != INVALID_INDEX)
                index = FDI_RX_IMR_TO_PIPE(offset);
        else {
-               gvt_err("Unsupport registers %x\n", offset);
+               gvt_vgpu_err("Unsupported register %x\n", offset);
                return -EINVAL;
        }
 
@@ -818,7 +816,7 @@ static int dp_aux_ch_ctl_mmio_write(struct intel_vgpu *vgpu,
        u32 data;
 
        if (!dpy_is_valid_port(port_index)) {
-               gvt_err("GVT(%d): Unsupported DP port access!\n", vgpu->id);
+               gvt_vgpu_err("Unsupported DP port access!\n");
                return 0;
        }
 
@@ -1016,8 +1014,7 @@ static void write_virtual_sbi_register(struct intel_vgpu *vgpu,
 
        if (i == num) {
                if (num == SBI_REG_MAX) {
-                       gvt_err("vgpu%d: SBI caching meets maximum limits\n",
-                                       vgpu->id);
+                       gvt_vgpu_err("SBI caching meets maximum limits\n");
                        return;
                }
                display->sbi.number++;
@@ -1097,7 +1094,7 @@ static int pvinfo_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
                break;
        }
        if (invalid_read)
-               gvt_err("invalid pvinfo read: [%x:%x] = %x\n",
+               gvt_vgpu_err("invalid pvinfo read: [%x:%x] = %x\n",
                                offset, bytes, *(u32 *)p_data);
        vgpu->pv_notified = true;
        return 0;
@@ -1125,7 +1122,7 @@ static int handle_g2v_notification(struct intel_vgpu *vgpu, int notification)
        case 1: /* Remove this in guest driver. */
                break;
        default:
-               gvt_err("Invalid PV notification %d\n", notification);
+               gvt_vgpu_err("Invalid PV notification %d\n", notification);
        }
        return ret;
 }
@@ -1181,7 +1178,7 @@ static int pvinfo_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
                enter_failsafe_mode(vgpu, GVT_FAILSAFE_INSUFFICIENT_RESOURCE);
                break;
        default:
-               gvt_err("invalid pvinfo write offset %x bytes %x data %x\n",
+               gvt_vgpu_err("invalid pvinfo write offset %x bytes %x data %x\n",
                                offset, bytes, data);
                break;
        }
@@ -1415,7 +1412,8 @@ static int elsp_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
        if (execlist->elsp_dwords.index == 3) {
                ret = intel_vgpu_submit_execlist(vgpu, ring_id);
                if(ret)
-                       gvt_err("fail submit workload on ring %d\n", ring_id);
+                       gvt_vgpu_err("fail to submit workload on ring %d\n",
+                               ring_id);
        }
 
        ++execlist->elsp_dwords.index;
@@ -2988,3 +2986,20 @@ int intel_vgpu_default_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
        write_vreg(vgpu, offset, p_data, bytes);
        return 0;
 }
+
+/**
+ * intel_gvt_in_force_nonpriv_whitelist - check whether an MMIO offset is in
+ * the force-nonpriv whitelist
+ *
+ * @gvt: a GVT device
+ * @offset: register offset
+ *
+ * Returns:
+ * True if the register is in the force-nonpriv whitelist;
+ * false otherwise.
+ */
+bool intel_gvt_in_force_nonpriv_whitelist(struct intel_gvt *gvt,
+                                         unsigned int offset)
+{
+       return in_whitelist(offset);
+}
index 84d8016..1ea3eb2 100644 (file)
@@ -426,7 +426,7 @@ static void kvmgt_protect_table_del(struct kvmgt_guest_info *info,
 
 static int intel_vgpu_create(struct kobject *kobj, struct mdev_device *mdev)
 {
-       struct intel_vgpu *vgpu;
+       struct intel_vgpu *vgpu = NULL;
        struct intel_vgpu_type *type;
        struct device *pdev;
        void *gvt;
@@ -437,7 +437,7 @@ static int intel_vgpu_create(struct kobject *kobj, struct mdev_device *mdev)
 
        type = intel_gvt_find_vgpu_type(gvt, kobject_name(kobj));
        if (!type) {
-               gvt_err("failed to find type %s to create\n",
+               gvt_vgpu_err("failed to find type %s to create\n",
                                                kobject_name(kobj));
                ret = -EINVAL;
                goto out;
@@ -446,7 +446,7 @@ static int intel_vgpu_create(struct kobject *kobj, struct mdev_device *mdev)
        vgpu = intel_gvt_ops->vgpu_create(gvt, type);
        if (IS_ERR_OR_NULL(vgpu)) {
                ret = vgpu == NULL ? -EFAULT : PTR_ERR(vgpu);
-               gvt_err("failed to create intel vgpu: %d\n", ret);
+               gvt_vgpu_err("failed to create intel vgpu: %d\n", ret);
                goto out;
        }
 
@@ -526,7 +526,8 @@ static int intel_vgpu_open(struct mdev_device *mdev)
        ret = vfio_register_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY, &events,
                                &vgpu->vdev.iommu_notifier);
        if (ret != 0) {
-               gvt_err("vfio_register_notifier for iommu failed: %d\n", ret);
+               gvt_vgpu_err("vfio_register_notifier for iommu failed: %d\n",
+                       ret);
                goto out;
        }
 
@@ -534,7 +535,8 @@ static int intel_vgpu_open(struct mdev_device *mdev)
        ret = vfio_register_notifier(mdev_dev(mdev), VFIO_GROUP_NOTIFY, &events,
                                &vgpu->vdev.group_notifier);
        if (ret != 0) {
-               gvt_err("vfio_register_notifier for group failed: %d\n", ret);
+               gvt_vgpu_err("vfio_register_notifier for group failed: %d\n",
+                       ret);
                goto undo_iommu;
        }
 
@@ -635,7 +637,7 @@ static ssize_t intel_vgpu_rw(struct mdev_device *mdev, char *buf,
 
 
        if (index >= VFIO_PCI_NUM_REGIONS) {
-               gvt_err("invalid index: %u\n", index);
+               gvt_vgpu_err("invalid index: %u\n", index);
                return -EINVAL;
        }
 
@@ -669,7 +671,7 @@ static ssize_t intel_vgpu_rw(struct mdev_device *mdev, char *buf,
        case VFIO_PCI_VGA_REGION_INDEX:
        case VFIO_PCI_ROM_REGION_INDEX:
        default:
-               gvt_err("unsupported region: %u\n", index);
+               gvt_vgpu_err("unsupported region: %u\n", index);
        }
 
        return ret == 0 ? count : ret;
@@ -861,7 +863,7 @@ static int intel_vgpu_set_msi_trigger(struct intel_vgpu *vgpu,
 
                trigger = eventfd_ctx_fdget(fd);
                if (IS_ERR(trigger)) {
-                       gvt_err("eventfd_ctx_fdget failed\n");
+                       gvt_vgpu_err("eventfd_ctx_fdget failed\n");
                        return PTR_ERR(trigger);
                }
                vgpu->vdev.msi_trigger = trigger;
@@ -1120,7 +1122,7 @@ static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd,
                        ret = vfio_set_irqs_validate_and_prepare(&hdr, max,
                                                VFIO_PCI_NUM_IRQS, &data_size);
                        if (ret) {
-                               gvt_err("intel:vfio_set_irqs_validate_and_prepare failed\n");
+                               gvt_vgpu_err("intel:vfio_set_irqs_validate_and_prepare failed\n");
                                return -EINVAL;
                        }
                        if (data_size) {
@@ -1310,7 +1312,7 @@ static int kvmgt_guest_init(struct mdev_device *mdev)
 
        kvm = vgpu->vdev.kvm;
        if (!kvm || kvm->mm != current->mm) {
-               gvt_err("KVM is required to use Intel vGPU\n");
+               gvt_vgpu_err("KVM is required to use Intel vGPU\n");
                return -ESRCH;
        }
 
@@ -1337,8 +1339,10 @@ static int kvmgt_guest_init(struct mdev_device *mdev)
 
 static bool kvmgt_guest_exit(struct kvmgt_guest_info *info)
 {
+       struct intel_vgpu *vgpu = NULL; /* info may be NULL; checked just below */
+
        if (!info) {
-               gvt_err("kvmgt_guest_info invalid\n");
+               gvt_vgpu_err("kvmgt_guest_info invalid\n");
                return false;
        }
 
@@ -1383,12 +1387,14 @@ static unsigned long kvmgt_gfn_to_pfn(unsigned long handle, unsigned long gfn)
        unsigned long iova, pfn;
        struct kvmgt_guest_info *info;
        struct device *dev;
+       struct intel_vgpu *vgpu;
        int rc;
 
        if (!handle_valid(handle))
                return INTEL_GVT_INVALID_ADDR;
 
        info = (struct kvmgt_guest_info *)handle;
+       vgpu = info->vgpu;
        iova = gvt_cache_find(info->vgpu, gfn);
        if (iova != INTEL_GVT_INVALID_ADDR)
                return iova;
@@ -1397,13 +1403,14 @@ static unsigned long kvmgt_gfn_to_pfn(unsigned long handle, unsigned long gfn)
        dev = mdev_dev(info->vgpu->vdev.mdev);
        rc = vfio_pin_pages(dev, &gfn, 1, IOMMU_READ | IOMMU_WRITE, &pfn);
        if (rc != 1) {
-               gvt_err("vfio_pin_pages failed for gfn 0x%lx: %d\n", gfn, rc);
+               gvt_vgpu_err("vfio_pin_pages failed for gfn 0x%lx: %d\n",
+                       gfn, rc);
                return INTEL_GVT_INVALID_ADDR;
        }
        /* transfer to host iova for GFX to use DMA */
        rc = gvt_dma_map_iova(info->vgpu, pfn, &iova);
        if (rc) {
-               gvt_err("gvt_dma_map_iova failed for gfn: 0x%lx\n", gfn);
+               gvt_vgpu_err("gvt_dma_map_iova failed for gfn: 0x%lx\n", gfn);
                vfio_unpin_pages(dev, &gfn, 1);
                return INTEL_GVT_INVALID_ADDR;
        }
@@ -1417,7 +1424,7 @@ static int kvmgt_rw_gpa(unsigned long handle, unsigned long gpa,
 {
        struct kvmgt_guest_info *info;
        struct kvm *kvm;
-       int ret;
+       int idx, ret;
        bool kthread = current->mm == NULL;
 
        if (!handle_valid(handle))
@@ -1429,8 +1436,10 @@ static int kvmgt_rw_gpa(unsigned long handle, unsigned long gpa,
        if (kthread)
                use_mm(kvm->mm);
 
+       idx = srcu_read_lock(&kvm->srcu);
        ret = write ? kvm_write_guest(kvm, gpa, buf, len) :
                      kvm_read_guest(kvm, gpa, buf, len);
+       srcu_read_unlock(&kvm->srcu, idx);
 
        if (kthread)
                unuse_mm(kvm->mm);
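
Editor's note: the srcu_read_lock()/srcu_read_unlock() pair added here is not cosmetic. kvm_read_guest() and kvm_write_guest() resolve the GPA through kvm->memslots, which is protected by kvm->srcu; vCPU ioctl paths already hold that SRCU read lock, but this code can run from a GVT kthread that does not, so it must take it explicitly. A self-contained sketch of the pattern (guest_copy() is a hypothetical helper, not part of the patch):

        /* hypothetical wrapper: any memslot walk must run under kvm->srcu */
        static int guest_copy(struct kvm *kvm, gpa_t gpa, void *buf,
                              unsigned long len, bool write)
        {
                int idx, ret;

                idx = srcu_read_lock(&kvm->srcu);
                ret = write ? kvm_write_guest(kvm, gpa, buf, len) :
                              kvm_read_guest(kvm, gpa, buf, len);
                srcu_read_unlock(&kvm->srcu, idx);
                return ret;
        }
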
index 60b698c..1ba3bdb 100644 (file)
@@ -142,10 +142,10 @@ int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, uint64_t pa,
                        ret = intel_gvt_hypervisor_read_gpa(vgpu, pa,
                                        p_data, bytes);
                        if (ret) {
-                               gvt_err("vgpu%d: guest page read error %d, "
+                               gvt_vgpu_err("guest page read error %d, "
                                        "gfn 0x%lx, pa 0x%llx, var 0x%x, len %d\n",
-                                       vgpu->id, ret,
-                                       gp->gfn, pa, *(u32 *)p_data, bytes);
+                                       ret, gp->gfn, pa, *(u32 *)p_data,
+                                       bytes);
                        }
                        mutex_unlock(&gvt->lock);
                        return ret;
@@ -200,14 +200,13 @@ int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, uint64_t pa,
                ret = intel_vgpu_default_mmio_read(vgpu, offset, p_data, bytes);
 
                if (!vgpu->mmio.disable_warn_untrack) {
-                       gvt_err("vgpu%d: read untracked MMIO %x(%dB) val %x\n",
-                               vgpu->id, offset, bytes, *(u32 *)p_data);
+                       gvt_vgpu_err("read untracked MMIO %x(%dB) val %x\n",
+                               offset, bytes, *(u32 *)p_data);
 
                        if (offset == 0x206c) {
-                               gvt_err("------------------------------------------\n");
-                               gvt_err("vgpu%d: likely triggers a gfx reset\n",
-                                       vgpu->id);
-                               gvt_err("------------------------------------------\n");
+                               gvt_vgpu_err("------------------------------------------\n");
+                               gvt_vgpu_err("likely triggers a gfx reset\n");
+                               gvt_vgpu_err("------------------------------------------\n");
                                vgpu->mmio.disable_warn_untrack = true;
                        }
                }
@@ -220,8 +219,8 @@ int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, uint64_t pa,
        mutex_unlock(&gvt->lock);
        return 0;
 err:
-       gvt_err("vgpu%d: fail to emulate MMIO read %08x len %d\n",
-                       vgpu->id, offset, bytes);
+       gvt_vgpu_err("fail to emulate MMIO read %08x len %d\n",
+                       offset, bytes);
        mutex_unlock(&gvt->lock);
        return ret;
 }
@@ -259,10 +258,11 @@ int intel_vgpu_emulate_mmio_write(struct intel_vgpu *vgpu, uint64_t pa,
                if (gp) {
                        ret = gp->handler(gp, pa, p_data, bytes);
                        if (ret) {
-                               gvt_err("vgpu%d: guest page write error %d, "
-                                       "gfn 0x%lx, pa 0x%llx, var 0x%x, len %d\n",
-                                       vgpu->id, ret,
-                                       gp->gfn, pa, *(u32 *)p_data, bytes);
+                               gvt_vgpu_err("guest page write error %d, "
+                                       "gfn 0x%lx, pa 0x%llx, "
+                                       "var 0x%x, len %d\n",
+                                       ret, gp->gfn, pa,
+                                       *(u32 *)p_data, bytes);
                        }
                        mutex_unlock(&gvt->lock);
                        return ret;
@@ -329,8 +329,8 @@ int intel_vgpu_emulate_mmio_write(struct intel_vgpu *vgpu, uint64_t pa,
 
                        /* all register bits are RO. */
                        if (ro_mask == ~(u64)0) {
-                               gvt_err("vgpu%d: try to write RO reg %x\n",
-                                               vgpu->id, offset);
+                               gvt_vgpu_err("try to write RO reg %x\n",
+                                       offset);
                                ret = 0;
                                goto out;
                        }
@@ -360,8 +360,8 @@ out:
        mutex_unlock(&gvt->lock);
        return 0;
 err:
-       gvt_err("vgpu%d: fail to emulate MMIO write %08x len %d\n",
-                       vgpu->id, offset, bytes);
+       gvt_vgpu_err("fail to emulate MMIO write %08x len %d\n", offset,
+                    bytes);
        mutex_unlock(&gvt->lock);
        return ret;
 }
index 3bc620f..a3a0270 100644 (file)
@@ -107,4 +107,7 @@ int intel_vgpu_default_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
                                 void *p_data, unsigned int bytes);
 int intel_vgpu_default_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
                                  void *p_data, unsigned int bytes);
+
+bool intel_gvt_in_force_nonpriv_whitelist(struct intel_gvt *gvt,
+                                         unsigned int offset);
 #endif
index 5d1caf9..3117991 100644 (file)
@@ -67,14 +67,15 @@ static int map_vgpu_opregion(struct intel_vgpu *vgpu, bool map)
                mfn = intel_gvt_hypervisor_virt_to_mfn(vgpu_opregion(vgpu)->va
                        + i * PAGE_SIZE);
                if (mfn == INTEL_GVT_INVALID_ADDR) {
-                       gvt_err("fail to get MFN from VA\n");
+                       gvt_vgpu_err("fail to get MFN from VA\n");
                        return -EINVAL;
                }
                ret = intel_gvt_hypervisor_map_gfn_to_mfn(vgpu,
                                vgpu_opregion(vgpu)->gfn[i],
                                mfn, 1, map);
                if (ret) {
-                       gvt_err("fail to map GFN to MFN, errno: %d\n", ret);
+                       gvt_vgpu_err("fail to map GFN to MFN, errno: %d\n",
+                               ret);
                        return ret;
                }
        }
@@ -287,7 +288,7 @@ int intel_vgpu_emulate_opregion_request(struct intel_vgpu *vgpu, u32 swsci)
        parm = vgpu_opregion(vgpu)->va + INTEL_GVT_OPREGION_PARM;
 
        if (!(swsci & SWSCI_SCI_SELECT)) {
-               gvt_err("vgpu%d: requesting SMI service\n", vgpu->id);
+               gvt_vgpu_err("requesting SMI service\n");
                return 0;
        }
        /* ignore non 0->1 trasitions */
@@ -300,9 +301,8 @@ int intel_vgpu_emulate_opregion_request(struct intel_vgpu *vgpu, u32 swsci)
        func = GVT_OPREGION_FUNC(*scic);
        subfunc = GVT_OPREGION_SUBFUNC(*scic);
        if (!querying_capabilities(*scic)) {
-               gvt_err("vgpu%d: requesting runtime service: func \"%s\","
+               gvt_vgpu_err("requesting runtime service: func \"%s\","
                                " subfunc \"%s\"\n",
-                               vgpu->id,
                                opregion_func_name(func),
                                opregion_subfunc_name(subfunc));
                /*
index 73f052a..95ee091 100644 (file)
@@ -167,7 +167,7 @@ static void handle_tlb_pending_event(struct intel_vgpu *vgpu, int ring_id)
        I915_WRITE_FW(reg, 0x1);
 
        if (wait_for_atomic((I915_READ_FW(reg) == 0), 50))
-               gvt_err("timeout in invalidate ring (%d) tlb\n", ring_id);
+               gvt_vgpu_err("timeout in invalidate ring (%d) tlb\n", ring_id);
        else
                vgpu_vreg(vgpu, regs[ring_id]) = 0;
 
index 06c9584..34b9acd 100644 (file)
@@ -101,7 +101,7 @@ struct tbs_sched_data {
        struct list_head runq_head;
 };
 
-#define GVT_DEFAULT_TIME_SLICE (1 * HZ / 1000)
+#define GVT_DEFAULT_TIME_SLICE (msecs_to_jiffies(1))
 
 static void tbs_sched_func(struct work_struct *work)
 {
@@ -223,7 +223,7 @@ static void tbs_sched_start_schedule(struct intel_vgpu *vgpu)
                return;
 
        list_add_tail(&vgpu_data->list, &sched_data->runq_head);
-       schedule_delayed_work(&sched_data->work, sched_data->period);
+       schedule_delayed_work(&sched_data->work, 0);
 }
 
 static void tbs_sched_stop_schedule(struct intel_vgpu *vgpu)
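
Editor's note: the GVT_DEFAULT_TIME_SLICE change fixes an integer-truncation problem rather than restyling the define: on kernels with HZ below 1000 the old expression evaluates to zero jiffies, while msecs_to_jiffies() rounds up to at least one. The schedule_delayed_work(..., 0) change independently appears intended to make a newly started vgpu schedulable immediately instead of one period later. Worked out for a common configuration:

        /* with HZ = 250:
         *   1 * HZ / 1000       == 250 / 1000 == 0 jiffies (truncated)
         *   msecs_to_jiffies(1) == 1 jiffy (~4 ms, rounded up)
         * so the old define degenerated into a zero-length time slice */
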
index d3a56c9..c4353ed 100644 (file)
@@ -84,7 +84,7 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
                                (u32)((workload->ctx_desc.lrca + i) <<
                                GTT_PAGE_SHIFT));
                if (context_gpa == INTEL_GVT_INVALID_ADDR) {
-                       gvt_err("Invalid guest context descriptor\n");
+                       gvt_vgpu_err("Invalid guest context descriptor\n");
                        return -EINVAL;
                }
 
@@ -130,12 +130,10 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
 static int shadow_context_status_change(struct notifier_block *nb,
                unsigned long action, void *data)
 {
-       struct intel_vgpu *vgpu = container_of(nb,
-                       struct intel_vgpu, shadow_ctx_notifier_block);
-       struct drm_i915_gem_request *req =
-               (struct drm_i915_gem_request *)data;
-       struct intel_gvt_workload_scheduler *scheduler =
-               &vgpu->gvt->scheduler;
+       struct drm_i915_gem_request *req = (struct drm_i915_gem_request *)data;
+       struct intel_gvt *gvt = container_of(nb, struct intel_gvt,
+                               shadow_ctx_notifier_block[req->engine->id]);
+       struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
        struct intel_vgpu_workload *workload =
                scheduler->current_workload[req->engine->id];
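
Editor's note: one subtlety worth calling out is that the container_of() above names an indexed array element as the member; GCC's offsetof builtin folds shadow_ctx_notifier_block[req->engine->id] into the right byte offset even though the index is only known at run time, which is what lets one notifier block per engine share a single enclosing intel_gvt. A standalone illustration with made-up type names:

        #include <stddef.h>

        struct owner {
                long other_state;
                struct block { int dummy; } blocks[4];
        };

        /* recover the enclosing struct from a pointer to blocks[i];
         * offsetof() on an array member with a runtime index is a GCC
         * extension that the kernel's container_of() relies on here */
        static inline struct owner *owner_of(struct block *b, int i)
        {
                return (struct owner *)((char *)b -
                                offsetof(struct owner, blocks[i]));
        }
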
 
@@ -175,7 +173,9 @@ static int dispatch_workload(struct intel_vgpu_workload *workload)
        int ring_id = workload->ring_id;
        struct i915_gem_context *shadow_ctx = workload->vgpu->shadow_ctx;
        struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv;
+       struct intel_engine_cs *engine = dev_priv->engine[ring_id];
        struct drm_i915_gem_request *rq;
+       struct intel_vgpu *vgpu = workload->vgpu;
        int ret;
 
        gvt_dbg_sched("ring id %d prepare to dispatch workload %p\n",
@@ -187,9 +187,24 @@ static int dispatch_workload(struct intel_vgpu_workload *workload)
 
        mutex_lock(&dev_priv->drm.struct_mutex);
 
+       /* Pin the shadow context by GVT even though i915 will pin it again
+        * when it allocates a request. GVT must update the guest context
+        * from the shadow context once the workload completes, and by that
+        * time i915 may already have unpinned the shadow context, leaving
+        * its pages invalid. So GVT takes its own pin here and releases it
+        * only after the guest context has been updated.
+        */
+       ret = engine->context_pin(engine, shadow_ctx);
+       if (ret) {
+               gvt_vgpu_err("fail to pin shadow context\n");
+               workload->status = ret;
+               mutex_unlock(&dev_priv->drm.struct_mutex);
+               return ret;
+       }
+
        rq = i915_gem_request_alloc(dev_priv->engine[ring_id], shadow_ctx);
        if (IS_ERR(rq)) {
-               gvt_err("fail to allocate gem request\n");
+               gvt_vgpu_err("fail to allocate gem request\n");
                ret = PTR_ERR(rq);
                goto out;
        }
@@ -202,9 +217,12 @@ static int dispatch_workload(struct intel_vgpu_workload *workload)
        if (ret)
                goto out;
 
-       ret = intel_gvt_scan_and_shadow_wa_ctx(&workload->wa_ctx);
-       if (ret)
-               goto out;
+       if ((workload->ring_id == RCS) &&
+           (workload->wa_ctx.indirect_ctx.size != 0)) {
+               ret = intel_gvt_scan_and_shadow_wa_ctx(&workload->wa_ctx);
+               if (ret)
+                       goto out;
+       }
 
        ret = populate_shadow_context(workload);
        if (ret)
@@ -227,6 +245,9 @@ out:
 
        if (!IS_ERR_OR_NULL(rq))
                i915_add_request_no_flush(rq);
+       else
+               engine->context_unpin(engine, shadow_ctx);
+
        mutex_unlock(&dev_priv->drm.struct_mutex);
        return ret;
 }
@@ -322,7 +343,7 @@ static void update_guest_context(struct intel_vgpu_workload *workload)
                                (u32)((workload->ctx_desc.lrca + i) <<
                                        GTT_PAGE_SHIFT));
                if (context_gpa == INTEL_GVT_INVALID_ADDR) {
-                       gvt_err("invalid guest context descriptor\n");
+                       gvt_vgpu_err("invalid guest context descriptor\n");
                        return;
                }
 
@@ -376,6 +397,10 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
         * For the workload w/o request, directly complete the workload.
         */
        if (workload->req) {
+               struct drm_i915_private *dev_priv =
+                       workload->vgpu->gvt->dev_priv;
+               struct intel_engine_cs *engine =
+                       dev_priv->engine[workload->ring_id];
                wait_event(workload->shadow_ctx_status_wq,
                           !atomic_read(&workload->shadow_ctx_active));
 
@@ -388,6 +413,10 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
                                         INTEL_GVT_EVENT_MAX)
                                intel_vgpu_trigger_virtual_event(vgpu, event);
                }
+               mutex_lock(&dev_priv->drm.struct_mutex);
+               /* unpin shadow ctx as the shadow_ctx update is done */
+               engine->context_unpin(engine, workload->vgpu->shadow_ctx);
+               mutex_unlock(&dev_priv->drm.struct_mutex);
        }
 
        gvt_dbg_sched("ring id %d complete workload %p status %d\n",
@@ -417,6 +446,7 @@ static int workload_thread(void *priv)
        int ring_id = p->ring_id;
        struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
        struct intel_vgpu_workload *workload = NULL;
+       struct intel_vgpu *vgpu = NULL;
        int ret;
        bool need_force_wake = IS_SKYLAKE(gvt->dev_priv);
        DEFINE_WAIT_FUNC(wait, woken_wake_function);
@@ -459,25 +489,14 @@ static int workload_thread(void *priv)
                mutex_unlock(&gvt->lock);
 
                if (ret) {
-                       gvt_err("fail to dispatch workload, skip\n");
+                       vgpu = workload->vgpu;
+                       gvt_vgpu_err("fail to dispatch workload, skip\n");
                        goto complete;
                }
 
                gvt_dbg_sched("ring id %d wait workload %p\n",
                                workload->ring_id, workload);
-retry:
-               i915_wait_request(workload->req,
-                                        0, MAX_SCHEDULE_TIMEOUT);
-               /* I915 has replay mechanism and a request will be replayed
-                * if there is i915 reset. So the seqno will be updated anyway.
-                * If the seqno is not updated yet after waiting, which means
-                * the replay may still be in progress and we can wait again.
-                */
-               if (!i915_gem_request_completed(workload->req)) {
-                       gvt_dbg_sched("workload %p not completed, wait again\n",
-                                       workload);
-                       goto retry;
-               }
+               i915_wait_request(workload->req, 0, MAX_SCHEDULE_TIMEOUT);
 
 complete:
                gvt_dbg_sched("will complete workload %p, status: %d\n",
@@ -513,15 +532,16 @@ void intel_gvt_wait_vgpu_idle(struct intel_vgpu *vgpu)
 void intel_gvt_clean_workload_scheduler(struct intel_gvt *gvt)
 {
        struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
-       int i;
+       struct intel_engine_cs *engine;
+       enum intel_engine_id i;
 
        gvt_dbg_core("clean workload scheduler\n");
 
-       for (i = 0; i < I915_NUM_ENGINES; i++) {
-               if (scheduler->thread[i]) {
-                       kthread_stop(scheduler->thread[i]);
-                       scheduler->thread[i] = NULL;
-               }
+       for_each_engine(engine, gvt->dev_priv, i) {
+               atomic_notifier_chain_unregister(
+                                       &engine->context_status_notifier,
+                                       &gvt->shadow_ctx_notifier_block[i]);
+               kthread_stop(scheduler->thread[i]);
        }
 }
 
@@ -529,18 +549,15 @@ int intel_gvt_init_workload_scheduler(struct intel_gvt *gvt)
 {
        struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
        struct workload_thread_param *param = NULL;
+       struct intel_engine_cs *engine;
+       enum intel_engine_id i;
        int ret;
-       int i;
 
        gvt_dbg_core("init workload scheduler\n");
 
        init_waitqueue_head(&scheduler->workload_complete_wq);
 
-       for (i = 0; i < I915_NUM_ENGINES; i++) {
-               /* check ring mask at init time */
-               if (!HAS_ENGINE(gvt->dev_priv, i))
-                       continue;
-
+       for_each_engine(engine, gvt->dev_priv, i) {
                init_waitqueue_head(&scheduler->waitq[i]);
 
                param = kzalloc(sizeof(*param), GFP_KERNEL);
@@ -559,6 +576,11 @@ int intel_gvt_init_workload_scheduler(struct intel_gvt *gvt)
                        ret = PTR_ERR(scheduler->thread[i]);
                        goto err;
                }
+
+               gvt->shadow_ctx_notifier_block[i].notifier_call =
+                                       shadow_context_status_change;
+               atomic_notifier_chain_register(&engine->context_status_notifier,
+                                       &gvt->shadow_ctx_notifier_block[i]);
        }
        return 0;
 err:
@@ -570,9 +592,6 @@ err:
 
 void intel_vgpu_clean_gvt_context(struct intel_vgpu *vgpu)
 {
-       atomic_notifier_chain_unregister(&vgpu->shadow_ctx->status_notifier,
-                       &vgpu->shadow_ctx_notifier_block);
-
        i915_gem_context_put_unlocked(vgpu->shadow_ctx);
 }
 
@@ -587,10 +606,5 @@ int intel_vgpu_init_gvt_context(struct intel_vgpu *vgpu)
 
        vgpu->shadow_ctx->engine[RCS].initialised = true;
 
-       vgpu->shadow_ctx_notifier_block.notifier_call =
-               shadow_context_status_change;
-
-       atomic_notifier_chain_register(&vgpu->shadow_ctx->status_notifier,
-                                      &vgpu->shadow_ctx_notifier_block);
        return 0;
 }
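
Taken together, the scheduler hunks above move the context-status notifier from a per-context head (the removed ctx->status_notifier) to one head per engine: a notifier block per engine is registered in intel_gvt_init_workload_scheduler() and unregistered in the cleanup loop. A self-contained model of that registration pattern, sketched as a plain linked list (the kernel's atomic notifier chain is RCU-protected and priority-ordered; this keeps only the shape):

    #include <stdio.h>

    /* Minimal model of one notifier chain per engine: callbacks
     * registered on engine->status_chain fire on every status change. */
    struct notifier_block {
            int (*notifier_call)(struct notifier_block *nb,
                                 unsigned long action, void *data);
            struct notifier_block *next;
    };

    struct engine {
            const char *name;
            struct notifier_block *status_chain;
    };

    static void chain_register(struct engine *e, struct notifier_block *nb)
    {
            nb->next = e->status_chain;
            e->status_chain = nb;
    }

    static void chain_unregister(struct engine *e, struct notifier_block *nb)
    {
            struct notifier_block **p = &e->status_chain;

            while (*p && *p != nb)
                    p = &(*p)->next;
            if (*p)
                    *p = nb->next;
    }

    static void chain_call(struct engine *e, unsigned long action)
    {
            struct notifier_block *nb;

            for (nb = e->status_chain; nb; nb = nb->next)
                    nb->notifier_call(nb, action, e);
    }

    static int shadow_status_change(struct notifier_block *nb,
                                    unsigned long action, void *data)
    {
            printf("engine %s: status %lu\n",
                   ((struct engine *)data)->name, action);
            return 0;
    }

    int main(void)
    {
            struct engine engines[2] = { { "rcs", NULL }, { "vcs", NULL } };
            struct notifier_block blocks[2];    /* one block per engine */

            for (int i = 0; i < 2; i++) {       /* init_workload_scheduler */
                    blocks[i].notifier_call = shadow_status_change;
                    chain_register(&engines[i], &blocks[i]);
            }
            chain_call(&engines[0], 1);
            for (int i = 0; i < 2; i++)         /* clean_workload_scheduler */
                    chain_unregister(&engines[i], &blocks[i]);
            return 0;
    }
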
index e703556..1c75402 100644
@@ -248,6 +248,7 @@ static int i915_getparam(struct drm_device *dev, void *data,
        case I915_PARAM_IRQ_ACTIVE:
        case I915_PARAM_ALLOW_BATCHBUFFER:
        case I915_PARAM_LAST_DISPATCH:
+       case I915_PARAM_HAS_EXEC_CONSTANTS:
                /* Reject all old ums/dri params. */
                return -ENODEV;
        case I915_PARAM_CHIPSET_ID:
@@ -274,9 +275,6 @@ static int i915_getparam(struct drm_device *dev, void *data,
        case I915_PARAM_HAS_BSD2:
                value = !!dev_priv->engine[VCS2];
                break;
-       case I915_PARAM_HAS_EXEC_CONSTANTS:
-               value = INTEL_GEN(dev_priv) >= 4;
-               break;
        case I915_PARAM_HAS_LLC:
                value = HAS_LLC(dev_priv);
                break;
@@ -1788,7 +1786,7 @@ void i915_reset(struct drm_i915_private *dev_priv)
                goto error;
        }
 
-       i915_gem_reset_finish(dev_priv);
+       i915_gem_reset(dev_priv);
        intel_overlay_reset(dev_priv);
 
        /* Ok, now get things going again... */
@@ -1814,6 +1812,7 @@ void i915_reset(struct drm_i915_private *dev_priv)
        i915_queue_hangcheck(dev_priv);
 
 wakeup:
+       i915_gem_reset_finish(dev_priv);
        enable_irq(dev_priv->drm.irq);
        wake_up_bit(&error->flags, I915_RESET_IN_PROGRESS);
        return;
index 0a4b42d..1e53c31 100644
@@ -293,6 +293,7 @@ enum plane_id {
        PLANE_PRIMARY,
        PLANE_SPRITE0,
        PLANE_SPRITE1,
+       PLANE_SPRITE2,
        PLANE_CURSOR,
        I915_MAX_PLANES,
 };
@@ -1324,7 +1325,7 @@ struct intel_gen6_power_mgmt {
        unsigned boosts;
 
        /* manual wa residency calculations */
-       struct intel_rps_ei up_ei, down_ei;
+       struct intel_rps_ei ei;
 
        /*
         * Protects RPS/RC6 register access and PCU communication.
@@ -2063,8 +2064,6 @@ struct drm_i915_private {
 
        const struct intel_device_info info;
 
-       int relative_constants_mode;
-
        void __iomem *regs;
 
        struct intel_uncore uncore;
@@ -3341,6 +3340,7 @@ static inline u32 i915_reset_count(struct i915_gpu_error *error)
 }
 
 int i915_gem_reset_prepare(struct drm_i915_private *dev_priv);
+void i915_gem_reset(struct drm_i915_private *dev_priv);
 void i915_gem_reset_finish(struct drm_i915_private *dev_priv);
 void i915_gem_set_wedged(struct drm_i915_private *dev_priv);
 void i915_gem_clflush_object(struct drm_i915_gem_object *obj, bool force);
index 6908123..67b1fc5 100644
@@ -1434,6 +1434,12 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
 
        trace_i915_gem_object_pwrite(obj, args->offset, args->size);
 
+       ret = -ENODEV;
+       if (obj->ops->pwrite)
+               ret = obj->ops->pwrite(obj, args);
+       if (ret != -ENODEV)
+               goto err;
+
        ret = i915_gem_object_wait(obj,
                                   I915_WAIT_INTERRUPTIBLE |
                                   I915_WAIT_ALL,
@@ -2119,6 +2125,7 @@ i915_gem_object_truncate(struct drm_i915_gem_object *obj)
         */
        shmem_truncate_range(file_inode(obj->base.filp), 0, (loff_t)-1);
        obj->mm.madv = __I915_MADV_PURGED;
+       obj->mm.pages = ERR_PTR(-EFAULT);
 }
 
 /* Try to discard unwanted pages */
@@ -2218,7 +2225,9 @@ void __i915_gem_object_put_pages(struct drm_i915_gem_object *obj,
 
        __i915_gem_object_reset_page_iter(obj);
 
-       obj->ops->put_pages(obj, pages);
+       if (!IS_ERR(pages))
+               obj->ops->put_pages(obj, pages);
+
 unlock:
        mutex_unlock(&obj->mm.lock);
 }
@@ -2437,7 +2446,7 @@ int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
        if (err)
                return err;
 
-       if (unlikely(!obj->mm.pages)) {
+       if (unlikely(IS_ERR_OR_NULL(obj->mm.pages))) {
                err = ____i915_gem_object_get_pages(obj);
                if (err)
                        goto unlock;
@@ -2515,7 +2524,7 @@ void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
 
        pinned = true;
        if (!atomic_inc_not_zero(&obj->mm.pages_pin_count)) {
-               if (unlikely(!obj->mm.pages)) {
+               if (unlikely(IS_ERR_OR_NULL(obj->mm.pages))) {
                        ret = ____i915_gem_object_get_pages(obj);
                        if (ret)
                                goto err_unlock;
@@ -2563,6 +2572,75 @@ err_unlock:
        goto out_unlock;
 }
 
+static int
+i915_gem_object_pwrite_gtt(struct drm_i915_gem_object *obj,
+                          const struct drm_i915_gem_pwrite *arg)
+{
+       struct address_space *mapping = obj->base.filp->f_mapping;
+       char __user *user_data = u64_to_user_ptr(arg->data_ptr);
+       u64 remain, offset;
+       unsigned int pg;
+
+       /* Before we instantiate/pin the backing store for our use, we
+        * can prepopulate the shmemfs filp efficiently using a write into
+        * the pagecache. We avoid the penalty of instantiating all the
+        * pages, important if the user is just writing to a few and never
+        * uses the object on the GPU, and using a direct write into shmemfs
+        * allows it to avoid the cost of retrieving a page (either swapin
+        * or clearing-before-use) before it is overwritten.
+        */
+       if (READ_ONCE(obj->mm.pages))
+               return -ENODEV;
+
+       /* Before the pages are instantiated the object is treated as being
+        * in the CPU domain. The pages will be clflushed as required before
+        * use, and we can freely write into the pages directly. If userspace
+        * races pwrite with any other operation, corruption will ensue -
+        * that is userspace's prerogative!
+        */
+
+       remain = arg->size;
+       offset = arg->offset;
+       pg = offset_in_page(offset);
+
+       do {
+               unsigned int len, unwritten;
+               struct page *page;
+               void *data, *vaddr;
+               int err;
+
+               len = PAGE_SIZE - pg;
+               if (len > remain)
+                       len = remain;
+
+               err = pagecache_write_begin(obj->base.filp, mapping,
+                                           offset, len, 0,
+                                           &page, &data);
+               if (err < 0)
+                       return err;
+
+               vaddr = kmap(page);
+               unwritten = copy_from_user(vaddr + pg, user_data, len);
+               kunmap(page);
+
+               err = pagecache_write_end(obj->base.filp, mapping,
+                                         offset, len, len - unwritten,
+                                         page, data);
+               if (err < 0)
+                       return err;
+
+               if (unwritten)
+                       return -EFAULT;
+
+               remain -= len;
+               user_data += len;
+               offset += len;
+               pg = 0;
+       } while (remain);
+
+       return 0;
+}
+
 static bool ban_context(const struct i915_gem_context *ctx)
 {
        return (i915_gem_context_is_bannable(ctx) &&
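
The new i915_gem_object_pwrite_gtt() streams the user buffer through the pagecache one page at a time: the first chunk is clamped to the page boundary via pg, every later chunk starts page-aligned, and any short copy aborts the loop. A userspace analogue of the same chunking arithmetic, assuming an ordinary file stands in for the shmemfs backing store (pwrite_paged is a made-up helper, not a kernel function):

    #include <errno.h>
    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    #define PAGE_SIZE 4096UL

    /* Write len bytes at offset, one page-bounded chunk at a time. */
    static int pwrite_paged(int fd, const char *buf, size_t len, off_t offset)
    {
            size_t remain = len;
            size_t pg = offset % PAGE_SIZE;         /* offset within first page */

            while (remain) {
                    size_t chunk = PAGE_SIZE - pg;  /* never cross a page */
                    ssize_t written;

                    if (chunk > remain)
                            chunk = remain;

                    written = pwrite(fd, buf, chunk, offset);
                    if (written < 0)
                            return -errno;
                    if ((size_t)written != chunk)
                            return -EFAULT;         /* short copy: give up */

                    remain -= chunk;
                    buf += chunk;
                    offset += chunk;
                    pg = 0;                         /* later chunks are aligned */
            }
            return 0;
    }

    int main(void)
    {
            char buf[10000] = { 0 };
            int fd = open("/tmp/pwrite_demo", O_CREAT | O_WRONLY, 0600);

            printf("ret=%d\n", fd < 0 ? -errno :
                   pwrite_paged(fd, buf, sizeof(buf), 100));
            return 0;
    }
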
@@ -2641,7 +2719,16 @@ int i915_gem_reset_prepare(struct drm_i915_private *dev_priv)
        for_each_engine(engine, dev_priv, id) {
                struct drm_i915_gem_request *request;
 
+               /* Prevent request submission to the hardware until we have
+                * completed the reset in i915_gem_reset_finish(). If a request
+                * is completed by one engine, it may then queue a request
+                * to a second via its engine->irq_tasklet *just* as we are
+                * calling engine->init_hw() and also writing the ELSP.
+                * Turning off the engine->irq_tasklet until the reset is over
+                * prevents the race.
+                */
                tasklet_kill(&engine->irq_tasklet);
+               tasklet_disable(&engine->irq_tasklet);
 
                if (engine_stalled(engine)) {
                        request = i915_gem_find_active_request(engine);
@@ -2756,7 +2843,7 @@ static void i915_gem_reset_engine(struct intel_engine_cs *engine)
        engine->reset_hw(engine, request);
 }
 
-void i915_gem_reset_finish(struct drm_i915_private *dev_priv)
+void i915_gem_reset(struct drm_i915_private *dev_priv)
 {
        struct intel_engine_cs *engine;
        enum intel_engine_id id;
@@ -2778,6 +2865,17 @@ void i915_gem_reset_finish(struct drm_i915_private *dev_priv)
        }
 }
 
+void i915_gem_reset_finish(struct drm_i915_private *dev_priv)
+{
+       struct intel_engine_cs *engine;
+       enum intel_engine_id id;
+
+       lockdep_assert_held(&dev_priv->drm.struct_mutex);
+
+       for_each_engine(engine, dev_priv, id)
+               tasklet_enable(&engine->irq_tasklet);
+}
+
 static void nop_submit_request(struct drm_i915_gem_request *request)
 {
        dma_fence_set_error(&request->fence, -EIO);
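
The prepare/finish pair above brackets the whole reset with tasklet_disable()/tasklet_enable() on every engine, so a completion on one engine cannot push new work into another engine's ELSP mid-reset. A compact userspace sketch of that disable/work/enable bracket as a counting gate (note that tasklet_disable() additionally waits for a running tasklet to finish, which this sketch does not model):

    #include <stdatomic.h>
    #include <stdio.h>

    /* A tasklet-like gate: work runs only while disable_count == 0. */
    struct gate {
            atomic_int disable_count;
    };

    static void gate_disable(struct gate *g) { atomic_fetch_add(&g->disable_count, 1); }
    static void gate_enable(struct gate *g)  { atomic_fetch_sub(&g->disable_count, 1); }

    static void submit_if_enabled(struct gate *g, int req)
    {
            if (atomic_load(&g->disable_count) == 0)
                    printf("request %d submitted\n", req);
            else
                    printf("request %d held off: reset in progress\n", req);
    }

    int main(void)
    {
            struct gate engine = { 0 };

            gate_disable(&engine);          /* i915_gem_reset_prepare() */
            submit_if_enabled(&engine, 1);  /* racing submission is held off */
            /* ... reset the hardware, rewrite the ELSP ... */
            gate_enable(&engine);           /* i915_gem_reset_finish() */
            submit_if_enabled(&engine, 2);  /* goes through again */
            return 0;
    }
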
@@ -3029,6 +3127,16 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
                args->timeout_ns -= ktime_to_ns(ktime_sub(ktime_get(), start));
                if (args->timeout_ns < 0)
                        args->timeout_ns = 0;
+
+               /*
+                * Apparently ktime isn't accurate enough and occasionally has a
+                * bit of mismatch in the jiffies<->nsecs<->ktime loop. So patch
+                * things up to make the test happy. We allow up to 1 jiffy.
+                *
+                * This is a regression from the timespec->ktime conversion.
+                */
+               if (ret == -ETIME && !nsecs_to_jiffies(args->timeout_ns))
+                       args->timeout_ns = 0;
        }
 
        i915_gem_object_put(obj);
@@ -3974,8 +4082,11 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj,
 static const struct drm_i915_gem_object_ops i915_gem_object_ops = {
        .flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE |
                 I915_GEM_OBJECT_IS_SHRINKABLE,
+
        .get_pages = i915_gem_object_get_pages_gtt,
        .put_pages = i915_gem_object_put_pages_gtt,
+
+       .pwrite = i915_gem_object_pwrite_gtt,
 };
 
 struct drm_i915_gem_object *
@@ -4583,8 +4694,6 @@ i915_gem_load_init(struct drm_i915_private *dev_priv)
        init_waitqueue_head(&dev_priv->gpu_error.wait_queue);
        init_waitqueue_head(&dev_priv->gpu_error.reset_queue);
 
-       dev_priv->relative_constants_mode = I915_EXEC_CONSTANTS_REL_GENERAL;
-
        init_waitqueue_head(&dev_priv->pending_flip_queue);
 
        dev_priv->mm.interruptible = true;
index 17f90c6..e2d83b6 100644
@@ -311,7 +311,6 @@ __create_hw_context(struct drm_i915_private *dev_priv,
        ctx->ring_size = 4 * PAGE_SIZE;
        ctx->desc_template = GEN8_CTX_ADDRESSING_MODE(dev_priv) <<
                             GEN8_CTX_ADDRESSING_MODE_SHIFT;
-       ATOMIC_INIT_NOTIFIER_HEAD(&ctx->status_notifier);
 
        /* GuC requires the ring to be placed above GUC_WOPCM_TOP. If GuC is not
         * present or not in use we still need a small bias as ring wraparound
index 0ac750b..e9c008f 100644
@@ -160,9 +160,6 @@ struct i915_gem_context {
        /** desc_template: invariant fields for the HW context descriptor */
        u32 desc_template;
 
-       /** status_notifier: list of callbacks for context-switch changes */
-       struct atomic_notifier_head status_notifier;
-
        /** guilty_count: How many times this context has caused a GPU hang. */
        unsigned int guilty_count;
        /**
index c181b1b..3be2503 100644
@@ -293,12 +293,12 @@ int i915_gem_evict_for_node(struct i915_address_space *vm,
                 * those as well to make room for our guard pages.
                 */
                if (check_color) {
-                       if (vma->node.start + vma->node.size == node->start) {
-                               if (vma->node.color == node->color)
+                       if (node->start + node->size == target->start) {
+                               if (node->color == target->color)
                                        continue;
                        }
-                       if (vma->node.start == node->start + node->size) {
-                               if (vma->node.color == node->color)
+                       if (node->start == target->start + target->size) {
+                               if (node->color == target->color)
                                        continue;
                        }
                }
index d02cfae..30e0675 100644
@@ -1408,10 +1408,7 @@ execbuf_submit(struct i915_execbuffer_params *params,
               struct drm_i915_gem_execbuffer2 *args,
               struct list_head *vmas)
 {
-       struct drm_i915_private *dev_priv = params->request->i915;
        u64 exec_start, exec_len;
-       int instp_mode;
-       u32 instp_mask;
        int ret;
 
        ret = i915_gem_execbuffer_move_to_gpu(params->request, vmas);
@@ -1422,56 +1419,11 @@ execbuf_submit(struct i915_execbuffer_params *params,
        if (ret)
                return ret;
 
-       instp_mode = args->flags & I915_EXEC_CONSTANTS_MASK;
-       instp_mask = I915_EXEC_CONSTANTS_MASK;
-       switch (instp_mode) {
-       case I915_EXEC_CONSTANTS_REL_GENERAL:
-       case I915_EXEC_CONSTANTS_ABSOLUTE:
-       case I915_EXEC_CONSTANTS_REL_SURFACE:
-               if (instp_mode != 0 && params->engine->id != RCS) {
-                       DRM_DEBUG("non-0 rel constants mode on non-RCS\n");
-                       return -EINVAL;
-               }
-
-               if (instp_mode != dev_priv->relative_constants_mode) {
-                       if (INTEL_INFO(dev_priv)->gen < 4) {
-                               DRM_DEBUG("no rel constants on pre-gen4\n");
-                               return -EINVAL;
-                       }
-
-                       if (INTEL_INFO(dev_priv)->gen > 5 &&
-                           instp_mode == I915_EXEC_CONSTANTS_REL_SURFACE) {
-                               DRM_DEBUG("rel surface constants mode invalid on gen5+\n");
-                               return -EINVAL;
-                       }
-
-                       /* The HW changed the meaning on this bit on gen6 */
-                       if (INTEL_INFO(dev_priv)->gen >= 6)
-                               instp_mask &= ~I915_EXEC_CONSTANTS_REL_SURFACE;
-               }
-               break;
-       default:
-               DRM_DEBUG("execbuf with unknown constants: %d\n", instp_mode);
+       if (args->flags & I915_EXEC_CONSTANTS_MASK) {
+               DRM_DEBUG("I915_EXEC_CONSTANTS_* unsupported\n");
                return -EINVAL;
        }
 
-       if (params->engine->id == RCS &&
-           instp_mode != dev_priv->relative_constants_mode) {
-               struct intel_ring *ring = params->request->ring;
-
-               ret = intel_ring_begin(params->request, 4);
-               if (ret)
-                       return ret;
-
-               intel_ring_emit(ring, MI_NOOP);
-               intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
-               intel_ring_emit_reg(ring, INSTPM);
-               intel_ring_emit(ring, instp_mask << 16 | instp_mode);
-               intel_ring_advance(ring);
-
-               dev_priv->relative_constants_mode = instp_mode;
-       }
-
        if (args->flags & I915_EXEC_GEN7_SOL_RESET) {
                ret = i915_reset_gen7_sol_offsets(params->request);
                if (ret)
index bf90b07..76b80a0 100644
@@ -54,6 +54,9 @@ struct drm_i915_gem_object_ops {
        struct sg_table *(*get_pages)(struct drm_i915_gem_object *);
        void (*put_pages)(struct drm_i915_gem_object *, struct sg_table *);
 
+       int (*pwrite)(struct drm_i915_gem_object *,
+                     const struct drm_i915_gem_pwrite *);
+
        int (*dmabuf_export)(struct drm_i915_gem_object *);
        void (*release)(struct drm_i915_gem_object *);
 };
index 401006b..d5d2b4c 100644
@@ -263,7 +263,7 @@ unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv)
                                I915_SHRINK_BOUND |
                                I915_SHRINK_UNBOUND |
                                I915_SHRINK_ACTIVE);
-       rcu_barrier(); /* wait until our RCU delayed slab frees are completed */
+       synchronize_rcu(); /* wait for our earlier RCU delayed slab frees */
 
        return freed;
 }
index e6ffef2..b6c886a 100644
@@ -1046,68 +1046,51 @@ static void vlv_c0_read(struct drm_i915_private *dev_priv,
        ei->media_c0 = I915_READ(VLV_MEDIA_C0_COUNT);
 }
 
-static bool vlv_c0_above(struct drm_i915_private *dev_priv,
-                        const struct intel_rps_ei *old,
-                        const struct intel_rps_ei *now,
-                        int threshold)
-{
-       u64 time, c0;
-       unsigned int mul = 100;
-
-       if (old->cz_clock == 0)
-               return false;
-
-       if (I915_READ(VLV_COUNTER_CONTROL) & VLV_COUNT_RANGE_HIGH)
-               mul <<= 8;
-
-       time = now->cz_clock - old->cz_clock;
-       time *= threshold * dev_priv->czclk_freq;
-
-       /* Workload can be split between render + media, e.g. SwapBuffers
-        * being blitted in X after being rendered in mesa. To account for
-        * this we need to combine both engines into our activity counter.
-        */
-       c0 = now->render_c0 - old->render_c0;
-       c0 += now->media_c0 - old->media_c0;
-       c0 *= mul * VLV_CZ_CLOCK_TO_MILLI_SEC;
-
-       return c0 >= time;
-}
-
 void gen6_rps_reset_ei(struct drm_i915_private *dev_priv)
 {
-       vlv_c0_read(dev_priv, &dev_priv->rps.down_ei);
-       dev_priv->rps.up_ei = dev_priv->rps.down_ei;
+       memset(&dev_priv->rps.ei, 0, sizeof(dev_priv->rps.ei));
 }
 
 static u32 vlv_wa_c0_ei(struct drm_i915_private *dev_priv, u32 pm_iir)
 {
+       const struct intel_rps_ei *prev = &dev_priv->rps.ei;
        struct intel_rps_ei now;
        u32 events = 0;
 
-       if ((pm_iir & (GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED)) == 0)
+       if ((pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) == 0)
                return 0;
 
        vlv_c0_read(dev_priv, &now);
        if (now.cz_clock == 0)
                return 0;
 
-       if (pm_iir & GEN6_PM_RP_DOWN_EI_EXPIRED) {
-               if (!vlv_c0_above(dev_priv,
-                                 &dev_priv->rps.down_ei, &now,
-                                 dev_priv->rps.down_threshold))
-                       events |= GEN6_PM_RP_DOWN_THRESHOLD;
-               dev_priv->rps.down_ei = now;
-       }
+       if (prev->cz_clock) {
+               u64 time, c0;
+               unsigned int mul;
+
+               mul = VLV_CZ_CLOCK_TO_MILLI_SEC * 100; /* scale to threshold% */
+               if (I915_READ(VLV_COUNTER_CONTROL) & VLV_COUNT_RANGE_HIGH)
+                       mul <<= 8;
 
-       if (pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) {
-               if (vlv_c0_above(dev_priv,
-                                &dev_priv->rps.up_ei, &now,
-                                dev_priv->rps.up_threshold))
-                       events |= GEN6_PM_RP_UP_THRESHOLD;
-               dev_priv->rps.up_ei = now;
+               time = now.cz_clock - prev->cz_clock;
+               time *= dev_priv->czclk_freq;
+
+               /* Workload can be split between render + media,
+                * e.g. SwapBuffers being blitted in X after being rendered in
+                * mesa. To account for this we need to combine both engines
+                * into our activity counter.
+                */
+               c0 = now.render_c0 - prev->render_c0;
+               c0 += now.media_c0 - prev->media_c0;
+               c0 *= mul;
+
+               if (c0 > time * dev_priv->rps.up_threshold)
+                       events = GEN6_PM_RP_UP_THRESHOLD;
+               else if (c0 < time * dev_priv->rps.down_threshold)
+                       events = GEN6_PM_RP_DOWN_THRESHOLD;
        }
 
+       dev_priv->rps.ei = now;
        return events;
 }
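
The rewritten vlv_wa_c0_ei() reduces the old two-event scheme to a single pass over one EI snapshot: busy cycles c0 = delta(render_c0) + delta(media_c0), scaled by mul, are compared against elapsed time delta(cz_clock) * czclk_freq multiplied by the up or down threshold, both thresholds being percentages. A standalone sketch of just that decision, with the elapsed time and scale factor passed in rather than read from the hardware counters:

    #include <stdint.h>
    #include <stdio.h>

    enum rps_event { RPS_NONE, RPS_UP, RPS_DOWN };

    /* One-shot up/down decision from a pair of C0 residency snapshots.
     * dt_clk: elapsed cz clock units; dc0: render+media busy delta;
     * thresholds are percentages, mul scales busy counts to percent. */
    static enum rps_event c0_decide(uint64_t dt_clk, uint64_t dc0,
                                    unsigned int up_pct, unsigned int down_pct,
                                    unsigned int mul)
    {
            uint64_t time = dt_clk;
            uint64_t c0 = dc0 * mul;

            if (c0 > time * up_pct)
                    return RPS_UP;          /* GEN6_PM_RP_UP_THRESHOLD */
            if (c0 < time * down_pct)
                    return RPS_DOWN;        /* GEN6_PM_RP_DOWN_THRESHOLD */
            return RPS_NONE;
    }

    int main(void)
    {
            /* 90% busy against up=85%, down=60%: clock up */
            printf("%d\n", c0_decide(1000, 900, 85, 60, 100) == RPS_UP);
            return 0;
    }
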
 
@@ -4228,7 +4211,7 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
        /* Let's track the enabled rps events */
        if (IS_VALLEYVIEW(dev_priv))
                /* WaGsvRC0ResidencyMethod:vlv */
-               dev_priv->pm_rps_events = GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED;
+               dev_priv->pm_rps_events = GEN6_PM_RP_UP_EI_EXPIRED;
        else
                dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS;
 
@@ -4266,6 +4249,16 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
        if (!IS_GEN2(dev_priv))
                dev->vblank_disable_immediate = true;
 
+       /* Most platforms treat the display irq block as an always-on
+        * power domain. vlv/chv can disable it at runtime and need
+        * special care to avoid writing any of the display block registers
+        * outside of the power domain. We defer setting up the display irqs
+        * in this case to the runtime pm.
+        */
+       dev_priv->display_irqs_enabled = true;
+       if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+               dev_priv->display_irqs_enabled = false;
+
        dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp;
        dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;
 
index 155906e..df20e9b 100644
@@ -512,10 +512,36 @@ err_unpin:
        return ret;
 }
 
+static void
+i915_vma_remove(struct i915_vma *vma)
+{
+       struct drm_i915_gem_object *obj = vma->obj;
+
+       GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
+       GEM_BUG_ON(vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));
+
+       drm_mm_remove_node(&vma->node);
+       list_move_tail(&vma->vm_link, &vma->vm->unbound_list);
+
+       /* Since the unbound list is global, only move to that list if
+        * no more VMAs exist.
+        */
+       if (--obj->bind_count == 0)
+               list_move_tail(&obj->global_link,
+                              &to_i915(obj->base.dev)->mm.unbound_list);
+
+       /* And finally now the object is completely decoupled from this vma,
+        * we can drop its hold on the backing storage and allow it to be
+        * reaped by the shrinker.
+        */
+       i915_gem_object_unpin_pages(obj);
+       GEM_BUG_ON(atomic_read(&obj->mm.pages_pin_count) < obj->bind_count);
+}
+
 int __i915_vma_do_pin(struct i915_vma *vma,
                      u64 size, u64 alignment, u64 flags)
 {
-       unsigned int bound = vma->flags;
+       const unsigned int bound = vma->flags;
        int ret;
 
        lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
@@ -524,18 +550,18 @@ int __i915_vma_do_pin(struct i915_vma *vma,
 
        if (WARN_ON(bound & I915_VMA_PIN_OVERFLOW)) {
                ret = -EBUSY;
-               goto err;
+               goto err_unpin;
        }
 
        if ((bound & I915_VMA_BIND_MASK) == 0) {
                ret = i915_vma_insert(vma, size, alignment, flags);
                if (ret)
-                       goto err;
+                       goto err_unpin;
        }
 
        ret = i915_vma_bind(vma, vma->obj->cache_level, flags);
        if (ret)
-               goto err;
+               goto err_remove;
 
        if ((bound ^ vma->flags) & I915_VMA_GLOBAL_BIND)
                __i915_vma_set_map_and_fenceable(vma);
@@ -544,7 +570,12 @@ int __i915_vma_do_pin(struct i915_vma *vma,
        GEM_BUG_ON(i915_vma_misplaced(vma, size, alignment, flags));
        return 0;
 
-err:
+err_remove:
+       if ((bound & I915_VMA_BIND_MASK) == 0) {
+               GEM_BUG_ON(vma->pages);
+               i915_vma_remove(vma);
+       }
+err_unpin:
        __i915_vma_unpin(vma);
        return ret;
 }
@@ -657,9 +688,6 @@ int i915_vma_unbind(struct i915_vma *vma)
        }
        vma->flags &= ~(I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND);
 
-       drm_mm_remove_node(&vma->node);
-       list_move_tail(&vma->vm_link, &vma->vm->unbound_list);
-
        if (vma->pages != obj->mm.pages) {
                GEM_BUG_ON(!vma->pages);
                sg_free_table(vma->pages);
@@ -667,18 +695,7 @@ int i915_vma_unbind(struct i915_vma *vma)
        }
        vma->pages = NULL;
 
-       /* Since the unbound list is global, only move to that list if
-        * no more VMAs exist. */
-       if (--obj->bind_count == 0)
-               list_move_tail(&obj->global_link,
-                              &to_i915(obj->base.dev)->mm.unbound_list);
-
-       /* And finally now the object is completely decoupled from this vma,
-        * we can drop its hold on the backing storage and allow it to be
-        * reaped by the shrinker.
-        */
-       i915_gem_object_unpin_pages(obj);
-       GEM_BUG_ON(atomic_read(&obj->mm.pages_pin_count) < obj->bind_count);
+       i915_vma_remove(vma);
 
 destroy:
        if (unlikely(i915_vma_is_closed(vma)))
index 0085bc7..de219b7 100644
@@ -35,7 +35,6 @@
  */
 
 #define I915_CSR_GLK "i915/glk_dmc_ver1_01.bin"
-MODULE_FIRMWARE(I915_CSR_GLK);
 #define GLK_CSR_VERSION_REQUIRED       CSR_VERSION(1, 1)
 
 #define I915_CSR_KBL "i915/kbl_dmc_ver1_01.bin"
index 0134167..ed1f4f2 100644
@@ -3669,10 +3669,6 @@ static void intel_update_pipe_config(struct intel_crtc *crtc,
        /* drm_atomic_helper_update_legacy_modeset_state might not be called. */
        crtc->base.mode = crtc->base.state->mode;
 
-       DRM_DEBUG_KMS("Updating pipe size %ix%i -> %ix%i\n",
-                     old_crtc_state->pipe_src_w, old_crtc_state->pipe_src_h,
-                     pipe_config->pipe_src_w, pipe_config->pipe_src_h);
-
        /*
         * Update pipe size and adjust fitter if needed: the reason for this is
         * that in compute_mode_changes we check the native mode (not the pfit
@@ -4796,23 +4792,17 @@ static void skylake_pfit_enable(struct intel_crtc *crtc)
        struct intel_crtc_scaler_state *scaler_state =
                &crtc->config->scaler_state;
 
-       DRM_DEBUG_KMS("for crtc_state = %p\n", crtc->config);
-
        if (crtc->config->pch_pfit.enabled) {
                int id;
 
-               if (WARN_ON(crtc->config->scaler_state.scaler_id < 0)) {
-                       DRM_ERROR("Requesting pfit without getting a scaler first\n");
+               if (WARN_ON(crtc->config->scaler_state.scaler_id < 0))
                        return;
-               }
 
                id = scaler_state->scaler_id;
                I915_WRITE(SKL_PS_CTRL(pipe, id), PS_SCALER_EN |
                        PS_FILTER_MEDIUM | scaler_state->scalers[id].mode);
                I915_WRITE(SKL_PS_WIN_POS(pipe, id), crtc->config->pch_pfit.pos);
                I915_WRITE(SKL_PS_WIN_SZ(pipe, id), crtc->config->pch_pfit.size);
-
-               DRM_DEBUG_KMS("for crtc_state = %p scaler_id = %d\n", crtc->config, id);
        }
 }
 
@@ -14379,6 +14369,24 @@ static void skl_update_crtcs(struct drm_atomic_state *state,
        } while (progress);
 }
 
+static void intel_atomic_helper_free_state(struct drm_i915_private *dev_priv)
+{
+       struct intel_atomic_state *state, *next;
+       struct llist_node *freed;
+
+       freed = llist_del_all(&dev_priv->atomic_helper.free_list);
+       llist_for_each_entry_safe(state, next, freed, freed)
+               drm_atomic_state_put(&state->base);
+}
+
+static void intel_atomic_helper_free_state_worker(struct work_struct *work)
+{
+       struct drm_i915_private *dev_priv =
+               container_of(work, typeof(*dev_priv), atomic_helper.free_work);
+
+       intel_atomic_helper_free_state(dev_priv);
+}
+
 static void intel_atomic_commit_tail(struct drm_atomic_state *state)
 {
        struct drm_device *dev = state->dev;
@@ -14545,6 +14553,8 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
         * can happen also when the device is completely off.
         */
        intel_uncore_arm_unclaimed_mmio_detection(dev_priv);
+
+       intel_atomic_helper_free_state(dev_priv);
 }
 
 static void intel_atomic_commit_work(struct work_struct *work)
@@ -14946,17 +14956,19 @@ static void intel_begin_crtc_commit(struct drm_crtc *crtc,
                to_intel_atomic_state(old_crtc_state->state);
        bool modeset = needs_modeset(crtc->state);
 
+       if (!modeset &&
+           (intel_cstate->base.color_mgmt_changed ||
+            intel_cstate->update_pipe)) {
+               intel_color_set_csc(crtc->state);
+               intel_color_load_luts(crtc->state);
+       }
+
        /* Perform vblank evasion around commit operation */
        intel_pipe_update_start(intel_crtc);
 
        if (modeset)
                goto out;
 
-       if (crtc->state->color_mgmt_changed || to_intel_crtc_state(crtc->state)->update_pipe) {
-               intel_color_set_csc(crtc->state);
-               intel_color_load_luts(crtc->state);
-       }
-
        if (intel_cstate->update_pipe)
                intel_update_pipe_config(intel_crtc, old_intel_cstate);
        else if (INTEL_GEN(dev_priv) >= 9)
@@ -16599,18 +16611,6 @@ fail:
        drm_modeset_acquire_fini(&ctx);
 }
 
-static void intel_atomic_helper_free_state(struct work_struct *work)
-{
-       struct drm_i915_private *dev_priv =
-               container_of(work, typeof(*dev_priv), atomic_helper.free_work);
-       struct intel_atomic_state *state, *next;
-       struct llist_node *freed;
-
-       freed = llist_del_all(&dev_priv->atomic_helper.free_list);
-       llist_for_each_entry_safe(state, next, freed, freed)
-               drm_atomic_state_put(&state->base);
-}
-
 int intel_modeset_init(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = to_i915(dev);
@@ -16631,7 +16631,7 @@ int intel_modeset_init(struct drm_device *dev)
        dev->mode_config.funcs = &intel_mode_funcs;
 
        INIT_WORK(&dev_priv->atomic_helper.free_work,
-                 intel_atomic_helper_free_state);
+                 intel_atomic_helper_free_state_worker);
 
        intel_init_quirks(dev);
 
@@ -16696,12 +16696,11 @@ int intel_modeset_init(struct drm_device *dev)
                }
        }
 
-       intel_update_czclk(dev_priv);
-       intel_update_cdclk(dev_priv);
-       dev_priv->atomic_cdclk_freq = dev_priv->cdclk_freq;
-
        intel_shared_dpll_init(dev);
 
+       intel_update_czclk(dev_priv);
+       intel_modeset_init_hw(dev);
+
        if (dev_priv->max_cdclk_freq == 0)
                intel_update_max_cdclk(dev_priv);
 
@@ -17258,8 +17257,6 @@ void intel_modeset_gem_init(struct drm_device *dev)
 
        intel_init_gt_powersave(dev_priv);
 
-       intel_modeset_init_hw(dev);
-
        intel_setup_overlay(dev_priv);
 }
 
index 371acf1..ab1be5c 100644
@@ -105,6 +105,8 @@ intel_engine_setup(struct drm_i915_private *dev_priv,
        /* Nothing to do here, execute in order of dependencies */
        engine->schedule = NULL;
 
+       ATOMIC_INIT_NOTIFIER_HEAD(&engine->context_status_notifier);
+
        dev_priv->engine[id] = engine;
        return 0;
 }
index 1b8ba2e..2d449fb 100644
@@ -357,14 +357,13 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,
                                    bool *enabled, int width, int height)
 {
        struct drm_i915_private *dev_priv = to_i915(fb_helper->dev);
-       unsigned long conn_configured, mask;
+       unsigned long conn_configured, conn_seq, mask;
        unsigned int count = min(fb_helper->connector_count, BITS_PER_LONG);
        int i, j;
        bool *save_enabled;
        bool fallback = true;
        int num_connectors_enabled = 0;
        int num_connectors_detected = 0;
-       int pass = 0;
 
        save_enabled = kcalloc(count, sizeof(bool), GFP_KERNEL);
        if (!save_enabled)
@@ -374,6 +373,7 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,
        mask = BIT(count) - 1;
        conn_configured = 0;
 retry:
+       conn_seq = conn_configured;
        for (i = 0; i < count; i++) {
                struct drm_fb_helper_connector *fb_conn;
                struct drm_connector *connector;
@@ -387,7 +387,7 @@ retry:
                if (conn_configured & BIT(i))
                        continue;
 
-               if (pass == 0 && !connector->has_tile)
+               if (conn_seq == 0 && !connector->has_tile)
                        continue;
 
                if (connector->status == connector_status_connected)
@@ -498,10 +498,8 @@ retry:
                conn_configured |= BIT(i);
        }
 
-       if ((conn_configured & mask) != mask) {
-               pass++;
+       if ((conn_configured & mask) != mask && conn_configured != conn_seq)
                goto retry;
-       }
 
        /*
         * If the BIOS didn't enable everything it could, fall back to have the
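
The fbdev hunks replace the unbounded pass counter with a progress check: conn_seq snapshots conn_configured before each pass, and the loop retries only if the pass configured at least one new connector, so it terminates even when some connectors can never be configured. A generic sketch of that fixed-point retry loop (try_item is a hypothetical per-connector configure step):

    #include <stdio.h>

    /* Retry until every bit in mask is set or a pass makes no progress. */
    static unsigned long configure_all(unsigned long mask,
                                       int (*try_one)(int, unsigned long))
    {
            unsigned long configured = 0, seq;

            do {
                    seq = configured;               /* snapshot before the pass */
                    for (int i = 0; (1UL << i) <= mask; i++) {
                            if (configured & (1UL << i))
                                    continue;
                            if (try_one(i, configured))
                                    configured |= 1UL << i;
                    }
            } while ((configured & mask) != mask && configured != seq);

            return configured;                      /* caller checks leftovers */
    }

    /* Item 0 succeeds only after item 2 is done (a tile-like dependency). */
    static int try_item(int idx, unsigned long done)
    {
            return idx != 0 || (done & 4UL);
    }

    int main(void)
    {
            printf("configured=%#lx\n", configure_all(0x7UL, try_item));
            return 0;
    }
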
index d23c0fc..8c04eca 100644
@@ -77,6 +77,11 @@ int intel_gvt_init(struct drm_i915_private *dev_priv)
                goto bail;
        }
 
+       if (!i915.enable_execlists) {
+               DRM_INFO("GPU guest virtualisation [GVT-g] disabled due to disabled execlist submission [i915.enable_execlists module parameter]\n");
+               goto bail;
+       }
+
        /*
         * We're not in host or fail to find a MPT module, disable GVT-g
         */
index ebae2bd..24b2fa5 100644
@@ -1298,16 +1298,34 @@ intel_hdmi_mode_valid(struct drm_connector *connector,
 
 static bool hdmi_12bpc_possible(struct intel_crtc_state *crtc_state)
 {
-       struct drm_device *dev = crtc_state->base.crtc->dev;
+       struct drm_i915_private *dev_priv =
+               to_i915(crtc_state->base.crtc->dev);
+       struct drm_atomic_state *state = crtc_state->base.state;
+       struct drm_connector_state *connector_state;
+       struct drm_connector *connector;
+       int i;
 
-       if (HAS_GMCH_DISPLAY(to_i915(dev)))
+       if (HAS_GMCH_DISPLAY(dev_priv))
                return false;
 
        /*
         * HDMI 12bpc affects the clocks, so it's only possible
         * when not cloning with other encoder types.
         */
-       return crtc_state->output_types == 1 << INTEL_OUTPUT_HDMI;
+       if (crtc_state->output_types != 1 << INTEL_OUTPUT_HDMI)
+               return false;
+
+       for_each_connector_in_state(state, connector, connector_state, i) {
+               const struct drm_display_info *info = &connector->display_info;
+
+               if (connector_state->crtc != crtc_state->base.crtc)
+                       continue;
+
+               if ((info->edid_hdmi_dc_modes & DRM_EDID_HDMI_DC_36) == 0)
+                       return false;
+       }
+
+       return true;
 }
 
 bool intel_hdmi_compute_config(struct intel_encoder *encoder,
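
hdmi_12bpc_possible() now also walks the connectors in the atomic state and vetoes 12bpc unless every sink on this CRTC advertises 36-bit deep color in its EDID. A reduced sketch of that veto walk over a plain array; the DRM_EDID_HDMI_DC_36 value here is quoted from memory of drm_edid.h and should be treated as an assumption:

    #include <stdbool.h>
    #include <stdio.h>

    #define DRM_EDID_HDMI_DC_36 (1 << 5)    /* assumed value, see drm_edid.h */

    struct connector {
            int crtc_id;                    /* CRTC this connector feeds */
            unsigned int edid_hdmi_dc_modes;
    };

    /* 12bpc only if every connector on this CRTC supports 36-bit color. */
    static bool hdmi_12bpc_possible(const struct connector *c, int n,
                                    int crtc_id)
    {
            for (int i = 0; i < n; i++) {
                    if (c[i].crtc_id != crtc_id)
                            continue;
                    if (!(c[i].edid_hdmi_dc_modes & DRM_EDID_HDMI_DC_36))
                            return false;
            }
            return true;
    }

    int main(void)
    {
            struct connector conns[] = {
                    { 0, DRM_EDID_HDMI_DC_36 },
                    { 0, 0 },               /* sink without deep color */
            };

            printf("%d\n", hdmi_12bpc_possible(conns, 2, 0));   /* 0 */
            return 0;
    }
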
index b62e3f8..54208be 100644
@@ -219,7 +219,7 @@ static void intel_hpd_irq_storm_reenable_work(struct work_struct *work)
                        }
                }
        }
-       if (dev_priv->display.hpd_irq_setup)
+       if (dev_priv->display_irqs_enabled && dev_priv->display.hpd_irq_setup)
                dev_priv->display.hpd_irq_setup(dev_priv);
        spin_unlock_irq(&dev_priv->irq_lock);
 
@@ -425,7 +425,7 @@ void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
                }
        }
 
-       if (storm_detected)
+       if (storm_detected && dev_priv->display_irqs_enabled)
                dev_priv->display.hpd_irq_setup(dev_priv);
        spin_unlock(&dev_priv->irq_lock);
 
@@ -471,10 +471,12 @@ void intel_hpd_init(struct drm_i915_private *dev_priv)
         * Interrupt setup is already guaranteed to be single-threaded, this is
         * just to make the assert_spin_locked checks happy.
         */
-       spin_lock_irq(&dev_priv->irq_lock);
-       if (dev_priv->display.hpd_irq_setup)
-               dev_priv->display.hpd_irq_setup(dev_priv);
-       spin_unlock_irq(&dev_priv->irq_lock);
+       if (dev_priv->display_irqs_enabled && dev_priv->display.hpd_irq_setup) {
+               spin_lock_irq(&dev_priv->irq_lock);
+               if (dev_priv->display_irqs_enabled)
+                       dev_priv->display.hpd_irq_setup(dev_priv);
+               spin_unlock_irq(&dev_priv->irq_lock);
+       }
 }
 
 static void i915_hpd_poll_init_work(struct work_struct *work)
index ebf8023..471af3b 100644
@@ -345,7 +345,8 @@ execlists_context_status_change(struct drm_i915_gem_request *rq,
        if (!IS_ENABLED(CONFIG_DRM_I915_GVT))
                return;
 
-       atomic_notifier_call_chain(&rq->ctx->status_notifier, status, rq);
+       atomic_notifier_call_chain(&rq->engine->context_status_notifier,
+                                  status, rq);
 }
 
 static void
index 249623d..6a29784 100644
@@ -4891,6 +4891,12 @@ static void gen6_set_rps_thresholds(struct drm_i915_private *dev_priv, u8 val)
                break;
        }
 
+       /* Once byt can survive dynamic sw freq adjustments without
+        * system hangs, this restriction can be lifted.
+        */
+       if (IS_VALLEYVIEW(dev_priv))
+               goto skip_hw_write;
+
        I915_WRITE(GEN6_RP_UP_EI,
                   GT_INTERVAL_FROM_US(dev_priv, ei_up));
        I915_WRITE(GEN6_RP_UP_THRESHOLD,
@@ -4911,6 +4917,7 @@ static void gen6_set_rps_thresholds(struct drm_i915_private *dev_priv, u8 val)
                   GEN6_RP_UP_BUSY_AVG |
                   GEN6_RP_DOWN_IDLE_AVG);
 
+skip_hw_write:
        dev_priv->rps.power = new_power;
        dev_priv->rps.up_threshold = threshold_up;
        dev_priv->rps.down_threshold = threshold_down;
@@ -4921,8 +4928,9 @@ static u32 gen6_rps_pm_mask(struct drm_i915_private *dev_priv, u8 val)
 {
        u32 mask = 0;
 
+       /* We use UP_EI_EXPIRED interrupts for both up/down in manual mode */
        if (val > dev_priv->rps.min_freq_softlimit)
-               mask |= GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_DOWN_THRESHOLD | GEN6_PM_RP_DOWN_TIMEOUT;
+               mask |= GEN6_PM_RP_UP_EI_EXPIRED | GEN6_PM_RP_DOWN_THRESHOLD | GEN6_PM_RP_DOWN_TIMEOUT;
        if (val < dev_priv->rps.max_freq_softlimit)
                mask |= GEN6_PM_RP_UP_EI_EXPIRED | GEN6_PM_RP_UP_THRESHOLD;
 
@@ -5032,7 +5040,7 @@ void gen6_rps_busy(struct drm_i915_private *dev_priv)
 {
        mutex_lock(&dev_priv->rps.hw_lock);
        if (dev_priv->rps.enabled) {
-               if (dev_priv->pm_rps_events & (GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED))
+               if (dev_priv->pm_rps_events & GEN6_PM_RP_UP_EI_EXPIRED)
                        gen6_rps_reset_ei(dev_priv);
                I915_WRITE(GEN6_PMINTRMSK,
                           gen6_rps_pm_mask(dev_priv, dev_priv->rps.cur_freq));
@@ -7916,10 +7924,10 @@ static bool skl_pcode_try_request(struct drm_i915_private *dev_priv, u32 mbox,
  * @timeout_base_ms: timeout for polling with preemption enabled
  *
  * Keep resending the @request to @mbox until PCODE acknowledges it, PCODE
- * reports an error or an overall timeout of @timeout_base_ms+10 ms expires.
+ * reports an error or an overall timeout of @timeout_base_ms+50 ms expires.
  * The request is acknowledged once the PCODE reply dword equals @reply after
  * applying @reply_mask. Polling is first attempted with preemption enabled
- * for @timeout_base_ms and if this times out for another 10 ms with
+ * for @timeout_base_ms and if this times out for another 50 ms with
  * preemption disabled.
  *
  * Returns 0 on success, %-ETIMEDOUT in case of a timeout, <0 in case of some
@@ -7955,14 +7963,15 @@ int skl_pcode_request(struct drm_i915_private *dev_priv, u32 mbox, u32 request,
         * worst case) _and_ PCODE was busy for some reason even after a
         * (queued) request and @timeout_base_ms delay. As a workaround retry
         * the poll with preemption disabled to maximize the number of
-        * requests. Increase the timeout from @timeout_base_ms to 10ms to
+        * requests. Increase the timeout from @timeout_base_ms to 50ms to
         * account for interrupts that could reduce the number of these
-        * requests.
+        * requests, and for any quirks of the PCODE firmware that delay
+        * the request completion.
         */
        DRM_DEBUG_KMS("PCODE timeout, retrying with preemption disabled\n");
        WARN_ON_ONCE(timeout_base_ms > 3);
        preempt_disable();
-       ret = wait_for_atomic(COND, 10);
+       ret = wait_for_atomic(COND, 50);
        preempt_enable();
 
 out:
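
skl_pcode_request() polls in two phases: politely, with preemption enabled, for timeout_base_ms, then in a tight atomic poll that this fix stretches from 10 ms to 50 ms. A userspace sketch of the same two-phase shape, with sched_yield() standing in for the preemptible wait and a busy loop for the atomic one:

    #include <sched.h>
    #include <stdbool.h>
    #include <stdio.h>
    #include <time.h>

    static long now_ms(void)
    {
            struct timespec ts;

            clock_gettime(CLOCK_MONOTONIC, &ts);
            return ts.tv_sec * 1000L + ts.tv_nsec / 1000000L;
    }

    /* Phase 1: polite poll for base_ms; phase 2: busy poll for extra_ms. */
    static int poll_two_phase(bool (*done)(void), long base_ms, long extra_ms)
    {
            long start = now_ms();

            while (now_ms() - start < base_ms) {    /* preemptible phase */
                    if (done())
                            return 0;
                    sched_yield();
            }

            start = now_ms();                       /* "atomic" phase */
            while (now_ms() - start < extra_ms)
                    if (done())
                            return 0;

            return -1;                              /* -ETIMEDOUT analogue */
    }

    static bool ready(void)
    {
            static int polls;

            return ++polls > 1000;          /* pretend PCODE answers late */
    }

    int main(void)
    {
            printf("ret=%d\n", poll_two_phase(ready, 3, 50));
            return 0;
    }
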
index 79c2b8d..13dccb1 100644
@@ -403,6 +403,9 @@ struct intel_engine_cs {
         */
        struct i915_gem_context *legacy_active_context;
 
+       /* status_notifier: list of callbacks for context-switch changes */
+       struct atomic_notifier_head context_status_notifier;
+
        struct intel_engine_hangcheck hangcheck;
 
        bool needs_cmd_parser;
index 9ef5468..9481ca9 100644
@@ -254,9 +254,6 @@ skl_update_plane(struct drm_plane *drm_plane,
                int scaler_id = plane_state->scaler_id;
                const struct intel_scaler *scaler;
 
-               DRM_DEBUG_KMS("plane = %d PS_PLANE_SEL(plane) = 0x%x\n",
-                             plane_id, PS_PLANE_SEL(plane_id));
-
                scaler = &crtc_state->scaler_state.scalers[scaler_id];
 
                I915_WRITE(SKL_PS_CTRL(pipe, scaler_id),
index abe0888..b7ff592 100644
@@ -119,6 +119,8 @@ fw_domains_get(struct drm_i915_private *dev_priv, enum forcewake_domains fw_doma
 
        for_each_fw_domain_masked(d, fw_domains, dev_priv)
                fw_domain_wait_ack(d);
+
+       dev_priv->uncore.fw_domains_active |= fw_domains;
 }
 
 static void
@@ -130,6 +132,8 @@ fw_domains_put(struct drm_i915_private *dev_priv, enum forcewake_domains fw_doma
                fw_domain_put(d);
                fw_domain_posting_read(d);
        }
+
+       dev_priv->uncore.fw_domains_active &= ~fw_domains;
 }
 
 static void
@@ -240,10 +244,8 @@ intel_uncore_fw_release_timer(struct hrtimer *timer)
        if (WARN_ON(domain->wake_count == 0))
                domain->wake_count++;
 
-       if (--domain->wake_count == 0) {
+       if (--domain->wake_count == 0)
                dev_priv->uncore.funcs.force_wake_put(dev_priv, domain->mask);
-               dev_priv->uncore.fw_domains_active &= ~domain->mask;
-       }
 
        spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
 
@@ -454,10 +456,8 @@ static void __intel_uncore_forcewake_get(struct drm_i915_private *dev_priv,
                        fw_domains &= ~domain->mask;
        }
 
-       if (fw_domains) {
+       if (fw_domains)
                dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_domains);
-               dev_priv->uncore.fw_domains_active |= fw_domains;
-       }
 }
 
 /**
@@ -968,7 +968,6 @@ static noinline void ___force_wake_auto(struct drm_i915_private *dev_priv,
                fw_domain_arm_timer(domain);
 
        dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_domains);
-       dev_priv->uncore.fw_domains_active |= fw_domains;
 }
 
 static inline void __force_wake_auto(struct drm_i915_private *dev_priv,
index af267c3..ee5883f 100644
@@ -147,9 +147,6 @@ static int omap_gem_dmabuf_mmap(struct dma_buf *buffer,
        struct drm_gem_object *obj = buffer->priv;
        int ret = 0;
 
-       if (WARN_ON(!obj->filp))
-               return -EINVAL;
-
        ret = drm_gem_mmap_obj(obj, omap_gem_mmap_size(obj), vma);
        if (ret < 0)
                return ret;
index d12b897..c7af9fd 100644
@@ -2984,6 +2984,16 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev,
                    (rdev->pdev->device == 0x6667)) {
                        max_sclk = 75000;
                }
+       } else if (rdev->family == CHIP_OLAND) {
+               if ((rdev->pdev->revision == 0xC7) ||
+                   (rdev->pdev->revision == 0x80) ||
+                   (rdev->pdev->revision == 0x81) ||
+                   (rdev->pdev->revision == 0x83) ||
+                   (rdev->pdev->revision == 0x87) ||
+                   (rdev->pdev->device == 0x6604) ||
+                   (rdev->pdev->device == 0x6605)) {
+                       max_sclk = 75000;
+               }
        }
 
        if (rps->vce_active) {
index f80bf93..d745e8b 100644
@@ -464,6 +464,7 @@ static void tilcdc_crtc_enable(struct drm_crtc *crtc)
 {
        struct drm_device *dev = crtc->dev;
        struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
+       unsigned long flags;
 
        WARN_ON(!drm_modeset_is_locked(&crtc->mutex));
        mutex_lock(&tilcdc_crtc->enable_lock);
@@ -484,7 +485,17 @@ static void tilcdc_crtc_enable(struct drm_crtc *crtc)
        tilcdc_write_mask(dev, LCDC_RASTER_CTRL_REG,
                          LCDC_PALETTE_LOAD_MODE(DATA_ONLY),
                          LCDC_PALETTE_LOAD_MODE_MASK);
+
+       /* There is no real chance of a race here, as the timestamp
+        * is taken before the raster DMA is started. The spinlock is
+        * taken to have a memory barrier after taking the timestamp
+        * and to avoid a context switch between taking the timestamp
+        * and enabling the raster.
+        */
+       spin_lock_irqsave(&tilcdc_crtc->irq_lock, flags);
+       tilcdc_crtc->last_vblank = ktime_get();
        tilcdc_set(dev, LCDC_RASTER_CTRL_REG, LCDC_RASTER_ENABLE);
+       spin_unlock_irqrestore(&tilcdc_crtc->irq_lock, flags);
 
        drm_crtc_vblank_on(crtc);
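
The enable path above takes last_vblank and flips LCDC_RASTER_ENABLE inside one irq_lock section, so the stored timestamp can never lag the actual raster start by a preemption. The same stamp-then-start critical section, sketched with a pthread mutex standing in for the spinlock:

    #include <pthread.h>
    #include <stdio.h>
    #include <time.h>

    struct crtc {
            pthread_mutex_t irq_lock;
            struct timespec last_vblank;
            int raster_enabled;
    };

    /* Stamp and start atomically: no context switch can widen the gap
     * between taking last_vblank and enabling the raster. */
    static void crtc_enable(struct crtc *c)
    {
            pthread_mutex_lock(&c->irq_lock);
            clock_gettime(CLOCK_MONOTONIC, &c->last_vblank);
            c->raster_enabled = 1;          /* LCDC_RASTER_ENABLE analogue */
            pthread_mutex_unlock(&c->irq_lock);
    }

    int main(void)
    {
            struct crtc c = { PTHREAD_MUTEX_INITIALIZER, { 0 }, 0 };

            crtc_enable(&c);
            printf("enabled=%d\n", c.raster_enabled);
            return 0;
    }
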
 
@@ -539,7 +550,6 @@ static void tilcdc_crtc_off(struct drm_crtc *crtc, bool shutdown)
        }
 
        drm_flip_work_commit(&tilcdc_crtc->unref_work, priv->wq);
-       tilcdc_crtc->last_vblank = 0;
 
        tilcdc_crtc->enabled = false;
        mutex_unlock(&tilcdc_crtc->enable_lock);
@@ -602,7 +612,6 @@ int tilcdc_crtc_update_fb(struct drm_crtc *crtc,
 {
        struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
        struct drm_device *dev = crtc->dev;
-       unsigned long flags;
 
        WARN_ON(!drm_modeset_is_locked(&crtc->mutex));
 
@@ -614,28 +623,30 @@ int tilcdc_crtc_update_fb(struct drm_crtc *crtc,
        drm_framebuffer_reference(fb);
 
        crtc->primary->fb = fb;
+       tilcdc_crtc->event = event;
 
-       spin_lock_irqsave(&tilcdc_crtc->irq_lock, flags);
+       mutex_lock(&tilcdc_crtc->enable_lock);
 
-       if (crtc->hwmode.vrefresh && ktime_to_ns(tilcdc_crtc->last_vblank)) {
+       if (tilcdc_crtc->enabled) {
+               unsigned long flags;
                ktime_t next_vblank;
                s64 tdiff;
 
-               next_vblank = ktime_add_us(tilcdc_crtc->last_vblank,
-                       1000000 / crtc->hwmode.vrefresh);
+               spin_lock_irqsave(&tilcdc_crtc->irq_lock, flags);
 
+               next_vblank = ktime_add_us(tilcdc_crtc->last_vblank,
+                                          1000000 / crtc->hwmode.vrefresh);
                tdiff = ktime_to_us(ktime_sub(next_vblank, ktime_get()));
 
                if (tdiff < TILCDC_VBLANK_SAFETY_THRESHOLD_US)
                        tilcdc_crtc->next_fb = fb;
-       }
-
-       if (tilcdc_crtc->next_fb != fb)
-               set_scanout(crtc, fb);
+               else
+                       set_scanout(crtc, fb);
 
-       tilcdc_crtc->event = event;
+               spin_unlock_irqrestore(&tilcdc_crtc->irq_lock, flags);
+       }
 
-       spin_unlock_irqrestore(&tilcdc_crtc->irq_lock, flags);
+       mutex_unlock(&tilcdc_crtc->enable_lock);
 
        return 0;
 }
@@ -1036,5 +1047,5 @@ int tilcdc_crtc_create(struct drm_device *dev)
 
 fail:
        tilcdc_crtc_destroy(crtc);
-       return -ENOMEM;
+       return ret;
 }
index 1aeb80e..8c54cb8 100644
@@ -175,11 +175,11 @@ config HID_CHERRY
        Support for Cherry Cymotion keyboard.
 
 config HID_CHICONY
-       tristate "Chicony Tactical pad"
+       tristate "Chicony devices"
        depends on HID
        default !EXPERT
        ---help---
-       Support for Chicony Tactical pad.
+       Support for Chicony Tactical pad and special keys on Chicony keyboards.
 
 config HID_CORSAIR
        tristate "Corsair devices"
@@ -190,6 +190,7 @@ config HID_CORSAIR
 
        Supported devices:
        - Vengeance K90
+       - Scimitar PRO RGB
 
 config HID_PRODIKEYS
        tristate "Prodikeys PC-MIDI Keyboard support"
index bc3cec1..f04ed9a 100644
@@ -86,6 +86,7 @@ static const struct hid_device_id ch_devices[] = {
        { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_WIRELESS2) },
        { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_AK1D) },
        { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_ACER_SWITCH12) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_JESS, USB_DEVICE_ID_JESS_ZEN_AIO_KBD) },
        { }
 };
 MODULE_DEVICE_TABLE(hid, ch_devices);
index e9e87d3..3ceb4a2 100644
@@ -1870,6 +1870,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
        { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_AK1D) },
        { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_ACER_SWITCH12) },
        { HID_USB_DEVICE(USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K90) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_SCIMITAR_PRO_RGB) },
        { HID_USB_DEVICE(USB_VENDOR_ID_CREATIVELABS, USB_DEVICE_ID_PRODIKEYS_PCMIDI) },
        { HID_USB_DEVICE(USB_VENDOR_ID_CYGNAL, USB_DEVICE_ID_CYGNAL_CP2112) },
        { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_1) },
@@ -1910,6 +1911,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
        { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A081) },
        { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A0C2) },
        { HID_USB_DEVICE(USB_VENDOR_ID_HUION, USB_DEVICE_ID_HUION_TABLET) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_JESS, USB_DEVICE_ID_JESS_ZEN_AIO_KBD) },
        { HID_USB_DEVICE(USB_VENDOR_ID_JESS2, USB_DEVICE_ID_JESS2_COLOR_RUMBLE_PAD) },
        { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ION, USB_DEVICE_ID_ICADE) },
        { HID_USB_DEVICE(USB_VENDOR_ID_KENSINGTON, USB_DEVICE_ID_KS_SLIMBLADE) },
index c0303f6..9ba5d98 100644
@@ -3,8 +3,10 @@
  *
  * Supported devices:
  *  - Vengeance K90 Keyboard
+ *  - Scimitar PRO RGB Gaming Mouse
  *
  * Copyright (c) 2015 Clement Vuchener
+ * Copyright (c) 2017 Oscar Campos
  */
 
 /*
@@ -670,10 +672,51 @@ static int corsair_input_mapping(struct hid_device *dev,
        return 0;
 }
 
+/*
+ * The report descriptor of the Corsair Scimitar RGB Pro gaming mouse is
+ * not parseable as it defines two consecutive Logical Minimum items for
+ * the Usage Page (Consumer) in rdesc bytes 75 and 77; byte 77 is 0x16
+ * but should obviously be 0x26, a 16-bit Logical Maximum. This prevents
+ * proper parsing of the report descriptor because the Logical Minimum
+ * ends up larger than the Logical Maximum.
+ *
+ * This driver fixes the report descriptor for:
+ * - USB ID 1b1c:1b3e, sold as Scimitar RGB Pro Gaming mouse
+ */
+
+static __u8 *corsair_mouse_report_fixup(struct hid_device *hdev, __u8 *rdesc,
+        unsigned int *rsize)
+{
+       struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
+
+       if (intf->cur_altsetting->desc.bInterfaceNumber == 1) {
+               /*
+                * The Corsair Scimitar RGB Pro report descriptor is broken
+                * and defines two different Logical Minimum items for the
+                * Consumer Application. Byte 77 should be 0x26, introducing
+                * a 16-bit integer for the Logical Maximum, but it is 0x16
+                * instead (Logical Minimum).
+                */
+               switch (hdev->product) {
+               case USB_DEVICE_ID_CORSAIR_SCIMITAR_PRO_RGB:
+                       if (*rsize >= 172 && rdesc[75] == 0x15 &&
+                           rdesc[77] == 0x16 && rdesc[78] == 0xff &&
+                           rdesc[79] == 0x0f) {
+                               hid_info(hdev, "Fixing up report descriptor\n");
+                               rdesc[77] = 0x26;
+                       }
+                       break;
+               }
+       }
+       return rdesc;
+}
+
 static const struct hid_device_id corsair_devices[] = {
        { HID_USB_DEVICE(USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K90),
                .driver_data = CORSAIR_USE_K90_MACRO |
                               CORSAIR_USE_K90_BACKLIGHT },
+       { HID_USB_DEVICE(USB_VENDOR_ID_CORSAIR,
+            USB_DEVICE_ID_CORSAIR_SCIMITAR_PRO_RGB) },
        {}
 };
 
@@ -686,10 +729,14 @@ static struct hid_driver corsair_driver = {
        .event = corsair_event,
        .remove = corsair_remove,
        .input_mapping = corsair_input_mapping,
+       .report_fixup = corsair_mouse_report_fixup,
 };
 
 module_hid_driver(corsair_driver);
 
 MODULE_LICENSE("GPL");
+/* Original K90 driver author */
 MODULE_AUTHOR("Clement Vuchener");
+/* Scimitar PRO RGB driver author */
+MODULE_AUTHOR("Oscar Campos");
 MODULE_DESCRIPTION("HID driver for Corsair devices");
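
As background for the fixup above: a HID short item is a prefix byte
encoding [tag:4][type:2][size:2] followed by 0-4 data bytes, so 0x16 is
a two-byte Logical Minimum item and 0x26 a two-byte Logical Maximum
item. A minimal sketch of what the checked bytes decode to (the 0x00
data byte at rdesc[76] is an assumption; the driver only tests bytes
75 and 77-79):

    /* rdesc[75..79] of the Scimitar PRO RGB, before and after fixup */
    static const unsigned char broken[] = {
            0x15, 0x00,             /* Logical Minimum (0)                */
            0x16, 0xff, 0x0f,       /* Logical Minimum (4095) -- nonsense */
    };
    static const unsigned char fixed[] = {
            0x15, 0x00,             /* Logical Minimum (0)                */
            0x26, 0xff, 0x0f,       /* Logical Maximum (4095)             */
    };
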
index 86c95d3..0e2e7c5 100644 (file)
 #define USB_DEVICE_ID_CORSAIR_K70RGB    0x1b13
 #define USB_DEVICE_ID_CORSAIR_STRAFE    0x1b15
 #define USB_DEVICE_ID_CORSAIR_K65RGB    0x1b17
+#define USB_DEVICE_ID_CORSAIR_K70RGB_RAPIDFIRE  0x1b38
+#define USB_DEVICE_ID_CORSAIR_K65RGB_RAPIDFIRE  0x1b39
+#define USB_DEVICE_ID_CORSAIR_SCIMITAR_PRO_RGB  0x1b3e
 
 #define USB_VENDOR_ID_CREATIVELABS     0x041e
 #define USB_DEVICE_ID_CREATIVE_SB_OMNI_SURROUND_51     0x322c
 
 #define USB_VENDOR_ID_JESS             0x0c45
 #define USB_DEVICE_ID_JESS_YUREX       0x1010
+#define USB_DEVICE_ID_JESS_ZEN_AIO_KBD 0x5112
 
 #define USB_VENDOR_ID_JESS2            0x0f30
 #define USB_DEVICE_ID_JESS2_COLOR_RUMBLE_PAD 0x0111
index f405b07..740996f 100644 (file)
@@ -2632,6 +2632,8 @@ err_stop:
                sony_leds_remove(sc);
        if (sc->quirks & SONY_BATTERY_SUPPORT)
                sony_battery_remove(sc);
+       if (sc->touchpad)
+               sony_unregister_touchpad(sc);
        sony_cancel_work_sync(sc);
        kfree(sc->output_report_dmabuf);
        sony_remove_dev_list(sc);
index d6847a6..a69a3c8 100644 (file)
@@ -80,6 +80,9 @@ static const struct hid_blacklist {
        { USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K70RGB, HID_QUIRK_NO_INIT_REPORTS },
        { USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K65RGB, HID_QUIRK_NO_INIT_REPORTS },
        { USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_STRAFE, HID_QUIRK_NO_INIT_REPORTS | HID_QUIRK_ALWAYS_POLL },
+       { USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K70RGB_RAPIDFIRE, HID_QUIRK_NO_INIT_REPORTS | HID_QUIRK_ALWAYS_POLL },
+       { USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K65RGB_RAPIDFIRE, HID_QUIRK_NO_INIT_REPORTS | HID_QUIRK_ALWAYS_POLL },
+       { USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_SCIMITAR_PRO_RGB, HID_QUIRK_NO_INIT_REPORTS | HID_QUIRK_ALWAYS_POLL },
        { USB_VENDOR_ID_CREATIVELABS, USB_DEVICE_ID_CREATIVE_SB_OMNI_SURROUND_51, HID_QUIRK_NOGET },
        { USB_VENDOR_ID_DMI, USB_DEVICE_ID_DMI_ENC, HID_QUIRK_NOGET },
        { USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_WIIU, HID_QUIRK_MULTI_INPUT },
index be8f7e2..994bddc 100644 (file)
@@ -2579,7 +2579,9 @@ static void wacom_remove(struct hid_device *hdev)
 
        /* make sure we don't trigger the LEDs */
        wacom_led_groups_release(wacom);
-       wacom_release_resources(wacom);
+
+       if (wacom->wacom_wac.features.type != REMOTE)
+               wacom_release_resources(wacom);
 
        hid_set_drvdata(hdev, NULL);
 }
index 4aa3de9..94250c2 100644 (file)
@@ -1959,8 +1959,10 @@ static void wacom_wac_pen_usage_mapping(struct hid_device *hdev,
                input_set_capability(input, EV_KEY, BTN_TOOL_BRUSH);
                input_set_capability(input, EV_KEY, BTN_TOOL_PENCIL);
                input_set_capability(input, EV_KEY, BTN_TOOL_AIRBRUSH);
-               input_set_capability(input, EV_KEY, BTN_TOOL_MOUSE);
-               input_set_capability(input, EV_KEY, BTN_TOOL_LENS);
+               if (!(features->device_type & WACOM_DEVICETYPE_DIRECT)) {
+                       input_set_capability(input, EV_KEY, BTN_TOOL_MOUSE);
+                       input_set_capability(input, EV_KEY, BTN_TOOL_LENS);
+               }
                break;
        case WACOM_HID_WD_FINGERWHEEL:
                wacom_map_usage(input, usage, field, EV_ABS, ABS_WHEEL, 0);
@@ -4197,10 +4199,10 @@ static const struct wacom_features wacom_features_0x343 =
          WACOM_DTU_OFFSET, WACOM_DTU_OFFSET };
 static const struct wacom_features wacom_features_0x360 =
        { "Wacom Intuos Pro M", 44800, 29600, 8191, 63,
-         INTUOSP2_BT, WACOM_INTUOS_RES, WACOM_INTUOS_RES, 9, .touch_max = 10 };
+         INTUOSP2_BT, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 9, .touch_max = 10 };
 static const struct wacom_features wacom_features_0x361 =
        { "Wacom Intuos Pro L", 62200, 43200, 8191, 63,
-         INTUOSP2_BT, WACOM_INTUOS_RES, WACOM_INTUOS_RES, 9, .touch_max = 10 };
+         INTUOSP2_BT, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 9, .touch_max = 10 };
 
 static const struct wacom_features wacom_features_HID_ANY_ID =
        { "Wacom HID", .type = HID_GENERIC, .oVid = HID_ANY_ID, .oPid = HID_ANY_ID };
index d96aa27..db64adf 100644 (file)
@@ -141,6 +141,9 @@ static int iforce_usb_probe(struct usb_interface *intf,
 
        interface = intf->cur_altsetting;
 
+       if (interface->desc.bNumEndpoints < 2)
+               return -ENODEV;
+
        epirq = &interface->endpoint[0].desc;
        epout = &interface->endpoint[1].desc;
 
index 9cc6d05..23c191a 100644 (file)
@@ -700,6 +700,10 @@ static int cm109_usb_probe(struct usb_interface *intf,
        int error = -ENOMEM;
 
        interface = intf->cur_altsetting;
+
+       if (interface->desc.bNumEndpoints < 1)
+               return -ENODEV;
+
        endpoint = &interface->endpoint[0].desc;
 
        if (!usb_endpoint_is_int_in(endpoint))
index 9c0ea36..f4e8fbe 100644 (file)
@@ -1667,6 +1667,10 @@ static int ims_pcu_parse_cdc_data(struct usb_interface *intf, struct ims_pcu *pc
                return -EINVAL;
 
        alt = pcu->ctrl_intf->cur_altsetting;
+
+       if (alt->desc.bNumEndpoints < 1)
+               return -ENODEV;
+
        pcu->ep_ctrl = &alt->endpoint[0].desc;
        pcu->max_ctrl_size = usb_endpoint_maxp(pcu->ep_ctrl);
 
index 79c964c..6e7ff95 100644 (file)
@@ -875,6 +875,10 @@ static int usb_probe(struct usb_interface *intf, const struct usb_device_id *id)
        int ret, pipe, i;
 
        interface = intf->cur_altsetting;
+
+       if (interface->desc.bNumEndpoints < 1)
+               return -ENODEV;
+
        endpoint = &interface->endpoint[0].desc;
        if (!usb_endpoint_is_int_in(endpoint))
                return -ENODEV;
index 72b28eb..f210e19 100644 (file)
@@ -1282,10 +1282,8 @@ static int alps_decode_ss4_v2(struct alps_fields *f,
        /* handle buttons */
        if (pkt_id == SS4_PACKET_ID_STICK) {
                f->ts_left = !!(SS4_BTN_V2(p) & 0x01);
-               if (!(priv->flags & ALPS_BUTTONPAD)) {
-                       f->ts_right = !!(SS4_BTN_V2(p) & 0x02);
-                       f->ts_middle = !!(SS4_BTN_V2(p) & 0x04);
-               }
+               f->ts_right = !!(SS4_BTN_V2(p) & 0x02);
+               f->ts_middle = !!(SS4_BTN_V2(p) & 0x04);
        } else {
                f->left = !!(SS4_BTN_V2(p) & 0x01);
                if (!(priv->flags & ALPS_BUTTONPAD)) {
@@ -2462,14 +2460,34 @@ static int alps_update_device_area_ss4_v2(unsigned char otp[][4],
        int num_y_electrode;
        int x_pitch, y_pitch, x_phys, y_phys;
 
-       num_x_electrode = SS4_NUMSENSOR_XOFFSET + (otp[1][0] & 0x0F);
-       num_y_electrode = SS4_NUMSENSOR_YOFFSET + ((otp[1][0] >> 4) & 0x0F);
+       if (IS_SS4PLUS_DEV(priv->dev_id)) {
+               num_x_electrode =
+                       SS4PLUS_NUMSENSOR_XOFFSET + (otp[0][2] & 0x0F);
+               num_y_electrode =
+                       SS4PLUS_NUMSENSOR_YOFFSET + ((otp[0][2] >> 4) & 0x0F);
+
+               priv->x_max =
+                       (num_x_electrode - 1) * SS4PLUS_COUNT_PER_ELECTRODE;
+               priv->y_max =
+                       (num_y_electrode - 1) * SS4PLUS_COUNT_PER_ELECTRODE;
 
-       priv->x_max = (num_x_electrode - 1) * SS4_COUNT_PER_ELECTRODE;
-       priv->y_max = (num_y_electrode - 1) * SS4_COUNT_PER_ELECTRODE;
+               x_pitch = (otp[0][1] & 0x0F) + SS4PLUS_MIN_PITCH_MM;
+               y_pitch = ((otp[0][1] >> 4) & 0x0F) + SS4PLUS_MIN_PITCH_MM;
 
-       x_pitch = ((otp[1][2] >> 2) & 0x07) + SS4_MIN_PITCH_MM;
-       y_pitch = ((otp[1][2] >> 5) & 0x07) + SS4_MIN_PITCH_MM;
+       } else {
+               num_x_electrode =
+                       SS4_NUMSENSOR_XOFFSET + (otp[1][0] & 0x0F);
+               num_y_electrode =
+                       SS4_NUMSENSOR_YOFFSET + ((otp[1][0] >> 4) & 0x0F);
+
+               priv->x_max =
+                       (num_x_electrode - 1) * SS4_COUNT_PER_ELECTRODE;
+               priv->y_max =
+                       (num_y_electrode - 1) * SS4_COUNT_PER_ELECTRODE;
+
+               x_pitch = ((otp[1][2] >> 2) & 0x07) + SS4_MIN_PITCH_MM;
+               y_pitch = ((otp[1][2] >> 5) & 0x07) + SS4_MIN_PITCH_MM;
+       }
 
        x_phys = x_pitch * (num_x_electrode - 1); /* In 0.1 mm units */
        y_phys = y_pitch * (num_y_electrode - 1); /* In 0.1 mm units */
@@ -2485,7 +2503,10 @@ static int alps_update_btn_info_ss4_v2(unsigned char otp[][4],
 {
        unsigned char is_btnless;
 
-       is_btnless = (otp[1][1] >> 3) & 0x01;
+       if (IS_SS4PLUS_DEV(priv->dev_id))
+               is_btnless = (otp[1][0] >> 1) & 0x01;
+       else
+               is_btnless = (otp[1][1] >> 3) & 0x01;
 
        if (is_btnless)
                priv->flags |= ALPS_BUTTONPAD;
@@ -2493,6 +2514,21 @@ static int alps_update_btn_info_ss4_v2(unsigned char otp[][4],
        return 0;
 }
 
+static int alps_update_dual_info_ss4_v2(unsigned char otp[][4],
+                                      struct alps_data *priv)
+{
+       bool is_dual = false;
+
+       if (IS_SS4PLUS_DEV(priv->dev_id))
+               is_dual = (otp[0][0] >> 4) & 0x01;
+
+       if (is_dual)
+               priv->flags |= ALPS_DUALPOINT |
+                                       ALPS_DUALPOINT_WITH_PRESSURE;
+
+       return 0;
+}
+
 static int alps_set_defaults_ss4_v2(struct psmouse *psmouse,
                                    struct alps_data *priv)
 {
@@ -2508,6 +2544,8 @@ static int alps_set_defaults_ss4_v2(struct psmouse *psmouse,
 
        alps_update_btn_info_ss4_v2(otp, priv);
 
+       alps_update_dual_info_ss4_v2(otp, priv);
+
        return 0;
 }
 
@@ -2753,10 +2791,6 @@ static int alps_set_protocol(struct psmouse *psmouse,
                if (alps_set_defaults_ss4_v2(psmouse, priv))
                        return -EIO;
 
-               if (priv->fw_ver[1] == 0x1)
-                       priv->flags |= ALPS_DUALPOINT |
-                                       ALPS_DUALPOINT_WITH_PRESSURE;
-
                break;
        }
 
@@ -2827,10 +2861,7 @@ static int alps_identify(struct psmouse *psmouse, struct alps_data *priv)
                           ec[2] >= 0x90 && ec[2] <= 0x9d) {
                        protocol = &alps_v3_protocol_data;
                } else if (e7[0] == 0x73 && e7[1] == 0x03 &&
-                          e7[2] == 0x14 && ec[1] == 0x02) {
-                       protocol = &alps_v8_protocol_data;
-               } else if (e7[0] == 0x73 && e7[1] == 0x03 &&
-                          e7[2] == 0x28 && ec[1] == 0x01) {
+                          (e7[2] == 0x14 || e7[2] == 0x28)) {
                        protocol = &alps_v8_protocol_data;
                } else {
                        psmouse_dbg(psmouse,
@@ -2840,7 +2871,8 @@ static int alps_identify(struct psmouse *psmouse, struct alps_data *priv)
        }
 
        if (priv) {
-               /* Save the Firmware version */
+               /* Save Device ID and Firmware version */
+               memcpy(priv->dev_id, e7, 3);
                memcpy(priv->fw_ver, ec, 3);
                error = alps_set_protocol(psmouse, priv, protocol);
                if (error)
index 6d279aa..4334f28 100644 (file)
@@ -54,6 +54,16 @@ enum SS4_PACKET_ID {
 
 #define SS4_MASK_NORMAL_BUTTONS                0x07
 
+#define SS4PLUS_COUNT_PER_ELECTRODE    128
+#define SS4PLUS_NUMSENSOR_XOFFSET      16
+#define SS4PLUS_NUMSENSOR_YOFFSET      5
+#define SS4PLUS_MIN_PITCH_MM           37
+
+#define IS_SS4PLUS_DEV(_b)     (((_b[0]) == 0x73) &&   \
+                                ((_b[1]) == 0x03) &&   \
+                                ((_b[2]) == 0x28)              \
+                               )
+
 #define SS4_IS_IDLE_V2(_b)     (((_b[0]) == 0x18) &&           \
                                 ((_b[1]) == 0x10) &&           \
                                 ((_b[2]) == 0x00) &&           \
@@ -283,6 +293,7 @@ struct alps_data {
        int addr_command;
        u16 proto_version;
        u8 byte0, mask0;
+       u8 dev_id[3];
        u8 fw_ver[3];
        int flags;
        int x_max;
index 352050e..d5ab9dd 100644 (file)
@@ -218,17 +218,19 @@ static int elan_query_product(struct elan_tp_data *data)
 
 static int elan_check_ASUS_special_fw(struct elan_tp_data *data)
 {
-       if (data->ic_type != 0x0E)
-               return false;
-
-       switch (data->product_id) {
-       case 0x05 ... 0x07:
-       case 0x09:
-       case 0x13:
+       if (data->ic_type == 0x0E) {
+               switch (data->product_id) {
+               case 0x05 ... 0x07:
+               case 0x09:
+               case 0x13:
+                       return true;
+               }
+       } else if (data->ic_type == 0x08 && data->product_id == 0x26) {
+               /* ASUS EeeBook X205TA */
                return true;
-       default:
-               return false;
        }
+
+       return false;
 }
 
 static int __elan_initialize(struct elan_tp_data *data)
index 1986786..34dfee5 100644 (file)
@@ -170,6 +170,10 @@ static int rmi_f30_config(struct rmi_function *fn)
                                rmi_get_platform_data(fn->rmi_dev);
        int error;
 
+       /* can happen if f30_data.disable is set */
+       if (!f30)
+               return 0;
+
        if (pdata->f30_data.trackstick_buttons) {
                /* Try [re-]establish link to F03. */
                f30->f03 = rmi_find_function(fn->rmi_dev, 0x03);
index 05afd16..312bd6c 100644 (file)
@@ -119,6 +119,13 @@ static const struct dmi_system_id __initconst i8042_dmi_noloop_table[] = {
                        DMI_MATCH(DMI_PRODUCT_VERSION, "DL760"),
                },
        },
+       {
+               /* Dell Embedded Box PC 3000 */
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "Embedded Box PC 3000"),
+               },
+       },
        {
                /* OQO Model 01 */
                .matches = {
@@ -513,6 +520,13 @@ static const struct dmi_system_id __initconst i8042_dmi_nomux_table[] = {
                        DMI_MATCH(DMI_PRODUCT_NAME, "IC4I"),
                },
        },
+       {
+               /* TUXEDO BU1406 */
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "Notebook"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "N24_25BU"),
+               },
+       },
        { }
 };
 
index cd85205..df4bea9 100644 (file)
@@ -340,6 +340,9 @@ static int hanwang_probe(struct usb_interface *intf, const struct usb_device_id
        int error;
        int i;
 
+       if (intf->cur_altsetting->desc.bNumEndpoints < 1)
+               return -ENODEV;
+
        hanwang = kzalloc(sizeof(struct hanwang), GFP_KERNEL);
        input_dev = input_allocate_device();
        if (!hanwang || !input_dev) {
index e850d7e..4d9d649 100644 (file)
@@ -122,6 +122,9 @@ static int kbtab_probe(struct usb_interface *intf, const struct usb_device_id *i
        struct input_dev *input_dev;
        int error = -ENOMEM;
 
+       if (intf->cur_altsetting->desc.bNumEndpoints < 1)
+               return -ENODEV;
+
        kbtab = kzalloc(sizeof(struct kbtab), GFP_KERNEL);
        input_dev = input_allocate_device();
        if (!kbtab || !input_dev)
index aefb6e1..4c0eeca 100644 (file)
@@ -527,6 +527,9 @@ static int sur40_probe(struct usb_interface *interface,
        if (iface_desc->desc.bInterfaceClass != 0xFF)
                return -ENODEV;
 
+       if (iface_desc->desc.bNumEndpoints < 5)
+               return -ENODEV;
+
        /* Use endpoint #4 (0x86). */
        endpoint = &iface_desc->endpoint[4].desc;
        if (endpoint->bEndpointAddress != TOUCH_ENDPOINT)
index 98940d1..b17536d 100644 (file)
@@ -3202,7 +3202,7 @@ static void amd_iommu_get_resv_regions(struct device *dev,
 
        region = iommu_alloc_resv_region(MSI_RANGE_START,
                                         MSI_RANGE_END - MSI_RANGE_START + 1,
-                                        0, IOMMU_RESV_RESERVED);
+                                        0, IOMMU_RESV_MSI);
        if (!region)
                return;
        list_add_tail(&region->list, head);
index 5806a6a..591bb96 100644 (file)
@@ -1888,7 +1888,7 @@ static void arm_smmu_get_resv_regions(struct device *dev,
        int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;
 
        region = iommu_alloc_resv_region(MSI_IOVA_BASE, MSI_IOVA_LENGTH,
-                                        prot, IOMMU_RESV_MSI);
+                                        prot, IOMMU_RESV_SW_MSI);
        if (!region)
                return;
 
index abf6496..b493c99 100644 (file)
@@ -1608,7 +1608,7 @@ static void arm_smmu_get_resv_regions(struct device *dev,
        int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;
 
        region = iommu_alloc_resv_region(MSI_IOVA_BASE, MSI_IOVA_LENGTH,
-                                        prot, IOMMU_RESV_MSI);
+                                        prot, IOMMU_RESV_SW_MSI);
        if (!region)
                return;
 
index a7e0821..c01bfcd 100644 (file)
@@ -512,7 +512,13 @@ static void sysmmu_tlb_invalidate_flpdcache(struct sysmmu_drvdata *data,
        spin_lock_irqsave(&data->lock, flags);
        if (data->active && data->version >= MAKE_MMU_VER(3, 3)) {
                clk_enable(data->clk_master);
-               __sysmmu_tlb_invalidate_entry(data, iova, 1);
+               if (sysmmu_block(data)) {
+                       if (data->version >= MAKE_MMU_VER(5, 0))
+                               __sysmmu_tlb_invalidate(data);
+                       else
+                               __sysmmu_tlb_invalidate_entry(data, iova, 1);
+                       sysmmu_unblock(data);
+               }
                clk_disable(data->clk_master);
        }
        spin_unlock_irqrestore(&data->lock, flags);
index 238ad34..d412a31 100644 (file)
@@ -916,7 +916,7 @@ static struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devf
                                 * which we used for the IOMMU lookup. Strictly speaking
                                 * we could do this for all PCI devices; we only need to
                                 * get the BDF# from the scope table for ACPI matches. */
-                               if (pdev->is_virtfn)
+                               if (pdev && pdev->is_virtfn)
                                        goto got_pdev;
 
                                *bus = drhd->devices[i].bus;
@@ -5249,7 +5249,7 @@ static void intel_iommu_get_resv_regions(struct device *device,
 
        reg = iommu_alloc_resv_region(IOAPIC_RANGE_START,
                                      IOAPIC_RANGE_END - IOAPIC_RANGE_START + 1,
-                                     0, IOMMU_RESV_RESERVED);
+                                     0, IOMMU_RESV_MSI);
        if (!reg)
                return;
        list_add_tail(&reg->list, head);
index 1c049e2..8d6ca28 100644 (file)
@@ -422,8 +422,12 @@ static int __arm_v7s_map(struct arm_v7s_io_pgtable *data, unsigned long iova,
                        pte |= ARM_V7S_ATTR_NS_TABLE;
 
                __arm_v7s_set_pte(ptep, pte, 1, cfg);
-       } else {
+       } else if (ARM_V7S_PTE_IS_TABLE(pte, lvl)) {
                cptep = iopte_deref(pte, lvl);
+       } else {
+               /* We require an unmap first */
+               WARN_ON(!selftest_running);
+               return -EEXIST;
        }
 
        /* Rinse, repeat */
index feacc54..f9bc6eb 100644 (file)
@@ -335,8 +335,12 @@ static int __arm_lpae_map(struct arm_lpae_io_pgtable *data, unsigned long iova,
                if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_NS)
                        pte |= ARM_LPAE_PTE_NSTABLE;
                __arm_lpae_set_pte(ptep, pte, cfg);
-       } else {
+       } else if (!iopte_leaf(pte, lvl)) {
                cptep = iopte_deref(pte, data);
+       } else {
+               /* We require an unmap first */
+               WARN_ON(!selftest_running);
+               return -EEXIST;
        }
 
        /* Rinse, repeat */
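
Both page-table walkers (the v7s and LPAE variants above) now
distinguish three cases while descending: an empty slot, where a table
pointer is installed; a valid table entry, which is dereferenced and
followed; and a valid leaf, which means the IOVA is already mapped.
The last case previously fell through and treated the leaf as a table
pointer; it now fails cleanly. A sketch of the caller-visible contract
(whether -EEXIST propagates unmodified depends on the IOMMU driver
wrapping the page-table code):

    /* Illustrative only: mapping the same IOVA twice without an
     * intervening unmap is now expected to fail, not corrupt the walk.
     */
    ret = iommu_map(domain, iova, paddr, SZ_4K, IOMMU_READ);
    if (ret)
            return ret;
    ret = iommu_map(domain, iova, other_paddr, SZ_4K, IOMMU_READ);
    /* ret is expected to be -EEXIST here */
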
index 8ea14f4..3b67144 100644 (file)
@@ -72,6 +72,7 @@ static const char * const iommu_group_resv_type_string[] = {
        [IOMMU_RESV_DIRECT]     = "direct",
        [IOMMU_RESV_RESERVED]   = "reserved",
        [IOMMU_RESV_MSI]        = "msi",
+       [IOMMU_RESV_SW_MSI]     = "msi",
 };
 
 #define IOMMU_GROUP_ATTR(_name, _mode, _show, _store)          \
@@ -1743,8 +1744,8 @@ void iommu_put_resv_regions(struct device *dev, struct list_head *list)
 }
 
 struct iommu_resv_region *iommu_alloc_resv_region(phys_addr_t start,
-                                                 size_t length,
-                                                 int prot, int type)
+                                                 size_t length, int prot,
+                                                 enum iommu_resv_type type)
 {
        struct iommu_resv_region *region;
 
index 2b13117..321ecac 100644 (file)
@@ -777,7 +777,6 @@ static int gather_all_resync_info(struct mddev *mddev, int total_slots)
                bm_lockres->flags |= DLM_LKF_NOQUEUE;
                ret = dlm_lock_sync(bm_lockres, DLM_LOCK_PW);
                if (ret == -EAGAIN) {
-                       memset(bm_lockres->lksb.sb_lvbptr, '\0', LVB_SIZE);
                        s = read_resync_info(mddev, bm_lockres);
                        if (s) {
                                pr_info("%s:%d Resync[%llu..%llu] in progress on %d\n",
@@ -974,6 +973,7 @@ static int leave(struct mddev *mddev)
        lockres_free(cinfo->bitmap_lockres);
        unlock_all_bitmaps(mddev);
        dlm_release_lockspace(cinfo->lockspace, 2);
+       kfree(cinfo);
        return 0;
 }
 
index 548d1b8..f6ae1d6 100644 (file)
@@ -440,14 +440,6 @@ void md_flush_request(struct mddev *mddev, struct bio *bio)
 }
 EXPORT_SYMBOL(md_flush_request);
 
-void md_unplug(struct blk_plug_cb *cb, bool from_schedule)
-{
-       struct mddev *mddev = cb->data;
-       md_wakeup_thread(mddev->thread);
-       kfree(cb);
-}
-EXPORT_SYMBOL(md_unplug);
-
 static inline struct mddev *mddev_get(struct mddev *mddev)
 {
        atomic_inc(&mddev->active);
@@ -1887,7 +1879,7 @@ super_1_rdev_size_change(struct md_rdev *rdev, sector_t num_sectors)
        }
        sb = page_address(rdev->sb_page);
        sb->data_size = cpu_to_le64(num_sectors);
-       sb->super_offset = rdev->sb_start;
+       sb->super_offset = cpu_to_le64(rdev->sb_start);
        sb->sb_csum = calc_sb_1_csum(sb);
        do {
                md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size,
@@ -2295,7 +2287,7 @@ static bool does_sb_need_changing(struct mddev *mddev)
        /* Check if any mddev parameters have changed */
        if ((mddev->dev_sectors != le64_to_cpu(sb->size)) ||
            (mddev->reshape_position != le64_to_cpu(sb->reshape_position)) ||
-           (mddev->layout != le64_to_cpu(sb->layout)) ||
+           (mddev->layout != le32_to_cpu(sb->layout)) ||
            (mddev->raid_disks != le32_to_cpu(sb->raid_disks)) ||
            (mddev->chunk_sectors != le32_to_cpu(sb->chunksize)))
                return true;
@@ -6458,11 +6450,10 @@ static int set_array_info(struct mddev *mddev, mdu_array_info_t *info)
        mddev->layout        = info->layout;
        mddev->chunk_sectors = info->chunk_size >> 9;
 
-       mddev->max_disks     = MD_SB_DISKS;
-
        if (mddev->persistent) {
-               mddev->flags         = 0;
-               mddev->sb_flags         = 0;
+               mddev->max_disks = MD_SB_DISKS;
+               mddev->flags = 0;
+               mddev->sb_flags = 0;
        }
        set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
 
@@ -6533,8 +6524,12 @@ static int update_size(struct mddev *mddev, sector_t num_sectors)
                        return -ENOSPC;
        }
        rv = mddev->pers->resize(mddev, num_sectors);
-       if (!rv)
-               revalidate_disk(mddev->gendisk);
+       if (!rv) {
+               if (mddev->queue) {
+                       set_capacity(mddev->gendisk, mddev->array_sectors);
+                       revalidate_disk(mddev->gendisk);
+               }
+       }
        return rv;
 }
 
index b8859cb..dde8ecb 100644 (file)
@@ -676,16 +676,10 @@ extern void mddev_resume(struct mddev *mddev);
 extern struct bio *bio_alloc_mddev(gfp_t gfp_mask, int nr_iovecs,
                                   struct mddev *mddev);
 
-extern void md_unplug(struct blk_plug_cb *cb, bool from_schedule);
 extern void md_reload_sb(struct mddev *mddev, int raid_disk);
 extern void md_update_sb(struct mddev *mddev, int force);
 extern void md_kick_rdev_from_array(struct md_rdev * rdev);
 struct md_rdev *md_find_rdev_nr_rcu(struct mddev *mddev, int nr);
-static inline int mddev_check_plugged(struct mddev *mddev)
-{
-       return !!blk_check_plugged(md_unplug, mddev,
-                                  sizeof(struct blk_plug_cb));
-}
 
 static inline void rdev_dec_pending(struct md_rdev *rdev, struct mddev *mddev)
 {
index fbc2d78..a34f587 100644 (file)
@@ -1027,7 +1027,7 @@ static int get_unqueued_pending(struct r1conf *conf)
 static void freeze_array(struct r1conf *conf, int extra)
 {
        /* Stop sync I/O and normal I/O and wait for everything to
-        * go quite.
+        * go quiet.
         * This is called in two situations:
         * 1) management command handlers (reshape, remove disk, quiesce).
         * 2) one normal I/O request failed.
@@ -1587,9 +1587,30 @@ static void raid1_make_request(struct mddev *mddev, struct bio *bio)
                        split = bio;
                }
 
-               if (bio_data_dir(split) == READ)
+               if (bio_data_dir(split) == READ) {
                        raid1_read_request(mddev, split);
-               else
+
+                       /*
+                        * If a bio is split, the first part of the bio
+                        * passes the barrier, but the remainder is queued
+                        * on current->bio_list (see generic_make_request).
+                        * If raise_barrier() is called here, the second
+                        * part of the bio can't pass the barrier. But since
+                        * the first part isn't dispatched to the underlying
+                        * disks yet, the barrier is never released, so
+                        * raise_barrier waits forever. We have a deadlock.
+                        * Note, this only happens in the read path. In the
+                        * write path, the first part of the bio is
+                        * dispatched in a schedule() call (because of the
+                        * blk plug) or offloaded to raid1d.
+                        * Returning from the function immediately changes
+                        * the order of bios queued on bio_list and avoids
+                        * the deadlock.
+                        */
+                       if (split != bio) {
+                               generic_make_request(bio);
+                               break;
+                       }
+               } else
                        raid1_write_request(mddev, split);
        } while (split != bio);
 }
@@ -3246,8 +3267,6 @@ static int raid1_resize(struct mddev *mddev, sector_t sectors)
                        return ret;
        }
        md_set_array_sectors(mddev, newsize);
-       set_capacity(mddev->gendisk, mddev->array_sectors);
-       revalidate_disk(mddev->gendisk);
        if (sectors > mddev->dev_sectors &&
            mddev->recovery_cp > mddev->dev_sectors) {
                mddev->recovery_cp = mddev->dev_sectors;
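
The deadlock fix above works by changing the order in which split
fragments reach current->bio_list: after issuing the first fragment of
a split read, the untouched remainder is pushed back through
generic_make_request() instead of being processed in the same loop
iteration while the barrier is still held. A condensed sketch of the
control flow (helper names are hypothetical):

    do {
            split = split_if_too_large(bio);   /* may return bio itself */
            issue_read_fragment(split);
            if (split != bio) {
                    /* Requeue the remainder; it is picked up from
                     * current->bio_list after the fragment, so the
                     * barrier can be released in between.
                     */
                    generic_make_request(bio);
                    break;
            }
    } while (split != bio);
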
index 0536658..e89a8d7 100644 (file)
@@ -1478,11 +1478,24 @@ retry_write:
                        mbio->bi_bdev = (void*)rdev;
 
                        atomic_inc(&r10_bio->remaining);
+
+                       cb = blk_check_plugged(raid10_unplug, mddev,
+                                              sizeof(*plug));
+                       if (cb)
+                               plug = container_of(cb, struct raid10_plug_cb,
+                                                   cb);
+                       else
+                               plug = NULL;
                        spin_lock_irqsave(&conf->device_lock, flags);
-                       bio_list_add(&conf->pending_bio_list, mbio);
-                       conf->pending_count++;
+                       if (plug) {
+                               bio_list_add(&plug->pending, mbio);
+                               plug->pending_cnt++;
+                       } else {
+                               bio_list_add(&conf->pending_bio_list, mbio);
+                               conf->pending_count++;
+                       }
                        spin_unlock_irqrestore(&conf->device_lock, flags);
-                       if (!mddev_check_plugged(mddev))
+                       if (!plug)
                                md_wakeup_thread(mddev->thread);
                }
        }
@@ -1572,7 +1585,25 @@ static void raid10_make_request(struct mddev *mddev, struct bio *bio)
                        split = bio;
                }
 
+               /*
+                * If a bio is split, the first part of the bio passes the
+                * barrier, but the remainder is queued on current->bio_list
+                * (see generic_make_request). If raise_barrier() is called
+                * here, the second part of the bio can't pass the barrier.
+                * But since the first part isn't dispatched to the
+                * underlying disks yet, the barrier is never released, so
+                * raise_barrier waits forever. We have a deadlock.
+                * Note, this only happens in the read path. In the write
+                * path, the first part of the bio is dispatched in a
+                * schedule() call (because of the blk plug) or offloaded to
+                * raid10d.
+                * Returning from the function immediately changes the order
+                * of bios queued on bio_list and avoids the deadlock.
+                */
                __make_request(mddev, split);
+               if (split != bio && bio_data_dir(bio) == READ) {
+                       generic_make_request(bio);
+                       break;
+               }
        } while (split != bio);
 
        /* In case raid10d snuck in to freeze_array */
@@ -3944,10 +3975,6 @@ static int raid10_resize(struct mddev *mddev, sector_t sectors)
                        return ret;
        }
        md_set_array_sectors(mddev, size);
-       if (mddev->queue) {
-               set_capacity(mddev->gendisk, mddev->array_sectors);
-               revalidate_disk(mddev->gendisk);
-       }
        if (sectors > mddev->dev_sectors &&
            mddev->recovery_cp > oldsize) {
                mddev->recovery_cp = oldsize;
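
The raid10 write path above replaces the removed mddev_check_plugged()
helper with the same per-plug batching idiom raid1 uses: mbios are
collected on a list embedded in a plug callback and flushed when the
plug is released, and the md thread is only woken when no plug is
active. A condensed sketch with locking omitted (struct raid10_plug_cb
embeds a struct blk_plug_cb named cb plus a pending bio list, per the
driver):

    cb = blk_check_plugged(raid10_unplug, mddev, sizeof(*plug));
    plug = cb ? container_of(cb, struct raid10_plug_cb, cb) : NULL;

    if (plug) {
            bio_list_add(&plug->pending, mbio);  /* flushed at unplug */
            plug->pending_cnt++;
    } else {
            bio_list_add(&conf->pending_bio_list, mbio);
            conf->pending_count++;
            md_wakeup_thread(mddev->thread);     /* no plug: kick raid10d */
    }
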
index 4fb09b3..ed5cd70 100644 (file)
@@ -1401,7 +1401,8 @@ static int set_syndrome_sources(struct page **srcs,
                     (test_bit(R5_Wantdrain, &dev->flags) ||
                      test_bit(R5_InJournal, &dev->flags))) ||
                    (srctype == SYNDROME_SRC_WRITTEN &&
-                    dev->written)) {
+                    (dev->written ||
+                     test_bit(R5_InJournal, &dev->flags)))) {
                        if (test_bit(R5_InJournal, &dev->flags))
                                srcs[slot] = sh->dev[i].orig_page;
                        else
@@ -7605,8 +7606,6 @@ static int raid5_resize(struct mddev *mddev, sector_t sectors)
                        return ret;
        }
        md_set_array_sectors(mddev, newsize);
-       set_capacity(mddev->gendisk, mddev->array_sectors);
-       revalidate_disk(mddev->gendisk);
        if (sectors > mddev->dev_sectors &&
            mddev->recovery_cp > mddev->dev_sectors) {
                mddev->recovery_cp = mddev->dev_sectors;
index 67fd8ff..669a4c8 100644 (file)
@@ -321,7 +321,7 @@ static const struct of_device_id vdoa_dt_ids[] = {
 };
 MODULE_DEVICE_TABLE(of, vdoa_dt_ids);
 
-static const struct platform_driver vdoa_driver = {
+static struct platform_driver vdoa_driver = {
        .probe          = vdoa_probe,
        .remove         = vdoa_remove,
        .driver         = {
index cbb0376..0f0c389 100644 (file)
@@ -861,9 +861,7 @@ int gsc_prepare_addr(struct gsc_ctx *ctx, struct vb2_buffer *vb,
 
        if ((frame->fmt->pixelformat == V4L2_PIX_FMT_VYUY) ||
                (frame->fmt->pixelformat == V4L2_PIX_FMT_YVYU) ||
-               (frame->fmt->pixelformat == V4L2_PIX_FMT_NV61) ||
                (frame->fmt->pixelformat == V4L2_PIX_FMT_YVU420) ||
-               (frame->fmt->pixelformat == V4L2_PIX_FMT_NV21) ||
                (frame->fmt->pixelformat == V4L2_PIX_FMT_YVU420M))
                swap(addr->cb, addr->cr);
 
index 8236081..7918b92 100644 (file)
@@ -632,8 +632,8 @@ static int bdisp_open(struct file *file)
 
 error_ctrls:
        bdisp_ctrls_delete(ctx);
-error_fh:
        v4l2_fh_del(&ctx->fh);
+error_fh:
        v4l2_fh_exit(&ctx->fh);
        bdisp_hw_free_nodes(ctx);
 mem_ctx:
index ab98660..04033ef 100644 (file)
@@ -36,16 +36,18 @@ static int usb_cypress_writemem(struct usb_device *udev,u16 addr,u8 *data, u8 le
 int usb_cypress_load_firmware(struct usb_device *udev, const struct firmware *fw, int type)
 {
        struct hexline *hx;
-       u8 reset;
-       int ret,pos=0;
+       u8 *buf;
+       int ret, pos = 0;
+       u16 cpu_cs_register = cypress[type].cpu_cs_register;
 
-       hx = kmalloc(sizeof(*hx), GFP_KERNEL);
-       if (!hx)
+       buf = kmalloc(sizeof(*hx), GFP_KERNEL);
+       if (!buf)
                return -ENOMEM;
+       hx = (struct hexline *)buf;
 
        /* stop the CPU */
-       reset = 1;
-       if ((ret = usb_cypress_writemem(udev,cypress[type].cpu_cs_register,&reset,1)) != 1)
+       buf[0] = 1;
+       if (usb_cypress_writemem(udev, cpu_cs_register, buf, 1) != 1)
                err("could not stop the USB controller CPU.");
 
        while ((ret = dvb_usb_get_hexline(fw, hx, &pos)) > 0) {
@@ -61,21 +63,21 @@ int usb_cypress_load_firmware(struct usb_device *udev, const struct firmware *fw
        }
        if (ret < 0) {
                err("firmware download failed at %d with %d",pos,ret);
-               kfree(hx);
+               kfree(buf);
                return ret;
        }
 
        if (ret == 0) {
                /* restart the CPU */
-               reset = 0;
-               if (ret || usb_cypress_writemem(udev,cypress[type].cpu_cs_register,&reset,1) != 1) {
+               buf[0] = 0;
+               if (usb_cypress_writemem(udev, cpu_cs_register, buf, 1) != 1) {
                        err("could not restart the USB controller CPU.");
                        ret = -EINVAL;
                }
        } else
                ret = -EIO;
 
-       kfree(hx);
+       kfree(buf);
 
        return ret;
 }
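
Beyond dropping the dead ret check, the rewrite above routes the
one-byte CPU stop/start value through the kmalloc()ed buffer rather
than a stack variable: buffers handed to USB transfer helpers must be
DMA-able, and on-stack storage is not. The general pattern, sketched
with the values this function actually sends:

    /* Heap-allocate even single-byte USB transfer buffers */
    u8 *buf = kmalloc(1, GFP_KERNEL);
    if (!buf)
            return -ENOMEM;
    buf[0] = 1;     /* value to send: stop the CPU */
    if (usb_cypress_writemem(udev, cpu_cs_register, buf, 1) != 1)
            err("could not stop the USB controller CPU.");
    kfree(buf);
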
index 5457c36..bf0fe01 100644 (file)
@@ -1947,9 +1947,7 @@ static int gpmc_probe_onenand_child(struct platform_device *pdev,
        if (!of_property_read_u32(child, "dma-channel", &val))
                gpmc_onenand_data->dma_channel = val;
 
-       gpmc_onenand_init(gpmc_onenand_data);
-
-       return 0;
+       return gpmc_onenand_init(gpmc_onenand_data);
 }
 #else
 static int gpmc_probe_onenand_child(struct platform_device *pdev,
index 1621fa0..ff3da96 100644 (file)
@@ -1560,11 +1560,8 @@ static bool mmc_blk_rw_cmd_err(struct mmc_blk_data *md, struct mmc_card *card,
                               struct mmc_blk_request *brq, struct request *req,
                               bool old_req_pending)
 {
-       struct mmc_queue_req *mq_rq;
        bool req_pending;
 
-       mq_rq = container_of(brq, struct mmc_queue_req, brq);
-
        /*
         * If this is an SD card and we're writing, we can first
         * mark the known good sectors as ok.
@@ -1701,7 +1698,8 @@ static void mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *new_req)
                case MMC_BLK_CMD_ERR:
                        req_pending = mmc_blk_rw_cmd_err(md, card, brq, old_req, req_pending);
                        if (mmc_blk_reset(md, card->host, type)) {
-                               mmc_blk_rw_cmd_abort(card, old_req);
+                               if (req_pending)
+                                       mmc_blk_rw_cmd_abort(card, old_req);
                                mmc_blk_rw_try_restart(mq, new_req);
                                return;
                        }
@@ -1817,6 +1815,7 @@ void mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
                mmc_blk_issue_flush(mq, req);
        } else {
                mmc_blk_issue_rw_rq(mq, req);
+               card->host->context_info.is_waiting_last_req = false;
        }
 
 out:
index 7fd7228..b502601 100644 (file)
@@ -1730,7 +1730,7 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
                err = mmc_select_hs400(card);
                if (err)
                        goto free_card;
-       } else {
+       } else if (!mmc_card_hs400es(card)) {
                /* Select the desired bus width optionally */
                err = mmc_select_bus_width(card);
                if (err > 0 && mmc_card_hs(card)) {
index 8e32580..b235d8d 100644 (file)
@@ -580,7 +580,7 @@ static void msdc_set_mclk(struct msdc_host *host, unsigned char timing, u32 hz)
                }
        }
        sdr_set_field(host->base + MSDC_CFG, MSDC_CFG_CKMOD | MSDC_CFG_CKDIV,
-                       (mode << 8) | (div % 0xff));
+                     (mode << 8) | div);
        sdr_set_bits(host->base + MSDC_CFG, MSDC_CFG_CKPDN);
        while (!(readl(host->base + MSDC_CFG) & MSDC_CFG_CKSTB))
                cpu_relax();
@@ -1559,7 +1559,7 @@ static int msdc_drv_probe(struct platform_device *pdev)
        host->src_clk_freq = clk_get_rate(host->src_clk);
        /* Set host parameters to mmc */
        mmc->ops = &mt_msdc_ops;
-       mmc->f_min = host->src_clk_freq / (4 * 255);
+       mmc->f_min = DIV_ROUND_UP(host->src_clk_freq, 4 * 255);
 
        mmc->caps |= MMC_CAP_ERASE | MMC_CAP_CMD23;
        /* MMC core transfer sizes tunable parameters */
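
Two small correctness fixes in the mtk-sd hunks above: MSDC_CFG_CKDIV
takes the full divider value, so masking it with "div % 0xff" could
silently wrap large dividers (modulo 255), and f_min must round up so
the core never requests a frequency the largest divider cannot
actually reach. A worked example with illustrative numbers:

    /* src_clk_freq = 800000 Hz, maximum divider factor 4 * 255 = 1020:
     *   old: f_min = 800000 / 1020              = 784 Hz (unreachable)
     *   new: f_min = DIV_ROUND_UP(800000, 1020) = 785 Hz (reachable)
     */
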
index 410a55b..1cfd7f9 100644 (file)
 #include "sdhci-pltfm.h"
 #include <linux/of.h>
 
-#define SDHCI_ARASAN_CLK_CTRL_OFFSET   0x2c
 #define SDHCI_ARASAN_VENDOR_REGISTER   0x78
 
 #define VENDOR_ENHANCED_STROBE         BIT(0)
-#define CLK_CTRL_TIMEOUT_SHIFT         16
-#define CLK_CTRL_TIMEOUT_MASK          (0xf << CLK_CTRL_TIMEOUT_SHIFT)
-#define CLK_CTRL_TIMEOUT_MIN_EXP       13
 
 #define PHY_CLK_TOO_SLOW_HZ            400000
 
@@ -163,15 +159,15 @@ static int sdhci_arasan_syscon_write(struct sdhci_host *host,
 
 static unsigned int sdhci_arasan_get_timeout_clock(struct sdhci_host *host)
 {
-       u32 div;
        unsigned long freq;
        struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
 
-       div = readl(host->ioaddr + SDHCI_ARASAN_CLK_CTRL_OFFSET);
-       div = (div & CLK_CTRL_TIMEOUT_MASK) >> CLK_CTRL_TIMEOUT_SHIFT;
+       /* SDHCI timeout clock is in kHz */
+       freq = DIV_ROUND_UP(clk_get_rate(pltfm_host->clk), 1000);
 
-       freq = clk_get_rate(pltfm_host->clk);
-       freq /= 1 << (CLK_CTRL_TIMEOUT_MIN_EXP + div);
+       /* or in MHz */
+       if (host->caps & SDHCI_TIMEOUT_CLK_UNIT)
+               freq = DIV_ROUND_UP(freq, 1000);
 
        return freq;
 }
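
The Arasan change above stops deriving the timeout clock from a
snapshot of the now-removed clock-control register fields and instead
computes it from the live controller clock. SDHCI expresses the
timeout clock in kHz, or in MHz when SDHCI_TIMEOUT_CLK_UNIT is set, so
the conversion is at most two rounded-up divisions. A worked example
with an illustrative rate:

    /* clk_get_rate(pltfm_host->clk) = 200000000 Hz (200 MHz):
     *   kHz: DIV_ROUND_UP(200000000, 1000) = 200000
     *   MHz: DIV_ROUND_UP(200000, 1000)    = 200
     * The second step applies only when SDHCI_TIMEOUT_CLK_UNIT is set
     * in the capabilities register.
     */
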
index 2f9ad21..7fd9642 100644 (file)
@@ -85,11 +85,30 @@ static void sdhci_at91_set_clock(struct sdhci_host *host, unsigned int clock)
        sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
 }
 
+/*
+ * In this specific implementation of the SDHCI controller, the power register
+ * needs to have a valid voltage set even when the power supply is managed by
+ * an external regulator.
+ */
+static void sdhci_at91_set_power(struct sdhci_host *host, unsigned char mode,
+                    unsigned short vdd)
+{
+       if (!IS_ERR(host->mmc->supply.vmmc)) {
+               struct mmc_host *mmc = host->mmc;
+
+               spin_unlock_irq(&host->lock);
+               mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd);
+               spin_lock_irq(&host->lock);
+       }
+       sdhci_set_power_noreg(host, mode, vdd);
+}
+
 static const struct sdhci_ops sdhci_at91_sama5d2_ops = {
        .set_clock              = sdhci_at91_set_clock,
        .set_bus_width          = sdhci_set_bus_width,
        .reset                  = sdhci_reset,
        .set_uhs_signaling      = sdhci_set_uhs_signaling,
+       .set_power              = sdhci_at91_set_power,
 };
 
 static const struct sdhci_pltfm_data soc_data_sama5d2 = {
index 982b3e3..86560d5 100644 (file)
@@ -451,6 +451,8 @@ static void sdhci_intel_set_power(struct sdhci_host *host, unsigned char mode,
        if (mode == MMC_POWER_OFF)
                return;
 
+       spin_unlock_irq(&host->lock);
+
        /*
         * Bus power might not enable after D3 -> D0 transition due to the
         * present state not yet having propagated. Retry for up to 2ms.
@@ -463,6 +465,8 @@ static void sdhci_intel_set_power(struct sdhci_host *host, unsigned char mode,
                reg |= SDHCI_POWER_ON;
                sdhci_writeb(host, reg, SDHCI_POWER_CONTROL);
        }
+
+       spin_lock_irq(&host->lock);
 }
 
 static const struct sdhci_ops sdhci_intel_byt_ops = {
index 6fdd7a7..9c1a099 100644 (file)
@@ -1362,7 +1362,9 @@ void sdhci_enable_clk(struct sdhci_host *host, u16 clk)
                        return;
                }
                timeout--;
-               mdelay(1);
+               spin_unlock_irq(&host->lock);
+               usleep_range(900, 1100);
+               spin_lock_irq(&host->lock);
        }
 
        clk |= SDHCI_CLOCK_CARD_EN;
index d2c386f..1d84335 100644 (file)
@@ -426,6 +426,9 @@ static int ushc_probe(struct usb_interface *intf, const struct usb_device_id *id
        struct ushc_data *ushc;
        int ret;
 
+       if (intf->cur_altsetting->desc.bNumEndpoints < 1)
+               return -ENODEV;
+
        mmc = mmc_alloc_host(sizeof(struct ushc_data), &intf->dev);
        if (mmc == NULL)
                return -ENOMEM;
index 8a280e7..127adbe 100644 (file)
 #define XP_ECC_CNT1_DESC_DED_WIDTH             8
 #define XP_ECC_CNT1_DESC_SEC_INDEX             0
 #define XP_ECC_CNT1_DESC_SEC_WIDTH             8
-#define XP_ECC_IER_DESC_DED_INDEX              0
+#define XP_ECC_IER_DESC_DED_INDEX              5
 #define XP_ECC_IER_DESC_DED_WIDTH              1
-#define XP_ECC_IER_DESC_SEC_INDEX              1
+#define XP_ECC_IER_DESC_SEC_INDEX              4
 #define XP_ECC_IER_DESC_SEC_WIDTH              1
-#define XP_ECC_IER_RX_DED_INDEX                        2
+#define XP_ECC_IER_RX_DED_INDEX                        3
 #define XP_ECC_IER_RX_DED_WIDTH                        1
-#define XP_ECC_IER_RX_SEC_INDEX                        3
+#define XP_ECC_IER_RX_SEC_INDEX                        2
 #define XP_ECC_IER_RX_SEC_WIDTH                        1
-#define XP_ECC_IER_TX_DED_INDEX                        4
+#define XP_ECC_IER_TX_DED_INDEX                        1
 #define XP_ECC_IER_TX_DED_WIDTH                        1
-#define XP_ECC_IER_TX_SEC_INDEX                        5
+#define XP_ECC_IER_TX_SEC_INDEX                        0
 #define XP_ECC_IER_TX_SEC_WIDTH                        1
-#define XP_ECC_ISR_DESC_DED_INDEX              0
+#define XP_ECC_ISR_DESC_DED_INDEX              5
 #define XP_ECC_ISR_DESC_DED_WIDTH              1
-#define XP_ECC_ISR_DESC_SEC_INDEX              1
+#define XP_ECC_ISR_DESC_SEC_INDEX              4
 #define XP_ECC_ISR_DESC_SEC_WIDTH              1
-#define XP_ECC_ISR_RX_DED_INDEX                        2
+#define XP_ECC_ISR_RX_DED_INDEX                        3
 #define XP_ECC_ISR_RX_DED_WIDTH                        1
-#define XP_ECC_ISR_RX_SEC_INDEX                        3
+#define XP_ECC_ISR_RX_SEC_INDEX                        2
 #define XP_ECC_ISR_RX_SEC_WIDTH                        1
-#define XP_ECC_ISR_TX_DED_INDEX                        4
+#define XP_ECC_ISR_TX_DED_INDEX                        1
 #define XP_ECC_ISR_TX_DED_WIDTH                        1
-#define XP_ECC_ISR_TX_SEC_INDEX                        5
+#define XP_ECC_ISR_TX_SEC_INDEX                        0
 #define XP_ECC_ISR_TX_SEC_WIDTH                        1
 #define XP_I2C_MUTEX_BUSY_INDEX                        31
 #define XP_I2C_MUTEX_BUSY_WIDTH                        1
 #define RX_PACKET_ATTRIBUTES_CSUM_DONE_WIDTH   1
 #define RX_PACKET_ATTRIBUTES_VLAN_CTAG_INDEX   1
 #define RX_PACKET_ATTRIBUTES_VLAN_CTAG_WIDTH   1
-#define RX_PACKET_ATTRIBUTES_INCOMPLETE_INDEX  2
-#define RX_PACKET_ATTRIBUTES_INCOMPLETE_WIDTH  1
+#define RX_PACKET_ATTRIBUTES_LAST_INDEX                2
+#define RX_PACKET_ATTRIBUTES_LAST_WIDTH                1
 #define RX_PACKET_ATTRIBUTES_CONTEXT_NEXT_INDEX        3
 #define RX_PACKET_ATTRIBUTES_CONTEXT_NEXT_WIDTH        1
 #define RX_PACKET_ATTRIBUTES_CONTEXT_INDEX     4
 #define RX_PACKET_ATTRIBUTES_RX_TSTAMP_WIDTH   1
 #define RX_PACKET_ATTRIBUTES_RSS_HASH_INDEX    6
 #define RX_PACKET_ATTRIBUTES_RSS_HASH_WIDTH    1
+#define RX_PACKET_ATTRIBUTES_FIRST_INDEX       7
+#define RX_PACKET_ATTRIBUTES_FIRST_WIDTH       1
 
 #define RX_NORMAL_DESC0_OVT_INDEX              0
 #define RX_NORMAL_DESC0_OVT_WIDTH              16
index 937f37a..24a687c 100644 (file)
@@ -1896,10 +1896,15 @@ static int xgbe_dev_read(struct xgbe_channel *channel)
 
        /* Get the header length */
        if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, FD)) {
+               XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
+                              FIRST, 1);
                rdata->rx.hdr_len = XGMAC_GET_BITS_LE(rdesc->desc2,
                                                      RX_NORMAL_DESC2, HL);
                if (rdata->rx.hdr_len)
                        pdata->ext_stats.rx_split_header_packets++;
+       } else {
+               XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
+                              FIRST, 0);
        }
 
        /* Get the RSS hash */
@@ -1922,19 +1927,16 @@ static int xgbe_dev_read(struct xgbe_channel *channel)
                }
        }
 
-       /* Get the packet length */
-       rdata->rx.len = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, PL);
-
-       if (!XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, LD)) {
-               /* Not all the data has been transferred for this packet */
-               XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
-                              INCOMPLETE, 1);
+       /* Not all the data has been transferred for this packet */
+       if (!XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, LD))
                return 0;
-       }
 
        /* This is the last of the data for this packet */
        XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
-                      INCOMPLETE, 0);
+                      LAST, 1);
+
+       /* Get the packet length */
+       rdata->rx.len = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, PL);
 
        /* Set checksum done indicator as appropriate */
        if (netdev->features & NETIF_F_RXCSUM)
index ffea985..a713abd 100644 (file)
@@ -1971,13 +1971,12 @@ static struct sk_buff *xgbe_create_skb(struct xgbe_prv_data *pdata,
 {
        struct sk_buff *skb;
        u8 *packet;
-       unsigned int copy_len;
 
        skb = napi_alloc_skb(napi, rdata->rx.hdr.dma_len);
        if (!skb)
                return NULL;
 
-       /* Start with the header buffer which may contain just the header
+       /* Pull in the header buffer which may contain just the header
         * or the header plus data
         */
        dma_sync_single_range_for_cpu(pdata->dev, rdata->rx.hdr.dma_base,
@@ -1986,30 +1985,49 @@ static struct sk_buff *xgbe_create_skb(struct xgbe_prv_data *pdata,
 
        packet = page_address(rdata->rx.hdr.pa.pages) +
                 rdata->rx.hdr.pa.pages_offset;
-       copy_len = (rdata->rx.hdr_len) ? rdata->rx.hdr_len : len;
-       copy_len = min(rdata->rx.hdr.dma_len, copy_len);
-       skb_copy_to_linear_data(skb, packet, copy_len);
-       skb_put(skb, copy_len);
-
-       len -= copy_len;
-       if (len) {
-               /* Add the remaining data as a frag */
-               dma_sync_single_range_for_cpu(pdata->dev,
-                                             rdata->rx.buf.dma_base,
-                                             rdata->rx.buf.dma_off,
-                                             rdata->rx.buf.dma_len,
-                                             DMA_FROM_DEVICE);
-
-               skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
-                               rdata->rx.buf.pa.pages,
-                               rdata->rx.buf.pa.pages_offset,
-                               len, rdata->rx.buf.dma_len);
-               rdata->rx.buf.pa.pages = NULL;
-       }
+       skb_copy_to_linear_data(skb, packet, len);
+       skb_put(skb, len);
 
        return skb;
 }
 
+static unsigned int xgbe_rx_buf1_len(struct xgbe_ring_data *rdata,
+                                    struct xgbe_packet_data *packet)
+{
+       /* Always zero if not the first descriptor */
+       if (!XGMAC_GET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, FIRST))
+               return 0;
+
+       /* First descriptor with split header, return header length */
+       if (rdata->rx.hdr_len)
+               return rdata->rx.hdr_len;
+
+       /* First descriptor but not the last descriptor and no split header,
+        * so the full buffer was used
+        */
+       if (!XGMAC_GET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, LAST))
+               return rdata->rx.hdr.dma_len;
+
+       /* First descriptor and last descriptor and no split header, so
+        * calculate how much of the buffer was used
+        */
+       return min_t(unsigned int, rdata->rx.hdr.dma_len, rdata->rx.len);
+}
+
+static unsigned int xgbe_rx_buf2_len(struct xgbe_ring_data *rdata,
+                                    struct xgbe_packet_data *packet,
+                                    unsigned int len)
+{
+       /* Always the full buffer if not the last descriptor */
+       if (!XGMAC_GET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, LAST))
+               return rdata->rx.buf.dma_len;
+
+       /* Last descriptor so calculate how much of the buffer was used
+        * for the last bit of data
+        */
+       return rdata->rx.len - len;
+}
+
 static int xgbe_tx_poll(struct xgbe_channel *channel)
 {
        struct xgbe_prv_data *pdata = channel->pdata;
@@ -2092,8 +2110,8 @@ static int xgbe_rx_poll(struct xgbe_channel *channel, int budget)
        struct napi_struct *napi;
        struct sk_buff *skb;
        struct skb_shared_hwtstamps *hwtstamps;
-       unsigned int incomplete, error, context_next, context;
-       unsigned int len, rdesc_len, max_len;
+       unsigned int last, error, context_next, context;
+       unsigned int len, buf1_len, buf2_len, max_len;
        unsigned int received = 0;
        int packet_count = 0;
 
@@ -2103,7 +2121,7 @@ static int xgbe_rx_poll(struct xgbe_channel *channel, int budget)
        if (!ring)
                return 0;
 
-       incomplete = 0;
+       last = 0;
        context_next = 0;
 
        napi = (pdata->per_channel_irq) ? &channel->napi : &pdata->napi;
@@ -2137,9 +2155,8 @@ read_again:
                received++;
                ring->cur++;
 
-               incomplete = XGMAC_GET_BITS(packet->attributes,
-                                           RX_PACKET_ATTRIBUTES,
-                                           INCOMPLETE);
+               last = XGMAC_GET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
+                                     LAST);
                context_next = XGMAC_GET_BITS(packet->attributes,
                                              RX_PACKET_ATTRIBUTES,
                                              CONTEXT_NEXT);
@@ -2148,7 +2165,7 @@ read_again:
                                         CONTEXT);
 
                /* Earlier error, just drain the remaining data */
-               if ((incomplete || context_next) && error)
+               if ((!last || context_next) && error)
                        goto read_again;
 
                if (error || packet->errors) {
@@ -2160,16 +2177,22 @@ read_again:
                }
 
                if (!context) {
-                       /* Length is cumulative, get this descriptor's length */
-                       rdesc_len = rdata->rx.len - len;
-                       len += rdesc_len;
+                       /* Get the data length in the descriptor buffers */
+                       buf1_len = xgbe_rx_buf1_len(rdata, packet);
+                       len += buf1_len;
+                       buf2_len = xgbe_rx_buf2_len(rdata, packet, len);
+                       len += buf2_len;
 
-                       if (rdesc_len && !skb) {
+                       if (!skb) {
                                skb = xgbe_create_skb(pdata, napi, rdata,
-                                                     rdesc_len);
-                               if (!skb)
+                                                     buf1_len);
+                               if (!skb) {
                                        error = 1;
-                       } else if (rdesc_len) {
+                                       goto skip_data;
+                               }
+                       }
+
+                       if (buf2_len) {
                                dma_sync_single_range_for_cpu(pdata->dev,
                                                        rdata->rx.buf.dma_base,
                                                        rdata->rx.buf.dma_off,
@@ -2179,13 +2202,14 @@ read_again:
                                skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
                                                rdata->rx.buf.pa.pages,
                                                rdata->rx.buf.pa.pages_offset,
-                                               rdesc_len,
+                                               buf2_len,
                                                rdata->rx.buf.dma_len);
                                rdata->rx.buf.pa.pages = NULL;
                        }
                }
 
-               if (incomplete || context_next)
+skip_data:
+               if (!last || context_next)
                        goto read_again;
 
                if (!skb)
@@ -2243,7 +2267,7 @@ next_packet:
        }
 
        /* Check if we need to save state before leaving */
-       if (received && (incomplete || context_next)) {
+       if (received && (!last || context_next)) {
                rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
                rdata->state_saved = 1;
                rdata->state.skb = skb;
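
The xgbe RX rework above replaces the cumulative-length INCOMPLETE
scheme with explicit FIRST/LAST descriptor markers plus per-descriptor
buffer lengths from xgbe_rx_buf1_len() and xgbe_rx_buf2_len(). A
worked example with illustrative sizes (256-byte header buffer,
2048-byte data buffer, a 3000-byte frame, no split header):

    /* desc 0 (FIRST, !LAST): buf1_len = 256   (full header buffer)
     *                        buf2_len = 2048  (full data buffer)
     *                        len so far = 2304
     * desc 1 (!FIRST, LAST): buf1_len = 0
     *                        buf2_len = rx.len - len = 3000 - 2304 = 696
     */
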
index dad6362..d05fbfd 100644 (file)
@@ -98,6 +98,7 @@ static int aq_ndev_change_mtu(struct net_device *ndev, int new_mtu)
 
        if (err < 0)
                goto err_exit;
+       ndev->mtu = new_mtu;
 
        if (netif_running(ndev)) {
                aq_ndev_close(ndev);
index 1093ea1..0592a03 100644 (file)
@@ -137,6 +137,7 @@ static struct aq_hw_caps_s hw_atl_a0_hw_caps_ = {
        .tx_rings = HW_ATL_A0_TX_RINGS,
        .rx_rings = HW_ATL_A0_RX_RINGS,
        .hw_features = NETIF_F_HW_CSUM |
+                       NETIF_F_RXCSUM |
                        NETIF_F_RXHASH |
                        NETIF_F_SG |
                        NETIF_F_TSO,
index 8bdee3d..f3957e9 100644 (file)
@@ -188,6 +188,7 @@ static struct aq_hw_caps_s hw_atl_b0_hw_caps_ = {
        .tx_rings = HW_ATL_B0_TX_RINGS,
        .rx_rings = HW_ATL_B0_RX_RINGS,
        .hw_features = NETIF_F_HW_CSUM |
+                       NETIF_F_RXCSUM |
                        NETIF_F_RXHASH |
                        NETIF_F_SG |
                        NETIF_F_TSO |
index 69015fa..365895e 100644 (file)
@@ -3481,7 +3481,8 @@ static int bcmgenet_suspend(struct device *d)
 
        bcmgenet_netif_stop(dev);
 
-       phy_suspend(priv->phydev);
+       if (!device_may_wakeup(d))
+               phy_suspend(priv->phydev);
 
        netif_device_detach(dev);
 
@@ -3578,7 +3579,8 @@ static int bcmgenet_resume(struct device *d)
 
        netif_device_attach(dev);
 
-       phy_resume(priv->phydev);
+       if (!device_may_wakeup(d))
+               phy_resume(priv->phydev);
 
        if (priv->eee.eee_enabled)
                bcmgenet_eee_enable_set(dev, true);
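
Both bcmgenet hunks gate PHY power management on device_may_wakeup(): when Wake-on-LAN is armed, the PHY must stay powered across suspend so it can still see the wake packet. A sketch of the pattern, assuming a private struct that holds the attached PHY:

struct example_priv {
	struct phy_device *phydev;	/* attached PHY, assumed initialized */
};

static int example_suspend(struct device *d)
{
	struct net_device *ndev = dev_get_drvdata(d);
	struct example_priv *priv = netdev_priv(ndev);

	if (!device_may_wakeup(d))	/* no WoL: safe to power the PHY down */
		phy_suspend(priv->phydev);
	return 0;
}
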
index e876076..2f92819 100644 (file)
@@ -220,20 +220,6 @@ void bcmgenet_phy_power_set(struct net_device *dev, bool enable)
        udelay(60);
 }
 
-static void bcmgenet_internal_phy_setup(struct net_device *dev)
-{
-       struct bcmgenet_priv *priv = netdev_priv(dev);
-       u32 reg;
-
-       /* Power up PHY */
-       bcmgenet_phy_power_set(dev, true);
-       /* enable APD */
-       reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT);
-       reg |= EXT_PWR_DN_EN_LD;
-       bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
-       bcmgenet_mii_reset(dev);
-}
-
 static void bcmgenet_moca_phy_setup(struct bcmgenet_priv *priv)
 {
        u32 reg;
@@ -281,7 +267,6 @@ int bcmgenet_mii_config(struct net_device *dev)
 
                if (priv->internal_phy) {
                        phy_name = "internal PHY";
-                       bcmgenet_internal_phy_setup(dev);
                } else if (priv->phy_interface == PHY_INTERFACE_MODE_MOCA) {
                        phy_name = "MoCA";
                        bcmgenet_moca_phy_setup(priv);
index 05c1c1d..cebfe3b 100644 (file)
@@ -325,7 +325,7 @@ bnad_debugfs_write_regrd(struct file *file, const char __user *buf,
                return PTR_ERR(kern_buf);
 
        rc = sscanf(kern_buf, "%x:%x", &addr, &len);
-       if (rc < 2) {
+       if (rc < 2 || len > UINT_MAX >> 2) {
                netdev_warn(bnad->netdev, "failed to read user buffer\n");
                kfree(kern_buf);
                return -EINVAL;
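
The bnad guard rejects user-supplied lengths that would overflow when the debugfs handler later scales them to bytes (len << 2). A sketch of the check, assuming len counts 32-bit words:

static int parse_regrd(const char *kern_buf, u32 *addr, u32 *len)
{
	if (sscanf(kern_buf, "%x:%x", addr, len) < 2)
		return -EINVAL;		/* need both address and length */
	if (*len > UINT_MAX >> 2)	/* len << 2 would wrap a u32 */
		return -EINVAL;
	return 0;
}
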
index 5f11b4d..b23d654 100644 (file)
@@ -1257,6 +1257,7 @@ static void release_sub_crqs(struct ibmvnic_adapter *adapter)
                                release_sub_crq_queue(adapter,
                                                      adapter->tx_scrq[i]);
                        }
+               kfree(adapter->tx_scrq);
                adapter->tx_scrq = NULL;
        }
 
@@ -1269,6 +1270,7 @@ static void release_sub_crqs(struct ibmvnic_adapter *adapter)
                                release_sub_crq_queue(adapter,
                                                      adapter->rx_scrq[i]);
                        }
+               kfree(adapter->rx_scrq);
                adapter->rx_scrq = NULL;
        }
 }
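
The ibmvnic fix frees each sub-CRQ and, crucially, also the pointer array that held them, which release_sub_crqs() previously leaked on every pass. A generic sketch of the release-elements-then-container pattern (struct thing and release_one() are placeholders):

static void release_all(struct thing **arr, int n)
{
	int i;

	if (!arr)
		return;
	for (i = 0; i < n; i++)
		release_one(arr[i]);	/* free each element */
	kfree(arr);			/* then the array itself: the leaked part */
}
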
index e8c1051..0e0fa70 100644 (file)
@@ -2305,6 +2305,17 @@ static int sync_toggles(struct mlx4_dev *dev)
                rd_toggle = swab32(readl(&priv->mfunc.comm->slave_read));
                if (wr_toggle == 0xffffffff || rd_toggle == 0xffffffff) {
                        /* PCI might be offline */
+
+                       /* If device removal has been requested,
+                        * do not continue retrying.
+                        */
+                       if (dev->persist->interface_state &
+                           MLX4_INTERFACE_STATE_NOWAIT) {
+                               mlx4_warn(dev,
+                                         "communication channel is offline\n");
+                               return -EIO;
+                       }
+
                        msleep(100);
                        wr_toggle = swab32(readl(&priv->mfunc.comm->
                                           slave_write));
index 21377c3..7032054 100644 (file)
@@ -1940,6 +1940,14 @@ static int mlx4_comm_check_offline(struct mlx4_dev *dev)
                               (u32)(1 << COMM_CHAN_OFFLINE_OFFSET));
                if (!offline_bit)
                        return 0;
+
+               /* If device removal has been requested,
+                * do not continue retrying.
+                */
+               if (dev->persist->interface_state &
+                   MLX4_INTERFACE_STATE_NOWAIT)
+                       break;
+
                /* There are cases as part of AER/Reset flow that PF needs
                 * around 100 msec to load. We therefore sleep for 100 msec
                 * to allow other tasks to make use of that CPU during this
@@ -3955,6 +3963,9 @@ static void mlx4_remove_one(struct pci_dev *pdev)
        struct devlink *devlink = priv_to_devlink(priv);
        int active_vfs = 0;
 
+       if (mlx4_is_slave(dev))
+               persist->interface_state |= MLX4_INTERFACE_STATE_NOWAIT;
+
        mutex_lock(&persist->interface_state_mutex);
        persist->interface_state |= MLX4_INTERFACE_STATE_DELETION;
        mutex_unlock(&persist->interface_state_mutex);
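
The three mlx4 hunks cooperate: the remove path raises MLX4_INTERFACE_STATE_NOWAIT for slave functions, and the two polling loops check it so a hot-removed VF stops retrying instead of hanging unbind. A removal-aware retry loop, sketched with illustrative names:

#define EXAMPLE_STATE_NOWAIT	BIT(0)		/* set by the remove path */

static int wait_for_channel(struct example_dev *dev)
{
	while (channel_offline(dev)) {		/* hypothetical status probe */
		if (dev->state & EXAMPLE_STATE_NOWAIT)
			return -EIO;		/* removal requested: give up */
		msleep(100);			/* otherwise keep polling */
	}
	return 0;
}
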
index caa837e..a380353 100644 (file)
@@ -361,6 +361,8 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
        case MLX5_CMD_OP_QUERY_VPORT_COUNTER:
        case MLX5_CMD_OP_ALLOC_Q_COUNTER:
        case MLX5_CMD_OP_QUERY_Q_COUNTER:
+       case MLX5_CMD_OP_SET_RATE_LIMIT:
+       case MLX5_CMD_OP_QUERY_RATE_LIMIT:
        case MLX5_CMD_OP_ALLOC_PD:
        case MLX5_CMD_OP_ALLOC_UAR:
        case MLX5_CMD_OP_CONFIG_INT_MODERATION:
@@ -497,6 +499,8 @@ const char *mlx5_command_str(int command)
        MLX5_COMMAND_STR_CASE(ALLOC_Q_COUNTER);
        MLX5_COMMAND_STR_CASE(DEALLOC_Q_COUNTER);
        MLX5_COMMAND_STR_CASE(QUERY_Q_COUNTER);
+       MLX5_COMMAND_STR_CASE(SET_RATE_LIMIT);
+       MLX5_COMMAND_STR_CASE(QUERY_RATE_LIMIT);
        MLX5_COMMAND_STR_CASE(ALLOC_PD);
        MLX5_COMMAND_STR_CASE(DEALLOC_PD);
        MLX5_COMMAND_STR_CASE(ALLOC_UAR);
index f6a6ded..dc52053 100644 (file)
@@ -928,10 +928,6 @@ void mlx5e_destroy_netdev(struct mlx5_core_dev *mdev, struct mlx5e_priv *priv);
 int mlx5e_attach_netdev(struct mlx5_core_dev *mdev, struct net_device *netdev);
 void mlx5e_detach_netdev(struct mlx5_core_dev *mdev, struct net_device *netdev);
 u32 mlx5e_choose_lro_timeout(struct mlx5_core_dev *mdev, u32 wanted_timeout);
-void mlx5e_add_vxlan_port(struct net_device *netdev,
-                         struct udp_tunnel_info *ti);
-void mlx5e_del_vxlan_port(struct net_device *netdev,
-                         struct udp_tunnel_info *ti);
 
 int mlx5e_get_offload_stats(int attr_id, const struct net_device *dev,
                            void *sp);
index 8ef64c4..66c1337 100644 (file)
@@ -3100,8 +3100,8 @@ static int mlx5e_get_vf_stats(struct net_device *dev,
                                            vf_stats);
 }
 
-void mlx5e_add_vxlan_port(struct net_device *netdev,
-                         struct udp_tunnel_info *ti)
+static void mlx5e_add_vxlan_port(struct net_device *netdev,
+                                struct udp_tunnel_info *ti)
 {
        struct mlx5e_priv *priv = netdev_priv(netdev);
 
@@ -3114,8 +3114,8 @@ void mlx5e_add_vxlan_port(struct net_device *netdev,
        mlx5e_vxlan_queue_work(priv, ti->sa_family, be16_to_cpu(ti->port), 1);
 }
 
-void mlx5e_del_vxlan_port(struct net_device *netdev,
-                         struct udp_tunnel_info *ti)
+static void mlx5e_del_vxlan_port(struct net_device *netdev,
+                                struct udp_tunnel_info *ti)
 {
        struct mlx5e_priv *priv = netdev_priv(netdev);
 
index 2c86457..f621373 100644 (file)
@@ -393,8 +393,6 @@ static const struct net_device_ops mlx5e_netdev_ops_rep = {
        .ndo_get_phys_port_name  = mlx5e_rep_get_phys_port_name,
        .ndo_setup_tc            = mlx5e_rep_ndo_setup_tc,
        .ndo_get_stats64         = mlx5e_rep_get_stats,
-       .ndo_udp_tunnel_add      = mlx5e_add_vxlan_port,
-       .ndo_udp_tunnel_del      = mlx5e_del_vxlan_port,
        .ndo_has_offload_stats   = mlx5e_has_offload_stats,
        .ndo_get_offload_stats   = mlx5e_get_offload_stats,
 };
index 3d37168..bafcb34 100644 (file)
@@ -601,6 +601,10 @@ static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe,
        if (lro_num_seg > 1) {
                mlx5e_lro_update_hdr(skb, cqe, cqe_bcnt);
                skb_shinfo(skb)->gso_size = DIV_ROUND_UP(cqe_bcnt, lro_num_seg);
+               /* Subtract one since we already counted this as one
+                * "regular" packet in mlx5e_complete_rx_cqe()
+                */
+               rq->stats.packets += lro_num_seg - 1;
                rq->stats.lro_packets++;
                rq->stats.lro_bytes += cqe_bcnt;
        }
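
The accounting here is deliberate: mlx5e_complete_rx_cqe() already counted the LRO aggregate as one packet, so only the remaining lro_num_seg - 1 wire packets are added. For example, a session that coalesced 16 segments contributes 1 (completion path) + 15 (here) = 16 to rq->stats.packets, matching what the wire actually carried.
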
index 79481f4..fade723 100644 (file)
@@ -133,6 +133,23 @@ err_create_ft:
        return rule;
 }
 
+static void mlx5e_tc_del_nic_flow(struct mlx5e_priv *priv,
+                                 struct mlx5e_tc_flow *flow)
+{
+       struct mlx5_fc *counter = NULL;
+
+       if (!IS_ERR(flow->rule)) {
+               counter = mlx5_flow_rule_counter(flow->rule);
+               mlx5_del_flow_rules(flow->rule);
+               mlx5_fc_destroy(priv->mdev, counter);
+       }
+
+       if (!mlx5e_tc_num_filters(priv) && (priv->fs.tc.t)) {
+               mlx5_destroy_flow_table(priv->fs.tc.t);
+               priv->fs.tc.t = NULL;
+       }
+}
+
 static struct mlx5_flow_handle *
 mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
                      struct mlx5_flow_spec *spec,
@@ -149,7 +166,24 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
 }
 
 static void mlx5e_detach_encap(struct mlx5e_priv *priv,
-                              struct mlx5e_tc_flow *flow) {
+                              struct mlx5e_tc_flow *flow);
+
+static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv,
+                                 struct mlx5e_tc_flow *flow)
+{
+       struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+
+       mlx5_eswitch_del_offloaded_rule(esw, flow->rule, flow->attr);
+
+       mlx5_eswitch_del_vlan_action(esw, flow->attr);
+
+       if (flow->attr->action & MLX5_FLOW_CONTEXT_ACTION_ENCAP)
+               mlx5e_detach_encap(priv, flow);
+}
+
+static void mlx5e_detach_encap(struct mlx5e_priv *priv,
+                              struct mlx5e_tc_flow *flow)
+{
        struct list_head *next = flow->encap.next;
 
        list_del(&flow->encap);
@@ -173,25 +207,10 @@ static void mlx5e_detach_encap(struct mlx5e_priv *priv,
 static void mlx5e_tc_del_flow(struct mlx5e_priv *priv,
                              struct mlx5e_tc_flow *flow)
 {
-       struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
-       struct mlx5_fc *counter = NULL;
-
-       if (!IS_ERR(flow->rule)) {
-               counter = mlx5_flow_rule_counter(flow->rule);
-               mlx5_del_flow_rules(flow->rule);
-               mlx5_fc_destroy(priv->mdev, counter);
-       }
-
-       if (flow->flags & MLX5E_TC_FLOW_ESWITCH) {
-               mlx5_eswitch_del_vlan_action(esw, flow->attr);
-               if (flow->attr->action & MLX5_FLOW_CONTEXT_ACTION_ENCAP)
-                       mlx5e_detach_encap(priv, flow);
-       }
-
-       if (!mlx5e_tc_num_filters(priv) && (priv->fs.tc.t)) {
-               mlx5_destroy_flow_table(priv->fs.tc.t);
-               priv->fs.tc.t = NULL;
-       }
+       if (flow->flags & MLX5E_TC_FLOW_ESWITCH)
+               mlx5e_tc_del_fdb_flow(priv, flow);
+       else
+               mlx5e_tc_del_nic_flow(priv, flow);
 }
 
 static void parse_vxlan_attr(struct mlx5_flow_spec *spec,
@@ -248,12 +267,15 @@ static int parse_tunnel_attr(struct mlx5e_priv *priv,
                        skb_flow_dissector_target(f->dissector,
                                                  FLOW_DISSECTOR_KEY_ENC_PORTS,
                                                  f->mask);
+               struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+               struct net_device *up_dev = mlx5_eswitch_get_uplink_netdev(esw);
+               struct mlx5e_priv *up_priv = netdev_priv(up_dev);
 
                /* Full udp dst port must be given */
                if (memchr_inv(&mask->dst, 0xff, sizeof(mask->dst)))
                        goto vxlan_match_offload_err;
 
-               if (mlx5e_vxlan_lookup_port(priv, be16_to_cpu(key->dst)) &&
+               if (mlx5e_vxlan_lookup_port(up_priv, be16_to_cpu(key->dst)) &&
                    MLX5_CAP_ESW(priv->mdev, vxlan_encap_decap))
                        parse_vxlan_attr(spec, f);
                else {
@@ -976,6 +998,8 @@ static int mlx5e_attach_encap(struct mlx5e_priv *priv,
                              struct mlx5_esw_flow_attr *attr)
 {
        struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+       struct net_device *up_dev = mlx5_eswitch_get_uplink_netdev(esw);
+       struct mlx5e_priv *up_priv = netdev_priv(up_dev);
        unsigned short family = ip_tunnel_info_af(tun_info);
        struct ip_tunnel_key *key = &tun_info->key;
        struct mlx5_encap_entry *e;
@@ -996,7 +1020,7 @@ vxlan_encap_offload_err:
                return -EOPNOTSUPP;
        }
 
-       if (mlx5e_vxlan_lookup_port(priv, be16_to_cpu(key->tp_dst)) &&
+       if (mlx5e_vxlan_lookup_port(up_priv, be16_to_cpu(key->tp_dst)) &&
            MLX5_CAP_ESW(priv->mdev, vxlan_encap_decap)) {
                tunnel_type = MLX5_HEADER_TYPE_VXLAN;
        } else {
@@ -1112,14 +1136,16 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
                }
 
                if (is_tcf_vlan(a)) {
-                       if (tcf_vlan_action(a) == VLAN_F_POP) {
+                       if (tcf_vlan_action(a) == TCA_VLAN_ACT_POP) {
                                attr->action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_POP;
-                       } else if (tcf_vlan_action(a) == VLAN_F_PUSH) {
+                       } else if (tcf_vlan_action(a) == TCA_VLAN_ACT_PUSH) {
                                if (tcf_vlan_push_proto(a) != htons(ETH_P_8021Q))
                                        return -EOPNOTSUPP;
 
                                attr->action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH;
                                attr->vlan = tcf_vlan_push_vid(a);
+                       } else { /* action is TCA_VLAN_ACT_MODIFY */
+                               return -EOPNOTSUPP;
                        }
                        continue;
                }
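
The vlan-action hunk swaps the nonexistent VLAN_F_* constants for the uapi TCA_VLAN_ACT_* values (linux/tc_act/tc_vlan.h) and explicitly rejects TCA_VLAN_ACT_MODIFY rather than falling through. The same dispatch written as a switch, as a sketch rather than the driver's literal code:

	switch (tcf_vlan_action(a)) {
	case TCA_VLAN_ACT_POP:
		attr->action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_POP;
		break;
	case TCA_VLAN_ACT_PUSH:
		if (tcf_vlan_push_proto(a) != htons(ETH_P_8021Q))
			return -EOPNOTSUPP;	/* only 802.1Q push offloads */
		attr->action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH;
		attr->vlan = tcf_vlan_push_vid(a);
		break;
	default:			/* TCA_VLAN_ACT_MODIFY, future actions */
		return -EOPNOTSUPP;
	}
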
index f193128..57f5e2d 100644 (file)
@@ -274,15 +274,18 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
                        sq->stats.tso_bytes += skb->len - ihs;
                }
 
+               sq->stats.packets += skb_shinfo(skb)->gso_segs;
                num_bytes = skb->len + (skb_shinfo(skb)->gso_segs - 1) * ihs;
        } else {
                bf = sq->bf_budget &&
                     !skb->xmit_more &&
                     !skb_shinfo(skb)->nr_frags;
                ihs = mlx5e_get_inline_hdr_size(sq, skb, bf);
+               sq->stats.packets++;
                num_bytes = max_t(unsigned int, skb->len, ETH_ZLEN);
        }
 
+       sq->stats.bytes += num_bytes;
        wi->num_bytes = num_bytes;
 
        ds_cnt = sizeof(*wqe) / MLX5_SEND_WQE_DS;
@@ -381,8 +384,6 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
        if (bf)
                sq->bf_budget--;
 
-       sq->stats.packets++;
-       sq->stats.bytes += num_bytes;
        return NETDEV_TX_OK;
 
 dma_unmap_wqe_err:
index 5b78883..ad329b1 100644 (file)
@@ -209,6 +209,7 @@ struct mlx5_esw_offload {
        struct mlx5_eswitch_rep *vport_reps;
        DECLARE_HASHTABLE(encap_tbl, 8);
        u8 inline_mode;
+       u64 num_flows;
 };
 
 struct mlx5_eswitch {
@@ -271,6 +272,11 @@ struct mlx5_flow_handle *
 mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
                                struct mlx5_flow_spec *spec,
                                struct mlx5_esw_flow_attr *attr);
+void
+mlx5_eswitch_del_offloaded_rule(struct mlx5_eswitch *esw,
+                               struct mlx5_flow_handle *rule,
+                               struct mlx5_esw_flow_attr *attr);
+
 struct mlx5_flow_handle *
 mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, int vport, u32 tirn);
 
index 4f5b0d4..307ec6c 100644 (file)
@@ -93,10 +93,27 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
                                   spec, &flow_act, dest, i);
        if (IS_ERR(rule))
                mlx5_fc_destroy(esw->dev, counter);
+       else
+               esw->offloads.num_flows++;
 
        return rule;
 }
 
+void
+mlx5_eswitch_del_offloaded_rule(struct mlx5_eswitch *esw,
+                               struct mlx5_flow_handle *rule,
+                               struct mlx5_esw_flow_attr *attr)
+{
+       struct mlx5_fc *counter = NULL;
+
+       if (!IS_ERR(rule)) {
+               counter = mlx5_flow_rule_counter(rule);
+               mlx5_del_flow_rules(rule);
+               mlx5_fc_destroy(esw->dev, counter);
+               esw->offloads.num_flows--;
+       }
+}
+
 static int esw_set_global_vlan_pop(struct mlx5_eswitch *esw, u8 val)
 {
        struct mlx5_eswitch_rep *rep;
@@ -908,6 +925,11 @@ int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode)
            MLX5_CAP_INLINE_MODE_VPORT_CONTEXT)
                return -EOPNOTSUPP;
 
+       if (esw->offloads.num_flows > 0) {
+               esw_warn(dev, "Can't set inline mode when flows are configured\n");
+               return -EOPNOTSUPP;
+       }
+
        err = esw_inline_mode_from_devlink(mode, &mlx5_mode);
        if (err)
                goto out;
index e2bd600..60154a1 100644 (file)
@@ -87,7 +87,7 @@ static struct mlx5_profile profile[] = {
        [2] = {
                .mask           = MLX5_PROF_MASK_QP_SIZE |
                                  MLX5_PROF_MASK_MR_CACHE,
-               .log_max_qp     = 17,
+               .log_max_qp     = 18,
                .mr_cache[0]    = {
                        .size   = 500,
                        .limit  = 250
index 334bcc6..50d2826 100644 (file)
@@ -2404,7 +2404,7 @@ static void efx_udp_tunnel_del(struct net_device *dev, struct udp_tunnel_info *t
        tnl.type = (u16)efx_tunnel_type;
        tnl.port = ti->port;
 
-       if (efx->type->udp_tnl_add_port)
+       if (efx->type->udp_tnl_del_port)
                (void)efx->type->udp_tnl_del_port(efx, tnl);
 }
 
index 296c8ef..9e63195 100644 (file)
@@ -74,15 +74,21 @@ config TI_CPSW
          will be called cpsw.
 
 config TI_CPTS
-       tristate "TI Common Platform Time Sync (CPTS) Support"
+       bool "TI Common Platform Time Sync (CPTS) Support"
        depends on TI_CPSW || TI_KEYSTONE_NETCP
-       imply PTP_1588_CLOCK
+       depends on PTP_1588_CLOCK
        ---help---
          This driver supports the Common Platform Time Sync unit of
          the CPSW Ethernet Switch and Keystone 2 1g/10g Switch Subsystem.
          The unit can time stamp PTP UDP/IPv4 and Layer 2 packets, and the
          driver offers a PTP Hardware Clock.
 
+config TI_CPTS_MOD
+       tristate
+       depends on TI_CPTS
+       default y if TI_CPSW=y || TI_KEYSTONE_NETCP=y
+       default m
+
 config TI_KEYSTONE_NETCP
        tristate "TI Keystone NETCP Core Support"
        select TI_CPSW_ALE
index 1e7c10b..10e6b0c 100644 (file)
@@ -12,7 +12,7 @@ obj-$(CONFIG_TI_DAVINCI_MDIO) += davinci_mdio.o
 obj-$(CONFIG_TI_DAVINCI_CPDMA) += davinci_cpdma.o
 obj-$(CONFIG_TI_CPSW_PHY_SEL) += cpsw-phy-sel.o
 obj-$(CONFIG_TI_CPSW_ALE) += cpsw_ale.o
-obj-$(CONFIG_TI_CPTS) += cpts.o
+obj-$(CONFIG_TI_CPTS_MOD) += cpts.o
 obj-$(CONFIG_TI_CPSW) += ti_cpsw.o
 ti_cpsw-y := cpsw.o
 
index b75d9cd..ae48c80 100644 (file)
@@ -45,6 +45,8 @@ MODULE_DESCRIPTION("FUJITSU Extended Socket Network Device Driver");
 MODULE_LICENSE("GPL");
 MODULE_VERSION(DRV_VERSION);
 
+#define ACPI_MOTHERBOARD_RESOURCE_HID "PNP0C02"
+
 static int fjes_request_irq(struct fjes_adapter *);
 static void fjes_free_irq(struct fjes_adapter *);
 
@@ -78,7 +80,7 @@ static void fjes_rx_irq(struct fjes_adapter *, int);
 static int fjes_poll(struct napi_struct *, int);
 
 static const struct acpi_device_id fjes_acpi_ids[] = {
-       {"PNP0C02", 0},
+       {ACPI_MOTHERBOARD_RESOURCE_HID, 0},
        {"", 0},
 };
 MODULE_DEVICE_TABLE(acpi, fjes_acpi_ids);
@@ -115,18 +117,17 @@ static struct resource fjes_resource[] = {
        },
 };
 
-static int fjes_acpi_add(struct acpi_device *device)
+static bool is_extended_socket_device(struct acpi_device *device)
 {
        struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL};
        char str_buf[sizeof(FJES_ACPI_SYMBOL) + 1];
-       struct platform_device *plat_dev;
        union acpi_object *str;
        acpi_status status;
        int result;
 
        status = acpi_evaluate_object(device->handle, "_STR", NULL, &buffer);
        if (ACPI_FAILURE(status))
-               return -ENODEV;
+               return false;
 
        str = buffer.pointer;
        result = utf16s_to_utf8s((wchar_t *)str->string.pointer,
@@ -136,10 +137,42 @@ static int fjes_acpi_add(struct acpi_device *device)
 
        if (strncmp(FJES_ACPI_SYMBOL, str_buf, strlen(FJES_ACPI_SYMBOL)) != 0) {
                kfree(buffer.pointer);
-               return -ENODEV;
+               return false;
        }
        kfree(buffer.pointer);
 
+       return true;
+}
+
+static int acpi_check_extended_socket_status(struct acpi_device *device)
+{
+       unsigned long long sta;
+       acpi_status status;
+
+       status = acpi_evaluate_integer(device->handle, "_STA", NULL, &sta);
+       if (ACPI_FAILURE(status))
+               return -ENODEV;
+
+       if (!((sta & ACPI_STA_DEVICE_PRESENT) &&
+             (sta & ACPI_STA_DEVICE_ENABLED) &&
+             (sta & ACPI_STA_DEVICE_UI) &&
+             (sta & ACPI_STA_DEVICE_FUNCTIONING)))
+               return -ENODEV;
+
+       return 0;
+}
+
+static int fjes_acpi_add(struct acpi_device *device)
+{
+       struct platform_device *plat_dev;
+       acpi_status status;
+
+       if (!is_extended_socket_device(device))
+               return -ENODEV;
+
+       if (acpi_check_extended_socket_status(device))
+               return -ENODEV;
+
        status = acpi_walk_resources(device->handle, METHOD_NAME__CRS,
                                     fjes_get_acpi_resource, fjes_resource);
        if (ACPI_FAILURE(status))
@@ -1316,7 +1349,7 @@ static void fjes_netdev_setup(struct net_device *netdev)
        netdev->min_mtu = fjes_support_mtu[0];
        netdev->max_mtu = fjes_support_mtu[3];
        netdev->flags |= IFF_BROADCAST;
-       netdev->features |= NETIF_F_HW_CSUM | NETIF_F_HW_VLAN_CTAG_FILTER;
+       netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
 }
 
 static void fjes_irq_watch_task(struct work_struct *work)
@@ -1473,11 +1506,44 @@ static void fjes_watch_unshare_task(struct work_struct *work)
        }
 }
 
+static acpi_status
+acpi_find_extended_socket_device(acpi_handle obj_handle, u32 level,
+                                void *context, void **return_value)
+{
+       struct acpi_device *device;
+       bool *found = context;
+       int result;
+
+       result = acpi_bus_get_device(obj_handle, &device);
+       if (result)
+               return AE_OK;
+
+       if (strcmp(acpi_device_hid(device), ACPI_MOTHERBOARD_RESOURCE_HID))
+               return AE_OK;
+
+       if (!is_extended_socket_device(device))
+               return AE_OK;
+
+       if (acpi_check_extended_socket_status(device))
+               return AE_OK;
+
+       *found = true;
+       return AE_CTRL_TERMINATE;
+}
+
 /* fjes_init_module - Driver Registration Routine */
 static int __init fjes_init_module(void)
 {
+       bool found = false;
        int result;
 
+       acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT, ACPI_UINT32_MAX,
+                           acpi_find_extended_socket_device, NULL, &found,
+                           NULL);
+
+       if (!found)
+               return -ENODEV;
+
        pr_info("%s - version %s - %s\n",
                fjes_driver_string, fjes_driver_version, fjes_copyright);
 
index 4c1d8cc..8dd0b87 100644 (file)
@@ -1231,8 +1231,11 @@ void netvsc_channel_cb(void *context)
                return;
 
        net_device = net_device_to_netvsc_device(ndev);
-       if (unlikely(net_device->destroy) &&
-           netvsc_channel_idle(net_device, q_idx))
+       if (unlikely(!net_device))
+               return;
+
+       if (unlikely(net_device->destroy &&
+                    netvsc_channel_idle(net_device, q_idx)))
                return;
 
        /* commit_rd_index() -> hv_signal_on_read() needs this. */
index 34cc3c5..cc88cd7 100644 (file)
@@ -1931,6 +1931,8 @@ static int set_offload(struct tun_struct *tun, unsigned long arg)
                return -EINVAL;
 
        tun->set_features = features;
+       tun->dev->wanted_features &= ~TUN_USER_FEATURES;
+       tun->dev->wanted_features |= features;
        netdev_update_features(tun->dev);
 
        return 0;
index 8056745..156f7f8 100644 (file)
@@ -580,6 +580,10 @@ static const struct usb_device_id products[] = {
                USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, USB_CLASS_VENDOR_SPEC, 0x01, 0x69),
                .driver_info        = (unsigned long)&qmi_wwan_info,
        },
+       {       /* Motorola Mapphone devices with MDM6600 */
+               USB_VENDOR_AND_INTERFACE_INFO(0x22b8, USB_CLASS_VENDOR_SPEC, 0xfb, 0xff),
+               .driver_info        = (unsigned long)&qmi_wwan_info,
+       },
 
        /* 2. Combined interface devices matching on class+protocol */
        {       /* Huawei E367 and possibly others in "Windows mode" */
@@ -925,6 +929,8 @@ static const struct usb_device_id products[] = {
        {QMI_FIXED_INTF(0x413c, 0x81a9, 8)},    /* Dell Wireless 5808e Gobi(TM) 4G LTE Mobile Broadband Card */
        {QMI_FIXED_INTF(0x413c, 0x81b1, 8)},    /* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card */
        {QMI_FIXED_INTF(0x413c, 0x81b3, 8)},    /* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card (rev3) */
+       {QMI_FIXED_INTF(0x413c, 0x81b6, 8)},    /* Dell Wireless 5811e */
+       {QMI_FIXED_INTF(0x413c, 0x81b6, 10)},   /* Dell Wireless 5811e */
        {QMI_FIXED_INTF(0x03f0, 0x4e1d, 8)},    /* HP lt4111 LTE/EV-DO/HSPA+ Gobi 4G Module */
        {QMI_FIXED_INTF(0x22de, 0x9061, 3)},    /* WeTelecom WPD-600N */
        {QMI_FIXED_INTF(0x1e0e, 0x9001, 5)},    /* SIMCom 7230E */
index 986243c..0b1b918 100644 (file)
@@ -32,7 +32,7 @@
 #define NETNEXT_VERSION                "08"
 
 /* Information for net */
-#define NET_VERSION            "8"
+#define NET_VERSION            "9"
 
 #define DRIVER_VERSION         "v1." NETNEXT_VERSION "." NET_VERSION
 #define DRIVER_AUTHOR "Realtek linux nic maintainers <nic_swsd@realtek.com>"
@@ -501,6 +501,8 @@ enum rtl_register_content {
 #define RTL8153_RMS            RTL8153_MAX_PACKET
 #define RTL8152_TX_TIMEOUT     (5 * HZ)
 #define RTL8152_NAPI_WEIGHT    64
+#define rx_reserved_size(x)    ((x) + VLAN_ETH_HLEN + CRC_SIZE + \
+                                sizeof(struct rx_desc) + RX_ALIGN)
 
 /* rtl8152 flags */
 enum rtl8152_flags {
@@ -1362,6 +1364,7 @@ static int alloc_all_mem(struct r8152 *tp)
        spin_lock_init(&tp->rx_lock);
        spin_lock_init(&tp->tx_lock);
        INIT_LIST_HEAD(&tp->tx_free);
+       INIT_LIST_HEAD(&tp->rx_done);
        skb_queue_head_init(&tp->tx_queue);
        skb_queue_head_init(&tp->rx_queue);
 
@@ -2252,8 +2255,7 @@ static void r8153_set_rx_early_timeout(struct r8152 *tp)
 
 static void r8153_set_rx_early_size(struct r8152 *tp)
 {
-       u32 mtu = tp->netdev->mtu;
-       u32 ocp_data = (agg_buf_sz - mtu - VLAN_ETH_HLEN - VLAN_HLEN) / 8;
+       u32 ocp_data = (agg_buf_sz - rx_reserved_size(tp->netdev->mtu)) / 4;
 
        ocp_write_word(tp, MCU_TYPE_USB, USB_RX_EARLY_SIZE, ocp_data);
 }
@@ -2898,7 +2900,8 @@ static void r8153_first_init(struct r8152 *tp)
 
        rtl_rx_vlan_en(tp, tp->netdev->features & NETIF_F_HW_VLAN_CTAG_RX);
 
-       ocp_write_word(tp, MCU_TYPE_PLA, PLA_RMS, RTL8153_RMS);
+       ocp_data = tp->netdev->mtu + VLAN_ETH_HLEN + CRC_SIZE;
+       ocp_write_word(tp, MCU_TYPE_PLA, PLA_RMS, ocp_data);
        ocp_write_byte(tp, MCU_TYPE_PLA, PLA_MTPS, MTPS_JUMBO);
 
        ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_TCR0);
@@ -2950,7 +2953,8 @@ static void r8153_enter_oob(struct r8152 *tp)
                usleep_range(1000, 2000);
        }
 
-       ocp_write_word(tp, MCU_TYPE_PLA, PLA_RMS, RTL8153_RMS);
+       ocp_data = tp->netdev->mtu + VLAN_ETH_HLEN + CRC_SIZE;
+       ocp_write_word(tp, MCU_TYPE_PLA, PLA_RMS, ocp_data);
 
        ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_TEREDO_CFG);
        ocp_data &= ~TEREDO_WAKE_MASK;
@@ -4200,8 +4204,14 @@ static int rtl8152_change_mtu(struct net_device *dev, int new_mtu)
 
        dev->mtu = new_mtu;
 
-       if (netif_running(dev) && netif_carrier_ok(dev))
-               r8153_set_rx_early_size(tp);
+       if (netif_running(dev)) {
+               u32 rms = new_mtu + VLAN_ETH_HLEN + CRC_SIZE;
+
+               ocp_write_word(tp, MCU_TYPE_PLA, PLA_RMS, rms);
+
+               if (netif_carrier_ok(dev))
+                       r8153_set_rx_early_size(tp);
+       }
 
        mutex_unlock(&tp->control);
 
index fea687f..d6988db 100644 (file)
@@ -462,8 +462,10 @@ static void vrf_rt6_release(struct net_device *dev, struct net_vrf *vrf)
        }
 
        if (rt6_local) {
-               if (rt6_local->rt6i_idev)
+               if (rt6_local->rt6i_idev) {
                        in6_dev_put(rt6_local->rt6i_idev);
+                       rt6_local->rt6i_idev = NULL;
+               }
 
                dst = &rt6_local->dst;
                dev_put(dst->dev);
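
The vrf fix clears rt6i_idev after dropping the reference so a later teardown path cannot put it a second time. The general defend-against-double-put idiom, as a sketch:

static void idev_put_once(struct rt6_info *rt)
{
	if (rt->rt6i_idev) {
		in6_dev_put(rt->rt6i_idev);
		rt->rt6i_idev = NULL;	/* a repeated call is now a no-op */
	}
}
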
index 33fb268..d9f37ee 100644 (file)
@@ -51,7 +51,7 @@ const struct ath10k_hw_regs qca6174_regs = {
        .rtc_soc_base_address                   = 0x00000800,
        .rtc_wmac_base_address                  = 0x00001000,
        .soc_core_base_address                  = 0x0003a000,
-       .wlan_mac_base_address                  = 0x00020000,
+       .wlan_mac_base_address                  = 0x00010000,
        .ce_wrapper_base_address                = 0x00034000,
        .ce0_base_address                       = 0x00034400,
        .ce1_base_address                       = 0x00034800,
index d37b169..6927cae 100644 (file)
@@ -2319,7 +2319,7 @@ iwl_mvm_mac_release_buffered_frames(struct ieee80211_hw *hw,
 {
        struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
 
-       /* Called when we need to transmit (a) frame(s) from agg queue */
+       /* Called when we need to transmit (a) frame(s) from agg or dqa queue */
 
        iwl_mvm_sta_modify_sleep_tx_count(mvm, sta, reason, num_frames,
                                          tids, more_data, true);
@@ -2338,7 +2338,8 @@ static void __iwl_mvm_mac_sta_notify(struct ieee80211_hw *hw,
        for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++) {
                struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
 
-               if (tid_data->state != IWL_AGG_ON &&
+               if (!iwl_mvm_is_dqa_supported(mvm) &&
+                   tid_data->state != IWL_AGG_ON &&
                    tid_data->state != IWL_EMPTYING_HW_QUEUE_DELBA)
                        continue;
 
index bd1dcc8..b51a285 100644 (file)
@@ -3135,7 +3135,7 @@ void iwl_mvm_sta_modify_sleep_tx_count(struct iwl_mvm *mvm,
                                       struct ieee80211_sta *sta,
                                       enum ieee80211_frame_release_type reason,
                                       u16 cnt, u16 tids, bool more_data,
-                                      bool agg)
+                                      bool single_sta_queue)
 {
        struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
        struct iwl_mvm_add_sta_cmd cmd = {
@@ -3155,14 +3155,14 @@ void iwl_mvm_sta_modify_sleep_tx_count(struct iwl_mvm *mvm,
        for_each_set_bit(tid, &_tids, IWL_MAX_TID_COUNT)
                cmd.awake_acs |= BIT(tid_to_ucode_ac[tid]);
 
-       /* If we're releasing frames from aggregation queues then check if the
-        * all queues combined that we're releasing frames from have
+       /* If we're releasing frames from aggregation or dqa queues then check
+        * if all the queues that we're releasing frames from, combined, have:
         *  - more frames than the service period, in which case more_data
         *    needs to be set
         *  - fewer than 'cnt' frames, in which case we need to adjust the
         *    firmware command (but do that unconditionally)
         */
-       if (agg) {
+       if (single_sta_queue) {
                int remaining = cnt;
                int sleep_tx_count;
 
@@ -3172,7 +3172,8 @@ void iwl_mvm_sta_modify_sleep_tx_count(struct iwl_mvm *mvm,
                        u16 n_queued;
 
                        tid_data = &mvmsta->tid_data[tid];
-                       if (WARN(tid_data->state != IWL_AGG_ON &&
+                       if (WARN(!iwl_mvm_is_dqa_supported(mvm) &&
+                                tid_data->state != IWL_AGG_ON &&
                                 tid_data->state != IWL_EMPTYING_HW_QUEUE_DELBA,
                                 "TID %d state is %d\n",
                                 tid, tid_data->state)) {
index 4be34f9..1927ce6 100644 (file)
@@ -547,7 +547,7 @@ void iwl_mvm_sta_modify_sleep_tx_count(struct iwl_mvm *mvm,
                                       struct ieee80211_sta *sta,
                                       enum ieee80211_frame_release_type reason,
                                       u16 cnt, u16 tids, bool more_data,
-                                      bool agg);
+                                      bool single_sta_queue);
 int iwl_mvm_drain_sta(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
                      bool drain);
 void iwl_mvm_sta_modify_disable_tx(struct iwl_mvm *mvm,
index dd2b4a3..3f37075 100644 (file)
@@ -7,7 +7,7 @@
  *
  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
- * Copyright(c) 2016        Intel Deutschland GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -34,6 +34,7 @@
  *
  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -628,8 +629,10 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
         * values.
         * Note that we don't need to make sure it isn't agg'd, since we're
         * TXing non-sta
+        * For DQA mode - we shouldn't increase it though
         */
-       atomic_inc(&mvm->pending_frames[sta_id]);
+       if (!iwl_mvm_is_dqa_supported(mvm))
+               atomic_inc(&mvm->pending_frames[sta_id]);
 
        return 0;
 }
@@ -1005,11 +1008,8 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
 
        spin_unlock(&mvmsta->lock);
 
-       /* Increase pending frames count if this isn't AMPDU */
-       if ((iwl_mvm_is_dqa_supported(mvm) &&
-            mvmsta->tid_data[tx_cmd->tid_tspec].state != IWL_AGG_ON &&
-            mvmsta->tid_data[tx_cmd->tid_tspec].state != IWL_AGG_STARTING) ||
-           (!iwl_mvm_is_dqa_supported(mvm) && !is_ampdu))
+       /* Increase pending frames count if this isn't AMPDU or DQA queue */
+       if (!iwl_mvm_is_dqa_supported(mvm) && !is_ampdu)
                atomic_inc(&mvm->pending_frames[mvmsta->sta_id]);
 
        return 0;
@@ -1079,12 +1079,13 @@ static void iwl_mvm_check_ratid_empty(struct iwl_mvm *mvm,
        lockdep_assert_held(&mvmsta->lock);
 
        if ((tid_data->state == IWL_AGG_ON ||
-            tid_data->state == IWL_EMPTYING_HW_QUEUE_DELBA) &&
+            tid_data->state == IWL_EMPTYING_HW_QUEUE_DELBA ||
+            iwl_mvm_is_dqa_supported(mvm)) &&
            iwl_mvm_tid_queued(tid_data) == 0) {
                /*
-                * Now that this aggregation queue is empty tell mac80211 so it
-                * knows we no longer have frames buffered for the station on
-                * this TID (for the TIM bitmap calculation.)
+                * Now that this aggregation or DQA queue is empty tell
+                * mac80211 so it knows we no longer have frames buffered for
+                * the station on this TID (for the TIM bitmap calculation.)
                 */
                ieee80211_sta_set_buffered(sta, tid, false);
        }
@@ -1257,7 +1258,6 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
        u8 skb_freed = 0;
        u16 next_reclaimed, seq_ctl;
        bool is_ndp = false;
-       bool txq_agg = false; /* Is this TXQ aggregated */
 
        __skb_queue_head_init(&skbs);
 
@@ -1283,6 +1283,10 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
                        info->flags |= IEEE80211_TX_STAT_ACK;
                        break;
                case TX_STATUS_FAIL_DEST_PS:
+                       /* In DQA, the FW should have stopped the queue and not
+                        * return this status
+                        */
+                       WARN_ON(iwl_mvm_is_dqa_supported(mvm));
                        info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
                        break;
                default:
@@ -1387,15 +1391,6 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
                        bool send_eosp_ndp = false;
 
                        spin_lock_bh(&mvmsta->lock);
-                       if (iwl_mvm_is_dqa_supported(mvm)) {
-                               enum iwl_mvm_agg_state state;
-
-                               state = mvmsta->tid_data[tid].state;
-                               txq_agg = (state == IWL_AGG_ON ||
-                                       state == IWL_EMPTYING_HW_QUEUE_DELBA);
-                       } else {
-                               txq_agg = txq_id >= mvm->first_agg_queue;
-                       }
 
                        if (!is_ndp) {
                                tid_data->next_reclaimed = next_reclaimed;
@@ -1452,11 +1447,11 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
         * If the txq is not an AMPDU queue, there is no chance we freed
         * several skbs. Check that out...
         */
-       if (txq_agg)
+       if (iwl_mvm_is_dqa_supported(mvm) || txq_id >= mvm->first_agg_queue)
                goto out;
 
        /* We can't free more than one frame at once on a shared queue */
-       WARN_ON(!iwl_mvm_is_dqa_supported(mvm) && (skb_freed > 1));
+       WARN_ON(skb_freed > 1);
 
        /* If we still have frames for this STA there is nothing to do here */
        if (!atomic_sub_and_test(skb_freed, &mvm->pending_frames[sta_id]))
index 5ebca1d..b62e03d 100644 (file)
@@ -57,8 +57,8 @@ MODULE_PARM_DESC(mfg_mode, "manufacturing mode enable:1, disable:0");
  * In case of any errors during initialization, this function also ensures
  * proper cleanup before exiting.
  */
-static int mwifiex_register(void *card, struct mwifiex_if_ops *if_ops,
-                           void **padapter)
+static int mwifiex_register(void *card, struct device *dev,
+                           struct mwifiex_if_ops *if_ops, void **padapter)
 {
        struct mwifiex_adapter *adapter;
        int i;
@@ -68,6 +68,7 @@ static int mwifiex_register(void *card, struct mwifiex_if_ops *if_ops,
                return -ENOMEM;
 
        *padapter = adapter;
+       adapter->dev = dev;
        adapter->card = card;
 
        /* Save interface specific operations in adapter */
@@ -1568,12 +1569,11 @@ mwifiex_add_card(void *card, struct completion *fw_done,
 {
        struct mwifiex_adapter *adapter;
 
-       if (mwifiex_register(card, if_ops, (void **)&adapter)) {
+       if (mwifiex_register(card, dev, if_ops, (void **)&adapter)) {
                pr_err("%s: software init failed\n", __func__);
                goto err_init_sw;
        }
 
-       adapter->dev = dev;
        mwifiex_probe_of(adapter);
 
        adapter->iface_type = iface_type;
@@ -1718,6 +1718,9 @@ int mwifiex_remove_card(struct mwifiex_adapter *adapter)
        wiphy_unregister(adapter->wiphy);
        wiphy_free(adapter->wiphy);
 
+       if (adapter->irq_wakeup >= 0)
+               device_init_wakeup(adapter->dev, false);
+
        /* Unregister device */
        mwifiex_dbg(adapter, INFO,
                    "info: unregister device\n");
index a0d9180..b8c990d 100644 (file)
@@ -2739,6 +2739,21 @@ static void mwifiex_pcie_device_dump(struct mwifiex_adapter *adapter)
        schedule_work(&card->work);
 }
 
+static void mwifiex_pcie_free_buffers(struct mwifiex_adapter *adapter)
+{
+       struct pcie_service_card *card = adapter->card;
+       const struct mwifiex_pcie_card_reg *reg = card->pcie.reg;
+
+       if (reg->sleep_cookie)
+               mwifiex_pcie_delete_sleep_cookie_buf(adapter);
+
+       mwifiex_pcie_delete_cmdrsp_buf(adapter);
+       mwifiex_pcie_delete_evtbd_ring(adapter);
+       mwifiex_pcie_delete_rxbd_ring(adapter);
+       mwifiex_pcie_delete_txbd_ring(adapter);
+       card->cmdrsp_buf = NULL;
+}
+
 /*
  * This function initializes the PCI-E host memory space, WCB rings, etc.
  *
@@ -2850,13 +2865,6 @@ err_enable_dev:
 
 /*
  * This function cleans up the allocated card buffers.
- *
- * The following are freed by this function -
- *      - TXBD ring buffers
- *      - RXBD ring buffers
- *      - Event BD ring buffers
- *      - Command response ring buffer
- *      - Sleep cookie buffer
  */
 static void mwifiex_cleanup_pcie(struct mwifiex_adapter *adapter)
 {
@@ -2875,6 +2883,8 @@ static void mwifiex_cleanup_pcie(struct mwifiex_adapter *adapter)
                                    "Failed to write driver not-ready signature\n");
        }
 
+       mwifiex_pcie_free_buffers(adapter);
+
        if (pdev) {
                pci_iounmap(pdev, card->pci_mmap);
                pci_iounmap(pdev, card->pci_mmap1);
@@ -3126,10 +3136,7 @@ err_cre_txbd:
        pci_iounmap(pdev, card->pci_mmap1);
 }
 
-/* This function cleans up the PCI-E host memory space.
- * Some code is extracted from mwifiex_unregister_dev()
- *
- */
+/* This function cleans up the PCI-E host memory space. */
 static void mwifiex_pcie_down_dev(struct mwifiex_adapter *adapter)
 {
        struct pcie_service_card *card = adapter->card;
@@ -3140,14 +3147,7 @@ static void mwifiex_pcie_down_dev(struct mwifiex_adapter *adapter)
 
        adapter->seq_num = 0;
 
-       if (reg->sleep_cookie)
-               mwifiex_pcie_delete_sleep_cookie_buf(adapter);
-
-       mwifiex_pcie_delete_cmdrsp_buf(adapter);
-       mwifiex_pcie_delete_evtbd_ring(adapter);
-       mwifiex_pcie_delete_rxbd_ring(adapter);
-       mwifiex_pcie_delete_txbd_ring(adapter);
-       card->cmdrsp_buf = NULL;
+       mwifiex_pcie_free_buffers(adapter);
 }
 
 static struct mwifiex_if_ops pcie_ops = {
index 65f86bc..1dc43fc 100644 (file)
@@ -76,7 +76,7 @@ config QCOM_ADSP_PIL
        depends on OF && ARCH_QCOM
        depends on REMOTEPROC
        depends on QCOM_SMEM
-       depends on QCOM_SMD || (COMPILE_TEST && QCOM_SMD=n)
+       depends on RPMSG_QCOM_SMD || QCOM_SMD || (COMPILE_TEST && QCOM_SMD=n && RPMSG_QCOM_SMD=n)
        select MFD_SYSCON
        select QCOM_MDT_LOADER
        select QCOM_RPROC_COMMON
@@ -93,7 +93,7 @@ config QCOM_Q6V5_PIL
        depends on OF && ARCH_QCOM
        depends on QCOM_SMEM
        depends on REMOTEPROC
-       depends on QCOM_SMD || (COMPILE_TEST && QCOM_SMD=n)
+       depends on RPMSG_QCOM_SMD || QCOM_SMD || (COMPILE_TEST && QCOM_SMD=n && RPMSG_QCOM_SMD=n)
        select MFD_SYSCON
        select QCOM_RPROC_COMMON
        select QCOM_SCM
@@ -104,7 +104,7 @@ config QCOM_Q6V5_PIL
 config QCOM_WCNSS_PIL
        tristate "Qualcomm WCNSS Peripheral Image Loader"
        depends on OF && ARCH_QCOM
-       depends on QCOM_SMD || (COMPILE_TEST && QCOM_SMD=n)
+       depends on RPMSG_QCOM_SMD || QCOM_SMD || (COMPILE_TEST && QCOM_SMD=n && RPMSG_QCOM_SMD=n)
        depends on QCOM_SMEM
        depends on REMOTEPROC
        select QCOM_MDT_LOADER
index 4bf55b5..3c52867 100644 (file)
@@ -1253,20 +1253,6 @@ config SCSI_LPFC_DEBUG_FS
          This makes debugging information from the lpfc driver
          available via the debugfs filesystem.
 
-config LPFC_NVME_INITIATOR
-       bool "Emulex LightPulse Fibre Channel NVME Initiator Support"
-       depends on SCSI_LPFC && NVME_FC
-       ---help---
-         This enables NVME Initiator support in the Emulex lpfc driver.
-
-config LPFC_NVME_TARGET
-       bool "Emulex LightPulse Fibre Channel NVME Initiator Support"
-       depends on SCSI_LPFC && NVME_TARGET_FC
-       ---help---
-         This enables NVME Target support in the Emulex lpfc driver.
-         Target enablement must still be enabled on a per adapter
-         basis by module parameters.
-
 config SCSI_SIM710
        tristate "Simple 53c710 SCSI support (Compaq, NCR machines)"
        depends on (EISA || MCA) && SCSI
index 524a0c7..0d0be77 100644 (file)
@@ -2956,7 +2956,7 @@ static int hpsa_send_reset(struct ctlr_info *h, unsigned char *scsi3addr,
        /* fill_cmd can't fail here, no data buffer to map. */
        (void) fill_cmd(c, reset_type, h, NULL, 0, 0,
                        scsi3addr, TYPE_MSG);
-       rc = hpsa_scsi_do_simple_cmd(h, c, reply_queue, DEFAULT_TIMEOUT);
+       rc = hpsa_scsi_do_simple_cmd(h, c, reply_queue, NO_TIMEOUT);
        if (rc) {
                dev_warn(&h->pdev->dev, "Failed to send reset command\n");
                goto out;
@@ -3714,7 +3714,7 @@ exit_failed:
  *  # (integer code indicating one of several NOT READY states
  *     describing why a volume is to be kept offline)
  */
-static int hpsa_volume_offline(struct ctlr_info *h,
+static unsigned char hpsa_volume_offline(struct ctlr_info *h,
                                        unsigned char scsi3addr[])
 {
        struct CommandList *c;
@@ -3735,7 +3735,7 @@ static int hpsa_volume_offline(struct ctlr_info *h,
                                        DEFAULT_TIMEOUT);
        if (rc) {
                cmd_free(h, c);
-               return 0;
+               return HPSA_VPD_LV_STATUS_UNSUPPORTED;
        }
        sense = c->err_info->SenseInfo;
        if (c->err_info->SenseLen > sizeof(c->err_info->SenseInfo))
@@ -3746,19 +3746,13 @@ static int hpsa_volume_offline(struct ctlr_info *h,
        cmd_status = c->err_info->CommandStatus;
        scsi_status = c->err_info->ScsiStatus;
        cmd_free(h, c);
-       /* Is the volume 'not ready'? */
-       if (cmd_status != CMD_TARGET_STATUS ||
-               scsi_status != SAM_STAT_CHECK_CONDITION ||
-               sense_key != NOT_READY ||
-               asc != ASC_LUN_NOT_READY)  {
-               return 0;
-       }
 
        /* Determine the reason for not ready state */
        ldstat = hpsa_get_volume_status(h, scsi3addr);
 
        /* Keep volume offline in certain cases: */
        switch (ldstat) {
+       case HPSA_LV_FAILED:
        case HPSA_LV_UNDERGOING_ERASE:
        case HPSA_LV_NOT_AVAILABLE:
        case HPSA_LV_UNDERGOING_RPI:
@@ -3780,7 +3774,7 @@ static int hpsa_volume_offline(struct ctlr_info *h,
        default:
                break;
        }
-       return 0;
+       return HPSA_LV_OK;
 }
 
 /*
@@ -3853,10 +3847,10 @@ static int hpsa_update_device_info(struct ctlr_info *h,
        /* Do an inquiry to the device to see what it is. */
        if (hpsa_scsi_do_inquiry(h, scsi3addr, 0, inq_buff,
                (unsigned char) OBDR_TAPE_INQ_SIZE) != 0) {
-               /* Inquiry failed (msg printed already) */
                dev_err(&h->pdev->dev,
-                       "hpsa_update_device_info: inquiry failed\n");
-               rc = -EIO;
+                       "%s: inquiry failed, device will be skipped.\n",
+                       __func__);
+               rc = HPSA_INQUIRY_FAILED;
                goto bail_out;
        }
 
@@ -3885,15 +3879,19 @@ static int hpsa_update_device_info(struct ctlr_info *h,
        if ((this_device->devtype == TYPE_DISK ||
                this_device->devtype == TYPE_ZBC) &&
                is_logical_dev_addr_mode(scsi3addr)) {
-               int volume_offline;
+               unsigned char volume_offline;
 
                hpsa_get_raid_level(h, scsi3addr, &this_device->raid_level);
                if (h->fw_support & MISC_FW_RAID_OFFLOAD_BASIC)
                        hpsa_get_ioaccel_status(h, scsi3addr, this_device);
                volume_offline = hpsa_volume_offline(h, scsi3addr);
-               if (volume_offline < 0 || volume_offline > 0xff)
-                       volume_offline = HPSA_VPD_LV_STATUS_UNSUPPORTED;
-               this_device->volume_offline = volume_offline & 0xff;
+               if (volume_offline == HPSA_LV_FAILED) {
+                       rc = HPSA_LV_FAILED;
+                       dev_err(&h->pdev->dev,
+                               "%s: LV failed, device will be skipped.\n",
+                               __func__);
+                       goto bail_out;
+               }
        } else {
                this_device->raid_level = RAID_UNKNOWN;
                this_device->offload_config = 0;
@@ -4379,8 +4377,7 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h)
                        goto out;
                }
                if (rc) {
-                       dev_warn(&h->pdev->dev,
-                               "Inquiry failed, skipping device.\n");
+                       h->drv_req_rescan = 1;
                        continue;
                }
 
@@ -5558,7 +5555,7 @@ static void hpsa_scan_complete(struct ctlr_info *h)
 
        spin_lock_irqsave(&h->scan_lock, flags);
        h->scan_finished = 1;
-       wake_up_all(&h->scan_wait_queue);
+       wake_up(&h->scan_wait_queue);
        spin_unlock_irqrestore(&h->scan_lock, flags);
 }
 
@@ -5576,11 +5573,23 @@ static void hpsa_scan_start(struct Scsi_Host *sh)
        if (unlikely(lockup_detected(h)))
                return hpsa_scan_complete(h);
 
+       /*
+        * If a scan is already waiting to run, no need to add another
+        */
+       spin_lock_irqsave(&h->scan_lock, flags);
+       if (h->scan_waiting) {
+               spin_unlock_irqrestore(&h->scan_lock, flags);
+               return;
+       }
+
+       spin_unlock_irqrestore(&h->scan_lock, flags);
+
        /* wait until any scan already in progress is finished. */
        while (1) {
                spin_lock_irqsave(&h->scan_lock, flags);
                if (h->scan_finished)
                        break;
+               h->scan_waiting = 1;
                spin_unlock_irqrestore(&h->scan_lock, flags);
                wait_event(h->scan_wait_queue, h->scan_finished);
                /* Note: We don't need to worry about a race between this
@@ -5590,6 +5599,7 @@ static void hpsa_scan_start(struct Scsi_Host *sh)
                 */
        }
        h->scan_finished = 0; /* mark scan as in progress */
+       h->scan_waiting = 0;
        spin_unlock_irqrestore(&h->scan_lock, flags);
 
        if (unlikely(lockup_detected(h)))
@@ -8792,6 +8802,7 @@ reinit_after_soft_reset:
        init_waitqueue_head(&h->event_sync_wait_queue);
        mutex_init(&h->reset_mutex);
        h->scan_finished = 1; /* no scan currently in progress */
+       h->scan_waiting = 0;
 
        pci_set_drvdata(pdev, h);
        h->ndevices = 0;
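
Taken together, the hpsa scan hunks implement a small coalescing state machine under scan_lock: one scan runs, at most one waits, and any further request returns immediately because the queued scan will pick up its changes (which is also why plain wake_up() now suffices over wake_up_all()). A sketch, with struct ctlr standing in for struct ctlr_info:

static void example_scan_start(struct ctlr *h)
{
	unsigned long flags;

	spin_lock_irqsave(&h->scan_lock, flags);
	if (h->scan_waiting) {			/* a scan is already queued */
		spin_unlock_irqrestore(&h->scan_lock, flags);
		return;				/* it will rescan for us */
	}
	while (!h->scan_finished) {		/* one in flight: become the waiter */
		h->scan_waiting = 1;
		spin_unlock_irqrestore(&h->scan_lock, flags);
		wait_event(h->scan_wait_queue, h->scan_finished);
		spin_lock_irqsave(&h->scan_lock, flags);
	}
	h->scan_finished = 0;			/* mark our scan in progress */
	h->scan_waiting = 0;
	spin_unlock_irqrestore(&h->scan_lock, flags);
	/* ... scan, then set scan_finished = 1 and wake_up(&h->scan_wait_queue) */
}
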
index bf6cdc1..6f04f2a 100644 (file)
@@ -201,6 +201,7 @@ struct ctlr_info {
        dma_addr_t              errinfo_pool_dhandle;
        unsigned long           *cmd_pool_bits;
        int                     scan_finished;
+       u8                      scan_waiting : 1;
        spinlock_t              scan_lock;
        wait_queue_head_t       scan_wait_queue;
 
index a584cdf..5961705 100644 (file)
 #define CFGTBL_BusType_Fibre2G  0x00000200l
 
 /* VPD Inquiry types */
+#define HPSA_INQUIRY_FAILED            0x02
 #define HPSA_VPD_SUPPORTED_PAGES        0x00
 #define HPSA_VPD_LV_DEVICE_ID           0x83
 #define HPSA_VPD_LV_DEVICE_GEOMETRY     0xC1
 /* Logical volume states */
 #define HPSA_VPD_LV_STATUS_UNSUPPORTED                 0xff
 #define HPSA_LV_OK                                      0x0
+#define HPSA_LV_FAILED                                 0x01
 #define HPSA_LV_NOT_AVAILABLE                          0x0b
 #define HPSA_LV_UNDERGOING_ERASE                       0x0F
 #define HPSA_LV_UNDERGOING_RPI                         0x12
index 5c3be3e..22819af 100644 (file)
@@ -3315,9 +3315,9 @@ LPFC_ATTR_R(nvmet_mrq_post, LPFC_DEF_MRQ_POST,
  * lpfc_enable_fc4_type: Defines what FC4 types are supported.
  * Supported Values:  1 - register just FCP
  *                    3 - register both FCP and NVME
- * Supported values are [1,3]. Default value is 3
+ * Supported values are [1,3]. Default value is 1
  */
-LPFC_ATTR_R(enable_fc4_type, LPFC_ENABLE_BOTH,
+LPFC_ATTR_R(enable_fc4_type, LPFC_ENABLE_FCP,
            LPFC_ENABLE_FCP, LPFC_ENABLE_BOTH,
            "Define fc4 type to register with fabric.");
 
index 2697d49..6cc561b 100644 (file)
@@ -5891,10 +5891,17 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
                /* Check to see if it matches any module parameter */
                for (i = 0; i < lpfc_enable_nvmet_cnt; i++) {
                        if (wwn == lpfc_enable_nvmet[i]) {
+#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
                                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                                                "6017 NVME Target %016llx\n",
                                                wwn);
                                phba->nvmet_support = 1; /* a match */
+#else
+                               lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                                               "6021 Can't enable NVME Target."
+                                               " NVME_TARGET_FC infrastructure"
+                                               " is not in kernel\n");
+#endif
                        }
                }
        }
index 0a4c190..0024de1 100644 (file)
@@ -2149,7 +2149,7 @@ lpfc_nvme_create_localport(struct lpfc_vport *vport)
        /* localport is allocated from the stack, but the registration
         * call allocates heap memory as well as the private area.
         */
-#ifdef CONFIG_LPFC_NVME_INITIATOR
+#if (IS_ENABLED(CONFIG_NVME_FC))
        ret = nvme_fc_register_localport(&nfcp_info, &lpfc_nvme_template,
                                         &vport->phba->pcidev->dev, &localport);
 #else
@@ -2190,7 +2190,7 @@ lpfc_nvme_create_localport(struct lpfc_vport *vport)
 void
 lpfc_nvme_destroy_localport(struct lpfc_vport *vport)
 {
-#ifdef CONFIG_LPFC_NVME_INITIATOR
+#if (IS_ENABLED(CONFIG_NVME_FC))
        struct nvme_fc_local_port *localport;
        struct lpfc_nvme_lport *lport;
        struct lpfc_nvme_rport *rport = NULL, *rport_next = NULL;
@@ -2274,7 +2274,7 @@ lpfc_nvme_update_localport(struct lpfc_vport *vport)
 int
 lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
 {
-#ifdef CONFIG_LPFC_NVME_INITIATOR
+#if (IS_ENABLED(CONFIG_NVME_FC))
        int ret = 0;
        struct nvme_fc_local_port *localport;
        struct lpfc_nvme_lport *lport;
@@ -2403,7 +2403,7 @@ lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
 void
 lpfc_nvme_unregister_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
 {
-#ifdef CONFIG_LPFC_NVME_INITIATOR
+#if (IS_ENABLED(CONFIG_NVME_FC))
        int ret;
        struct nvme_fc_local_port *localport;
        struct lpfc_nvme_lport *lport;
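
The lpfc hunks drop the driver-private LPFC_NVME_INITIATOR/TARGET Kconfig symbols and key directly off the NVMe core options instead. IS_ENABLED() (linux/kconfig.h) evaluates to 1 for both =y and =m, which a bare #ifdef CONFIG_NVME_FC would miss in the modular case. A sketch of the idiom, with info, tmpl, and dev as hypothetical arguments:

#if (IS_ENABLED(CONFIG_NVME_FC))	/* true for built-in and for module */
	ret = nvme_fc_register_localport(&info, &tmpl, dev, &localport);
#else
	ret = -ENODEV;			/* NVMe-FC core not in this kernel */
#endif
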
index b7739a5..7ca868f 100644 (file)
@@ -671,7 +671,7 @@ lpfc_nvmet_create_targetport(struct lpfc_hba *phba)
        lpfc_tgttemplate.target_features = NVMET_FCTGTFEAT_READDATA_RSP |
                                           NVMET_FCTGTFEAT_NEEDS_CMD_CPUSCHED;
 
-#ifdef CONFIG_LPFC_NVME_TARGET
+#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
        error = nvmet_fc_register_targetport(&pinfo, &lpfc_tgttemplate,
                                             &phba->pcidev->dev,
                                             &phba->targetport);
@@ -756,7 +756,7 @@ lpfc_sli4_nvmet_xri_aborted(struct lpfc_hba *phba,
 void
 lpfc_nvmet_destroy_targetport(struct lpfc_hba *phba)
 {
-#ifdef CONFIG_LPFC_NVME_TARGET
+#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
        struct lpfc_nvmet_tgtport *tgtp;
 
        if (phba->nvmet_support == 0)
@@ -788,7 +788,7 @@ static void
 lpfc_nvmet_unsol_ls_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
                           struct hbq_dmabuf *nvmebuf)
 {
-#ifdef CONFIG_LPFC_NVME_TARGET
+#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
        struct lpfc_nvmet_tgtport *tgtp;
        struct fc_frame_header *fc_hdr;
        struct lpfc_nvmet_rcv_ctx *ctxp;
@@ -891,7 +891,7 @@ lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba,
                            struct rqb_dmabuf *nvmebuf,
                            uint64_t isr_timestamp)
 {
-#ifdef CONFIG_LPFC_NVME_TARGET
+#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
        struct lpfc_nvmet_rcv_ctx *ctxp;
        struct lpfc_nvmet_tgtport *tgtp;
        struct fc_frame_header *fc_hdr;
index e7e5974..2b209bb 100644 (file)
@@ -35,8 +35,8 @@
 /*
  * MegaRAID SAS Driver meta data
  */
-#define MEGASAS_VERSION                                "07.701.16.00-rc1"
-#define MEGASAS_RELDATE                                "February 2, 2017"
+#define MEGASAS_VERSION                                "07.701.17.00-rc1"
+#define MEGASAS_RELDATE                                "March 2, 2017"
 
 /*
  * Device IDs
index 7ac9a9e..0016f12 100644 (file)
@@ -1963,6 +1963,9 @@ scan_target:
        if (!mr_device_priv_data)
                return -ENOMEM;
        sdev->hostdata = mr_device_priv_data;
+
+       atomic_set(&mr_device_priv_data->r1_ldio_hint,
+                  instance->r1_ldio_hint_default);
        return 0;
 }
 
@@ -5034,10 +5037,12 @@ megasas_setup_irqs_msix(struct megasas_instance *instance, u8 is_probe)
                                         &instance->irq_context[j]);
                        /* Retry irq register for IO_APIC*/
                        instance->msix_vectors = 0;
-                       if (is_probe)
+                       if (is_probe) {
+                               pci_free_irq_vectors(instance->pdev);
                                return megasas_setup_irqs_ioapic(instance);
-                       else
+                       } else {
                                return -1;
+                       }
                }
        }
        return 0;
@@ -5277,9 +5282,11 @@ static int megasas_init_fw(struct megasas_instance *instance)
                        MPI2_REPLY_POST_HOST_INDEX_OFFSET);
        }
 
-       i = pci_alloc_irq_vectors(instance->pdev, 1, 1, PCI_IRQ_LEGACY);
-       if (i < 0)
-               goto fail_setup_irqs;
+       if (!instance->msix_vectors) {
+               i = pci_alloc_irq_vectors(instance->pdev, 1, 1, PCI_IRQ_LEGACY);
+               if (i < 0)
+                       goto fail_setup_irqs;
+       }
 
        dev_info(&instance->pdev->dev,
                "firmware supports msix\t: (%d)", fw_msix_count);
index 29650ba..f990ab4 100644 (file)
@@ -2159,7 +2159,7 @@ megasas_set_raidflag_cpu_affinity(union RAID_CONTEXT_UNION *praid_context,
                                cpu_sel = MR_RAID_CTX_CPUSEL_1;
 
                        if (is_stream_detected(rctx_g35) &&
-                           (raid->level == 5) &&
+                           ((raid->level == 5) || (raid->level == 6)) &&
                            (raid->writeMode == MR_RL_WRITE_THROUGH_MODE) &&
                            (cpu_sel == MR_RAID_CTX_CPUSEL_FCFS))
                                cpu_sel = MR_RAID_CTX_CPUSEL_0;
@@ -2338,7 +2338,7 @@ megasas_build_ldio_fusion(struct megasas_instance *instance,
                                fp_possible = false;
                                atomic_dec(&instance->fw_outstanding);
                        } else if ((scsi_buff_len > MR_LARGE_IO_MIN_SIZE) ||
-                                  atomic_dec_if_positive(&mrdev_priv->r1_ldio_hint)) {
+                                  (atomic_dec_if_positive(&mrdev_priv->r1_ldio_hint) > 0)) {
                                fp_possible = false;
                                atomic_dec(&instance->fw_outstanding);
                                if (scsi_buff_len > MR_LARGE_IO_MIN_SIZE)
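
The r1_ldio_hint change turns on a subtlety of atomic_dec_if_positive(): it returns the would-be decremented value (old - 1), so a counter already at 0 yields -1 without decrementing — and -1 is truthy, so the bare test fired precisely when the hint was exhausted. Comparing > 0 takes the branch only while the counter is still positive after the decrement. A standalone (non-atomic, userspace) model of the two tests:

	#include <stdio.h>

	static int dec_if_positive(int *v)	/* models atomic_dec_if_positive() */
	{
		int dec = *v - 1;
		if (dec >= 0)
			*v = dec;		/* only decrement if result >= 0 */
		return dec;			/* old - 1, even when not stored */
	}

	int main(void)
	{
		int hint = 0;

		if (dec_if_positive(&hint))		/* returns -1: wrongly true */
			printf("bare test fires at hint==0\n");
		if (dec_if_positive(&hint) > 0)		/* -1 > 0: correctly false */
			printf("never printed\n");
		return 0;
	}
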
index 67c0d5a..de95293 100644 (file)
@@ -3,6 +3,7 @@ config SCSI_QLA_FC
        depends on PCI && SCSI
        depends on SCSI_FC_ATTRS
        select FW_LOADER
+       select BTREE
        ---help---
        This qla2xxx driver supports all QLogic Fibre Channel
        PCI and PCIe host adapters.
index f610103..435ff7f 100644 (file)
@@ -2154,8 +2154,6 @@ qla24xx_vport_delete(struct fc_vport *fc_vport)
                    "Timer for the VP[%d] has stopped\n", vha->vp_idx);
        }
 
-       BUG_ON(atomic_read(&vha->vref_count));
-
        qla2x00_free_fcports(vha);
 
        mutex_lock(&ha->vport_lock);
@@ -2166,7 +2164,7 @@ qla24xx_vport_delete(struct fc_vport *fc_vport)
        dma_free_coherent(&ha->pdev->dev, vha->gnl.size, vha->gnl.l,
            vha->gnl.ldma);
 
-       if (vha->qpair->vp_idx == vha->vp_idx) {
+       if (vha->qpair && vha->qpair->vp_idx == vha->vp_idx) {
                if (qla2xxx_delete_qpair(vha, vha->qpair) != QLA_SUCCESS)
                        ql_log(ql_log_warn, vha, 0x7087,
                            "Queue Pair delete failed.\n");
index e1fc4e6..c6bffe9 100644 (file)
@@ -348,6 +348,7 @@ ql_log_pci(uint32_t, struct pci_dev *pdev, int32_t, const char *fmt, ...);
 #define ql_dbg_tgt     0x00004000 /* Target mode */
 #define ql_dbg_tgt_mgt 0x00002000 /* Target mode management */
 #define ql_dbg_tgt_tmr 0x00001000 /* Target mode task management */
+#define ql_dbg_tgt_dif  0x00000800 /* Target mode dif */
 
 extern int qla27xx_dump_mpi_ram(struct qla_hw_data *, uint32_t, uint32_t *,
        uint32_t, void **);
index 625d438..ae11901 100644 (file)
@@ -25,6 +25,7 @@
 #include <linux/firmware.h>
 #include <linux/aer.h>
 #include <linux/mutex.h>
+#include <linux/btree.h>
 
 #include <scsi/scsi.h>
 #include <scsi/scsi_host.h>
@@ -395,11 +396,15 @@ struct srb_iocb {
                        struct completion comp;
                } abt;
                struct ct_arg ctarg;
+#define MAX_IOCB_MB_REG 28
+#define SIZEOF_IOCB_MB_REG (MAX_IOCB_MB_REG * sizeof(uint16_t))
                struct {
-                       __le16 in_mb[28];       /* fr fw */
-                       __le16 out_mb[28];      /* to fw */
+                       __le16 in_mb[MAX_IOCB_MB_REG];  /* from FW */
+                       __le16 out_mb[MAX_IOCB_MB_REG]; /* to FW */
                        void *out, *in;
                        dma_addr_t out_dma, in_dma;
+                       struct completion comp;
+                       int rc;
                } mbx;
                struct {
                        struct imm_ntfy_from_isp *ntfy;
@@ -437,7 +442,7 @@ typedef struct srb {
        uint32_t handle;
        uint16_t flags;
        uint16_t type;
-       char *name;
+       const char *name;
        int iocbs;
        struct qla_qpair *qpair;
        u32 gen1;       /* scratch */
@@ -2300,6 +2305,8 @@ typedef struct fc_port {
        struct ct_sns_desc ct_desc;
        enum discovery_state disc_state;
        enum login_state fw_login_state;
+       unsigned long plogi_nack_done_deadline;
+
        u32 login_gen, last_login_gen;
        u32 rscn_gen, last_rscn_gen;
        u32 chip_reset;
@@ -3106,6 +3113,16 @@ struct qla_chip_state_84xx {
        uint32_t gold_fw_version;
 };
 
+struct qla_dif_statistics {
+       uint64_t dif_input_bytes;
+       uint64_t dif_output_bytes;
+       uint64_t dif_input_requests;
+       uint64_t dif_output_requests;
+       uint32_t dif_guard_err;
+       uint32_t dif_ref_tag_err;
+       uint32_t dif_app_tag_err;
+};
+
 struct qla_statistics {
        uint32_t total_isp_aborts;
        uint64_t input_bytes;
@@ -3118,6 +3135,8 @@ struct qla_statistics {
        uint32_t stat_max_pend_cmds;
        uint32_t stat_max_qfull_cmds_alloc;
        uint32_t stat_max_qfull_cmds_dropped;
+
+       struct qla_dif_statistics qla_dif_stats;
 };
 
 struct bidi_statistics {
@@ -3125,6 +3144,16 @@ struct bidi_statistics {
        unsigned long long transfer_bytes;
 };
 
+struct qla_tc_param {
+       struct scsi_qla_host *vha;
+       uint32_t blk_sz;
+       uint32_t bufflen;
+       struct scatterlist *sg;
+       struct scatterlist *prot_sg;
+       struct crc_context *ctx;
+       uint8_t *ctx_dsd_alloced;
+};
+
 /* Multi queue support */
 #define MBC_INITIALIZE_MULTIQ 0x1f
 #define QLA_QUE_PAGE 0X1000
@@ -3272,6 +3301,8 @@ struct qlt_hw_data {
        uint8_t tgt_node_name[WWN_SIZE];
 
        struct dentry *dfs_tgt_sess;
+       struct dentry *dfs_tgt_port_database;
+
        struct list_head q_full_list;
        uint32_t num_pend_cmds;
        uint32_t num_qfull_cmds_alloc;
@@ -3281,6 +3312,7 @@ struct qlt_hw_data {
        spinlock_t sess_lock;
        int rspq_vector_cpuid;
        spinlock_t atio_lock ____cacheline_aligned;
+       struct btree_head32 host_map;
 };
 
 #define MAX_QFULL_CMDS_ALLOC   8192
@@ -3290,6 +3322,10 @@ struct qlt_hw_data {
 
 #define LEAK_EXCHG_THRESH_HOLD_PERCENT 75      /* 75 percent */
 
+#define QLA_EARLY_LINKUP(_ha) \
+       ((_ha->flags.n2n_ae || _ha->flags.lip_ae) && \
+        _ha->flags.fw_started && !_ha->flags.fw_init_done)
+
 /*
  * Qlogic host adapter specific data structure.
 */
@@ -3339,7 +3375,11 @@ struct qla_hw_data {
                uint32_t        fawwpn_enabled:1;
                uint32_t        exlogins_enabled:1;
                uint32_t        exchoffld_enabled:1;
-               /* 35 bits */
+
+               uint32_t        lip_ae:1;
+               uint32_t        n2n_ae:1;
+               uint32_t        fw_started:1;
+               uint32_t        fw_init_done:1;
        } flags;
 
        /* This spinlock is used to protect "io transactions", you must
@@ -3432,7 +3472,6 @@ struct qla_hw_data {
 #define P2P_LOOP  3
        uint8_t         interrupts_on;
        uint32_t        isp_abort_cnt;
-
 #define PCI_DEVICE_ID_QLOGIC_ISP2532    0x2532
 #define PCI_DEVICE_ID_QLOGIC_ISP8432    0x8432
 #define PCI_DEVICE_ID_QLOGIC_ISP8001   0x8001
@@ -3913,6 +3952,7 @@ typedef struct scsi_qla_host {
        struct list_head vp_fcports;    /* list of fcports */
        struct list_head work_list;
        spinlock_t work_lock;
+       struct work_struct iocb_work;
 
        /* Commonly used flags and state information. */
        struct Scsi_Host *host;
@@ -4076,6 +4116,7 @@ typedef struct scsi_qla_host {
        /* Count of active session/fcport */
        int fcport_count;
        wait_queue_head_t fcport_waitQ;
+       wait_queue_head_t vref_waitq;
 } scsi_qla_host_t;
 
 struct qla27xx_image_status {
@@ -4131,14 +4172,17 @@ struct qla2_sgx {
        mb();                                           \
        if (__vha->flags.delete_progress) {             \
                atomic_dec(&__vha->vref_count);         \
+               wake_up(&__vha->vref_waitq);            \
                __bail = 1;                             \
        } else {                                        \
                __bail = 0;                             \
        }                                               \
 } while (0)
 
-#define QLA_VHA_MARK_NOT_BUSY(__vha)                   \
+#define QLA_VHA_MARK_NOT_BUSY(__vha) do {              \
        atomic_dec(&__vha->vref_count);                 \
+       wake_up(&__vha->vref_waitq);                    \
+} while (0)                                            \
 
 #define QLA_QPAIR_MARK_BUSY(__qpair, __bail) do {      \
        atomic_inc(&__qpair->ref_count);                \
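
QLA_VHA_MARK_NOT_BUSY() now expands to two statements — the decrement plus the wake_up() pairing with the new vref_waitq — so it gains the usual do { ... } while (0) wrapper to stay safe as a single statement. Illustrative, with hypothetical macro names:

	#define MARK_NOT_BUSY_BAD(v)			\
		atomic_dec(&(v)->vref_count);		\
		wake_up(&(v)->vref_waitq)

	#define MARK_NOT_BUSY_OK(v) do {		\
		atomic_dec(&(v)->vref_count);		\
		wake_up(&(v)->vref_waitq);		\
	} while (0)

		if (done)
			MARK_NOT_BUSY_BAD(vha);	/* only the atomic_dec() is guarded;
						 * the wake_up() always runs */
		if (done)
			MARK_NOT_BUSY_OK(vha);	/* both statements guarded */
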
index b48cce6..989e17b 100644 (file)
@@ -19,11 +19,11 @@ qla2x00_dfs_tgt_sess_show(struct seq_file *s, void *unused)
        struct qla_hw_data *ha = vha->hw;
        unsigned long flags;
        struct fc_port *sess = NULL;
-       struct qla_tgt *tgt= vha->vha_tgt.qla_tgt;
+       struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
 
-       seq_printf(s, "%s\n",vha->host_str);
+       seq_printf(s, "%s\n", vha->host_str);
        if (tgt) {
-               seq_printf(s, "Port ID   Port Name                Handle\n");
+               seq_puts(s, "Port ID   Port Name                Handle\n");
 
                spin_lock_irqsave(&ha->tgt.sess_lock, flags);
                list_for_each_entry(sess, &vha->vp_fcports, list)
@@ -44,7 +44,6 @@ qla2x00_dfs_tgt_sess_open(struct inode *inode, struct file *file)
        return single_open(file, qla2x00_dfs_tgt_sess_show, vha);
 }
 
-
 static const struct file_operations dfs_tgt_sess_ops = {
        .open           = qla2x00_dfs_tgt_sess_open,
        .read           = seq_read,
@@ -52,6 +51,78 @@ static const struct file_operations dfs_tgt_sess_ops = {
        .release        = single_release,
 };
 
+static int
+qla2x00_dfs_tgt_port_database_show(struct seq_file *s, void *unused)
+{
+       scsi_qla_host_t *vha = s->private;
+       struct qla_hw_data *ha = vha->hw;
+       struct gid_list_info *gid_list;
+       dma_addr_t gid_list_dma;
+       fc_port_t fc_port;
+       char *id_iter;
+       int rc, i;
+       uint16_t entries, loop_id;
+       struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
+
+       seq_printf(s, "%s\n", vha->host_str);
+       if (tgt) {
+               gid_list = dma_alloc_coherent(&ha->pdev->dev,
+                   qla2x00_gid_list_size(ha),
+                   &gid_list_dma, GFP_KERNEL);
+               if (!gid_list) {
+                       ql_dbg(ql_dbg_user, vha, 0x705c,
+                           "DMA allocation failed for %u\n",
+                           qla2x00_gid_list_size(ha));
+                       return 0;
+               }
+
+               rc = qla24xx_gidlist_wait(vha, gid_list, gid_list_dma,
+                   &entries);
+               if (rc != QLA_SUCCESS)
+                       goto out_free_id_list;
+
+               id_iter = (char *)gid_list;
+
+               seq_puts(s, "Port Name  Port ID         Loop ID\n");
+
+               for (i = 0; i < entries; i++) {
+                       struct gid_list_info *gid =
+                           (struct gid_list_info *)id_iter;
+                       loop_id = le16_to_cpu(gid->loop_id);
+                       memset(&fc_port, 0, sizeof(fc_port_t));
+
+                       fc_port.loop_id = loop_id;
+
+                       rc = qla24xx_gpdb_wait(vha, &fc_port, 0);
+                       seq_printf(s, "%8phC  %02x%02x%02x  %d\n",
+                               fc_port.port_name, fc_port.d_id.b.domain,
+                               fc_port.d_id.b.area, fc_port.d_id.b.al_pa,
+                               fc_port.loop_id);
+                       id_iter += ha->gid_list_info_size;
+               }
+out_free_id_list:
+               dma_free_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha),
+                   gid_list, gid_list_dma);
+       }
+
+       return 0;
+}
+
+static int
+qla2x00_dfs_tgt_port_database_open(struct inode *inode, struct file *file)
+{
+       scsi_qla_host_t *vha = inode->i_private;
+
+       return single_open(file, qla2x00_dfs_tgt_port_database_show, vha);
+}
+
+static const struct file_operations dfs_tgt_port_database_ops = {
+       .open           = qla2x00_dfs_tgt_port_database_open,
+       .read           = seq_read,
+       .llseek         = seq_lseek,
+       .release        = single_release,
+};
+
 static int
 qla_dfs_fw_resource_cnt_show(struct seq_file *s, void *unused)
 {
@@ -114,6 +185,21 @@ qla_dfs_tgt_counters_show(struct seq_file *s, void *unused)
        seq_printf(s, "num Q full sent = %lld\n",
                vha->tgt_counters.num_q_full_sent);
 
+       /* DIF stats */
+       seq_printf(s, "DIF Inp Bytes = %lld\n",
+               vha->qla_stats.qla_dif_stats.dif_input_bytes);
+       seq_printf(s, "DIF Outp Bytes = %lld\n",
+               vha->qla_stats.qla_dif_stats.dif_output_bytes);
+       seq_printf(s, "DIF Inp Req = %lld\n",
+               vha->qla_stats.qla_dif_stats.dif_input_requests);
+       seq_printf(s, "DIF Outp Req = %lld\n",
+               vha->qla_stats.qla_dif_stats.dif_output_requests);
+       seq_printf(s, "DIF Guard err = %d\n",
+               vha->qla_stats.qla_dif_stats.dif_guard_err);
+       seq_printf(s, "DIF Ref tag err = %d\n",
+               vha->qla_stats.qla_dif_stats.dif_ref_tag_err);
+       seq_printf(s, "DIF App tag err = %d\n",
+               vha->qla_stats.qla_dif_stats.dif_app_tag_err);
        return 0;
 }
 
@@ -281,6 +367,14 @@ create_nodes:
                goto out;
        }
 
+       ha->tgt.dfs_tgt_port_database = debugfs_create_file("tgt_port_database",
+           S_IRUSR,  ha->dfs_dir, vha, &dfs_tgt_port_database_ops);
+       if (!ha->tgt.dfs_tgt_port_database) {
+               ql_log(ql_log_warn, vha, 0xffff,
+                   "Unable to create debugFS tgt_port_database node.\n");
+               goto out;
+       }
+
        ha->dfs_fce = debugfs_create_file("fce", S_IRUSR, ha->dfs_dir, vha,
            &dfs_fce_ops);
        if (!ha->dfs_fce) {
@@ -311,6 +405,11 @@ qla2x00_dfs_remove(scsi_qla_host_t *vha)
                ha->tgt.dfs_tgt_sess = NULL;
        }
 
+       if (ha->tgt.dfs_tgt_port_database) {
+               debugfs_remove(ha->tgt.dfs_tgt_port_database);
+               ha->tgt.dfs_tgt_port_database = NULL;
+       }
+
        if (ha->dfs_fw_resource_cnt) {
                debugfs_remove(ha->dfs_fw_resource_cnt);
                ha->dfs_fw_resource_cnt = NULL;
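
The new tgt_port_database node reuses the stock single_open() seq_file pattern already used for tgt_sess: a show() callback printing into the seq_file, an open() binding it to the pointer stashed in i_private, and the canned seq_read/seq_lseek/single_release handlers. Reduced to a skeleton with hypothetical names:

	static int demo_show(struct seq_file *s, void *unused)
	{
		seq_puts(s, "hello from debugfs\n");
		return 0;
	}

	static int demo_open(struct inode *inode, struct file *file)
	{
		return single_open(file, demo_show, inode->i_private);
	}

	static const struct file_operations demo_fops = {
		.open		= demo_open,
		.read		= seq_read,
		.llseek		= seq_lseek,
		.release	= single_release,
	};

	/* registered with:
	 *	debugfs_create_file("demo", S_IRUSR, parent, data, &demo_fops);
	 */
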
index b3d6441..5b24517 100644 (file)
@@ -193,6 +193,7 @@ extern int qla24xx_post_upd_fcport_work(struct scsi_qla_host *, fc_port_t *);
 void qla2x00_handle_login_done_event(struct scsi_qla_host *, fc_port_t *,
        uint16_t *);
 int qla24xx_post_gnl_work(struct scsi_qla_host *, fc_port_t *);
+int qla24xx_async_abort_cmd(srb_t *);
 
 /*
  * Global Functions in qla_mid.c source file.
@@ -256,11 +257,11 @@ extern unsigned long qla2x00_get_async_timeout(struct scsi_qla_host *);
 extern void *qla2x00_alloc_iocbs(scsi_qla_host_t *, srb_t *);
 extern int qla2x00_issue_marker(scsi_qla_host_t *, int);
 extern int qla24xx_walk_and_build_sglist_no_difb(struct qla_hw_data *, srb_t *,
-       uint32_t *, uint16_t, struct qla_tgt_cmd *);
+       uint32_t *, uint16_t, struct qla_tc_param *);
 extern int qla24xx_walk_and_build_sglist(struct qla_hw_data *, srb_t *,
-       uint32_t *, uint16_t, struct qla_tgt_cmd *);
+       uint32_t *, uint16_t, struct qla_tc_param *);
 extern int qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *, srb_t *,
-       uint32_t *, uint16_t, struct qla_tgt_cmd *);
+       uint32_t *, uint16_t, struct qla_tc_param *);
 extern int qla24xx_get_one_block_sg(uint32_t, struct qla2_sgx *, uint32_t *);
 extern int qla24xx_configure_prot_mode(srb_t *, uint16_t *);
 extern int qla24xx_build_scsi_crc_2_iocbs(srb_t *,
@@ -368,7 +369,7 @@ qla2x00_get_link_status(scsi_qla_host_t *, uint16_t, struct link_statistics *,
 
 extern int
 qla24xx_get_isp_stats(scsi_qla_host_t *, struct link_statistics *,
-    dma_addr_t, uint);
+    dma_addr_t, uint16_t);
 
 extern int qla24xx_abort_command(srb_t *);
 extern int qla24xx_async_abort_command(srb_t *);
@@ -472,6 +473,13 @@ qla2x00_dump_mctp_data(scsi_qla_host_t *, dma_addr_t, uint32_t, uint32_t);
 extern int
 qla26xx_dport_diagnostics(scsi_qla_host_t *, void *, uint, uint);
 
+int qla24xx_send_mb_cmd(struct scsi_qla_host *, mbx_cmd_t *);
+int qla24xx_gpdb_wait(struct scsi_qla_host *, fc_port_t *, u8);
+int qla24xx_gidlist_wait(struct scsi_qla_host *, void *, dma_addr_t,
+    uint16_t *);
+int __qla24xx_parse_gpdb(struct scsi_qla_host *, fc_port_t *,
+       struct port_database_24xx *);
+
 /*
  * Global Function Prototypes in qla_isr.c source file.
  */
@@ -846,5 +854,7 @@ extern struct fc_port *qlt_find_sess_invalidate_other(scsi_qla_host_t *,
        uint64_t wwn, port_id_t port_id, uint16_t loop_id, struct fc_port **);
 void qla24xx_delete_sess_fn(struct work_struct *);
 void qlt_unknown_atio_work_fn(struct work_struct *);
+void qlt_update_host_map(struct scsi_qla_host *, port_id_t);
+void qlt_remove_target_resources(struct qla_hw_data *);
 
 #endif /* _QLA_GBL_H */
index 32fb900..f9d2fe7 100644 (file)
@@ -629,7 +629,6 @@ void qla24xx_async_gpdb_sp_done(void *s, int res)
        struct srb *sp = s;
        struct scsi_qla_host *vha = sp->vha;
        struct qla_hw_data *ha = vha->hw;
-       uint64_t zero = 0;
        struct port_database_24xx *pd;
        fc_port_t *fcport = sp->fcport;
        u16 *mb = sp->u.iocb_cmd.u.mbx.in_mb;
@@ -649,48 +648,7 @@ void qla24xx_async_gpdb_sp_done(void *s, int res)
 
        pd = (struct port_database_24xx *)sp->u.iocb_cmd.u.mbx.in;
 
-       /* Check for logged in state. */
-       if (pd->current_login_state != PDS_PRLI_COMPLETE &&
-           pd->last_login_state != PDS_PRLI_COMPLETE) {
-               ql_dbg(ql_dbg_mbx, vha, 0xffff,
-                   "Unable to verify login-state (%x/%x) for "
-                   "loop_id %x.\n", pd->current_login_state,
-                   pd->last_login_state, fcport->loop_id);
-               rval = QLA_FUNCTION_FAILED;
-               goto gpd_error_out;
-       }
-
-       if (fcport->loop_id == FC_NO_LOOP_ID ||
-           (memcmp(fcport->port_name, (uint8_t *)&zero, 8) &&
-               memcmp(fcport->port_name, pd->port_name, 8))) {
-               /* We lost the device mid way. */
-               rval = QLA_NOT_LOGGED_IN;
-               goto gpd_error_out;
-       }
-
-       /* Names are little-endian. */
-       memcpy(fcport->node_name, pd->node_name, WWN_SIZE);
-
-       /* Get port_id of device. */
-       fcport->d_id.b.domain = pd->port_id[0];
-       fcport->d_id.b.area = pd->port_id[1];
-       fcport->d_id.b.al_pa = pd->port_id[2];
-       fcport->d_id.b.rsvd_1 = 0;
-
-       /* If not target must be initiator or unknown type. */
-       if ((pd->prli_svc_param_word_3[0] & BIT_4) == 0)
-               fcport->port_type = FCT_INITIATOR;
-       else
-               fcport->port_type = FCT_TARGET;
-
-       /* Passback COS information. */
-       fcport->supported_classes = (pd->flags & PDF_CLASS_2) ?
-               FC_COS_CLASS2 : FC_COS_CLASS3;
-
-       if (pd->prli_svc_param_word_3[0] & BIT_7) {
-               fcport->flags |= FCF_CONF_COMP_SUPPORTED;
-               fcport->conf_compl_supported = 1;
-       }
+       rval = __qla24xx_parse_gpdb(vha, fcport, pd);
 
 gpd_error_out:
        memset(&ea, 0, sizeof(ea));
@@ -876,10 +834,14 @@ int qla24xx_fcport_handle_login(struct scsi_qla_host *vha, fc_port_t *fcport)
        fcport->login_retry--;
 
        if ((fcport->fw_login_state == DSC_LS_PLOGI_PEND) ||
-           (fcport->fw_login_state == DSC_LS_PLOGI_COMP) ||
            (fcport->fw_login_state == DSC_LS_PRLI_PEND))
                return 0;
 
+       if (fcport->fw_login_state == DSC_LS_PLOGI_COMP) {
+               if (time_before_eq(jiffies, fcport->plogi_nack_done_deadline))
+                       return 0;
+       }
+
        /* for pure Target Mode. Login will not be initiated */
        if (vha->host->active_mode == MODE_TARGET)
                return 0;
@@ -1041,10 +1003,14 @@ void qla24xx_handle_relogin_event(scsi_qla_host_t *vha,
                fcport->flags);
 
        if ((fcport->fw_login_state == DSC_LS_PLOGI_PEND) ||
-           (fcport->fw_login_state == DSC_LS_PLOGI_COMP) ||
            (fcport->fw_login_state == DSC_LS_PRLI_PEND))
                return;
 
+       if (fcport->fw_login_state == DSC_LS_PLOGI_COMP) {
+               if (time_before_eq(jiffies, fcport->plogi_nack_done_deadline))
+                       return;
+       }
+
        if (fcport->flags & FCF_ASYNC_SENT) {
                fcport->login_retry++;
                set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
@@ -1258,7 +1224,7 @@ qla24xx_abort_sp_done(void *ptr, int res)
        complete(&abt->u.abt.comp);
 }
 
-static int
+int
 qla24xx_async_abort_cmd(srb_t *cmd_sp)
 {
        scsi_qla_host_t *vha = cmd_sp->vha;
@@ -3212,6 +3178,7 @@ next_check:
        } else {
                ql_dbg(ql_dbg_init, vha, 0x00d3,
                    "Init Firmware -- success.\n");
+               ha->flags.fw_started = 1;
        }
 
        return (rval);
@@ -3374,8 +3341,8 @@ qla2x00_configure_hba(scsi_qla_host_t *vha)
        uint8_t       domain;
        char            connect_type[22];
        struct qla_hw_data *ha = vha->hw;
-       unsigned long flags;
        scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
+       port_id_t id;
 
        /* Get host addresses. */
        rval = qla2x00_get_adapter_id(vha,
@@ -3453,13 +3420,11 @@ qla2x00_configure_hba(scsi_qla_host_t *vha)
 
        /* Save Host port and loop ID. */
        /* byte order - Big Endian */
-       vha->d_id.b.domain = domain;
-       vha->d_id.b.area = area;
-       vha->d_id.b.al_pa = al_pa;
-
-       spin_lock_irqsave(&ha->vport_slock, flags);
-       qlt_update_vp_map(vha, SET_AL_PA);
-       spin_unlock_irqrestore(&ha->vport_slock, flags);
+       id.b.domain = domain;
+       id.b.area = area;
+       id.b.al_pa = al_pa;
+       id.b.rsvd_1 = 0;
+       qlt_update_host_map(vha, id);
 
        if (!vha->flags.init_done)
                ql_log(ql_log_info, vha, 0x2010,
@@ -4036,6 +4001,7 @@ qla2x00_configure_loop(scsi_qla_host_t *vha)
                        atomic_set(&vha->loop_state, LOOP_READY);
                        ql_dbg(ql_dbg_disc, vha, 0x2069,
                            "LOOP READY.\n");
+                       ha->flags.fw_init_done = 1;
 
                        /*
                         * Process any ATIO queue entries that came in
@@ -5148,6 +5114,7 @@ qla2x00_update_fcports(scsi_qla_host_t *base_vha)
                        }
                }
                atomic_dec(&vha->vref_count);
+               wake_up(&vha->vref_waitq);
        }
        spin_unlock_irqrestore(&ha->vport_slock, flags);
 }
@@ -5526,6 +5493,11 @@ qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha)
        if (!(IS_P3P_TYPE(ha)))
                ha->isp_ops->reset_chip(vha);
 
+       ha->flags.n2n_ae = 0;
+       ha->flags.lip_ae = 0;
+       ha->current_topology = 0;
+       ha->flags.fw_started = 0;
+       ha->flags.fw_init_done = 0;
        ha->chip_reset++;
 
        atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
@@ -6802,6 +6774,8 @@ qla2x00_try_to_stop_firmware(scsi_qla_host_t *vha)
                return;
        if (!ha->fw_major_version)
                return;
+       if (!ha->flags.fw_started)
+               return;
 
        ret = qla2x00_stop_firmware(vha);
        for (retries = 5; ret != QLA_SUCCESS && ret != QLA_FUNCTION_TIMEOUT &&
@@ -6815,6 +6789,9 @@ qla2x00_try_to_stop_firmware(scsi_qla_host_t *vha)
                    "Attempting retry of stop-firmware command.\n");
                ret = qla2x00_stop_firmware(vha);
        }
+
+       ha->flags.fw_started = 0;
+       ha->flags.fw_init_done = 0;
 }
 
 int
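
Both login paths above now also defer while fcport->plogi_nack_done_deadline (set to jiffies + HZ in the qla_target.c hunks further down) has not yet expired. time_before_eq() compares jiffies values with signed arithmetic, so the test stays correct across jiffies wraparound; a minimal sketch:

	unsigned long deadline = jiffies + HZ;	/* ~1 second grace window */

	if (time_before_eq(jiffies, deadline))
		return 0;	/* still inside the window: defer the relogin */
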
index 5350792..ea027f6 100644 (file)
@@ -889,7 +889,7 @@ qla24xx_get_one_block_sg(uint32_t blk_sz, struct qla2_sgx *sgx,
 
 int
 qla24xx_walk_and_build_sglist_no_difb(struct qla_hw_data *ha, srb_t *sp,
-       uint32_t *dsd, uint16_t tot_dsds, struct qla_tgt_cmd *tc)
+       uint32_t *dsd, uint16_t tot_dsds, struct qla_tc_param *tc)
 {
        void *next_dsd;
        uint8_t avail_dsds = 0;
@@ -898,7 +898,6 @@ qla24xx_walk_and_build_sglist_no_difb(struct qla_hw_data *ha, srb_t *sp,
        struct scatterlist *sg_prot;
        uint32_t *cur_dsd = dsd;
        uint16_t        used_dsds = tot_dsds;
-
        uint32_t        prot_int; /* protection interval */
        uint32_t        partial;
        struct qla2_sgx sgx;
@@ -966,7 +965,7 @@ alloc_and_fill:
                        } else {
                                list_add_tail(&dsd_ptr->list,
                                    &(tc->ctx->dsd_list));
-                               tc->ctx_dsd_alloced = 1;
+                               *tc->ctx_dsd_alloced = 1;
                        }
 
 
@@ -1005,7 +1004,7 @@ alloc_and_fill:
 
 int
 qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd,
-       uint16_t tot_dsds, struct qla_tgt_cmd *tc)
+       uint16_t tot_dsds, struct qla_tc_param *tc)
 {
        void *next_dsd;
        uint8_t avail_dsds = 0;
@@ -1066,7 +1065,7 @@ qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd,
                        } else {
                                list_add_tail(&dsd_ptr->list,
                                    &(tc->ctx->dsd_list));
-                               tc->ctx_dsd_alloced = 1;
+                               *tc->ctx_dsd_alloced = 1;
                        }
 
                        /* add new list to cmd iocb or last list */
@@ -1092,7 +1091,7 @@ qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd,
 
 int
 qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp,
-       uint32_t *dsd, uint16_t tot_dsds, struct qla_tgt_cmd *tc)
+       uint32_t *dsd, uint16_t tot_dsds, struct qla_tc_param *tc)
 {
        void *next_dsd;
        uint8_t avail_dsds = 0;
@@ -1158,7 +1157,7 @@ qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp,
                        } else {
                                list_add_tail(&dsd_ptr->list,
                                    &(tc->ctx->dsd_list));
-                               tc->ctx_dsd_alloced = 1;
+                               *tc->ctx_dsd_alloced = 1;
                        }
 
                        /* add new list to cmd iocb or last list */
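
These builders previously took a struct qla_tgt_cmd, tying them to target mode; struct qla_tc_param (added in the qla_def.h hunk above) carries only what they need, and ctx_dsd_alloced becomes a pointer written through as *tc->ctx_dsd_alloced = 1 so the flag lands in whatever command structure the caller owns. A hypothetical call site under those assumptions (the cmd-side names here are illustrative):

	uint8_t ctx_alloced = 0;
	struct qla_tc_param tc = {
		.vha		 = vha,
		.blk_sz		 = cmd->blk_sz,
		.bufflen	 = cmd->bufflen,
		.sg		 = cmd->sg,
		.prot_sg	 = cmd->prot_sg,
		.ctx		 = cmd->ctx,
		.ctx_dsd_alloced = &ctx_alloced,
	};

	if (qla24xx_walk_and_build_sglist(ha, sp, cur_dsd, tot_dsds, &tc))
		return -EIO;
	if (ctx_alloced)
		cmd->ctx_dsd_alloced = 1;	/* propagate to the real command */
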
index 3c66ea2..3203367 100644 (file)
@@ -708,6 +708,8 @@ skip_rio:
                    "mbx7=%xh.\n", mb[1], mb[2], mb[3], mbx);
 
                ha->isp_ops->fw_dump(vha, 1);
+               ha->flags.fw_init_done = 0;
+               ha->flags.fw_started = 0;
 
                if (IS_FWI2_CAPABLE(ha)) {
                        if (mb[1] == 0 && mb[2] == 0) {
@@ -761,6 +763,9 @@ skip_rio:
                break;
 
        case MBA_LIP_OCCURRED:          /* Loop Initialization Procedure */
+               ha->flags.lip_ae = 1;
+               ha->flags.n2n_ae = 0;
+
                ql_dbg(ql_dbg_async, vha, 0x5009,
                    "LIP occurred (%x).\n", mb[1]);
 
@@ -797,6 +802,10 @@ skip_rio:
                break;
 
        case MBA_LOOP_DOWN:             /* Loop Down Event */
+               ha->flags.n2n_ae = 0;
+               ha->flags.lip_ae = 0;
+               ha->current_topology = 0;
+
                mbx = (IS_QLA81XX(ha) || IS_QLA8031(ha))
                        ? RD_REG_WORD(&reg24->mailbox4) : 0;
                mbx = (IS_P3P_TYPE(ha)) ? RD_REG_WORD(&reg82->mailbox_out[4])
@@ -866,6 +875,9 @@ skip_rio:
 
        /* case MBA_DCBX_COMPLETE: */
        case MBA_POINT_TO_POINT:        /* Point-to-Point */
+               ha->flags.lip_ae = 0;
+               ha->flags.n2n_ae = 1;
+
                if (IS_QLA2100(ha))
                        break;
 
@@ -1620,9 +1632,9 @@ qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req,
                QLA_LOGIO_LOGIN_RETRIED : 0;
        if (logio->entry_status) {
                ql_log(ql_log_warn, fcport->vha, 0x5034,
-                   "Async-%s error entry - hdl=%x"
+                   "Async-%s error entry - %8phC hdl=%x"
                    "portid=%02x%02x%02x entry-status=%x.\n",
-                   type, sp->handle, fcport->d_id.b.domain,
+                   type, fcport->port_name, sp->handle, fcport->d_id.b.domain,
                    fcport->d_id.b.area, fcport->d_id.b.al_pa,
                    logio->entry_status);
                ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x504d,
@@ -1633,8 +1645,9 @@ qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req,
 
        if (le16_to_cpu(logio->comp_status) == CS_COMPLETE) {
                ql_dbg(ql_dbg_async, fcport->vha, 0x5036,
-                   "Async-%s complete - hdl=%x portid=%02x%02x%02x "
-                   "iop0=%x.\n", type, sp->handle, fcport->d_id.b.domain,
+                   "Async-%s complete - %8phC hdl=%x portid=%02x%02x%02x "
+                   "iop0=%x.\n", type, fcport->port_name, sp->handle,
+                   fcport->d_id.b.domain,
                    fcport->d_id.b.area, fcport->d_id.b.al_pa,
                    le32_to_cpu(logio->io_parameter[0]));
 
@@ -1674,6 +1687,17 @@ qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req,
        case LSC_SCODE_NPORT_USED:
                data[0] = MBS_LOOP_ID_USED;
                break;
+       case LSC_SCODE_CMD_FAILED:
+               if (iop[1] == 0x0606) {
+                       /*
+                        * PLOGI/PRLI completed: we must have received a
+                        * PLOGI/PRLI that the target side ACKed.
+                        */
+                       data[0] = MBS_COMMAND_COMPLETE;
+                       goto logio_done;
+               }
+               data[0] = MBS_COMMAND_ERROR;
+               break;
        case LSC_SCODE_NOXCB:
                vha->hw->exch_starvation++;
                if (vha->hw->exch_starvation > 5) {
@@ -1695,8 +1719,9 @@ qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req,
        }
 
        ql_dbg(ql_dbg_async, fcport->vha, 0x5037,
-           "Async-%s failed - hdl=%x portid=%02x%02x%02x comp=%x "
-           "iop0=%x iop1=%x.\n", type, sp->handle, fcport->d_id.b.domain,
+           "Async-%s failed - %8phC hdl=%x portid=%02x%02x%02x comp=%x "
+           "iop0=%x iop1=%x.\n", type, fcport->port_name,
+               sp->handle, fcport->d_id.b.domain,
            fcport->d_id.b.area, fcport->d_id.b.al_pa,
            le16_to_cpu(logio->comp_status),
            le32_to_cpu(logio->io_parameter[0]),
@@ -2679,7 +2704,7 @@ qla24xx_abort_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
                return;
 
        abt = &sp->u.iocb_cmd;
-       abt->u.abt.comp_status = le32_to_cpu(pkt->nport_handle);
+       abt->u.abt.comp_status = le16_to_cpu(pkt->nport_handle);
        sp->done(sp, 0);
 }
 
@@ -2693,7 +2718,7 @@ void qla24xx_process_response_queue(struct scsi_qla_host *vha,
        struct sts_entry_24xx *pkt;
        struct qla_hw_data *ha = vha->hw;
 
-       if (!vha->flags.online)
+       if (!ha->flags.fw_started)
                return;
 
        while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) {
index 35079f4..a113ab3 100644 (file)
 #include <linux/delay.h>
 #include <linux/gfp.h>
 
+static struct mb_cmd_name {
+       uint16_t cmd;
+       const char *str;
+} mb_str[] = {
+       {MBC_GET_PORT_DATABASE,         "GPDB"},
+       {MBC_GET_ID_LIST,               "GIDList"},
+       {MBC_GET_LINK_PRIV_STATS,       "Stats"},
+};
+
+static const char *mb_to_str(uint16_t cmd)
+{
+       int i;
+       struct mb_cmd_name *e;
+
+       for (i = 0; i < ARRAY_SIZE(mb_str); i++) {
+               e = mb_str + i;
+               if (cmd == e->cmd)
+                       return e->str;
+       }
+       return "unknown";
+}
+
 static struct rom_cmd {
        uint16_t cmd;
 } rom_cmds[] = {
@@ -2818,7 +2840,7 @@ qla2x00_get_link_status(scsi_qla_host_t *vha, uint16_t loop_id,
 
 int
 qla24xx_get_isp_stats(scsi_qla_host_t *vha, struct link_statistics *stats,
-    dma_addr_t stats_dma, uint options)
+    dma_addr_t stats_dma, uint16_t options)
 {
        int rval;
        mbx_cmd_t mc;
@@ -2828,19 +2850,17 @@ qla24xx_get_isp_stats(scsi_qla_host_t *vha, struct link_statistics *stats,
        ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1088,
            "Entered %s.\n", __func__);
 
-       mcp->mb[0] = MBC_GET_LINK_PRIV_STATS;
-       mcp->mb[2] = MSW(stats_dma);
-       mcp->mb[3] = LSW(stats_dma);
-       mcp->mb[6] = MSW(MSD(stats_dma));
-       mcp->mb[7] = LSW(MSD(stats_dma));
-       mcp->mb[8] = sizeof(struct link_statistics) / 4;
-       mcp->mb[9] = vha->vp_idx;
-       mcp->mb[10] = options;
-       mcp->out_mb = MBX_10|MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_0;
-       mcp->in_mb = MBX_2|MBX_1|MBX_0;
-       mcp->tov = MBX_TOV_SECONDS;
-       mcp->flags = IOCTL_CMD;
-       rval = qla2x00_mailbox_command(vha, mcp);
+       memset(&mc, 0, sizeof(mc));
+       mc.mb[0] = MBC_GET_LINK_PRIV_STATS;
+       mc.mb[2] = MSW(stats_dma);
+       mc.mb[3] = LSW(stats_dma);
+       mc.mb[6] = MSW(MSD(stats_dma));
+       mc.mb[7] = LSW(MSD(stats_dma));
+       mc.mb[8] = sizeof(struct link_statistics) / 4;
+       mc.mb[9] = cpu_to_le16(vha->vp_idx);
+       mc.mb[10] = cpu_to_le16(options);
+
+       rval = qla24xx_send_mb_cmd(vha, &mc);
 
        if (rval == QLA_SUCCESS) {
                if (mcp->mb[0] != MBS_COMMAND_COMPLETE) {
@@ -3603,6 +3623,7 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
        scsi_qla_host_t *vp = NULL;
        unsigned long   flags;
        int found;
+       port_id_t id;
 
        ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b6,
            "Entered %s.\n", __func__);
@@ -3610,28 +3631,27 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
        if (rptid_entry->entry_status != 0)
                return;
 
+       id.b.domain = rptid_entry->port_id[2];
+       id.b.area   = rptid_entry->port_id[1];
+       id.b.al_pa  = rptid_entry->port_id[0];
+       id.b.rsvd_1 = 0;
+
        if (rptid_entry->format == 0) {
                /* loop */
-               ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b7,
+               ql_dbg(ql_dbg_async, vha, 0x10b7,
                    "Format 0 : Number of VPs setup %d, number of "
                    "VPs acquired %d.\n", rptid_entry->vp_setup,
                    rptid_entry->vp_acquired);
-               ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b8,
+               ql_dbg(ql_dbg_async, vha, 0x10b8,
                    "Primary port id %02x%02x%02x.\n",
                    rptid_entry->port_id[2], rptid_entry->port_id[1],
                    rptid_entry->port_id[0]);
 
-               vha->d_id.b.domain = rptid_entry->port_id[2];
-               vha->d_id.b.area = rptid_entry->port_id[1];
-               vha->d_id.b.al_pa = rptid_entry->port_id[0];
-
-               spin_lock_irqsave(&ha->vport_slock, flags);
-               qlt_update_vp_map(vha, SET_AL_PA);
-               spin_unlock_irqrestore(&ha->vport_slock, flags);
+               qlt_update_host_map(vha, id);
 
        } else if (rptid_entry->format == 1) {
                /* fabric */
-               ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b9,
+               ql_dbg(ql_dbg_async, vha, 0x10b9,
                    "Format 1: VP[%d] enabled - status %d - with "
                    "port id %02x%02x%02x.\n", rptid_entry->vp_idx,
                        rptid_entry->vp_status,
@@ -3653,12 +3673,7 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
                                            WWN_SIZE);
                                }
 
-                               vha->d_id.b.domain = rptid_entry->port_id[2];
-                               vha->d_id.b.area = rptid_entry->port_id[1];
-                               vha->d_id.b.al_pa = rptid_entry->port_id[0];
-                               spin_lock_irqsave(&ha->vport_slock, flags);
-                               qlt_update_vp_map(vha, SET_AL_PA);
-                               spin_unlock_irqrestore(&ha->vport_slock, flags);
+                               qlt_update_host_map(vha, id);
                        }
 
                        fc_host_port_name(vha->host) =
@@ -3694,12 +3709,7 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
                        if (!found)
                                return;
 
-                       vp->d_id.b.domain = rptid_entry->port_id[2];
-                       vp->d_id.b.area =  rptid_entry->port_id[1];
-                       vp->d_id.b.al_pa = rptid_entry->port_id[0];
-                       spin_lock_irqsave(&ha->vport_slock, flags);
-                       qlt_update_vp_map(vp, SET_AL_PA);
-                       spin_unlock_irqrestore(&ha->vport_slock, flags);
+                       qlt_update_host_map(vp, id);
 
                        /*
                         * Cannot configure here as we are still sitting on the
@@ -5827,3 +5837,225 @@ qla26xx_dport_diagnostics(scsi_qla_host_t *vha,
 
        return rval;
 }
+
+static void qla2x00_async_mb_sp_done(void *s, int res)
+{
+       struct srb *sp = s;
+
+       sp->u.iocb_cmd.u.mbx.rc = res;
+
+       complete(&sp->u.iocb_cmd.u.mbx.comp);
+       /* don't free sp here. Let the caller do the free */
+}
+
+/*
+ * This mailbox uses the IOCB interface to send an MB command.
+ * This allows non-critical (non chip setup) commands to go
+ * out in parallel.
+ */
+int qla24xx_send_mb_cmd(struct scsi_qla_host *vha, mbx_cmd_t *mcp)
+{
+       int rval = QLA_FUNCTION_FAILED;
+       srb_t *sp;
+       struct srb_iocb *c;
+
+       if (!vha->hw->flags.fw_started)
+               goto done;
+
+       sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL);
+       if (!sp)
+               goto done;
+
+       sp->type = SRB_MB_IOCB;
+       sp->name = mb_to_str(mcp->mb[0]);
+
+       qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
+
+       memcpy(sp->u.iocb_cmd.u.mbx.out_mb, mcp->mb, SIZEOF_IOCB_MB_REG);
+
+       c = &sp->u.iocb_cmd;
+       c->timeout = qla2x00_async_iocb_timeout;
+       init_completion(&c->u.mbx.comp);
+
+       sp->done = qla2x00_async_mb_sp_done;
+
+       rval = qla2x00_start_sp(sp);
+       if (rval != QLA_SUCCESS) {
+               ql_dbg(ql_dbg_mbx, vha, 0xffff,
+                   "%s: %s Failed submission. %x.\n",
+                   __func__, sp->name, rval);
+               goto done_free_sp;
+       }
+
+       ql_dbg(ql_dbg_mbx, vha, 0xffff, "MB:%s hndl %x submitted\n",
+           sp->name, sp->handle);
+
+       wait_for_completion(&c->u.mbx.comp);
+       memcpy(mcp->mb, sp->u.iocb_cmd.u.mbx.in_mb, SIZEOF_IOCB_MB_REG);
+
+       rval = c->u.mbx.rc;
+       switch (rval) {
+       case QLA_FUNCTION_TIMEOUT:
+               ql_dbg(ql_dbg_mbx, vha, 0xffff, "%s: %s Timeout. %x.\n",
+                   __func__, sp->name, rval);
+               break;
+       case QLA_SUCCESS:
+               ql_dbg(ql_dbg_mbx, vha, 0xffff, "%s: %s done.\n",
+                   __func__, sp->name);
+               sp->free(sp);
+               break;
+       default:
+               ql_dbg(ql_dbg_mbx, vha, 0xffff, "%s: %s Failed. %x.\n",
+                   __func__, sp->name, rval);
+               sp->free(sp);
+               break;
+       }
+
+       return rval;
+
+done_free_sp:
+       sp->free(sp);
+done:
+       return rval;
+}
+
+/*
+ * qla24xx_gpdb_wait
+ * NOTE: Do not call this routine from DPC thread
+ */
+int qla24xx_gpdb_wait(struct scsi_qla_host *vha, fc_port_t *fcport, u8 opt)
+{
+       int rval = QLA_FUNCTION_FAILED;
+       dma_addr_t pd_dma;
+       struct port_database_24xx *pd;
+       struct qla_hw_data *ha = vha->hw;
+       mbx_cmd_t mc;
+
+       if (!vha->hw->flags.fw_started)
+               goto done;
+
+       pd = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &pd_dma);
+       if (pd == NULL) {
+               ql_log(ql_log_warn, vha, 0xffff,
+                       "Failed to allocate port database structure.\n");
+               goto done_free_sp;
+       }
+       memset(pd, 0, max(PORT_DATABASE_SIZE, PORT_DATABASE_24XX_SIZE));
+
+       memset(&mc, 0, sizeof(mc));
+       mc.mb[0] = MBC_GET_PORT_DATABASE;
+       mc.mb[1] = cpu_to_le16(fcport->loop_id);
+       mc.mb[2] = MSW(pd_dma);
+       mc.mb[3] = LSW(pd_dma);
+       mc.mb[6] = MSW(MSD(pd_dma));
+       mc.mb[7] = LSW(MSD(pd_dma));
+       mc.mb[9] = cpu_to_le16(vha->vp_idx);
+       mc.mb[10] = cpu_to_le16((uint16_t)opt);
+
+       rval = qla24xx_send_mb_cmd(vha, &mc);
+       if (rval != QLA_SUCCESS) {
+               ql_dbg(ql_dbg_mbx, vha, 0xffff,
+                   "%s: %8phC fail\n", __func__, fcport->port_name);
+               goto done_free_sp;
+       }
+
+       rval = __qla24xx_parse_gpdb(vha, fcport, pd);
+
+       ql_dbg(ql_dbg_mbx, vha, 0xffff, "%s: %8phC done\n",
+           __func__, fcport->port_name);
+
+done_free_sp:
+       if (pd)
+               dma_pool_free(ha->s_dma_pool, pd, pd_dma);
+done:
+       return rval;
+}
+
+int __qla24xx_parse_gpdb(struct scsi_qla_host *vha, fc_port_t *fcport,
+    struct port_database_24xx *pd)
+{
+       int rval = QLA_SUCCESS;
+       uint64_t zero = 0;
+
+       /* Check for logged in state. */
+       if (pd->current_login_state != PDS_PRLI_COMPLETE &&
+               pd->last_login_state != PDS_PRLI_COMPLETE) {
+               ql_dbg(ql_dbg_mbx, vha, 0xffff,
+                          "Unable to verify login-state (%x/%x) for "
+                          "loop_id %x.\n", pd->current_login_state,
+                          pd->last_login_state, fcport->loop_id);
+               rval = QLA_FUNCTION_FAILED;
+               goto gpd_error_out;
+       }
+
+       if (fcport->loop_id == FC_NO_LOOP_ID ||
+           (memcmp(fcport->port_name, (uint8_t *)&zero, 8) &&
+            memcmp(fcport->port_name, pd->port_name, 8))) {
+               /* We lost the device mid way. */
+               rval = QLA_NOT_LOGGED_IN;
+               goto gpd_error_out;
+       }
+
+       /* Names are little-endian. */
+       memcpy(fcport->node_name, pd->node_name, WWN_SIZE);
+       memcpy(fcport->port_name, pd->port_name, WWN_SIZE);
+
+       /* Get port_id of device. */
+       fcport->d_id.b.domain = pd->port_id[0];
+       fcport->d_id.b.area = pd->port_id[1];
+       fcport->d_id.b.al_pa = pd->port_id[2];
+       fcport->d_id.b.rsvd_1 = 0;
+
+       /* If not target must be initiator or unknown type. */
+       if ((pd->prli_svc_param_word_3[0] & BIT_4) == 0)
+               fcport->port_type = FCT_INITIATOR;
+       else
+               fcport->port_type = FCT_TARGET;
+
+       /* Passback COS information. */
+       fcport->supported_classes = (pd->flags & PDF_CLASS_2) ?
+               FC_COS_CLASS2 : FC_COS_CLASS3;
+
+       if (pd->prli_svc_param_word_3[0] & BIT_7) {
+               fcport->flags |= FCF_CONF_COMP_SUPPORTED;
+               fcport->conf_compl_supported = 1;
+       }
+
+gpd_error_out:
+       return rval;
+}
+
+/*
+ * qla24xx_gidlist_wait
+ * NOTE: don't call this routine from DPC thread.
+ */
+int qla24xx_gidlist_wait(struct scsi_qla_host *vha,
+       void *id_list, dma_addr_t id_list_dma, uint16_t *entries)
+{
+       int rval = QLA_FUNCTION_FAILED;
+       mbx_cmd_t mc;
+
+       if (!vha->hw->flags.fw_started)
+               goto done;
+
+       memset(&mc, 0, sizeof(mc));
+       mc.mb[0] = MBC_GET_ID_LIST;
+       mc.mb[2] = MSW(id_list_dma);
+       mc.mb[3] = LSW(id_list_dma);
+       mc.mb[6] = MSW(MSD(id_list_dma));
+       mc.mb[7] = LSW(MSD(id_list_dma));
+       mc.mb[8] = 0;
+       mc.mb[9] = cpu_to_le16(vha->vp_idx);
+
+       rval = qla24xx_send_mb_cmd(vha, &mc);
+       if (rval != QLA_SUCCESS) {
+               ql_dbg(ql_dbg_mbx, vha, 0xffff,
+                       "%s:  fail\n", __func__);
+       } else {
+               *entries = mc.mb[1];
+               ql_dbg(ql_dbg_mbx, vha, 0xffff,
+                       "%s:  done\n", __func__);
+       }
+done:
+       return rval;
+}
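
qla24xx_send_mb_cmd() is the engine behind the new *_wait() helpers: the mailbox registers ride in an SRB_MB_IOCB down the request queue, and the caller parks on a completion that qla2x00_async_mb_sp_done() fires when the IOCB completes. Hence the notes that these routines must not be called from the DPC thread (which services the completions) and the early bail-out unless flags.fw_started is set. The generic submit-and-wait shape, as a sketch with hypothetical names:

	struct demo_req {
		struct completion comp;
		int rc;
	};

	static void demo_done(struct demo_req *req, int res)	/* IRQ-side */
	{
		req->rc = res;
		complete(&req->comp);
	}

	static int demo_submit_and_wait(struct demo_req *req)
	{
		init_completion(&req->comp);
		/* ... queue req to the hardware here ... */
		wait_for_completion(&req->comp);	/* sleeps: no atomic context */
		return req->rc;
	}
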
index c6d6f0d..09a490c 100644 (file)
@@ -74,13 +74,14 @@ qla24xx_deallocate_vp_id(scsi_qla_host_t *vha)
         * ensures no active vp_list traversal while the vport is removed
         * from the queue)
         */
-       spin_lock_irqsave(&ha->vport_slock, flags);
-       while (atomic_read(&vha->vref_count)) {
-               spin_unlock_irqrestore(&ha->vport_slock, flags);
-
-               msleep(500);
+       wait_event_timeout(vha->vref_waitq, !atomic_read(&vha->vref_count),
+           10*HZ);
 
-               spin_lock_irqsave(&ha->vport_slock, flags);
+       spin_lock_irqsave(&ha->vport_slock, flags);
+       if (atomic_read(&vha->vref_count)) {
+               ql_dbg(ql_dbg_vport, vha, 0xfffa,
+                   "vha->vref_count=%u timeout\n", vha->vref_count.counter);
+               vha->vref_count = (atomic_t)ATOMIC_INIT(0);
        }
        list_del(&vha->list);
        qlt_update_vp_map(vha, RESET_VP_IDX);
@@ -269,6 +270,7 @@ qla2x00_alert_all_vps(struct rsp_que *rsp, uint16_t *mb)
 
                        spin_lock_irqsave(&ha->vport_slock, flags);
                        atomic_dec(&vha->vref_count);
+                       wake_up(&vha->vref_waitq);
                }
                i++;
        }
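
qla24xx_deallocate_vp_id() used to poll vref_count in an unbounded msleep(500) loop. It now sleeps on vref_waitq — every vref drop in this series (QLA_VHA_MARK_NOT_BUSY, qla2x00_update_fcports(), qla2x00_alert_all_vps()) is paired with a wake_up() — and gives up after 10 seconds, logging and force-clearing the count. The before/after shape:

	/* old: unbounded polling */
	while (atomic_read(&vha->vref_count))
		msleep(500);

	/* new: event-driven, bounded at 10 seconds */
	wait_event_timeout(vha->vref_waitq, !atomic_read(&vha->vref_count),
	    10 * HZ);
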
index 1fed235..41d5b09 100644 (file)
@@ -2560,6 +2560,20 @@ qla2xxx_scan_finished(struct Scsi_Host *shost, unsigned long time)
        return atomic_read(&vha->loop_state) == LOOP_READY;
 }
 
+static void qla2x00_iocb_work_fn(struct work_struct *work)
+{
+       struct scsi_qla_host *vha = container_of(work,
+               struct scsi_qla_host, iocb_work);
+       int cnt = 0;
+
+       while (!list_empty(&vha->work_list)) {
+               qla2x00_do_work(vha);
+               cnt++;
+               if (cnt > 10)
+                       break;
+       }
+}
+
 /*
  * PCI driver interface
  */
@@ -3078,6 +3092,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
         */
        qla2xxx_wake_dpc(base_vha);
 
+       INIT_WORK(&base_vha->iocb_work, qla2x00_iocb_work_fn);
        INIT_WORK(&ha->board_disable, qla2x00_disable_board_on_pci_error);
 
        if (IS_QLA8031(ha) || IS_MCTP_CAPABLE(ha)) {
@@ -3469,6 +3484,7 @@ qla2x00_remove_one(struct pci_dev *pdev)
        qla2x00_free_sysfs_attr(base_vha, true);
 
        fc_remove_host(base_vha->host);
+       qlt_remove_target_resources(ha);
 
        scsi_remove_host(base_vha->host);
 
@@ -4268,6 +4284,7 @@ struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *sht,
        spin_lock_init(&vha->work_lock);
        spin_lock_init(&vha->cmd_list_lock);
        init_waitqueue_head(&vha->fcport_waitQ);
+       init_waitqueue_head(&vha->vref_waitq);
 
        vha->gnl.size = sizeof(struct get_name_list_extended) *
                        (ha->max_loop_id + 1);
@@ -4319,7 +4336,11 @@ qla2x00_post_work(struct scsi_qla_host *vha, struct qla_work_evt *e)
        spin_lock_irqsave(&vha->work_lock, flags);
        list_add_tail(&e->list, &vha->work_list);
        spin_unlock_irqrestore(&vha->work_lock, flags);
-       qla2xxx_wake_dpc(vha);
+
+       if (QLA_EARLY_LINKUP(vha->hw))
+               schedule_work(&vha->iocb_work);
+       else
+               qla2xxx_wake_dpc(vha);
 
        return QLA_SUCCESS;
 }
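
qla2x00_post_work() normally wakes the DPC thread, but under QLA_EARLY_LINKUP() — firmware started, init not yet done, with an N2N or LIP async event pending — DPC may be busy in chip bring-up, so the new iocb_work item drains the list instead. The handler bounds itself at roughly ten passes so a constantly refilling work_list yields the worker back; a sketch of that bounded-drain idiom:

	static void bounded_drain_fn(struct work_struct *work)
	{
		struct scsi_qla_host *vha =
			container_of(work, struct scsi_qla_host, iocb_work);
		int cnt = 0;

		/* drain, but yield after ~10 passes instead of looping forever */
		while (!list_empty(&vha->work_list) && cnt++ < 10)
			qla2x00_do_work(vha);
	}
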
index 45f5077..0e03ca2 100644 (file)
@@ -130,6 +130,9 @@ static void qlt_send_term_imm_notif(struct scsi_qla_host *vha,
 static struct fc_port *qlt_create_sess(struct scsi_qla_host *vha,
        fc_port_t *fcport, bool local);
 void qlt_unreg_sess(struct fc_port *sess);
+static void qlt_24xx_handle_abts(struct scsi_qla_host *,
+       struct abts_recv_from_24xx *);
+
 /*
  * Global Variables
  */
@@ -140,6 +143,20 @@ static struct workqueue_struct *qla_tgt_wq;
 static DEFINE_MUTEX(qla_tgt_mutex);
 static LIST_HEAD(qla_tgt_glist);
 
+static const char *prot_op_str(u32 prot_op)
+{
+       switch (prot_op) {
+       case TARGET_PROT_NORMAL:        return "NORMAL";
+       case TARGET_PROT_DIN_INSERT:    return "DIN_INSERT";
+       case TARGET_PROT_DOUT_INSERT:   return "DOUT_INSERT";
+       case TARGET_PROT_DIN_STRIP:     return "DIN_STRIP";
+       case TARGET_PROT_DOUT_STRIP:    return "DOUT_STRIP";
+       case TARGET_PROT_DIN_PASS:      return "DIN_PASS";
+       case TARGET_PROT_DOUT_PASS:     return "DOUT_PASS";
+       default:                        return "UNKNOWN";
+       }
+}
+
 /* This API intentionally takes dest as a parameter, rather than returning
  * int value to avoid caller forgetting to issue wmb() after the store */
 void qlt_do_generation_tick(struct scsi_qla_host *vha, int *dest)
@@ -170,21 +187,23 @@ static inline
 struct scsi_qla_host *qlt_find_host_by_d_id(struct scsi_qla_host *vha,
        uint8_t *d_id)
 {
-       struct qla_hw_data *ha = vha->hw;
-       uint8_t vp_idx;
-
-       if ((vha->d_id.b.area != d_id[1]) || (vha->d_id.b.domain != d_id[0]))
-               return NULL;
+       struct scsi_qla_host *host;
+       uint32_t key = 0;
 
-       if (vha->d_id.b.al_pa == d_id[2])
+       if ((vha->d_id.b.area == d_id[1]) && (vha->d_id.b.domain == d_id[0]) &&
+           (vha->d_id.b.al_pa == d_id[2]))
                return vha;
 
-       BUG_ON(ha->tgt.tgt_vp_map == NULL);
-       vp_idx = ha->tgt.tgt_vp_map[d_id[2]].idx;
-       if (likely(test_bit(vp_idx, ha->vp_idx_map)))
-               return ha->tgt.tgt_vp_map[vp_idx].vha;
+       key  = (uint32_t)d_id[0] << 16;
+       key |= (uint32_t)d_id[1] <<  8;
+       key |= (uint32_t)d_id[2];
 
-       return NULL;
+       host = btree_lookup32(&vha->hw->tgt.host_map, key);
+       if (!host)
+               ql_dbg(ql_dbg_tgt_mgt, vha, 0xffff,
+                          "Unable to find host %06x\n", key);
+
+       return host;
 }
 
 static inline
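
This is what the select BTREE in the qla2xxx Kconfig and the btree_head32 host_map in qlt_hw_data are for: the old tgt_vp_map was keyed by AL_PA alone, which cannot disambiguate hosts once full 24-bit port IDs are in play, so the lookup now packs domain/area/al_pa into a 24-bit key into the btree that qlt_update_host_map() maintains. The packing, pulled out as a hypothetical helper (the driver open-codes it):

	static u32 d_id_to_key(const uint8_t *d_id)
	{
		return ((u32)d_id[0] << 16) |	/* domain */
		       ((u32)d_id[1] <<  8) |	/* area   */
			(u32)d_id[2];		/* al_pa  */
	}

	/* maintained by qlt_update_host_map(), consumed as:
	 *	host = btree_lookup32(&vha->hw->tgt.host_map, d_id_to_key(d_id));
	 */
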
@@ -389,6 +408,8 @@ static bool qlt_24xx_atio_pkt_all_vps(struct scsi_qla_host *vha,
                        (struct abts_recv_from_24xx *)atio;
                struct scsi_qla_host *host = qlt_find_host_by_vp_idx(vha,
                        entry->vp_index);
+               unsigned long flags;
+
                if (unlikely(!host)) {
                        ql_dbg(ql_dbg_tgt, vha, 0xffff,
                            "qla_target(%d): Response pkt (ABTS_RECV_24XX) "
@@ -396,9 +417,12 @@ static bool qlt_24xx_atio_pkt_all_vps(struct scsi_qla_host *vha,
                            vha->vp_idx, entry->vp_index);
                        break;
                }
-               qlt_response_pkt(host, (response_t *)atio);
+               if (!ha_locked)
+                       spin_lock_irqsave(&host->hw->hardware_lock, flags);
+               qlt_24xx_handle_abts(host, (struct abts_recv_from_24xx *)atio);
+               if (!ha_locked)
+                       spin_unlock_irqrestore(&host->hw->hardware_lock, flags);
                break;
-
        }
 
        /* case PUREX_IOCB_TYPE: ql2xmvasynctoatio */
@@ -554,6 +578,7 @@ void qla2x00_async_nack_sp_done(void *s, int res)
                sp->fcport->login_gen++;
                sp->fcport->fw_login_state = DSC_LS_PLOGI_COMP;
                sp->fcport->logout_on_delete = 1;
+               sp->fcport->plogi_nack_done_deadline = jiffies + HZ;
                break;
 
        case SRB_NACK_PRLI:
@@ -613,6 +638,7 @@ int qla24xx_async_notify_ack(scsi_qla_host_t *vha, fc_port_t *fcport,
                break;
        case SRB_NACK_PRLI:
                fcport->fw_login_state = DSC_LS_PRLI_PEND;
+               fcport->deleted = 0;
                c = "PRLI";
                break;
        case SRB_NACK_LOGO:
@@ -1215,7 +1241,7 @@ static int qla24xx_get_loop_id(struct scsi_qla_host *vha, const uint8_t *s_id,
        }
 
        /* Get list of logged in devices */
-       rc = qla2x00_get_id_list(vha, gid_list, gid_list_dma, &entries);
+       rc = qla24xx_gidlist_wait(vha, gid_list, gid_list_dma, &entries);
        if (rc != QLA_SUCCESS) {
                ql_dbg(ql_dbg_tgt_mgt, vha, 0xf045,
                    "qla_target(%d): get_id_list() failed: %x\n",
@@ -1551,6 +1577,9 @@ static void qlt_send_notify_ack(struct scsi_qla_host *vha,
        request_t *pkt;
        struct nack_to_isp *nack;
 
+       if (!ha->flags.fw_started)
+               return;
+
        ql_dbg(ql_dbg_tgt, vha, 0xe004, "Sending NOTIFY_ACK (ha=%p)\n", ha);
 
        /* Send marker if required */
@@ -2013,6 +2042,70 @@ void qlt_free_mcmd(struct qla_tgt_mgmt_cmd *mcmd)
 }
 EXPORT_SYMBOL(qlt_free_mcmd);
 
+/*
+ * ha->hardware_lock supposed to be held on entry. Might drop it, then
+ * reacquire
+ */
+void qlt_send_resp_ctio(scsi_qla_host_t *vha, struct qla_tgt_cmd *cmd,
+    uint8_t scsi_status, uint8_t sense_key, uint8_t asc, uint8_t ascq)
+{
+       struct atio_from_isp *atio = &cmd->atio;
+       struct ctio7_to_24xx *ctio;
+       uint16_t temp;
+
+       ql_dbg(ql_dbg_tgt_dif, vha, 0x3066,
+           "Sending response CTIO7 (vha=%p, atio=%p, scsi_status=%02x, "
+           "sense_key=%02x, asc=%02x, ascq=%02x",
+           vha, atio, scsi_status, sense_key, asc, ascq);
+
+       ctio = (struct ctio7_to_24xx *)qla2x00_alloc_iocbs(vha, NULL);
+       if (!ctio) {
+               ql_dbg(ql_dbg_async, vha, 0x3067,
+                   "qla2x00t(%ld): %s failed: unable to allocate request packet",
+                   vha->host_no, __func__);
+               goto out;
+       }
+
+       ctio->entry_type = CTIO_TYPE7;
+       ctio->entry_count = 1;
+       ctio->handle = QLA_TGT_SKIP_HANDLE;
+       ctio->nport_handle = cmd->sess->loop_id;
+       ctio->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
+       ctio->vp_index = vha->vp_idx;
+       ctio->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2];
+       ctio->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
+       ctio->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0];
+       ctio->exchange_addr = atio->u.isp24.exchange_addr;
+       ctio->u.status1.flags = (atio->u.isp24.attr << 9) |
+           cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1 | CTIO7_FLAGS_SEND_STATUS);
+       temp = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id);
+       ctio->u.status1.ox_id = cpu_to_le16(temp);
+       ctio->u.status1.scsi_status =
+           cpu_to_le16(SS_RESPONSE_INFO_LEN_VALID | scsi_status);
+       ctio->u.status1.response_len = cpu_to_le16(18);
+       ctio->u.status1.residual = cpu_to_le32(get_datalen_for_atio(atio));
+
+       if (ctio->u.status1.residual != 0)
+               ctio->u.status1.scsi_status |=
+                   cpu_to_le16(SS_RESIDUAL_UNDER);
+
+       /* Response code and sense key */
+       put_unaligned_le32(((0x70 << 24) | (sense_key << 8)),
+           (&ctio->u.status1.sense_data)[0]);
+       /* Additional sense length */
+       put_unaligned_le32(0x0a, (&ctio->u.status1.sense_data)[1]);
+       /* ASC and ASCQ */
+       put_unaligned_le32(((asc << 24) | (ascq << 16)),
+           (&ctio->u.status1.sense_data)[3]);
+
+       /* Memory Barrier */
+       wmb();
+
+       qla2x00_start_iocbs(vha, vha->req);
+out:
+       return;
+}
+
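
The three put_unaligned_le32() writes above lay down a standard 18-byte fixed-format sense payload (SPC-4): response code 0x70 in byte 0, the sense key in byte 2, additional sense length 0x0a in byte 7, and ASC/ASCQ in bytes 12-13. A minimal userspace sketch of the same layout, for reference only (helper name and test values are illustrative):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Fixed-format sense data, SPC-4: 18 bytes for key + ASC/ASCQ. */
    static void build_fixed_sense(uint8_t *s, uint8_t key, uint8_t asc,
                                  uint8_t ascq)
    {
            memset(s, 0, 18);
            s[0]  = 0x70;           /* current errors, fixed format */
            s[2]  = key & 0x0f;     /* sense key */
            s[7]  = 0x0a;           /* 10 additional bytes follow */
            s[12] = asc;            /* additional sense code */
            s[13] = ascq;           /* additional sense code qualifier */
    }

    int main(void)
    {
            uint8_t sense[18];
            int i;

            build_fixed_sense(sense, 0x0b, 0x10, 0x01); /* ABORTED COMMAND */
            for (i = 0; i < 18; i++)
                    printf("%02x ", sense[i]);
            printf("\n");
            return 0;
    }
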
 /* callback from target fabric module code */
 void qlt_xmit_tm_rsp(struct qla_tgt_mgmt_cmd *mcmd)
 {
@@ -2261,7 +2354,7 @@ static int qlt_24xx_build_ctio_pkt(struct qla_tgt_prm *prm,
                 */
                return -EAGAIN;
        } else
-               ha->tgt.cmds[h-1] = prm->cmd;
+               ha->tgt.cmds[h - 1] = prm->cmd;
 
        pkt->handle = h | CTIO_COMPLETION_HANDLE_MARK;
        pkt->nport_handle = prm->cmd->loop_id;
@@ -2391,6 +2484,50 @@ static inline int qlt_has_data(struct qla_tgt_cmd *cmd)
        return cmd->bufflen > 0;
 }
 
+static void qlt_print_dif_err(struct qla_tgt_prm *prm)
+{
+       struct qla_tgt_cmd *cmd;
+       struct scsi_qla_host *vha;
+
+       /* asc 0x10=dif error */
+       if (prm->sense_buffer && (prm->sense_buffer[12] == 0x10)) {
+               cmd = prm->cmd;
+               vha = cmd->vha;
+               /* ASCQ */
+               switch (prm->sense_buffer[13]) {
+               case 1:
+                       ql_dbg(ql_dbg_tgt_dif, vha, 0xffff,
+                           "BE detected Guard TAG ERR: lba[0x%llx|%lld] len[0x%x] "
+                           "se_cmd=%p tag[%x]",
+                           cmd->lba, cmd->lba, cmd->num_blks, &cmd->se_cmd,
+                           cmd->atio.u.isp24.exchange_addr);
+                       break;
+               case 2:
+                       ql_dbg(ql_dbg_tgt_dif, vha, 0xffff,
+                           "BE detected APP TAG ERR: lba[0x%llx|%lld] len[0x%x] "
+                           "se_cmd=%p tag[%x]",
+                           cmd->lba, cmd->lba, cmd->num_blks, &cmd->se_cmd,
+                           cmd->atio.u.isp24.exchange_addr);
+                       break;
+               case 3:
+                       ql_dbg(ql_dbg_tgt_dif, vha, 0xffff,
+                           "BE detected REF TAG ERR: lba[0x%llx|%lld] len[0x%x] "
+                           "se_cmd=%p tag[%x]",
+                           cmd->lba, cmd->lba, cmd->num_blks, &cmd->se_cmd,
+                           cmd->atio.u.isp24.exchange_addr);
+                       break;
+               default:
+                       ql_dbg(ql_dbg_tgt_dif, vha, 0xffff,
+                           "BE detected Dif ERR: lba[%llx|%lld] len[%x] "
+                           "se_cmd=%p tag[%x]",
+                           cmd->lba, cmd->lba, cmd->num_blks, &cmd->se_cmd,
+                           cmd->atio.u.isp24.exchange_addr);
+                       break;
+               }
+               ql_dump_buffer(ql_dbg_tgt_dif, vha, 0xffff, cmd->cdb, 16);
+       }
+}
+
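
The ASCQ values decoded above are the T10 protection-information check failures: ASC 0x10 with ASCQ 1, 2 or 3 for the guard, application and reference tag respectively, exactly as the switch reports them. A small lookup sketch (function name is illustrative):

    #include <stdio.h>

    /* SPC-4: ASC 0x10 groups the three PI check failures by ASCQ. */
    static const char *pi_err_str(unsigned asc, unsigned ascq)
    {
            if (asc != 0x10)
                    return "not a protection-information error";
            switch (ascq) {
            case 0x1: return "LOGICAL BLOCK GUARD CHECK FAILED";
            case 0x2: return "LOGICAL BLOCK APPLICATION TAG CHECK FAILED";
            case 0x3: return "LOGICAL BLOCK REFERENCE TAG CHECK FAILED";
            default:  return "reserved";
            }
    }

    int main(void)
    {
            printf("%s\n", pi_err_str(0x10, 0x1));
            return 0;
    }
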
 /*
  * Called without ha->hardware_lock held
  */
@@ -2512,18 +2649,9 @@ skip_explict_conf:
                for (i = 0; i < prm->sense_buffer_len/4; i++)
                        ((uint32_t *)ctio->u.status1.sense_data)[i] =
                                cpu_to_be32(((uint32_t *)prm->sense_buffer)[i]);
-#if 0
-               if (unlikely((prm->sense_buffer_len % 4) != 0)) {
-                       static int q;
-                       if (q < 10) {
-                               ql_dbg(ql_dbg_tgt, vha, 0xe04f,
-                                   "qla_target(%d): %d bytes of sense "
-                                   "lost", prm->tgt->ha->vp_idx,
-                                   prm->sense_buffer_len % 4);
-                               q++;
-                       }
-               }
-#endif
+
+               qlt_print_dif_err(prm);
+
        } else {
                ctio->u.status1.flags &=
                    ~cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_0);
@@ -2537,19 +2665,9 @@ skip_explict_conf:
        /* Sense with len > 24, is it possible ??? */
 }
 
-
-
-/* diff  */
 static inline int
 qlt_hba_err_chk_enabled(struct se_cmd *se_cmd)
 {
-       /*
-        * Uncomment when corresponding SCSI changes are done.
-        *
-        if (!sp->cmd->prot_chk)
-        return 0;
-        *
-        */
        switch (se_cmd->prot_op) {
        case TARGET_PROT_DOUT_INSERT:
        case TARGET_PROT_DIN_STRIP:
@@ -2570,16 +2688,38 @@ qlt_hba_err_chk_enabled(struct se_cmd *se_cmd)
        return 0;
 }
 
+static inline int
+qla_tgt_ref_mask_check(struct se_cmd *se_cmd)
+{
+       switch (se_cmd->prot_op) {
+       case TARGET_PROT_DIN_INSERT:
+       case TARGET_PROT_DOUT_INSERT:
+       case TARGET_PROT_DIN_STRIP:
+       case TARGET_PROT_DOUT_STRIP:
+       case TARGET_PROT_DIN_PASS:
+       case TARGET_PROT_DOUT_PASS:
+           return 1;
+       default:
+           return 0;
+       }
+       return 0;
+}
+
 /*
- * qla24xx_set_t10dif_tags_from_cmd - Extract Ref and App tags from SCSI command
- *
+ * qla_tgt_set_dif_tags - Extract Ref and App tags from SCSI command
  */
-static inline void
-qlt_set_t10dif_tags(struct se_cmd *se_cmd, struct crc_context *ctx)
+static void
+qla_tgt_set_dif_tags(struct qla_tgt_cmd *cmd, struct crc_context *ctx,
+    uint16_t *pfw_prot_opts)
 {
+       struct se_cmd *se_cmd = &cmd->se_cmd;
        uint32_t lba = 0xffffffff & se_cmd->t_task_lba;
+       scsi_qla_host_t *vha = cmd->tgt->vha;
+       struct qla_hw_data *ha = vha->hw;
+       uint32_t t32 = 0;
 
-       /* wait til Mode Sense/Select cmd, modepage Ah, subpage 2
+       /*
+        * Wait until Mode Sense/Select cmd, modepage Ah, subpage 2
         * have been implemented by TCM, before AppTag is available.
         * Look for modesense_handlers[]
         */
@@ -2587,65 +2727,73 @@ qlt_set_t10dif_tags(struct se_cmd *se_cmd, struct crc_context *ctx)
        ctx->app_tag_mask[0] = 0x0;
        ctx->app_tag_mask[1] = 0x0;
 
+       if (IS_PI_UNINIT_CAPABLE(ha)) {
+               if ((se_cmd->prot_type == TARGET_DIF_TYPE1_PROT) ||
+                   (se_cmd->prot_type == TARGET_DIF_TYPE2_PROT))
+                       *pfw_prot_opts |= PO_DIS_VALD_APP_ESC;
+               else if (se_cmd->prot_type == TARGET_DIF_TYPE3_PROT)
+                       *pfw_prot_opts |= PO_DIS_VALD_APP_REF_ESC;
+       }
+
+       t32 = ha->tgt.tgt_ops->get_dif_tags(cmd, pfw_prot_opts);
+
        switch (se_cmd->prot_type) {
        case TARGET_DIF_TYPE0_PROT:
                /*
-                * No check for ql2xenablehba_err_chk, as it would be an
-                * I/O error if hba tag generation is not done.
+                * No check for ql2xenablehba_err_chk, as it
+                * would be an I/O error if hba tag generation
+                * is not done.
                 */
                ctx->ref_tag = cpu_to_le32(lba);
-
-               if (!qlt_hba_err_chk_enabled(se_cmd))
-                       break;
-
                /* enable ALL bytes of the ref tag */
                ctx->ref_tag_mask[0] = 0xff;
                ctx->ref_tag_mask[1] = 0xff;
                ctx->ref_tag_mask[2] = 0xff;
                ctx->ref_tag_mask[3] = 0xff;
                break;
-       /*
-        * For TYpe 1 protection: 16 bit GUARD tag, 32 bit REF tag, and
-        * 16 bit app tag.
-        */
        case TARGET_DIF_TYPE1_PROT:
-               ctx->ref_tag = cpu_to_le32(lba);
-
-               if (!qlt_hba_err_chk_enabled(se_cmd))
-                       break;
-
-               /* enable ALL bytes of the ref tag */
-               ctx->ref_tag_mask[0] = 0xff;
-               ctx->ref_tag_mask[1] = 0xff;
-               ctx->ref_tag_mask[2] = 0xff;
-               ctx->ref_tag_mask[3] = 0xff;
-               break;
-       /*
-        * For TYPE 2 protection: 16 bit GUARD + 32 bit REF tag has to
-        * match LBA in CDB + N
-        */
+           /*
+            * For TYPE 1 protection: 16 bit GUARD tag, 32 bit
+            * REF tag, and 16 bit app tag.
+            */
+           ctx->ref_tag = cpu_to_le32(lba);
+           if (!qla_tgt_ref_mask_check(se_cmd) ||
+               !(ha->tgt.tgt_ops->chk_dif_tags(t32))) {
+                   *pfw_prot_opts |= PO_DIS_REF_TAG_VALD;
+                   break;
+           }
+           /* enable ALL bytes of the ref tag */
+           ctx->ref_tag_mask[0] = 0xff;
+           ctx->ref_tag_mask[1] = 0xff;
+           ctx->ref_tag_mask[2] = 0xff;
+           ctx->ref_tag_mask[3] = 0xff;
+           break;
        case TARGET_DIF_TYPE2_PROT:
-               ctx->ref_tag = cpu_to_le32(lba);
-
-               if (!qlt_hba_err_chk_enabled(se_cmd))
-                       break;
-
-               /* enable ALL bytes of the ref tag */
-               ctx->ref_tag_mask[0] = 0xff;
-               ctx->ref_tag_mask[1] = 0xff;
-               ctx->ref_tag_mask[2] = 0xff;
-               ctx->ref_tag_mask[3] = 0xff;
-               break;
-
-       /* For Type 3 protection: 16 bit GUARD only */
+           /*
+            * For TYPE 2 protection: 16 bit GUARD + 32 bit REF
+            * tag has to match LBA in CDB + N
+            */
+           ctx->ref_tag = cpu_to_le32(lba);
+           if (!qla_tgt_ref_mask_check(se_cmd) ||
+               !(ha->tgt.tgt_ops->chk_dif_tags(t32))) {
+                   *pfw_prot_opts |= PO_DIS_REF_TAG_VALD;
+                   break;
+           }
+           /* enable ALL bytes of the ref tag */
+           ctx->ref_tag_mask[0] = 0xff;
+           ctx->ref_tag_mask[1] = 0xff;
+           ctx->ref_tag_mask[2] = 0xff;
+           ctx->ref_tag_mask[3] = 0xff;
+           break;
        case TARGET_DIF_TYPE3_PROT:
-               ctx->ref_tag_mask[0] = ctx->ref_tag_mask[1] =
-                       ctx->ref_tag_mask[2] = ctx->ref_tag_mask[3] = 0x00;
-               break;
+           /* For TYPE 3 protection: 16 bit GUARD only */
+           *pfw_prot_opts |= PO_DIS_REF_TAG_VALD;
+           ctx->ref_tag_mask[0] = ctx->ref_tag_mask[1] =
+               ctx->ref_tag_mask[2] = ctx->ref_tag_mask[3] = 0x00;
+           break;
        }
 }
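
As the switch above shows, Types 0, 1 and 2 seed the 32-bit reference tag from the low 32 bits of the LBA and enable all four ref-tag mask bytes, while Type 3 carries no reference tag to check. A hedged sketch of that seeding, with the 8-byte T10 DIF tuple for context (struct and names are illustrative):

    #include <stdint.h>
    #include <stdio.h>

    /* 8-byte T10 DIF tuple appended to each logical block. */
    struct pi_tuple {
            uint16_t guard;         /* CRC16 over the data block */
            uint16_t app_tag;
            uint32_t ref_tag;       /* Type 1/2: low 32 bits of the LBA */
    };

    int main(void)
    {
            uint64_t lba = 0x123456789aULL;
            uint32_t seed = (uint32_t)(0xffffffff & lba);

            printf("ref tag seed for LBA %#llx is %#x\n",
                   (unsigned long long)lba, seed);
            return 0;
    }
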
 
-
 static inline int
 qlt_build_ctio_crc2_pkt(struct qla_tgt_prm *prm, scsi_qla_host_t *vha)
 {
@@ -2664,6 +2812,7 @@ qlt_build_ctio_crc2_pkt(struct qla_tgt_prm *prm, scsi_qla_host_t *vha)
        struct se_cmd           *se_cmd = &cmd->se_cmd;
        uint32_t h;
        struct atio_from_isp *atio = &prm->cmd->atio;
+       struct qla_tc_param     tc;
        uint16_t t16;
 
        ha = vha->hw;
@@ -2689,16 +2838,15 @@ qlt_build_ctio_crc2_pkt(struct qla_tgt_prm *prm, scsi_qla_host_t *vha)
        case TARGET_PROT_DIN_INSERT:
        case TARGET_PROT_DOUT_STRIP:
                transfer_length = data_bytes;
-               data_bytes += dif_bytes;
+               if (cmd->prot_sg_cnt)
+                       data_bytes += dif_bytes;
                break;
-
        case TARGET_PROT_DIN_STRIP:
        case TARGET_PROT_DOUT_INSERT:
        case TARGET_PROT_DIN_PASS:
        case TARGET_PROT_DOUT_PASS:
                transfer_length = data_bytes + dif_bytes;
                break;
-
        default:
                BUG();
                break;
@@ -2734,7 +2882,6 @@ qlt_build_ctio_crc2_pkt(struct qla_tgt_prm *prm, scsi_qla_host_t *vha)
                break;
        }
 
-
        /* ---- PKT ---- */
        /* Update entry type to indicate Command Type CRC_2 IOCB */
        pkt->entry_type  = CTIO_CRC2;
@@ -2752,9 +2899,8 @@ qlt_build_ctio_crc2_pkt(struct qla_tgt_prm *prm, scsi_qla_host_t *vha)
        } else
                ha->tgt.cmds[h-1] = prm->cmd;
 
-
        pkt->handle  = h | CTIO_COMPLETION_HANDLE_MARK;
-       pkt->nport_handle = prm->cmd->loop_id;
+       pkt->nport_handle = cpu_to_le16(prm->cmd->loop_id);
        pkt->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
        pkt->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2];
        pkt->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
@@ -2775,12 +2921,10 @@ qlt_build_ctio_crc2_pkt(struct qla_tgt_prm *prm, scsi_qla_host_t *vha)
        else if (cmd->dma_data_direction == DMA_FROM_DEVICE)
                pkt->flags = cpu_to_le16(CTIO7_FLAGS_DATA_OUT);
 
-
        pkt->dseg_count = prm->tot_dsds;
        /* Fibre channel byte count */
        pkt->transfer_length = cpu_to_le32(transfer_length);
 
-
        /* ----- CRC context -------- */
 
        /* Allocate CRC context from global pool */
@@ -2800,13 +2944,12 @@ qlt_build_ctio_crc2_pkt(struct qla_tgt_prm *prm, scsi_qla_host_t *vha)
        /* Set handle */
        crc_ctx_pkt->handle = pkt->handle;
 
-       qlt_set_t10dif_tags(se_cmd, crc_ctx_pkt);
+       qla_tgt_set_dif_tags(cmd, crc_ctx_pkt, &fw_prot_opts);
 
        pkt->crc_context_address[0] = cpu_to_le32(LSD(crc_ctx_dma));
        pkt->crc_context_address[1] = cpu_to_le32(MSD(crc_ctx_dma));
        pkt->crc_context_len = CRC_CONTEXT_LEN_FW;
 
-
        if (!bundling) {
                cur_dsd = (uint32_t *) &crc_ctx_pkt->u.nobundling.data_address;
        } else {
@@ -2827,16 +2970,24 @@ qlt_build_ctio_crc2_pkt(struct qla_tgt_prm *prm, scsi_qla_host_t *vha)
        crc_ctx_pkt->byte_count = cpu_to_le32(data_bytes);
        crc_ctx_pkt->guard_seed = cpu_to_le16(0);
 
+       memset((uint8_t *)&tc, 0, sizeof(tc));
+       tc.vha = vha;
+       tc.blk_sz = cmd->blk_sz;
+       tc.bufflen = cmd->bufflen;
+       tc.sg = cmd->sg;
+       tc.prot_sg = cmd->prot_sg;
+       tc.ctx = crc_ctx_pkt;
+       tc.ctx_dsd_alloced = &cmd->ctx_dsd_alloced;
 
        /* Walks data segments */
        pkt->flags |= cpu_to_le16(CTIO7_FLAGS_DSD_PTR);
 
        if (!bundling && prm->prot_seg_cnt) {
                if (qla24xx_walk_and_build_sglist_no_difb(ha, NULL, cur_dsd,
-                       prm->tot_dsds, cmd))
+                       prm->tot_dsds, &tc))
                        goto crc_queuing_error;
        } else if (qla24xx_walk_and_build_sglist(ha, NULL, cur_dsd,
-               (prm->tot_dsds - prm->prot_seg_cnt), cmd))
+               (prm->tot_dsds - prm->prot_seg_cnt), &tc))
                goto crc_queuing_error;
 
        if (bundling && prm->prot_seg_cnt) {
@@ -2845,18 +2996,18 @@ qlt_build_ctio_crc2_pkt(struct qla_tgt_prm *prm, scsi_qla_host_t *vha)
 
                cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.dif_address;
                if (qla24xx_walk_and_build_prot_sglist(ha, NULL, cur_dsd,
-                       prm->prot_seg_cnt, cmd))
+                       prm->prot_seg_cnt, &tc))
                        goto crc_queuing_error;
        }
        return QLA_SUCCESS;
 
 crc_queuing_error:
        /* Cleanup will be performed by the caller */
+       vha->hw->tgt.cmds[h - 1] = NULL;
 
        return QLA_FUNCTION_FAILED;
 }
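
The transfer_length computed earlier in this function counts 8 bytes of protection information per block whenever tags travel on the wire: the DIN_INSERT/DOUT_STRIP cases now add dif_bytes only when a protection scatterlist exists, while the STRIP/INSERT/PASS cases toward the wire always include them. A worked userspace sketch of the arithmetic (assumes dif_bytes = blocks * 8):

    #include <stdint.h>
    #include <stdio.h>

    /* FC byte count when each block carries an 8-byte PI tuple. */
    static uint32_t fc_transfer_len(uint32_t data_bytes, uint32_t blk_sz,
                                    int pi_on_wire)
    {
            uint32_t dif_bytes = (data_bytes / blk_sz) * 8;

            return pi_on_wire ? data_bytes + dif_bytes : data_bytes;
    }

    int main(void)
    {
            /* 64 KiB of 512-byte blocks: 128 tuples, 1024 extra bytes. */
            printf("%u\n", fc_transfer_len(65536, 512, 1)); /* 66560 */
            return 0;
    }
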
 
-
 /*
  * Callback to setup response of xmit_type of QLA_TGT_XMIT_DATA and *
  * QLA_TGT_XMIT_STATUS for >= 24xx silicon
@@ -2906,7 +3057,7 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type,
        else
                vha->tgt_counters.core_qla_que_buf++;
 
-       if (!vha->flags.online || cmd->reset_count != ha->chip_reset) {
+       if (!ha->flags.fw_started || cmd->reset_count != ha->chip_reset) {
                /*
                 * Either the port is not online or this request was from
                 * previous life, just abort the processing.
@@ -3047,7 +3198,7 @@ int qlt_rdy_to_xfer(struct qla_tgt_cmd *cmd)
 
        spin_lock_irqsave(&ha->hardware_lock, flags);
 
-       if (!vha->flags.online || (cmd->reset_count != ha->chip_reset) ||
+       if (!ha->flags.fw_started || (cmd->reset_count != ha->chip_reset) ||
            (cmd->sess && cmd->sess->deleted)) {
                /*
                 * Either the port is not online or this request was from
@@ -3104,139 +3255,113 @@ EXPORT_SYMBOL(qlt_rdy_to_xfer);
 
 
 /*
- * Checks the guard or meta-data for the type of error
- * detected by the HBA.
+ * It is assumed that either hardware_lock or the qpair lock is held.
  */
-static inline int
+static void
 qlt_handle_dif_error(struct scsi_qla_host *vha, struct qla_tgt_cmd *cmd,
-               struct ctio_crc_from_fw *sts)
+       struct ctio_crc_from_fw *sts)
 {
        uint8_t         *ap = &sts->actual_dif[0];
        uint8_t         *ep = &sts->expected_dif[0];
-       uint32_t        e_ref_tag, a_ref_tag;
-       uint16_t        e_app_tag, a_app_tag;
-       uint16_t        e_guard, a_guard;
        uint64_t        lba = cmd->se_cmd.t_task_lba;
+       uint8_t scsi_status, sense_key, asc, ascq;
+       unsigned long flags;
 
-       a_guard   = be16_to_cpu(*(uint16_t *)(ap + 0));
-       a_app_tag = be16_to_cpu(*(uint16_t *)(ap + 2));
-       a_ref_tag = be32_to_cpu(*(uint32_t *)(ap + 4));
-
-       e_guard   = be16_to_cpu(*(uint16_t *)(ep + 0));
-       e_app_tag = be16_to_cpu(*(uint16_t *)(ep + 2));
-       e_ref_tag = be32_to_cpu(*(uint32_t *)(ep + 4));
-
-       ql_dbg(ql_dbg_tgt, vha, 0xe075,
-           "iocb(s) %p Returned STATUS.\n", sts);
-
-       ql_dbg(ql_dbg_tgt, vha, 0xf075,
-           "dif check TGT cdb 0x%x lba 0x%llx: [Actual|Expected] Ref Tag[0x%x|0x%x], App Tag [0x%x|0x%x], Guard [0x%x|0x%x]\n",
-           cmd->atio.u.isp24.fcp_cmnd.cdb[0], lba,
-           a_ref_tag, e_ref_tag, a_app_tag, e_app_tag, a_guard, e_guard);
-
-       /*
-        * Ignore sector if:
-        * For type     3: ref & app tag is all 'f's
-        * For type 0,1,2: app tag is all 'f's
-        */
-       if ((a_app_tag == 0xffff) &&
-           ((cmd->se_cmd.prot_type != TARGET_DIF_TYPE3_PROT) ||
-            (a_ref_tag == 0xffffffff))) {
-               uint32_t blocks_done;
-
-               /* 2TB boundary case covered automatically with this */
-               blocks_done = e_ref_tag - (uint32_t)lba + 1;
-               cmd->se_cmd.bad_sector = e_ref_tag;
-               cmd->se_cmd.pi_err = 0;
-               ql_dbg(ql_dbg_tgt, vha, 0xf074,
-                       "need to return scsi good\n");
-
-               /* Update protection tag */
-               if (cmd->prot_sg_cnt) {
-                       uint32_t i, k = 0, num_ent;
-                       struct scatterlist *sg, *sgl;
-
-
-                       sgl = cmd->prot_sg;
-
-                       /* Patch the corresponding protection tags */
-                       for_each_sg(sgl, sg, cmd->prot_sg_cnt, i) {
-                               num_ent = sg_dma_len(sg) / 8;
-                               if (k + num_ent < blocks_done) {
-                                       k += num_ent;
-                                       continue;
-                               }
-                               k = blocks_done;
-                               break;
-                       }
+       cmd->trc_flags |= TRC_DIF_ERR;
 
-                       if (k != blocks_done) {
-                               ql_log(ql_log_warn, vha, 0xf076,
-                                   "unexpected tag values tag:lba=%u:%llu)\n",
-                                   e_ref_tag, (unsigned long long)lba);
-                               goto out;
-                       }
+       cmd->a_guard   = be16_to_cpu(*(uint16_t *)(ap + 0));
+       cmd->a_app_tag = be16_to_cpu(*(uint16_t *)(ap + 2));
+       cmd->a_ref_tag = be32_to_cpu(*(uint32_t *)(ap + 4));
 
-#if 0
-                       struct sd_dif_tuple *spt;
-                       /* TODO:
-                        * This section came from initiator. Is it valid here?
-                        * should ulp be override with actual val???
-                        */
-                       spt = page_address(sg_page(sg)) + sg->offset;
-                       spt += j;
+       cmd->e_guard   = be16_to_cpu(*(uint16_t *)(ep + 0));
+       cmd->e_app_tag = be16_to_cpu(*(uint16_t *)(ep + 2));
+       cmd->e_ref_tag = be32_to_cpu(*(uint32_t *)(ep + 4));
 
-                       spt->app_tag = 0xffff;
-                       if (cmd->se_cmd.prot_type == SCSI_PROT_DIF_TYPE3)
-                               spt->ref_tag = 0xffffffff;
-#endif
-               }
+       ql_dbg(ql_dbg_tgt_dif, vha, 0xf075,
+           "%s: aborted %d state %d\n", __func__, cmd->aborted, cmd->state);
 
-               return 0;
-       }
+       scsi_status = sense_key = asc = ascq = 0;
 
-       /* check guard */
-       if (e_guard != a_guard) {
-               cmd->se_cmd.pi_err = TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED;
-               cmd->se_cmd.bad_sector = cmd->se_cmd.t_task_lba;
-
-               ql_log(ql_log_warn, vha, 0xe076,
-                   "Guard ERR: cdb 0x%x lba 0x%llx: [Actual|Expected] Ref Tag[0x%x|0x%x], App Tag [0x%x|0x%x], Guard [0x%x|0x%x] cmd=%p\n",
-                   cmd->atio.u.isp24.fcp_cmnd.cdb[0], lba,
-                   a_ref_tag, e_ref_tag, a_app_tag, e_app_tag,
-                   a_guard, e_guard, cmd);
-               goto out;
+       /* check appl tag */
+       if (cmd->e_app_tag != cmd->a_app_tag) {
+               ql_dbg(ql_dbg_tgt_dif, vha, 0xffff,
+                       "App Tag ERR: cdb[%x] lba[%llx %llx] blks[%x] [Actual|Expected] "
+                       "Ref[%x|%x], App[%x|%x], "
+                       "Guard [%x|%x] cmd=%p ox_id[%04x]",
+                       cmd->cdb[0], lba, (lba+cmd->num_blks), cmd->num_blks,
+                       cmd->a_ref_tag, cmd->e_ref_tag,
+                       cmd->a_app_tag, cmd->e_app_tag,
+                       cmd->a_guard, cmd->e_guard,
+                       cmd, cmd->atio.u.isp24.fcp_hdr.ox_id);
+
+               cmd->dif_err_code = DIF_ERR_APP;
+               scsi_status = SAM_STAT_CHECK_CONDITION;
+               sense_key = ABORTED_COMMAND;
+               asc = 0x10;
+               ascq = 0x2;
        }
 
        /* check ref tag */
-       if (e_ref_tag != a_ref_tag) {
-               cmd->se_cmd.pi_err = TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED;
-               cmd->se_cmd.bad_sector = e_ref_tag;
-
-               ql_log(ql_log_warn, vha, 0xe077,
-                       "Ref Tag ERR: cdb 0x%x lba 0x%llx: [Actual|Expected] Ref Tag[0x%x|0x%x], App Tag [0x%x|0x%x], Guard [0x%x|0x%x] cmd=%p\n",
-                       cmd->atio.u.isp24.fcp_cmnd.cdb[0], lba,
-                       a_ref_tag, e_ref_tag, a_app_tag, e_app_tag,
-                       a_guard, e_guard, cmd);
+       if (cmd->e_ref_tag != cmd->a_ref_tag) {
+               ql_dbg(ql_dbg_tgt_dif, vha, 0xffff,
+                       "Ref Tag ERR: cdb[%x] lba[%llx %llx] blks[%x] [Actual|Expected] "
+                       "Ref[%x|%x], App[%x|%x], "
+                       "Guard[%x|%x] cmd=%p ox_id[%04x] ",
+                       cmd->cdb[0], lba, (lba+cmd->num_blks), cmd->num_blks,
+                       cmd->a_ref_tag, cmd->e_ref_tag,
+                       cmd->a_app_tag, cmd->e_app_tag,
+                       cmd->a_guard, cmd->e_guard,
+                       cmd, cmd->atio.u.isp24.fcp_hdr.ox_id);
+
+               cmd->dif_err_code = DIF_ERR_REF;
+               scsi_status = SAM_STAT_CHECK_CONDITION;
+               sense_key = ABORTED_COMMAND;
+               asc = 0x10;
+               ascq = 0x3;
                goto out;
        }
 
-       /* check appl tag */
-       if (e_app_tag != a_app_tag) {
-               cmd->se_cmd.pi_err = TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED;
-               cmd->se_cmd.bad_sector = cmd->se_cmd.t_task_lba;
-
-               ql_log(ql_log_warn, vha, 0xe078,
-                       "App Tag ERR: cdb 0x%x lba 0x%llx: [Actual|Expected] Ref Tag[0x%x|0x%x], App Tag [0x%x|0x%x], Guard [0x%x|0x%x] cmd=%p\n",
-                       cmd->atio.u.isp24.fcp_cmnd.cdb[0], lba,
-                       a_ref_tag, e_ref_tag, a_app_tag, e_app_tag,
-                       a_guard, e_guard, cmd);
-               goto out;
+       /* check guard */
+       if (cmd->e_guard != cmd->a_guard) {
+               ql_dbg(ql_dbg_tgt_dif, vha, 0xffff,
+                       "Guard ERR: cdb[%x] lba[%llx %llx] blks[%x] [Actual|Expected] "
+                       "Ref[%x|%x], App[%x|%x], "
+                       "Guard [%x|%x] cmd=%p ox_id[%04x]",
+                       cmd->cdb[0], lba, (lba+cmd->num_blks), cmd->num_blks,
+                       cmd->a_ref_tag, cmd->e_ref_tag,
+                       cmd->a_app_tag, cmd->e_app_tag,
+                       cmd->a_guard, cmd->e_guard,
+                       cmd, cmd->atio.u.isp24.fcp_hdr.ox_id);
+               cmd->dif_err_code = DIF_ERR_GRD;
+               scsi_status = SAM_STAT_CHECK_CONDITION;
+               sense_key = ABORTED_COMMAND;
+               asc = 0x10;
+               ascq = 0x1;
        }
 out:
-       return 1;
-}
+       switch (cmd->state) {
+       case QLA_TGT_STATE_NEED_DATA:
+               /* handle_data will load the DIF error code */
+               cmd->state = QLA_TGT_STATE_DATA_IN;
+               vha->hw->tgt.tgt_ops->handle_data(cmd);
+               break;
+       default:
+               spin_lock_irqsave(&cmd->cmd_lock, flags);
+               if (cmd->aborted) {
+                       spin_unlock_irqrestore(&cmd->cmd_lock, flags);
+                       vha->hw->tgt.tgt_ops->free_cmd(cmd);
+                       break;
+               }
+               spin_unlock_irqrestore(&cmd->cmd_lock, flags);
 
+               qlt_send_resp_ctio(vha, cmd, scsi_status, sense_key, asc, ascq);
+               /*
+                * Assume the SCSI status goes out on the wire;
+                * do not wait for completion.
+                */
+               vha->hw->tgt.tgt_ops->free_cmd(cmd);
+               break;
+       }
+}
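
sts->actual_dif[] and sts->expected_dif[] each hold one 8-byte big-endian PI tuple, which the handler above decodes with be16/be32 loads at offsets 0, 2 and 4. A portable userspace equivalent of that decode (names are illustrative):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>
    #include <arpa/inet.h>

    struct pi_fields {
            uint16_t guard, app_tag;
            uint32_t ref_tag;
    };

    /* Decode guard/app/ref from an 8-byte big-endian PI tuple. */
    static struct pi_fields decode_pi(const uint8_t *p)
    {
            struct pi_fields f;
            uint16_t v16;
            uint32_t v32;

            memcpy(&v16, p + 0, 2); f.guard   = ntohs(v16);
            memcpy(&v16, p + 2, 2); f.app_tag = ntohs(v16);
            memcpy(&v32, p + 4, 4); f.ref_tag = ntohl(v32);
            return f;
    }

    int main(void)
    {
            const uint8_t tuple[8] = { 0xab, 0xcd, 0x00, 0x01,
                                       0x00, 0x00, 0x10, 0x00 };
            struct pi_fields f = decode_pi(tuple);

            printf("guard %#x app %#x ref %#x\n", f.guard, f.app_tag,
                   f.ref_tag);
            return 0;
    }
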
 
 /* If hardware_lock is held on entry, it might be dropped, then reacquired */
 /* This function sends the appropriate CTIO to ISP 2xxx or 24xx */
@@ -3251,7 +3376,7 @@ static int __qlt_send_term_imm_notif(struct scsi_qla_host *vha,
        ql_dbg(ql_dbg_tgt_tmr, vha, 0xe01c,
            "Sending TERM ELS CTIO (ha=%p)\n", ha);
 
-       pkt = (request_t *)qla2x00_alloc_iocbs_ready(vha, NULL);
+       pkt = (request_t *)qla2x00_alloc_iocbs(vha, NULL);
        if (pkt == NULL) {
                ql_dbg(ql_dbg_tgt, vha, 0xe080,
                    "qla_target(%d): %s failed: unable to allocate "
@@ -3543,6 +3668,16 @@ static int qlt_term_ctio_exchange(struct scsi_qla_host *vha, void *ctio,
 {
        int term = 0;
 
+       if (cmd->se_cmd.prot_op)
+               ql_dbg(ql_dbg_tgt_dif, vha, 0xffff,
+                   "Term DIF cmd: lba[0x%llx|%lld] len[0x%x] "
+                   "se_cmd=%p tag[%x] op %#x/%s",
+                    cmd->lba, cmd->lba,
+                    cmd->num_blks, &cmd->se_cmd,
+                    cmd->atio.u.isp24.exchange_addr,
+                    cmd->se_cmd.prot_op,
+                    prot_op_str(cmd->se_cmd.prot_op));
+
        if (ctio != NULL) {
                struct ctio7_from_24xx *c = (struct ctio7_from_24xx *)ctio;
                term = !(c->flags &
@@ -3760,32 +3895,15 @@ static void qlt_do_ctio_completion(struct scsi_qla_host *vha, uint32_t handle,
                        struct ctio_crc_from_fw *crc =
                                (struct ctio_crc_from_fw *)ctio;
                        ql_dbg(ql_dbg_tgt_mgt, vha, 0xf073,
-                           "qla_target(%d): CTIO with DIF_ERROR status %x received (state %x, se_cmd %p) actual_dif[0x%llx] expect_dif[0x%llx]\n",
+                           "qla_target(%d): CTIO with DIF_ERROR status %x "
+                           "received (state %x, ulp_cmd %p) actual_dif[0x%llx] "
+                           "expect_dif[0x%llx]\n",
                            vha->vp_idx, status, cmd->state, se_cmd,
                            *((u64 *)&crc->actual_dif[0]),
                            *((u64 *)&crc->expected_dif[0]));
 
-                       if (qlt_handle_dif_error(vha, cmd, ctio)) {
-                               if (cmd->state == QLA_TGT_STATE_NEED_DATA) {
-                                       /* scsi Write/xfer rdy complete */
-                                       goto skip_term;
-                               } else {
-                                       /* scsi read/xmit respond complete
-                                        * call handle dif to send scsi status
-                                        * rather than terminate exchange.
-                                        */
-                                       cmd->state = QLA_TGT_STATE_PROCESSED;
-                                       ha->tgt.tgt_ops->handle_dif_err(cmd);
-                                       return;
-                               }
-                       } else {
-                               /* Need to generate a SCSI good completion.
-                                * because FW did not send scsi status.
-                                */
-                               status = 0;
-                               goto skip_term;
-                       }
-                       break;
+                       qlt_handle_dif_error(vha, cmd, ctio);
+                       return;
                }
                default:
                        ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05b,
@@ -3808,7 +3926,6 @@ static void qlt_do_ctio_completion(struct scsi_qla_host *vha, uint32_t handle,
                                return;
                }
        }
-skip_term:
 
        if (cmd->state == QLA_TGT_STATE_PROCESSED) {
                cmd->trc_flags |= TRC_CTIO_DONE;
@@ -4584,7 +4701,8 @@ static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
                }
 
                if (sess != NULL) {
-                       if (sess->fw_login_state == DSC_LS_PLOGI_PEND) {
+                       if (sess->fw_login_state != DSC_LS_PLOGI_PEND &&
+                           sess->fw_login_state != DSC_LS_PLOGI_COMP) {
                                /*
                                 * Impatient initiator sent PRLI before last
                                 * PLOGI could finish. Will force him to re-try,
@@ -4623,15 +4741,23 @@ static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
 
                /* Make session global (not used in fabric mode) */
                if (ha->current_topology != ISP_CFG_F) {
-                       set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
-                       set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
-                       qla2xxx_wake_dpc(vha);
+                       if (sess) {
+                               ql_dbg(ql_dbg_disc, vha, 0xffff,
+                                   "%s %d %8phC post nack\n",
+                                   __func__, __LINE__, sess->port_name);
+                               qla24xx_post_nack_work(vha, sess, iocb,
+                                       SRB_NACK_PRLI);
+                               res = 0;
+                       } else {
+                               set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
+                               set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
+                               qla2xxx_wake_dpc(vha);
+                       }
                } else {
                        if (sess) {
                                ql_dbg(ql_dbg_disc, vha, 0xffff,
-                                          "%s %d %8phC post nack\n",
-                                          __func__, __LINE__, sess->port_name);
-
+                                   "%s %d %8phC post nack\n",
+                                   __func__, __LINE__, sess->port_name);
                                qla24xx_post_nack_work(vha, sess, iocb,
                                        SRB_NACK_PRLI);
                                res = 0;
@@ -4639,7 +4765,6 @@ static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
                }
                break;
 
-
        case ELS_TPRLO:
                if (le16_to_cpu(iocb->u.isp24.flags) &
                        NOTIFY24XX_FLAGS_GLOBAL_TPRLO) {
@@ -5079,16 +5204,22 @@ qlt_send_busy(struct scsi_qla_host *vha,
 
 static int
 qlt_chk_qfull_thresh_hold(struct scsi_qla_host *vha,
-       struct atio_from_isp *atio)
+       struct atio_from_isp *atio, bool ha_locked)
 {
        struct qla_hw_data *ha = vha->hw;
        uint16_t status;
+       unsigned long flags;
 
        if (ha->tgt.num_pend_cmds < Q_FULL_THRESH_HOLD(ha))
                return 0;
 
+       if (!ha_locked)
+               spin_lock_irqsave(&ha->hardware_lock, flags);
        status = temp_sam_status;
        qlt_send_busy(vha, atio, status);
+       if (!ha_locked)
+               spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
        return 1;
 }
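
qlt_chk_qfull_thresh_hold() now takes ha_locked so the response-queue path, which already holds hardware_lock, does not try to take it a second time, while the ATIO path acquires it around qlt_send_busy(). A userspace analog of this caller-holds-the-lock pattern (pthreads stand in for the spinlock; names are illustrative):

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t hw_lock = PTHREAD_MUTEX_INITIALIZER;

    /* Take the lock only if the caller does not already hold it. */
    static void send_busy(int caller_holds_lock)
    {
            if (!caller_holds_lock)
                    pthread_mutex_lock(&hw_lock);

            puts("queue full: sending SAM BUSY"); /* work done under lock */

            if (!caller_holds_lock)
                    pthread_mutex_unlock(&hw_lock);
    }

    int main(void)
    {
            send_busy(0);                   /* ATIO path: lock not held */
            pthread_mutex_lock(&hw_lock);
            send_busy(1);                   /* response path: already locked */
            pthread_mutex_unlock(&hw_lock);
            return 0;
    }
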
 
@@ -5103,7 +5234,7 @@ static void qlt_24xx_atio_pkt(struct scsi_qla_host *vha,
        unsigned long flags;
 
        if (unlikely(tgt == NULL)) {
-               ql_dbg(ql_dbg_io, vha, 0x3064,
+               ql_dbg(ql_dbg_tgt, vha, 0x3064,
                    "ATIO pkt, but no tgt (ha %p)", ha);
                return;
        }
@@ -5133,7 +5264,7 @@ static void qlt_24xx_atio_pkt(struct scsi_qla_host *vha,
 
 
                if (likely(atio->u.isp24.fcp_cmnd.task_mgmt_flags == 0)) {
-                       rc = qlt_chk_qfull_thresh_hold(vha, atio);
+                       rc = qlt_chk_qfull_thresh_hold(vha, atio, ha_locked);
                        if (rc != 0) {
                                tgt->atio_irq_cmd_count--;
                                return;
@@ -5256,7 +5387,7 @@ static void qlt_response_pkt(struct scsi_qla_host *vha, response_t *pkt)
                        break;
                }
 
-               rc = qlt_chk_qfull_thresh_hold(vha, atio);
+               rc = qlt_chk_qfull_thresh_hold(vha, atio, true);
                if (rc != 0) {
                        tgt->irq_cmd_count--;
                        return;
@@ -5531,7 +5662,7 @@ static fc_port_t *qlt_get_port_database(struct scsi_qla_host *vha,
 
        fcport->loop_id = loop_id;
 
-       rc = qla2x00_get_port_database(vha, fcport, 0);
+       rc = qla24xx_gpdb_wait(vha, fcport, 0);
        if (rc != QLA_SUCCESS) {
                ql_dbg(ql_dbg_tgt_mgt, vha, 0xf070,
                    "qla_target(%d): Failed to retrieve fcport "
@@ -5713,30 +5844,23 @@ static void qlt_abort_work(struct qla_tgt *tgt,
                }
        }
 
-       spin_lock_irqsave(&ha->hardware_lock, flags);
-
-       if (tgt->tgt_stop)
-               goto out_term;
-
        rc = __qlt_24xx_handle_abts(vha, &prm->abts, sess);
+       ha->tgt.tgt_ops->put_sess(sess);
+       spin_unlock_irqrestore(&ha->tgt.sess_lock, flags2);
+
        if (rc != 0)
                goto out_term;
-       spin_unlock_irqrestore(&ha->hardware_lock, flags);
-       if (sess)
-               ha->tgt.tgt_ops->put_sess(sess);
-       spin_unlock_irqrestore(&ha->tgt.sess_lock, flags2);
        return;
 
 out_term2:
-       spin_lock_irqsave(&ha->hardware_lock, flags);
+       if (sess)
+               ha->tgt.tgt_ops->put_sess(sess);
+       spin_unlock_irqrestore(&ha->tgt.sess_lock, flags2);
 
 out_term:
+       spin_lock_irqsave(&ha->hardware_lock, flags);
        qlt_24xx_send_abts_resp(vha, &prm->abts, FCP_TMF_REJECTED, false);
        spin_unlock_irqrestore(&ha->hardware_lock, flags);
-
-       if (sess)
-               ha->tgt.tgt_ops->put_sess(sess);
-       spin_unlock_irqrestore(&ha->tgt.sess_lock, flags2);
 }
 
 static void qlt_tmr_work(struct qla_tgt *tgt,
@@ -5756,7 +5880,7 @@ static void qlt_tmr_work(struct qla_tgt *tgt,
        spin_lock_irqsave(&ha->tgt.sess_lock, flags);
 
        if (tgt->tgt_stop)
-               goto out_term;
+               goto out_term2;
 
        s_id = prm->tm_iocb2.u.isp24.fcp_hdr.s_id;
        sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, s_id);
@@ -5768,11 +5892,11 @@ static void qlt_tmr_work(struct qla_tgt *tgt,
 
                spin_lock_irqsave(&ha->tgt.sess_lock, flags);
                if (!sess)
-                       goto out_term;
+                       goto out_term2;
        } else {
                if (sess->deleted) {
                        sess = NULL;
-                       goto out_term;
+                       goto out_term2;
                }
 
                if (!kref_get_unless_zero(&sess->sess_kref)) {
@@ -5780,7 +5904,7 @@ static void qlt_tmr_work(struct qla_tgt *tgt,
                            "%s: kref_get fail %8phC\n",
                             __func__, sess->port_name);
                        sess = NULL;
-                       goto out_term;
+                       goto out_term2;
                }
        }
 
@@ -5790,17 +5914,19 @@ static void qlt_tmr_work(struct qla_tgt *tgt,
        unpacked_lun = scsilun_to_int((struct scsi_lun *)&lun);
 
        rc = qlt_issue_task_mgmt(sess, unpacked_lun, fn, iocb, 0);
-       if (rc != 0)
-               goto out_term;
-
        ha->tgt.tgt_ops->put_sess(sess);
        spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
+
+       if (rc != 0)
+               goto out_term;
        return;
 
+out_term2:
+       if (sess)
+               ha->tgt.tgt_ops->put_sess(sess);
+       spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
 out_term:
        qlt_send_term_exchange(vha, NULL, &prm->tm_iocb2, 1, 0);
-       ha->tgt.tgt_ops->put_sess(sess);
-       spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
 }
 
 static void qlt_sess_work_fn(struct work_struct *work)
@@ -5893,13 +6019,13 @@ int qlt_add_target(struct qla_hw_data *ha, struct scsi_qla_host *base_vha)
        tgt->datasegs_per_cmd = QLA_TGT_DATASEGS_PER_CMD_24XX;
        tgt->datasegs_per_cont = QLA_TGT_DATASEGS_PER_CONT_24XX;
 
-       if (base_vha->fc_vport)
-               return 0;
-
        mutex_lock(&qla_tgt_mutex);
        list_add_tail(&tgt->tgt_list_entry, &qla_tgt_glist);
        mutex_unlock(&qla_tgt_mutex);
 
+       if (ha->tgt.tgt_ops && ha->tgt.tgt_ops->add_target)
+               ha->tgt.tgt_ops->add_target(base_vha);
+
        return 0;
 }
 
@@ -5928,6 +6054,17 @@ int qlt_remove_target(struct qla_hw_data *ha, struct scsi_qla_host *vha)
        return 0;
 }
 
+void qlt_remove_target_resources(struct qla_hw_data *ha)
+{
+       struct scsi_qla_host *node;
+       u32 key = 0;
+
+       btree_for_each_safe32(&ha->tgt.host_map, key, node)
+               btree_remove32(&ha->tgt.host_map, key);
+
+       btree_destroy32(&ha->tgt.host_map);
+}
+
 static void qlt_lport_dump(struct scsi_qla_host *vha, u64 wwpn,
        unsigned char *b)
 {
@@ -6234,7 +6371,7 @@ qlt_24xx_process_atio_queue(struct scsi_qla_host *vha, uint8_t ha_locked)
        struct atio_from_isp *pkt;
        int cnt, i;
 
-       if (!vha->flags.online)
+       if (!ha->flags.fw_started)
                return;
 
        while ((ha->tgt.atio_ring_ptr->signature != ATIO_PROCESSED) ||
@@ -6581,6 +6718,8 @@ qlt_modify_vp_config(struct scsi_qla_host *vha,
 void
 qlt_probe_one_stage1(struct scsi_qla_host *base_vha, struct qla_hw_data *ha)
 {
+       int rc;
+
        if (!QLA_TGT_MODE_ENABLED())
                return;
 
@@ -6600,6 +6739,13 @@ qlt_probe_one_stage1(struct scsi_qla_host *base_vha, struct qla_hw_data *ha)
            qlt_unknown_atio_work_fn);
 
        qlt_clear_mode(base_vha);
+
+       rc = btree_init32(&ha->tgt.host_map);
+       if (rc)
+               ql_log(ql_log_info, base_vha, 0xffff,
+                   "Unable to initialize ha->host_map btree\n");
+
+       qlt_update_vp_map(base_vha, SET_VP_IDX);
 }
 
 irqreturn_t
@@ -6642,6 +6788,8 @@ qlt_handle_abts_recv_work(struct work_struct *work)
        spin_lock_irqsave(&ha->hardware_lock, flags);
        qlt_response_pkt_all_vps(vha, (response_t *)&op->atio);
        spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+       kfree(op);
 }
 
 void
@@ -6706,25 +6854,69 @@ qlt_mem_free(struct qla_hw_data *ha)
 void
 qlt_update_vp_map(struct scsi_qla_host *vha, int cmd)
 {
+       void *slot;
+       u32 key;
+       int rc;
+
        if (!QLA_TGT_MODE_ENABLED())
                return;
 
+       key = vha->d_id.b24;
+
        switch (cmd) {
        case SET_VP_IDX:
                vha->hw->tgt.tgt_vp_map[vha->vp_idx].vha = vha;
                break;
        case SET_AL_PA:
-               vha->hw->tgt.tgt_vp_map[vha->d_id.b.al_pa].idx = vha->vp_idx;
+               slot = btree_lookup32(&vha->hw->tgt.host_map, key);
+               if (!slot) {
+                       ql_dbg(ql_dbg_tgt_mgt, vha, 0xffff,
+                           "Save vha in host_map %p %06x\n", vha, key);
+                       rc = btree_insert32(&vha->hw->tgt.host_map,
+                               key, vha, GFP_ATOMIC);
+                       if (rc)
+                               ql_log(ql_log_info, vha, 0xffff,
+                                   "Unable to insert s_id into host_map: %06x\n",
+                                   key);
+                       return;
+               }
+               ql_dbg(ql_dbg_tgt_mgt, vha, 0xffff,
+                       "replace existing vha in host_map %p %06x\n", vha, key);
+               btree_update32(&vha->hw->tgt.host_map, key, vha);
                break;
        case RESET_VP_IDX:
                vha->hw->tgt.tgt_vp_map[vha->vp_idx].vha = NULL;
                break;
        case RESET_AL_PA:
-               vha->hw->tgt.tgt_vp_map[vha->d_id.b.al_pa].idx = 0;
+               ql_dbg(ql_dbg_tgt_mgt, vha, 0xffff,
+                  "clear vha in host_map %p %06x\n", vha, key);
+               slot = btree_lookup32(&vha->hw->tgt.host_map, key);
+               if (slot)
+                       btree_remove32(&vha->hw->tgt.host_map, key);
+               vha->d_id.b24 = 0;
                break;
        }
 }
 
+void qlt_update_host_map(struct scsi_qla_host *vha, port_id_t id)
+{
+       unsigned long flags;
+       struct qla_hw_data *ha = vha->hw;
+
+       if (!vha->d_id.b24) {
+               spin_lock_irqsave(&ha->vport_slock, flags);
+               vha->d_id = id;
+               qlt_update_vp_map(vha, SET_AL_PA);
+               spin_unlock_irqrestore(&ha->vport_slock, flags);
+       } else if (vha->d_id.b24 != id.b24) {
+               spin_lock_irqsave(&ha->vport_slock, flags);
+               qlt_update_vp_map(vha, RESET_AL_PA);
+               vha->d_id = id;
+               qlt_update_vp_map(vha, SET_AL_PA);
+               spin_unlock_irqrestore(&ha->vport_slock, flags);
+       }
+}
+
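
qlt_update_host_map() serializes d_id changes under vport_slock and keeps host_map, a lib/btree radix keyed by the 24-bit FC port ID, in sync. A kernel-style sketch of the lookup-then-insert-or-update pattern used above (APIs from <linux/btree.h>; assumes btree_init32() ran at probe time, as in stage1):

    #include <linux/btree.h>
    #include <linux/gfp.h>

    static struct btree_head32 host_map;

    static int host_map_set(u32 port_id_b24, void *vha)
    {
            if (!btree_lookup32(&host_map, port_id_b24))
                    return btree_insert32(&host_map, port_id_b24, vha,
                                          GFP_ATOMIC);
            return btree_update32(&host_map, port_id_b24, vha);
    }
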
 static int __init qlt_parse_ini_mode(void)
 {
        if (strcasecmp(qlini_mode, QLA2XXX_INI_MODE_STR_EXCLUSIVE) == 0)
index a7f90dc..d644202 100644 (file)
@@ -378,6 +378,14 @@ static inline void adjust_corrupted_atio(struct atio_from_isp *atio)
        atio->u.isp24.fcp_cmnd.add_cdb_len = 0;
 }
 
+static inline int get_datalen_for_atio(struct atio_from_isp *atio)
+{
+       int len = atio->u.isp24.fcp_cmnd.add_cdb_len;
+
+       return (be32_to_cpu(get_unaligned((uint32_t *)
+           &atio->u.isp24.fcp_cmnd.add_cdb[len * 4])));
+}
+
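
get_datalen_for_atio() pulls the big-endian FCP_DL field, which sits immediately after the additional CDB bytes (add_cdb_len counts 4-byte words). A userspace sketch of the same offset arithmetic (names are illustrative):

    #include <stdint.h>
    #include <string.h>
    #include <arpa/inet.h>
    #include <stdio.h>

    /* FCP_DL follows add_cdb_len * 4 bytes of additional CDB. */
    static uint32_t fcp_dl(const uint8_t *add_cdb, unsigned add_cdb_len)
    {
            uint32_t be_dl;

            memcpy(&be_dl, add_cdb + add_cdb_len * 4, sizeof(be_dl));
            return ntohl(be_dl);
    }

    int main(void)
    {
            uint8_t buf[12] = { [8] = 0x00, 0x01, 0x00, 0x00 };

            printf("%u\n", fcp_dl(buf, 2)); /* 65536 */
            return 0;
    }
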
 #define CTIO_TYPE7 0x12 /* Continue target I/O entry (for 24xx) */
 
 /*
@@ -667,7 +675,6 @@ struct qla_tgt_func_tmpl {
        int (*handle_cmd)(struct scsi_qla_host *, struct qla_tgt_cmd *,
                        unsigned char *, uint32_t, int, int, int);
        void (*handle_data)(struct qla_tgt_cmd *);
-       void (*handle_dif_err)(struct qla_tgt_cmd *);
        int (*handle_tmr)(struct qla_tgt_mgmt_cmd *, uint32_t, uint16_t,
                        uint32_t);
        void (*free_cmd)(struct qla_tgt_cmd *);
@@ -684,6 +691,9 @@ struct qla_tgt_func_tmpl {
        void (*clear_nacl_from_fcport_map)(struct fc_port *);
        void (*put_sess)(struct fc_port *);
        void (*shutdown_sess)(struct fc_port *);
+       int (*get_dif_tags)(struct qla_tgt_cmd *cmd, uint16_t *pfw_prot_opts);
+       int (*chk_dif_tags)(uint32_t tag);
+       void (*add_target)(struct scsi_qla_host *);
 };
 
 int qla2x00_wait_for_hba_online(struct scsi_qla_host *);
@@ -720,8 +730,8 @@ int qla2x00_wait_for_hba_online(struct scsi_qla_host *);
 #define QLA_TGT_ABORT_ALL               0xFFFE
 #define QLA_TGT_NEXUS_LOSS_SESS         0xFFFD
 #define QLA_TGT_NEXUS_LOSS              0xFFFC
-#define QLA_TGT_ABTS                                   0xFFFB
-#define QLA_TGT_2G_ABORT_TASK                  0xFFFA
+#define QLA_TGT_ABTS                   0xFFFB
+#define QLA_TGT_2G_ABORT_TASK          0xFFFA
 
 /* Notify Acknowledge flags */
 #define NOTIFY_ACK_RES_COUNT        BIT_8
@@ -845,6 +855,7 @@ enum trace_flags {
        TRC_CMD_FREE = BIT_17,
        TRC_DATA_IN = BIT_18,
        TRC_ABORT = BIT_19,
+       TRC_DIF_ERR = BIT_20,
 };
 
 struct qla_tgt_cmd {
@@ -862,7 +873,6 @@ struct qla_tgt_cmd {
        unsigned int sg_mapped:1;
        unsigned int free_sg:1;
        unsigned int write_data_transferred:1;
-       unsigned int ctx_dsd_alloced:1;
        unsigned int q_full:1;
        unsigned int term_exchg:1;
        unsigned int cmd_sent_to_fw:1;
@@ -885,11 +895,25 @@ struct qla_tgt_cmd {
        struct list_head cmd_list;
 
        struct atio_from_isp atio;
-       /* t10dif */
+
+       uint8_t ctx_dsd_alloced;
+
+       /* T10-DIF */
+#define DIF_ERR_NONE 0
+#define DIF_ERR_GRD 1
+#define DIF_ERR_REF 2
+#define DIF_ERR_APP 3
+       int8_t dif_err_code;
        struct scatterlist *prot_sg;
        uint32_t prot_sg_cnt;
-       uint32_t blk_sz;
+       uint32_t blk_sz, num_blks;
+       uint8_t scsi_status, sense_key, asc, ascq;
+
        struct crc_context *ctx;
+       uint8_t         *cdb;
+       uint64_t        lba;
+       uint16_t        a_guard, e_guard, a_app_tag, e_app_tag;
+       uint32_t        a_ref_tag, e_ref_tag;
 
        uint64_t jiffies_at_alloc;
        uint64_t jiffies_at_free;
@@ -1053,4 +1077,7 @@ extern int qlt_free_qfull_cmds(struct scsi_qla_host *);
 extern void qlt_logo_completion_handler(fc_port_t *, int);
 extern void qlt_do_generation_tick(struct scsi_qla_host *, int *);
 
+void qlt_send_resp_ctio(scsi_qla_host_t *, struct qla_tgt_cmd *, uint8_t,
+    uint8_t, uint8_t, uint8_t);
+
 #endif /* __QLA_TARGET_H */
index 3cb1964..45bc84e 100644 (file)
@@ -7,9 +7,9 @@
 /*
  * Driver version
  */
-#define QLA2XXX_VERSION      "8.07.00.38-k"
+#define QLA2XXX_VERSION      "9.00.00.00-k"
 
-#define QLA_DRIVER_MAJOR_VER   8
-#define QLA_DRIVER_MINOR_VER   7
+#define QLA_DRIVER_MAJOR_VER   9
+#define QLA_DRIVER_MINOR_VER   0
 #define QLA_DRIVER_PATCH_VER   0
 #define QLA_DRIVER_BETA_VER    0
index 8e8ab0f..7443e4e 100644 (file)
@@ -531,6 +531,24 @@ static void tcm_qla2xxx_handle_data_work(struct work_struct *work)
                        return;
                }
 
+               switch (cmd->dif_err_code) {
+               case DIF_ERR_GRD:
+                       cmd->se_cmd.pi_err =
+                           TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED;
+                       break;
+               case DIF_ERR_REF:
+                       cmd->se_cmd.pi_err =
+                           TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED;
+                       break;
+               case DIF_ERR_APP:
+                       cmd->se_cmd.pi_err =
+                           TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED;
+                       break;
+               case DIF_ERR_NONE:
+               default:
+                       break;
+               }
+
                if (cmd->se_cmd.pi_err)
                        transport_generic_request_failure(&cmd->se_cmd,
                                cmd->se_cmd.pi_err);
@@ -555,25 +573,23 @@ static void tcm_qla2xxx_handle_data(struct qla_tgt_cmd *cmd)
        queue_work_on(smp_processor_id(), tcm_qla2xxx_free_wq, &cmd->work);
 }
 
-static void tcm_qla2xxx_handle_dif_work(struct work_struct *work)
+static int tcm_qla2xxx_chk_dif_tags(uint32_t tag)
 {
-       struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work);
-
-       /* take an extra kref to prevent cmd free too early.
-        * need to wait for SCSI status/check condition to
-        * finish responding generate by transport_generic_request_failure.
-        */
-       kref_get(&cmd->se_cmd.cmd_kref);
-       transport_generic_request_failure(&cmd->se_cmd, cmd->se_cmd.pi_err);
+       return 0;
 }
 
-/*
- * Called from qla_target.c:qlt_do_ctio_completion()
- */
-static void tcm_qla2xxx_handle_dif_err(struct qla_tgt_cmd *cmd)
+static int tcm_qla2xxx_dif_tags(struct qla_tgt_cmd *cmd,
+    uint16_t *pfw_prot_opts)
 {
-       INIT_WORK(&cmd->work, tcm_qla2xxx_handle_dif_work);
-       queue_work(tcm_qla2xxx_free_wq, &cmd->work);
+       struct se_cmd *se_cmd = &cmd->se_cmd;
+
+       if (!(se_cmd->prot_checks & TARGET_DIF_CHECK_GUARD))
+               *pfw_prot_opts |= PO_DISABLE_GUARD_CHECK;
+
+       if (!(se_cmd->prot_checks & TARGET_DIF_CHECK_APPTAG))
+               *pfw_prot_opts |= PO_DIS_APP_TAG_VALD;
+
+       return 0;
 }
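
tcm_qla2xxx_dif_tags() converts the backstore's per-command checks into firmware "disable validation" bits: any check the backstore did not request becomes a PO_DIS_* opt-out. A sketch of that translation with illustrative bit values (the real PO_* and TARGET_DIF_CHECK_* constants live in the driver and target core headers):

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative values; the real constants come from driver headers. */
    #define CHECK_GUARD    (1u << 0)
    #define CHECK_APPTAG   (1u << 1)
    #define FW_DIS_GUARD   (1u << 4)
    #define FW_DIS_APPTAG  (1u << 5)

    static uint16_t fw_prot_opts(uint32_t prot_checks)
    {
            uint16_t opts = 0;

            if (!(prot_checks & CHECK_GUARD))
                    opts |= FW_DIS_GUARD;
            if (!(prot_checks & CHECK_APPTAG))
                    opts |= FW_DIS_APPTAG;
            return opts;
    }

    int main(void)
    {
            printf("%#x\n", fw_prot_opts(CHECK_GUARD)); /* app-tag disabled */
            return 0;
    }
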
 
 /*
@@ -1610,7 +1626,6 @@ static void tcm_qla2xxx_update_sess(struct fc_port *sess, port_id_t s_id,
 static struct qla_tgt_func_tmpl tcm_qla2xxx_template = {
        .handle_cmd             = tcm_qla2xxx_handle_cmd,
        .handle_data            = tcm_qla2xxx_handle_data,
-       .handle_dif_err         = tcm_qla2xxx_handle_dif_err,
        .handle_tmr             = tcm_qla2xxx_handle_tmr,
        .free_cmd               = tcm_qla2xxx_free_cmd,
        .free_mcmd              = tcm_qla2xxx_free_mcmd,
@@ -1622,6 +1637,8 @@ static struct qla_tgt_func_tmpl tcm_qla2xxx_template = {
        .clear_nacl_from_fcport_map = tcm_qla2xxx_clear_nacl_from_fcport_map,
        .put_sess               = tcm_qla2xxx_put_sess,
        .shutdown_sess          = tcm_qla2xxx_shutdown_sess,
+       .get_dif_tags           = tcm_qla2xxx_dif_tags,
+       .chk_dif_tags           = tcm_qla2xxx_chk_dif_tags,
 };
 
 static int tcm_qla2xxx_init_lport(struct tcm_qla2xxx_lport *lport)
index 1359913..e8c26e6 100644 (file)
@@ -7642,7 +7642,7 @@ static inline ssize_t ufshcd_pm_lvl_store(struct device *dev,
        if (kstrtoul(buf, 0, &value))
                return -EINVAL;
 
-       if ((value < UFS_PM_LVL_0) || (value >= UFS_PM_LVL_MAX))
+       if (value >= UFS_PM_LVL_MAX)
                return -EINVAL;
 
        spin_lock_irqsave(hba->host->host_lock, flags);
index f5e3300..fd7c16a 100644 (file)
@@ -43,7 +43,7 @@
 #include "target_core_ua.h"
 
 static sense_reason_t core_alua_check_transition(int state, int valid,
-                                                int *primary);
+                                                int *primary, int explicit);
 static int core_alua_set_tg_pt_secondary_state(
                struct se_lun *lun, int explicit, int offline);
 
@@ -335,8 +335,8 @@ target_emulate_set_target_port_groups(struct se_cmd *cmd)
                 * the state is a primary or secondary target port asymmetric
                 * access state.
                 */
-               rc = core_alua_check_transition(alua_access_state,
-                                               valid_states, &primary);
+               rc = core_alua_check_transition(alua_access_state, valid_states,
+                                               &primary, 1);
                if (rc) {
                        /*
                         * If the SET TARGET PORT GROUPS attempts to establish
@@ -691,7 +691,7 @@ target_alua_state_check(struct se_cmd *cmd)
 
        if (dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE)
                return 0;
-       if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)
+       if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_ALUA)
                return 0;
 
        /*
@@ -762,7 +762,7 @@ target_alua_state_check(struct se_cmd *cmd)
  * Check implicit and explicit ALUA state change request.
  */
 static sense_reason_t
-core_alua_check_transition(int state, int valid, int *primary)
+core_alua_check_transition(int state, int valid, int *primary, int explicit)
 {
        /*
         * OPTIMIZED, NON-OPTIMIZED, STANDBY and UNAVAILABLE are
@@ -804,11 +804,14 @@ core_alua_check_transition(int state, int valid, int *primary)
                *primary = 0;
                break;
        case ALUA_ACCESS_STATE_TRANSITION:
-               /*
-                * Transitioning is set internally, and
-                * cannot be selected manually.
-                */
-               goto not_supported;
+               if (!(valid & ALUA_T_SUP) || explicit)
+                       /*
+                        * Transitioning is set internally and by tcmu daemon,
+                        * and cannot be selected through a STPG.
+                        */
+                       goto not_supported;
+               *primary = 0;
+               break;
        default:
                pr_err("Unknown ALUA access state: 0x%02x\n", state);
                return TCM_INVALID_PARAMETER_LIST;
@@ -1013,7 +1016,7 @@ static void core_alua_queue_state_change_ua(struct t10_alua_tg_pt_gp *tg_pt_gp)
 static void core_alua_do_transition_tg_pt_work(struct work_struct *work)
 {
        struct t10_alua_tg_pt_gp *tg_pt_gp = container_of(work,
-               struct t10_alua_tg_pt_gp, tg_pt_gp_transition_work.work);
+               struct t10_alua_tg_pt_gp, tg_pt_gp_transition_work);
        struct se_device *dev = tg_pt_gp->tg_pt_gp_dev;
        bool explicit = (tg_pt_gp->tg_pt_gp_alua_access_status ==
                         ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG);
@@ -1070,32 +1073,19 @@ static int core_alua_do_transition_tg_pt(
        if (atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state) == new_state)
                return 0;
 
-       if (new_state == ALUA_ACCESS_STATE_TRANSITION)
+       if (explicit && new_state == ALUA_ACCESS_STATE_TRANSITION)
                return -EAGAIN;
 
        /*
         * Flush any pending transitions
         */
-       if (!explicit && tg_pt_gp->tg_pt_gp_implicit_trans_secs &&
-           atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state) ==
-           ALUA_ACCESS_STATE_TRANSITION) {
-               /* Just in case */
-               tg_pt_gp->tg_pt_gp_alua_pending_state = new_state;
-               tg_pt_gp->tg_pt_gp_transition_complete = &wait;
-               flush_delayed_work(&tg_pt_gp->tg_pt_gp_transition_work);
-               wait_for_completion(&wait);
-               tg_pt_gp->tg_pt_gp_transition_complete = NULL;
-               return 0;
-       }
+       if (!explicit)
+               flush_work(&tg_pt_gp->tg_pt_gp_transition_work);
 
        /*
         * Save the old primary ALUA access state, and set the current state
         * to ALUA_ACCESS_STATE_TRANSITION.
         */
-       tg_pt_gp->tg_pt_gp_alua_previous_state =
-               atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state);
-       tg_pt_gp->tg_pt_gp_alua_pending_state = new_state;
-
        atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state,
                        ALUA_ACCESS_STATE_TRANSITION);
        tg_pt_gp->tg_pt_gp_alua_access_status = (explicit) ?
@@ -1104,6 +1094,13 @@ static int core_alua_do_transition_tg_pt(
 
        core_alua_queue_state_change_ua(tg_pt_gp);
 
+       if (new_state == ALUA_ACCESS_STATE_TRANSITION)
+               return 0;
+
+       tg_pt_gp->tg_pt_gp_alua_previous_state =
+               atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state);
+       tg_pt_gp->tg_pt_gp_alua_pending_state = new_state;
+
        /*
         * Check for the optional ALUA primary state transition delay
         */
@@ -1117,17 +1114,9 @@ static int core_alua_do_transition_tg_pt(
        atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt);
        spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
 
-       if (!explicit && tg_pt_gp->tg_pt_gp_implicit_trans_secs) {
-               unsigned long transition_tmo;
-
-               transition_tmo = tg_pt_gp->tg_pt_gp_implicit_trans_secs * HZ;
-               queue_delayed_work(tg_pt_gp->tg_pt_gp_dev->tmr_wq,
-                                  &tg_pt_gp->tg_pt_gp_transition_work,
-                                  transition_tmo);
-       } else {
+       schedule_work(&tg_pt_gp->tg_pt_gp_transition_work);
+       if (explicit) {
                tg_pt_gp->tg_pt_gp_transition_complete = &wait;
-               queue_delayed_work(tg_pt_gp->tg_pt_gp_dev->tmr_wq,
-                                  &tg_pt_gp->tg_pt_gp_transition_work, 0);
                wait_for_completion(&wait);
                tg_pt_gp->tg_pt_gp_transition_complete = NULL;
        }
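
The ALUA rework replaces the delayed work with plain work: explicit STPGs schedule the transition and block on a completion, implicit ones fire and forget. A kernel-style sketch of that schedule-then-wait pattern (assumes INIT_WORK(&t->work, transition_fn) ran at setup time):

    #include <linux/workqueue.h>
    #include <linux/completion.h>

    struct transition {
            struct work_struct work;
            struct completion *done;
    };

    static void transition_fn(struct work_struct *w)
    {
            struct transition *t = container_of(w, struct transition, work);

            /* ... finish the port group state change here ... */
            if (t->done)
                    complete(t->done);
    }

    static void do_transition(struct transition *t, bool explicit)
    {
            DECLARE_COMPLETION_ONSTACK(wait);

            if (explicit)
                    t->done = &wait;
            schedule_work(&t->work);
            if (explicit) {
                    wait_for_completion(&wait);
                    t->done = NULL;
            }
    }
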
@@ -1149,8 +1138,12 @@ int core_alua_do_port_transition(
        struct t10_alua_tg_pt_gp *tg_pt_gp;
        int primary, valid_states, rc = 0;
 
+       if (l_dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_ALUA)
+               return -ENODEV;
+
        valid_states = l_tg_pt_gp->tg_pt_gp_alua_supported_states;
-       if (core_alua_check_transition(new_state, valid_states, &primary) != 0)
+       if (core_alua_check_transition(new_state, valid_states, &primary,
+                                      explicit) != 0)
                return -EINVAL;
 
        local_lu_gp_mem = l_dev->dev_alua_lu_gp_mem;
@@ -1695,8 +1688,8 @@ struct t10_alua_tg_pt_gp *core_alua_allocate_tg_pt_gp(struct se_device *dev,
        mutex_init(&tg_pt_gp->tg_pt_gp_md_mutex);
        spin_lock_init(&tg_pt_gp->tg_pt_gp_lock);
        atomic_set(&tg_pt_gp->tg_pt_gp_ref_cnt, 0);
-       INIT_DELAYED_WORK(&tg_pt_gp->tg_pt_gp_transition_work,
-                         core_alua_do_transition_tg_pt_work);
+       INIT_WORK(&tg_pt_gp->tg_pt_gp_transition_work,
+                 core_alua_do_transition_tg_pt_work);
        tg_pt_gp->tg_pt_gp_dev = dev;
        atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state,
                ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED);
@@ -1804,7 +1797,7 @@ void core_alua_free_tg_pt_gp(
        dev->t10_alua.alua_tg_pt_gps_counter--;
        spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
 
-       flush_delayed_work(&tg_pt_gp->tg_pt_gp_transition_work);
+       flush_work(&tg_pt_gp->tg_pt_gp_transition_work);
 
        /*
         * Allow a struct t10_alua_tg_pt_gp_member * referenced by
@@ -1973,7 +1966,7 @@ ssize_t core_alua_store_tg_pt_gp_info(
        unsigned char buf[TG_PT_GROUP_NAME_BUF];
        int move = 0;
 
-       if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH ||
+       if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_ALUA ||
            (dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE))
                return -ENODEV;
 
@@ -2230,7 +2223,7 @@ ssize_t core_alua_store_offline_bit(
        unsigned long tmp;
        int ret;
 
-       if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH ||
+       if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_ALUA ||
            (dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE))
                return -ENODEV;
 
@@ -2316,7 +2309,8 @@ ssize_t core_alua_store_secondary_write_metadata(
 
 int core_setup_alua(struct se_device *dev)
 {
-       if (!(dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH) &&
+       if (!(dev->transport->transport_flags &
+            TRANSPORT_FLAG_PASSTHROUGH_ALUA) &&
            !(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE)) {
                struct t10_alua_lu_gp_member *lu_gp_mem;
 
index 54b36c9..38b5025 100644
@@ -421,6 +421,10 @@ static int target_fabric_tf_ops_check(const struct target_core_fabric_ops *tfo)
                pr_err("Missing tfo->aborted_task()\n");
                return -EINVAL;
        }
+       if (!tfo->check_stop_free) {
+               pr_err("Missing tfo->check_stop_free()\n");
+               return -EINVAL;
+       }
        /*
         * We at least require tfo->fabric_make_wwn(), tfo->fabric_drop_wwn()
         * tfo->fabric_make_tpg() and tfo->fabric_drop_tpg() in
index a8f8e53..94cda79 100644
@@ -154,7 +154,7 @@ static void pscsi_tape_read_blocksize(struct se_device *dev,
 
        buf = kzalloc(12, GFP_KERNEL);
        if (!buf)
-               return;
+               goto out_free;
 
        memset(cdb, 0, MAX_COMMAND_SIZE);
        cdb[0] = MODE_SENSE;
@@ -169,9 +169,10 @@ static void pscsi_tape_read_blocksize(struct se_device *dev,
         * If MODE_SENSE still returns zero, set the default value to 1024.
         */
        sdev->sector_size = (buf[9] << 16) | (buf[10] << 8) | (buf[11]);
+out_free:
        if (!sdev->sector_size)
                sdev->sector_size = 1024;
-out_free:
+
        kfree(buf);
 }
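
Moving the default below the out_free label means the 1024-byte fallback now also covers the allocation-failure path, not just a zero answer from MODE_SENSE. The control flow in miniature, as a userspace sketch (hypothetical helper, not the pscsi code itself):

    #include <stdio.h>
    #include <stdlib.h>

    /* Whatever goes wrong, the caller always gets a usable block size. */
    static unsigned int read_blocksize(int simulate_oom)
    {
            unsigned int sector_size = 0;
            unsigned char *buf = simulate_oom ? NULL : calloc(1, 12);

            if (!buf)
                    goto out_free;

            /* ... MODE SENSE would fill buf; decode bytes 9..11 ... */
            sector_size = (buf[9] << 16) | (buf[10] << 8) | buf[11];
    out_free:
            if (!sector_size)
                    sector_size = 1024;     /* default covers both failure paths */
            free(buf);                      /* free(NULL) is a no-op */
            return sector_size;
    }

    int main(void)
    {
            printf("%u\n", read_blocksize(1));      /* 1024: allocation failed */
            printf("%u\n", read_blocksize(0));      /* 1024: device reported 0 */
            return 0;
    }
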
 
@@ -314,9 +315,10 @@ static int pscsi_add_device_to_list(struct se_device *dev,
                                sd->lun, sd->queue_depth);
        }
 
-       dev->dev_attrib.hw_block_size = sd->sector_size;
+       dev->dev_attrib.hw_block_size =
+               min_not_zero((int)sd->sector_size, 512);
        dev->dev_attrib.hw_max_sectors =
-               min_t(int, sd->host->max_sectors, queue_max_hw_sectors(q));
+               min_not_zero(sd->host->max_sectors, queue_max_hw_sectors(q));
        dev->dev_attrib.hw_queue_depth = sd->queue_depth;
 
        /*
@@ -339,8 +341,10 @@ static int pscsi_add_device_to_list(struct se_device *dev,
        /*
         * For TYPE_TAPE, attempt to determine blocksize with MODE_SENSE.
         */
-       if (sd->type == TYPE_TAPE)
+       if (sd->type == TYPE_TAPE) {
                pscsi_tape_read_blocksize(dev, sd);
+               dev->dev_attrib.hw_block_size = sd->sector_size;
+       }
        return 0;
 }
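
min_not_zero() treats zero as "not set", so an unreported sector size falls back to 512 and a zero host max_sectors can no longer clamp hw_max_sectors down to nothing. The semantics, mirrored in a userspace sketch:

    #include <stdio.h>

    /* Same idea as the kernel's min_not_zero() macro: a zero argument
     * means "unset" and never wins the comparison. */
    static unsigned int min_not_zero(unsigned int x, unsigned int y)
    {
            if (x == 0)
                    return y;
            if (y == 0)
                    return x;
            return x < y ? x : y;
    }

    int main(void)
    {
            printf("%u\n", min_not_zero(0, 512));    /* 512: fall back */
            printf("%u\n", min_not_zero(4096, 512)); /* 512: ordinary min */
            printf("%u\n", min_not_zero(4096, 0));   /* 4096: ignore unset */
            return 0;
    }
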
 
@@ -406,7 +410,7 @@ static int pscsi_create_type_disk(struct se_device *dev, struct scsi_device *sd)
 /*
  * Called with struct Scsi_Host->host_lock held.
  */
-static int pscsi_create_type_rom(struct se_device *dev, struct scsi_device *sd)
+static int pscsi_create_type_nondisk(struct se_device *dev, struct scsi_device *sd)
        __releases(sh->host_lock)
 {
        struct pscsi_hba_virt *phv = dev->se_hba->hba_ptr;
@@ -433,28 +437,6 @@ static int pscsi_create_type_rom(struct se_device *dev, struct scsi_device *sd)
        return 0;
 }
 
-/*
- * Called with struct Scsi_Host->host_lock called.
- */
-static int pscsi_create_type_other(struct se_device *dev,
-               struct scsi_device *sd)
-       __releases(sh->host_lock)
-{
-       struct pscsi_hba_virt *phv = dev->se_hba->hba_ptr;
-       struct Scsi_Host *sh = sd->host;
-       int ret;
-
-       spin_unlock_irq(sh->host_lock);
-       ret = pscsi_add_device_to_list(dev, sd);
-       if (ret)
-               return ret;
-
-       pr_debug("CORE_PSCSI[%d] - Added Type: %s for %d:%d:%d:%llu\n",
-               phv->phv_host_id, scsi_device_type(sd->type), sh->host_no,
-               sd->channel, sd->id, sd->lun);
-       return 0;
-}
-
 static int pscsi_configure_device(struct se_device *dev)
 {
        struct se_hba *hba = dev->se_hba;
@@ -542,11 +524,8 @@ static int pscsi_configure_device(struct se_device *dev)
                case TYPE_DISK:
                        ret = pscsi_create_type_disk(dev, sd);
                        break;
-               case TYPE_ROM:
-                       ret = pscsi_create_type_rom(dev, sd);
-                       break;
                default:
-                       ret = pscsi_create_type_other(dev, sd);
+                       ret = pscsi_create_type_nondisk(dev, sd);
                        break;
                }
 
@@ -611,8 +590,7 @@ static void pscsi_free_device(struct se_device *dev)
                else if (pdv->pdv_lld_host)
                        scsi_host_put(pdv->pdv_lld_host);
 
-               if ((sd->type == TYPE_DISK) || (sd->type == TYPE_ROM))
-                       scsi_device_put(sd);
+               scsi_device_put(sd);
 
                pdv->pdv_sd = NULL;
        }
@@ -1064,7 +1042,6 @@ static sector_t pscsi_get_blocks(struct se_device *dev)
        if (pdv->pdv_bd && pdv->pdv_bd->bd_part)
                return pdv->pdv_bd->bd_part->nr_sects;
 
-       dump_stack();
        return 0;
 }
 
@@ -1103,7 +1080,8 @@ static void pscsi_req_done(struct request *req, int uptodate)
 static const struct target_backend_ops pscsi_ops = {
        .name                   = "pscsi",
        .owner                  = THIS_MODULE,
-       .transport_flags        = TRANSPORT_FLAG_PASSTHROUGH,
+       .transport_flags        = TRANSPORT_FLAG_PASSTHROUGH |
+                                 TRANSPORT_FLAG_PASSTHROUGH_ALUA,
        .attach_hba             = pscsi_attach_hba,
        .detach_hba             = pscsi_detach_hba,
        .pmode_enable_hba       = pscsi_pmode_enable_hba,
index 68d8aef..c194063 100644
@@ -1105,9 +1105,15 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
                        return ret;
                break;
        case VERIFY:
+       case VERIFY_16:
                size = 0;
-               sectors = transport_get_sectors_10(cdb);
-               cmd->t_task_lba = transport_lba_32(cdb);
+               if (cdb[0] == VERIFY) {
+                       sectors = transport_get_sectors_10(cdb);
+                       cmd->t_task_lba = transport_lba_32(cdb);
+               } else {
+                       sectors = transport_get_sectors_16(cdb);
+                       cmd->t_task_lba = transport_lba_64(cdb);
+               }
                cmd->execute_cmd = sbc_emulate_noop;
                goto check_lba;
        case REZERO_UNIT:
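
VERIFY and VERIFY_16 differ only in field widths: the 10-byte CDB carries a 32-bit LBA at bytes 2-5 and a 16-bit sector count at bytes 7-8, while the 16-byte form carries a 64-bit LBA at bytes 2-9 and a 32-bit count at bytes 10-13. A standalone decoder sketch for just these two opcodes:

    #include <stdint.h>
    #include <stdio.h>

    #define VERIFY_10 0x2f
    #define VERIFY_16 0x8f

    /* Read an n-byte big-endian field from a CDB. */
    static uint64_t get_be(const uint8_t *p, int n)
    {
            uint64_t v = 0;

            while (n--)
                    v = (v << 8) | *p++;
            return v;
    }

    static int decode_verify(const uint8_t *cdb, uint64_t *lba, uint32_t *sectors)
    {
            switch (cdb[0]) {
            case VERIFY_10:
                    *lba = get_be(cdb + 2, 4);
                    *sectors = (uint32_t)get_be(cdb + 7, 2);
                    return 0;
            case VERIFY_16:
                    *lba = get_be(cdb + 2, 8);
                    *sectors = (uint32_t)get_be(cdb + 10, 4);
                    return 0;
            default:
                    return -1;      /* not a VERIFY variant we handle */
            }
    }

    int main(void)
    {
            uint8_t cdb[16] = { VERIFY_16, 0, 0, 0, 0, 0, 0, 0, 0, 0x10,
                                0, 0, 0, 8 };
            uint64_t lba;
            uint32_t sectors;

            if (!decode_verify(cdb, &lba, &sectors))
                    printf("lba=%llu sectors=%u\n",
                           (unsigned long long)lba, sectors);
            return 0;
    }
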
index c0dbfa0..6fb1919 100644
@@ -602,7 +602,8 @@ int core_tpg_add_lun(
        if (ret)
                goto out_kill_ref;
 
-       if (!(dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH) &&
+       if (!(dev->transport->transport_flags &
+            TRANSPORT_FLAG_PASSTHROUGH_ALUA) &&
            !(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE))
                target_attach_tg_pt_gp(lun, dev->t10_alua.default_tg_pt_gp);
 
index 434d9d6..b1a3cdb 100644
@@ -636,8 +636,7 @@ static int transport_cmd_check_stop_to_fabric(struct se_cmd *cmd)
         * Fabric modules are expected to return '1' here if the se_cmd being
         * passed is released at this point, or zero if not being released.
         */
-       return cmd->se_tfo->check_stop_free ? cmd->se_tfo->check_stop_free(cmd)
-               : 0;
+       return cmd->se_tfo->check_stop_free(cmd);
 }
 
 static void transport_lun_remove_cmd(struct se_cmd *cmd)
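
Together with the target_fabric_tf_ops_check() hunk above, this is the usual two-step for retiring a NULL check: reject ops tables that lack the callback at registration time, then call through the pointer unconditionally on the hot path. The pattern as a userspace sketch (hypothetical ops structure, not the fabric API itself):

    #include <stdio.h>
    #include <stddef.h>
    #include <errno.h>

    struct fabric_ops {
            const char *name;
            int (*check_stop_free)(void *cmd);
    };

    /* Refuse incomplete ops tables up front... */
    static int fabric_register(const struct fabric_ops *ops)
    {
            if (!ops->check_stop_free) {
                    fprintf(stderr, "%s: missing check_stop_free()\n", ops->name);
                    return -EINVAL;
            }
            return 0;
    }

    /* ...so the per-command path needs no NULL test. */
    static int cmd_check_stop(const struct fabric_ops *ops, void *cmd)
    {
            return ops->check_stop_free(cmd);
    }

    static int demo_check_stop_free(void *cmd)
    {
            (void)cmd;
            return 1;       /* "command was released" */
    }

    int main(void)
    {
            struct fabric_ops good = { "demo", demo_check_stop_free };
            struct fabric_ops bad = { "broken", NULL };

            printf("register good: %d\n", fabric_register(&good));
            printf("register bad:  %d\n", fabric_register(&bad));
            printf("hot path:      %d\n", cmd_check_stop(&good, NULL));
            return 0;
    }
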
index c3adefe..c6874c3 100644
@@ -28,6 +28,7 @@
 #include <linux/stringify.h>
 #include <linux/bitops.h>
 #include <linux/highmem.h>
+#include <linux/configfs.h>
 #include <net/genetlink.h>
 #include <scsi/scsi_common.h>
 #include <scsi/scsi_proto.h>
@@ -112,6 +113,7 @@ struct tcmu_dev {
        spinlock_t commands_lock;
 
        struct timer_list timeout;
+       unsigned int cmd_time_out;
 
        char dev_config[TCMU_CONFIG_LEN];
 };
@@ -172,7 +174,9 @@ static struct tcmu_cmd *tcmu_alloc_cmd(struct se_cmd *se_cmd)
 
        tcmu_cmd->se_cmd = se_cmd;
        tcmu_cmd->tcmu_dev = udev;
-       tcmu_cmd->deadline = jiffies + msecs_to_jiffies(TCMU_TIME_OUT);
+       if (udev->cmd_time_out)
+               tcmu_cmd->deadline = jiffies +
+                                       msecs_to_jiffies(udev->cmd_time_out);
 
        idr_preload(GFP_KERNEL);
        spin_lock_irq(&udev->commands_lock);
@@ -451,7 +455,11 @@ tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
 
                pr_debug("sleeping for ring space\n");
                spin_unlock_irq(&udev->cmdr_lock);
-               ret = schedule_timeout(msecs_to_jiffies(TCMU_TIME_OUT));
+               if (udev->cmd_time_out)
+                       ret = schedule_timeout(
+                                       msecs_to_jiffies(udev->cmd_time_out));
+               else
+                       ret = schedule_timeout(msecs_to_jiffies(TCMU_TIME_OUT));
                finish_wait(&udev->wait_cmdr, &__wait);
                if (!ret) {
                        pr_warn("tcmu: command timed out\n");
@@ -526,8 +534,9 @@ tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
        /* TODO: only if FLUSH and FUA? */
        uio_event_notify(&udev->uio_info);
 
-       mod_timer(&udev->timeout,
-               round_jiffies_up(jiffies + msecs_to_jiffies(TCMU_TIME_OUT)));
+       if (udev->cmd_time_out)
+               mod_timer(&udev->timeout, round_jiffies_up(jiffies +
+                         msecs_to_jiffies(udev->cmd_time_out)));
 
        return TCM_NO_SENSE;
 }
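
With cmd_time_out configurable, a value of 0 now means "no deadline": the timer is only armed when a timeout is actually set. A kernel-style sketch of that convention (hypothetical device struct, not the tcmu one):

    #include <linux/timer.h>
    #include <linux/jiffies.h>

    struct udev_like {
            struct timer_list timeout;
            unsigned int cmd_time_out;      /* in ms; 0 disables the deadline */
    };

    static void arm_deadline(struct udev_like *udev)
    {
            if (!udev->cmd_time_out)
                    return;         /* userspace opted out of timeouts */

            mod_timer(&udev->timeout,
                      round_jiffies_up(jiffies +
                                       msecs_to_jiffies(udev->cmd_time_out)));
    }
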
@@ -742,6 +751,7 @@ static struct se_device *tcmu_alloc_device(struct se_hba *hba, const char *name)
        }
 
        udev->hba = hba;
+       udev->cmd_time_out = TCMU_TIME_OUT;
 
        init_waitqueue_head(&udev->wait_cmdr);
        spin_lock_init(&udev->cmdr_lock);
@@ -960,7 +970,8 @@ static int tcmu_configure_device(struct se_device *dev)
        if (dev->dev_attrib.hw_block_size == 0)
                dev->dev_attrib.hw_block_size = 512;
        /* Other attributes can be configured in userspace */
-       dev->dev_attrib.hw_max_sectors = 128;
+       if (!dev->dev_attrib.hw_max_sectors)
+               dev->dev_attrib.hw_max_sectors = 128;
        dev->dev_attrib.hw_queue_depth = 128;
 
        ret = tcmu_netlink_event(TCMU_CMD_ADDED_DEVICE, udev->uio_info.name,
@@ -997,6 +1008,11 @@ static void tcmu_dev_call_rcu(struct rcu_head *p)
        kfree(udev);
 }
 
+static bool tcmu_dev_configured(struct tcmu_dev *udev)
+{
+       return udev->uio_info.uio_dev ? true : false;
+}
+
 static void tcmu_free_device(struct se_device *dev)
 {
        struct tcmu_dev *udev = TCMU_DEV(dev);
@@ -1018,8 +1034,7 @@ static void tcmu_free_device(struct se_device *dev)
        spin_unlock_irq(&udev->commands_lock);
        WARN_ON(!all_expired);
 
-       /* Device was configured */
-       if (udev->uio_info.uio_dev) {
+       if (tcmu_dev_configured(udev)) {
                tcmu_netlink_event(TCMU_CMD_REMOVED_DEVICE, udev->uio_info.name,
                                   udev->uio_info.uio_dev->minor);
 
@@ -1031,16 +1046,42 @@ static void tcmu_free_device(struct se_device *dev)
 }
 
 enum {
-       Opt_dev_config, Opt_dev_size, Opt_hw_block_size, Opt_err,
+       Opt_dev_config, Opt_dev_size, Opt_hw_block_size, Opt_hw_max_sectors,
+       Opt_err,
 };
 
 static match_table_t tokens = {
        {Opt_dev_config, "dev_config=%s"},
        {Opt_dev_size, "dev_size=%u"},
        {Opt_hw_block_size, "hw_block_size=%u"},
+       {Opt_hw_max_sectors, "hw_max_sectors=%u"},
        {Opt_err, NULL}
 };
 
+static int tcmu_set_dev_attrib(substring_t *arg, u32 *dev_attrib)
+{
+       unsigned long tmp_ul;
+       char *arg_p;
+       int ret;
+
+       arg_p = match_strdup(arg);
+       if (!arg_p)
+               return -ENOMEM;
+
+       ret = kstrtoul(arg_p, 0, &tmp_ul);
+       kfree(arg_p);
+       if (ret < 0) {
+               pr_err("kstrtoul() failed for dev attrib\n");
+               return ret;
+       }
+       if (!tmp_ul) {
+               pr_err("dev attrib must be nonzero\n");
+               return -EINVAL;
+       }
+       *dev_attrib = tmp_ul;
+       return 0;
+}
+
 static ssize_t tcmu_set_configfs_dev_params(struct se_device *dev,
                const char *page, ssize_t count)
 {
@@ -1048,7 +1089,6 @@ static ssize_t tcmu_set_configfs_dev_params(struct se_device *dev,
        char *orig, *ptr, *opts, *arg_p;
        substring_t args[MAX_OPT_ARGS];
        int ret = 0, token;
-       unsigned long tmp_ul;
 
        opts = kstrdup(page, GFP_KERNEL);
        if (!opts)
@@ -1082,26 +1122,19 @@ static ssize_t tcmu_set_configfs_dev_params(struct se_device *dev,
                                pr_err("kstrtoul() failed for dev_size=\n");
                        break;
                case Opt_hw_block_size:
-                       arg_p = match_strdup(&args[0]);
-                       if (!arg_p) {
-                               ret = -ENOMEM;
-                               break;
-                       }
-                       ret = kstrtoul(arg_p, 0, &tmp_ul);
-                       kfree(arg_p);
-                       if (ret < 0) {
-                               pr_err("kstrtoul() failed for hw_block_size=\n");
-                               break;
-                       }
-                       if (!tmp_ul) {
-                               pr_err("hw_block_size must be nonzero\n");
-                               break;
-                       }
-                       dev->dev_attrib.hw_block_size = tmp_ul;
+                       ret = tcmu_set_dev_attrib(&args[0],
+                                       &(dev->dev_attrib.hw_block_size));
+                       break;
+               case Opt_hw_max_sectors:
+                       ret = tcmu_set_dev_attrib(&args[0],
+                                       &(dev->dev_attrib.hw_max_sectors));
                        break;
                default:
                        break;
                }
+
+               if (ret)
+                       break;
        }
 
        kfree(orig);
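
The duplicated strdup/strtoul/nonzero-check sequence is factored into a single helper, so each new option such as hw_max_sectors costs only a two-line case. The shape of that helper, as a userspace sketch:

    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Parse a numeric option into *attrib, rejecting 0 like the kernel helper. */
    static int set_dev_attrib(const char *arg, unsigned int *attrib)
    {
            char *end;
            unsigned long tmp;

            errno = 0;
            tmp = strtoul(arg, &end, 0);
            if (errno || end == arg || *end != '\0') {
                    fprintf(stderr, "could not parse dev attrib '%s'\n", arg);
                    return -EINVAL;
            }
            if (!tmp) {
                    fprintf(stderr, "dev attrib must be nonzero\n");
                    return -EINVAL;
            }
            *attrib = (unsigned int)tmp;
            return 0;
    }

    int main(void)
    {
            unsigned int hw_block_size = 0, hw_max_sectors = 0;

            set_dev_attrib("512", &hw_block_size);
            set_dev_attrib("128", &hw_max_sectors);
            printf("%u %u\n", hw_block_size, hw_max_sectors);
            return 0;
    }
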
@@ -1134,7 +1167,48 @@ tcmu_parse_cdb(struct se_cmd *cmd)
        return passthrough_parse_cdb(cmd, tcmu_queue_cmd);
 }
 
-static const struct target_backend_ops tcmu_ops = {
+static ssize_t tcmu_cmd_time_out_show(struct config_item *item, char *page)
+{
+       struct se_dev_attrib *da = container_of(to_config_group(item),
+                                       struct se_dev_attrib, da_group);
+       struct tcmu_dev *udev = container_of(da->da_dev,
+                                       struct tcmu_dev, se_dev);
+
+       return snprintf(page, PAGE_SIZE, "%lu\n", udev->cmd_time_out / MSEC_PER_SEC);
+}
+
+static ssize_t tcmu_cmd_time_out_store(struct config_item *item, const char *page,
+                                      size_t count)
+{
+       struct se_dev_attrib *da = container_of(to_config_group(item),
+                                       struct se_dev_attrib, da_group);
+       struct tcmu_dev *udev = container_of(da->da_dev,
+                                       struct tcmu_dev, se_dev);
+       u32 val;
+       int ret;
+
+       if (da->da_dev->export_count) {
+               pr_err("Unable to set tcmu cmd_time_out while exports exist\n");
+               return -EINVAL;
+       }
+
+       ret = kstrtou32(page, 0, &val);
+       if (ret < 0)
+               return ret;
+
+       if (!val) {
+               pr_err("Illegal value for cmd_time_out\n");
+               return -EINVAL;
+       }
+
+       udev->cmd_time_out = val * MSEC_PER_SEC;
+       return count;
+}
+CONFIGFS_ATTR(tcmu_, cmd_time_out);
+
+static struct configfs_attribute **tcmu_attrs;
+
+static struct target_backend_ops tcmu_ops = {
        .name                   = "user",
        .owner                  = THIS_MODULE,
        .transport_flags        = TRANSPORT_FLAG_PASSTHROUGH,
@@ -1148,12 +1222,12 @@ static const struct target_backend_ops tcmu_ops = {
        .show_configfs_dev_params = tcmu_show_configfs_dev_params,
        .get_device_type        = sbc_get_device_type,
        .get_blocks             = tcmu_get_blocks,
-       .tb_dev_attrib_attrs    = passthrough_attrib_attrs,
+       .tb_dev_attrib_attrs    = NULL,
 };
 
 static int __init tcmu_module_init(void)
 {
-       int ret;
+       int ret, i, len = 0;
 
        BUILD_BUG_ON((sizeof(struct tcmu_cmd_entry) % TCMU_OP_ALIGN_SIZE) != 0);
 
@@ -1175,12 +1249,31 @@ static int __init tcmu_module_init(void)
                goto out_unreg_device;
        }
 
+       for (i = 0; passthrough_attrib_attrs[i] != NULL; i++) {
+               len += sizeof(struct configfs_attribute *);
+       }
+       len += sizeof(struct configfs_attribute *) * 2;
+
+       tcmu_attrs = kzalloc(len, GFP_KERNEL);
+       if (!tcmu_attrs) {
+               ret = -ENOMEM;
+               goto out_unreg_genl;
+       }
+
+       for (i = 0; passthrough_attrib_attrs[i] != NULL; i++) {
+               tcmu_attrs[i] = passthrough_attrib_attrs[i];
+       }
+       tcmu_attrs[i] = &tcmu_attr_cmd_time_out;
+       tcmu_ops.tb_dev_attrib_attrs = tcmu_attrs;
+
        ret = transport_backend_register(&tcmu_ops);
        if (ret)
-               goto out_unreg_genl;
+               goto out_attrs;
 
        return 0;
 
+out_attrs:
+       kfree(tcmu_attrs);
 out_unreg_genl:
        genl_unregister_family(&tcmu_genl_family);
 out_unreg_device:
@@ -1194,6 +1287,7 @@ out_free_cache:
 static void __exit tcmu_module_exit(void)
 {
        target_backend_unregister(&tcmu_ops);
+       kfree(tcmu_attrs);
        genl_unregister_family(&tcmu_genl_family);
        root_device_unregister(tcmu_root_device);
        kmem_cache_destroy(tcmu_cmd_cache);
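
The module-init hunk builds tcmu_attrs by counting the NULL-terminated passthrough array, allocating it plus two extra slots (one for the new attribute, one for the terminating NULL that kzalloc leaves zeroed), and copying the entries over. The same array-extension idiom in userspace C:

    #include <stdio.h>
    #include <stdlib.h>

    /* Return a new NULL-terminated array = base plus one extra entry.
     * The caller owns (and frees) the result. */
    static const char **extend_attrs(const char **base, const char *extra)
    {
            size_t n = 0, i;
            const char **out;

            while (base[n])
                    n++;
            out = calloc(n + 2, sizeof(*out));      /* +1 entry, +1 NULL */
            if (!out)
                    return NULL;
            for (i = 0; i < n; i++)
                    out[i] = base[i];
            out[n] = extra;                         /* out[n + 1] stays NULL */
            return out;
    }

    int main(void)
    {
            const char *base[] = { "hw_block_size", "hw_max_sectors", NULL };
            const char **attrs = extend_attrs(base, "cmd_time_out");
            size_t i;

            for (i = 0; attrs && attrs[i]; i++)
                    printf("%s\n", attrs[i]);
            free(attrs);
            return 0;
    }
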
index bcf1d33..c334bcc 100644
@@ -575,12 +575,13 @@ static void asc_set_termios(struct uart_port *port, struct ktermios *termios,
                        pinctrl_select_state(ascport->pinctrl,
                                             ascport->states[NO_HW_FLOWCTRL]);
 
-                       gpiod = devm_get_gpiod_from_child(port->dev, "rts",
-                                                         &np->fwnode);
-                       if (!IS_ERR(gpiod)) {
-                               gpiod_direction_output(gpiod, 0);
+                       gpiod = devm_fwnode_get_gpiod_from_child(port->dev,
+                                                                "rts",
+                                                                &np->fwnode,
+                                                                GPIOD_OUT_LOW,
+                                                                np->name);
+                       if (!IS_ERR(gpiod))
                                ascport->rts = gpiod;
-                       }
                }
        }
 
index c26fa1f..32d2633 100644
@@ -1182,8 +1182,7 @@ static struct vfio_group *find_iommu_group(struct vfio_domain *domain,
        return NULL;
 }
 
-static bool vfio_iommu_has_resv_msi(struct iommu_group *group,
-                                   phys_addr_t *base)
+static bool vfio_iommu_has_sw_msi(struct iommu_group *group, phys_addr_t *base)
 {
        struct list_head group_resv_regions;
        struct iommu_resv_region *region, *next;
@@ -1192,7 +1191,7 @@ static bool vfio_iommu_has_resv_msi(struct iommu_group *group,
        INIT_LIST_HEAD(&group_resv_regions);
        iommu_get_group_resv_regions(group, &group_resv_regions);
        list_for_each_entry(region, &group_resv_regions, list) {
-               if (region->type & IOMMU_RESV_MSI) {
+               if (region->type == IOMMU_RESV_SW_MSI) {
                        *base = region->start;
                        ret = true;
                        goto out;
@@ -1283,7 +1282,7 @@ static int vfio_iommu_type1_attach_group(void *iommu_data,
        if (ret)
                goto out_domain;
 
-       resv_msi = vfio_iommu_has_resv_msi(iommu_group, &resv_msi_base);
+       resv_msi = vfio_iommu_has_sw_msi(iommu_group, &resv_msi_base);
 
        INIT_LIST_HEAD(&domain->group_list);
        list_add(&group->next, &domain->group_list);
index ce5e63d..44eed8e 100644
@@ -223,6 +223,46 @@ vhost_transport_send_pkt(struct virtio_vsock_pkt *pkt)
        return len;
 }
 
+static int
+vhost_transport_cancel_pkt(struct vsock_sock *vsk)
+{
+       struct vhost_vsock *vsock;
+       struct virtio_vsock_pkt *pkt, *n;
+       int cnt = 0;
+       LIST_HEAD(freeme);
+
+       /* Find the vhost_vsock by the guest context id */
+       vsock = vhost_vsock_get(vsk->remote_addr.svm_cid);
+       if (!vsock)
+               return -ENODEV;
+
+       spin_lock_bh(&vsock->send_pkt_list_lock);
+       list_for_each_entry_safe(pkt, n, &vsock->send_pkt_list, list) {
+               if (pkt->vsk != vsk)
+                       continue;
+               list_move(&pkt->list, &freeme);
+       }
+       spin_unlock_bh(&vsock->send_pkt_list_lock);
+
+       list_for_each_entry_safe(pkt, n, &freeme, list) {
+               if (pkt->reply)
+                       cnt++;
+               list_del(&pkt->list);
+               virtio_transport_free_pkt(pkt);
+       }
+
+       if (cnt) {
+               struct vhost_virtqueue *tx_vq = &vsock->vqs[VSOCK_VQ_TX];
+               int new_cnt;
+
+               new_cnt = atomic_sub_return(cnt, &vsock->queued_replies);
+               if (new_cnt + cnt >= tx_vq->num && new_cnt < tx_vq->num)
+                       vhost_poll_queue(&tx_vq->poll);
+       }
+
+       return 0;
+}
+
 static struct virtio_vsock_pkt *
 vhost_vsock_alloc_pkt(struct vhost_virtqueue *vq,
                      unsigned int out, unsigned int in)
@@ -675,6 +715,7 @@ static struct virtio_transport vhost_transport = {
                .release                  = virtio_transport_release,
                .connect                  = virtio_transport_connect,
                .shutdown                 = virtio_transport_shutdown,
+               .cancel_pkt               = vhost_transport_cancel_pkt,
 
                .dgram_enqueue            = virtio_transport_dgram_enqueue,
                .dgram_dequeue            = virtio_transport_dgram_dequeue,
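
vhost_transport_cancel_pkt() above uses a classic two-phase teardown: while the send-queue spinlock is held, matching packets are only moved onto a private list; the actual freeing happens after the lock is dropped. Stripped to its essentials, as a kernel-style sketch (hypothetical pkt type):

    #include <linux/list.h>
    #include <linux/spinlock.h>
    #include <linux/slab.h>

    struct pkt {
            struct list_head list;
            void *owner;
    };

    static void cancel_owner(spinlock_t *lock, struct list_head *queue,
                             void *owner)
    {
            struct pkt *pkt, *n;
            LIST_HEAD(freeme);

            /* Move, don't free, while the queue lock is held... */
            spin_lock_bh(lock);
            list_for_each_entry_safe(pkt, n, queue, list) {
                    if (pkt->owner == owner)
                            list_move(&pkt->list, &freeme);
            }
            spin_unlock_bh(lock);

            /* ...so the potentially slow teardown runs without the lock. */
            list_for_each_entry_safe(pkt, n, &freeme, list) {
                    list_del(&pkt->list);
                    kfree(pkt);
            }
    }
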
index c77a075..f3bf8f4 100644
@@ -36,6 +36,7 @@
 #include <linux/spinlock.h>
 #include <linux/slab.h>
 #include <linux/highmem.h>
+#include <linux/refcount.h>
 
 #include <xen/xen.h>
 #include <xen/grant_table.h>
@@ -86,7 +87,7 @@ struct grant_map {
        int index;
        int count;
        int flags;
-       atomic_t users;
+       refcount_t users;
        struct unmap_notify notify;
        struct ioctl_gntdev_grant_ref *grants;
        struct gnttab_map_grant_ref   *map_ops;
@@ -166,7 +167,7 @@ static struct grant_map *gntdev_alloc_map(struct gntdev_priv *priv, int count)
 
        add->index = 0;
        add->count = count;
-       atomic_set(&add->users, 1);
+       refcount_set(&add->users, 1);
 
        return add;
 
@@ -212,7 +213,7 @@ static void gntdev_put_map(struct gntdev_priv *priv, struct grant_map *map)
        if (!map)
                return;
 
-       if (!atomic_dec_and_test(&map->users))
+       if (!refcount_dec_and_test(&map->users))
                return;
 
        atomic_sub(map->count, &pages_mapped);
@@ -400,7 +401,7 @@ static void gntdev_vma_open(struct vm_area_struct *vma)
        struct grant_map *map = vma->vm_private_data;
 
        pr_debug("gntdev_vma_open %p\n", vma);
-       atomic_inc(&map->users);
+       refcount_inc(&map->users);
 }
 
 static void gntdev_vma_close(struct vm_area_struct *vma)
@@ -1004,7 +1005,7 @@ static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma)
                goto unlock_out;
        }
 
-       atomic_inc(&map->users);
+       refcount_inc(&map->users);
 
        vma->vm_ops = &gntdev_vmops;
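
The gntdev conversion swaps atomic_t for refcount_t where the counter is a pure use count. The API is a drop-in replacement (refcount_set/refcount_inc/refcount_dec_and_test mirror the atomic calls), but it saturates and warns on overflow and on increment-from-zero instead of silently wrapping. The conversion shape, as a kernel-style sketch:

    #include <linux/refcount.h>
    #include <linux/slab.h>

    struct map_like {
            refcount_t users;
            /* ... payload ... */
    };

    static struct map_like *map_alloc(void)
    {
            struct map_like *map = kzalloc(sizeof(*map), GFP_KERNEL);

            if (map)
                    refcount_set(&map->users, 1);   /* creator's reference */
            return map;
    }

    static void map_get(struct map_like *map)
    {
            refcount_inc(&map->users);      /* WARNs if the count was 0 */
    }

    static void map_put(struct map_like *map)
    {
            if (refcount_dec_and_test(&map->users))
                    kfree(map);
    }
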
 
index 4ce10bc..23e391d 100644
 #include <linux/init.h>
 #include <linux/module.h>
 #include <linux/types.h>
+#include <linux/syscore_ops.h>
 #include <linux/acpi.h>
 #include <acpi/processor.h>
 #include <xen/xen.h>
-#include <xen/xen-ops.h>
 #include <xen/interface/platform.h>
 #include <asm/xen/hypercall.h>
 
@@ -408,7 +408,7 @@ static int check_acpi_ids(struct acpi_processor *pr_backup)
        acpi_walk_namespace(ACPI_TYPE_PROCESSOR, ACPI_ROOT_OBJECT,
                            ACPI_UINT32_MAX,
                            read_acpi_id, NULL, NULL, NULL);
-       acpi_get_devices("ACPI0007", read_acpi_id, NULL, NULL);
+       acpi_get_devices(ACPI_PROCESSOR_DEVICE_HID, read_acpi_id, NULL, NULL);
 
 upload:
        if (!bitmap_equal(acpi_id_present, acpi_ids_done, nr_acpi_bits)) {
@@ -466,15 +466,33 @@ static int xen_upload_processor_pm_data(void)
        return rc;
 }
 
-static int xen_acpi_processor_resume(struct notifier_block *nb,
-                                    unsigned long action, void *data)
+static void xen_acpi_processor_resume_worker(struct work_struct *dummy)
 {
+       int rc;
+
        bitmap_zero(acpi_ids_done, nr_acpi_bits);
-       return xen_upload_processor_pm_data();
+
+       rc = xen_upload_processor_pm_data();
+       if (rc != 0)
+               pr_info("ACPI data upload failed, error = %d\n", rc);
+}
+
+static void xen_acpi_processor_resume(void)
+{
+       static DECLARE_WORK(wq, xen_acpi_processor_resume_worker);
+
+       /*
+        * xen_upload_processor_pm_data() calls non-atomic code.
+        * However, the context for xen_acpi_processor_resume is syscore
+        * with only the boot CPU online and in an atomic context.
+        *
+        * So defer the upload to a point where it is safe to sleep.
+        */
+       schedule_work(&wq);
 }
 
-struct notifier_block xen_acpi_processor_resume_nb = {
-       .notifier_call = xen_acpi_processor_resume,
+static struct syscore_ops xap_syscore_ops = {
+       .resume = xen_acpi_processor_resume,
 };
 
 static int __init xen_acpi_processor_init(void)
@@ -527,7 +545,7 @@ static int __init xen_acpi_processor_init(void)
        if (rc)
                goto err_unregister;
 
-       xen_resume_notifier_register(&xen_acpi_processor_resume_nb);
+       register_syscore_ops(&xap_syscore_ops);
 
        return 0;
 err_unregister:
@@ -544,7 +562,7 @@ static void __exit xen_acpi_processor_exit(void)
 {
        int i;
 
-       xen_resume_notifier_unregister(&xen_acpi_processor_resume_nb);
+       unregister_syscore_ops(&xap_syscore_ops);
        kfree(acpi_ids_done);
        kfree(acpi_id_present);
        kfree(acpi_id_cst_present);
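
Syscore resume callbacks run early, with only the boot CPU online and interrupts disabled, so anything that may sleep (such as the hypercalls behind the PM-data upload) must be punted to process context. The deferral idiom in isolation, as a kernel-style sketch:

    #include <linux/workqueue.h>
    #include <linux/syscore_ops.h>

    static void resume_worker(struct work_struct *dummy)
    {
            /* Process context: safe to sleep, allocate, take mutexes. */
    }

    static void my_resume(void)
    {
            static DECLARE_WORK(wq, resume_worker);

            schedule_work(&wq);     /* atomic-safe; the work runs later */
    }

    static struct syscore_ops my_syscore_ops = {
            .resume = my_resume,
    };

    /* register_syscore_ops(&my_syscore_ops) from module init,
     * unregister_syscore_ops() on exit. */
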
index b29447e..25d404d 100644
@@ -362,7 +362,7 @@ static void afs_callback_updater(struct work_struct *work)
 {
        struct afs_server *server;
        struct afs_vnode *vnode, *xvnode;
-       time_t now;
+       time64_t now;
        long timeout;
        int ret;
 
@@ -370,7 +370,7 @@ static void afs_callback_updater(struct work_struct *work)
 
        _enter("");
 
-       now = get_seconds();
+       now = ktime_get_real_seconds();
 
        /* find the first vnode to update */
        spin_lock(&server->cb_lock);
@@ -424,7 +424,8 @@ static void afs_callback_updater(struct work_struct *work)
 
        /* and then reschedule */
        _debug("reschedule");
-       vnode->update_at = get_seconds() + afs_vnode_update_timeout;
+       vnode->update_at = ktime_get_real_seconds() +
+                       afs_vnode_update_timeout;
 
        spin_lock(&server->cb_lock);
 
index 2edbdcb..3062cce 100644
@@ -187,7 +187,6 @@ static int afs_deliver_cb_callback(struct afs_call *call)
        struct afs_callback *cb;
        struct afs_server *server;
        __be32 *bp;
-       u32 tmp;
        int ret, loop;
 
        _enter("{%u}", call->unmarshall);
@@ -249,9 +248,9 @@ static int afs_deliver_cb_callback(struct afs_call *call)
                if (ret < 0)
                        return ret;
 
-               tmp = ntohl(call->tmp);
-               _debug("CB count: %u", tmp);
-               if (tmp != call->count && tmp != 0)
+               call->count2 = ntohl(call->tmp);
+               _debug("CB count: %u", call->count2);
+               if (call->count2 != call->count && call->count2 != 0)
                        return -EBADMSG;
                call->offset = 0;
                call->unmarshall++;
@@ -259,14 +258,14 @@ static int afs_deliver_cb_callback(struct afs_call *call)
        case 4:
                _debug("extract CB array");
                ret = afs_extract_data(call, call->buffer,
-                                      call->count * 3 * 4, false);
+                                      call->count2 * 3 * 4, false);
                if (ret < 0)
                        return ret;
 
                _debug("unmarshall CB array");
                cb = call->request;
                bp = call->buffer;
-               for (loop = call->count; loop > 0; loop--, cb++) {
+               for (loop = call->count2; loop > 0; loop--, cb++) {
                        cb->version     = ntohl(*bp++);
                        cb->expiry      = ntohl(*bp++);
                        cb->type        = ntohl(*bp++);
index ba7b71f..0d5b850 100644
@@ -30,6 +30,7 @@ static int afs_readpages(struct file *filp, struct address_space *mapping,
 
 const struct file_operations afs_file_operations = {
        .open           = afs_open,
+       .flush          = afs_flush,
        .release        = afs_release,
        .llseek         = generic_file_llseek,
        .read_iter      = generic_file_read_iter,
@@ -184,10 +185,13 @@ int afs_page_filler(void *data, struct page *page)
                if (!req)
                        goto enomem;
 
+               /* We request a full page.  If the page is a partial one at the
+                * end of the file, the server will return a short read and the
+                * unmarshalling code will clear the unfilled space.
+                */
                atomic_set(&req->usage, 1);
                req->pos = (loff_t)page->index << PAGE_SHIFT;
-               req->len = min_t(size_t, i_size_read(inode) - req->pos,
-                                PAGE_SIZE);
+               req->len = PAGE_SIZE;
                req->nr_pages = 1;
                req->pages[0] = page;
                get_page(page);
@@ -208,7 +212,13 @@ int afs_page_filler(void *data, struct page *page)
                        fscache_uncache_page(vnode->cache, page);
 #endif
                        BUG_ON(PageFsCache(page));
-                       goto error;
+
+                       if (ret == -EINTR ||
+                           ret == -ENOMEM ||
+                           ret == -ERESTARTSYS ||
+                           ret == -EAGAIN)
+                               goto error;
+                       goto io_error;
                }
 
                SetPageUptodate(page);
@@ -227,10 +237,12 @@ int afs_page_filler(void *data, struct page *page)
        _leave(" = 0");
        return 0;
 
+io_error:
+       SetPageError(page);
+       goto error;
 enomem:
        ret = -ENOMEM;
 error:
-       SetPageError(page);
        unlock_page(page);
        _leave(" = %d", ret);
        return ret;
index ac8e766..19f76ae 100644
 #include "internal.h"
 #include "afs_fs.h"
 
+/*
+ * We need somewhere to discard into in case the server helpfully returns more
+ * than we asked for in FS.FetchData{,64}.
+ */
+static u8 afs_discard_buffer[64];
+
 /*
  * decode an AFSFid block
  */
@@ -105,7 +111,7 @@ static void xdr_decode_AFSFetchStatus(const __be32 **_bp,
                        vnode->vfs_inode.i_mode = mode;
                }
 
-               vnode->vfs_inode.i_ctime.tv_sec = status->mtime_server;
+               vnode->vfs_inode.i_ctime.tv_sec = status->mtime_client;
                vnode->vfs_inode.i_mtime        = vnode->vfs_inode.i_ctime;
                vnode->vfs_inode.i_atime        = vnode->vfs_inode.i_ctime;
                vnode->vfs_inode.i_version      = data_version;
@@ -139,7 +145,7 @@ static void xdr_decode_AFSCallBack(const __be32 **_bp, struct afs_vnode *vnode)
        vnode->cb_version       = ntohl(*bp++);
        vnode->cb_expiry        = ntohl(*bp++);
        vnode->cb_type          = ntohl(*bp++);
-       vnode->cb_expires       = vnode->cb_expiry + get_seconds();
+       vnode->cb_expires       = vnode->cb_expiry + ktime_get_real_seconds();
        *_bp = bp;
 }
 
@@ -315,7 +321,7 @@ static int afs_deliver_fs_fetch_data(struct afs_call *call)
        void *buffer;
        int ret;
 
-       _enter("{%u,%zu/%u;%u/%llu}",
+       _enter("{%u,%zu/%u;%llu/%llu}",
               call->unmarshall, call->offset, call->count,
               req->remain, req->actual_len);
 
@@ -353,12 +359,6 @@ static int afs_deliver_fs_fetch_data(struct afs_call *call)
 
                req->actual_len |= ntohl(call->tmp);
                _debug("DATA length: %llu", req->actual_len);
-               /* Check that the server didn't want to send us extra.  We
-                * might want to just discard instead, but that requires
-                * cooperation from AF_RXRPC.
-                */
-               if (req->actual_len > req->len)
-                       return -EBADMSG;
 
                req->remain = req->actual_len;
                call->offset = req->pos & (PAGE_SIZE - 1);
@@ -368,6 +368,7 @@ static int afs_deliver_fs_fetch_data(struct afs_call *call)
                call->unmarshall++;
 
        begin_page:
+               ASSERTCMP(req->index, <, req->nr_pages);
                if (req->remain > PAGE_SIZE - call->offset)
                        size = PAGE_SIZE - call->offset;
                else
@@ -378,7 +379,7 @@ static int afs_deliver_fs_fetch_data(struct afs_call *call)
 
                /* extract the returned data */
        case 3:
-               _debug("extract data %u/%llu %zu/%u",
+               _debug("extract data %llu/%llu %zu/%u",
                       req->remain, req->actual_len, call->offset, call->count);
 
                buffer = kmap(req->pages[req->index]);
@@ -389,19 +390,40 @@ static int afs_deliver_fs_fetch_data(struct afs_call *call)
                if (call->offset == PAGE_SIZE) {
                        if (req->page_done)
                                req->page_done(call, req);
+                       req->index++;
                        if (req->remain > 0) {
-                               req->index++;
                                call->offset = 0;
+                               if (req->index >= req->nr_pages) {
+                                       call->unmarshall = 4;
+                                       goto begin_discard;
+                               }
                                goto begin_page;
                        }
                }
+               goto no_more_data;
+
+               /* Discard any excess data the server gave us */
+       begin_discard:
+       case 4:
+               size = min_t(loff_t, sizeof(afs_discard_buffer), req->remain);
+               call->count = size;
+               _debug("extract discard %llu/%llu %zu/%u",
+                      req->remain, req->actual_len, call->offset, call->count);
+
+               call->offset = 0;
+               ret = afs_extract_data(call, afs_discard_buffer, call->count, true);
+               req->remain -= call->offset;
+               if (ret < 0)
+                       return ret;
+               if (req->remain > 0)
+                       goto begin_discard;
 
        no_more_data:
                call->offset = 0;
-               call->unmarshall++;
+               call->unmarshall = 5;
 
                /* extract the metadata */
-       case 4:
+       case 5:
                ret = afs_extract_data(call, call->buffer,
                                       (21 + 3 + 6) * 4, false);
                if (ret < 0)
@@ -416,16 +438,17 @@ static int afs_deliver_fs_fetch_data(struct afs_call *call)
                call->offset = 0;
                call->unmarshall++;
 
-       case 5:
+       case 6:
                break;
        }
 
-       if (call->count < PAGE_SIZE) {
-               buffer = kmap(req->pages[req->index]);
-               memset(buffer + call->count, 0, PAGE_SIZE - call->count);
-               kunmap(req->pages[req->index]);
+       for (; req->index < req->nr_pages; req->index++) {
+               if (call->count < PAGE_SIZE)
+                       zero_user_segment(req->pages[req->index],
+                                         call->count, PAGE_SIZE);
                if (req->page_done)
                        req->page_done(call, req);
+               call->count = 0;
        }
 
        _leave(" = 0 [done]");
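
Rather than failing with -EBADMSG when the server returns more than was requested, the unmarshaller above now drains the excess into a small static scratch buffer. The generic drain loop, sketched for a plain file descriptor:

    #include <stdint.h>
    #include <unistd.h>

    /* Read and throw away `remain` bytes; 64 bytes of scratch is enough
     * because the data is never looked at. */
    static int drain_excess(int fd, uint64_t remain)
    {
            static uint8_t discard_buffer[64];

            while (remain > 0) {
                    size_t chunk = remain < sizeof(discard_buffer) ?
                                   (size_t)remain : sizeof(discard_buffer);
                    ssize_t n = read(fd, discard_buffer, chunk);

                    if (n <= 0)
                            return -1;      /* error or unexpected EOF */
                    remain -= (uint64_t)n;
            }
            return 0;
    }
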
@@ -711,8 +734,8 @@ int afs_fs_create(struct afs_server *server,
                memset(bp, 0, padsz);
                bp = (void *) bp + padsz;
        }
-       *bp++ = htonl(AFS_SET_MODE);
-       *bp++ = 0; /* mtime */
+       *bp++ = htonl(AFS_SET_MODE | AFS_SET_MTIME);
+       *bp++ = htonl(vnode->vfs_inode.i_mtime.tv_sec); /* mtime */
        *bp++ = 0; /* owner */
        *bp++ = 0; /* group */
        *bp++ = htonl(mode & S_IALLUGO); /* unix mode */
@@ -980,8 +1003,8 @@ int afs_fs_symlink(struct afs_server *server,
                memset(bp, 0, c_padsz);
                bp = (void *) bp + c_padsz;
        }
-       *bp++ = htonl(AFS_SET_MODE);
-       *bp++ = 0; /* mtime */
+       *bp++ = htonl(AFS_SET_MODE | AFS_SET_MTIME);
+       *bp++ = htonl(vnode->vfs_inode.i_mtime.tv_sec); /* mtime */
        *bp++ = 0; /* owner */
        *bp++ = 0; /* group */
        *bp++ = htonl(S_IRWXUGO); /* unix mode */
@@ -1180,8 +1203,8 @@ static int afs_fs_store_data64(struct afs_server *server,
        *bp++ = htonl(vnode->fid.vnode);
        *bp++ = htonl(vnode->fid.unique);
 
-       *bp++ = 0; /* mask */
-       *bp++ = 0; /* mtime */
+       *bp++ = htonl(AFS_SET_MTIME); /* mask */
+       *bp++ = htonl(vnode->vfs_inode.i_mtime.tv_sec); /* mtime */
        *bp++ = 0; /* owner */
        *bp++ = 0; /* group */
        *bp++ = 0; /* unix mode */
@@ -1213,7 +1236,7 @@ int afs_fs_store_data(struct afs_server *server, struct afs_writeback *wb,
        _enter(",%x,{%x:%u},,",
               key_serial(wb->key), vnode->fid.vid, vnode->fid.vnode);
 
-       size = to - offset;
+       size = (loff_t)to - (loff_t)offset;
        if (first != last)
                size += (loff_t)(last - first) << PAGE_SHIFT;
        pos = (loff_t)first << PAGE_SHIFT;
@@ -1257,8 +1280,8 @@ int afs_fs_store_data(struct afs_server *server, struct afs_writeback *wb,
        *bp++ = htonl(vnode->fid.vnode);
        *bp++ = htonl(vnode->fid.unique);
 
-       *bp++ = 0; /* mask */
-       *bp++ = 0; /* mtime */
+       *bp++ = htonl(AFS_SET_MTIME); /* mask */
+       *bp++ = htonl(vnode->vfs_inode.i_mtime.tv_sec); /* mtime */
        *bp++ = 0; /* owner */
        *bp++ = 0; /* group */
        *bp++ = 0; /* unix mode */
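
These create/symlink/store requests are marshalled as a stream of 32-bit big-endian words, so sending the mtime is just OR-ing a bit into the mask word and filling the matching slot. A userspace sketch of the word-stream idea (illustrative mask bit values, not the afs.h constants):

    #include <arpa/inet.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <time.h>

    #define SET_MTIME 0x01          /* illustrative bit assignments */
    #define SET_MODE  0x08

    /* Marshal a StoreStatus-style block: mask, mtime, owner, group, mode,
     * five 32-bit big-endian words. Returns the advanced write pointer. */
    static uint32_t *marshal_store_status(uint32_t *bp, uint32_t mask,
                                          time_t mtime, uint32_t mode)
    {
            *bp++ = htonl(mask);
            *bp++ = htonl((mask & SET_MTIME) ? (uint32_t)mtime : 0);
            *bp++ = 0;                              /* owner */
            *bp++ = 0;                              /* group */
            *bp++ = htonl((mask & SET_MODE) ? (mode & 07777) : 0);
            return bp;
    }

    int main(void)
    {
            uint32_t buf[5];

            marshal_store_status(buf, SET_MODE | SET_MTIME, time(NULL), 0644);
            printf("mask word on the wire: %08x\n", ntohl(buf[0]));
            return 0;
    }
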
index 1e4897a..aae55dd 100644
@@ -54,8 +54,21 @@ static int afs_inode_map_status(struct afs_vnode *vnode, struct key *key)
                inode->i_fop    = &afs_dir_file_operations;
                break;
        case AFS_FTYPE_SYMLINK:
-               inode->i_mode   = S_IFLNK | vnode->status.mode;
-               inode->i_op     = &page_symlink_inode_operations;
+               /* Symlinks with a mode of 0644 are actually mountpoints. */
+               if ((vnode->status.mode & 0777) == 0644) {
+                       inode->i_flags |= S_AUTOMOUNT;
+
+                       spin_lock(&vnode->lock);
+                       set_bit(AFS_VNODE_MOUNTPOINT, &vnode->flags);
+                       spin_unlock(&vnode->lock);
+
+                       inode->i_mode   = S_IFDIR | 0555;
+                       inode->i_op     = &afs_mntpt_inode_operations;
+                       inode->i_fop    = &afs_mntpt_file_operations;
+               } else {
+                       inode->i_mode   = S_IFLNK | vnode->status.mode;
+                       inode->i_op     = &page_symlink_inode_operations;
+               }
                inode_nohighmem(inode);
                break;
        default:
@@ -70,27 +83,15 @@ static int afs_inode_map_status(struct afs_vnode *vnode, struct key *key)
 
        set_nlink(inode, vnode->status.nlink);
        inode->i_uid            = vnode->status.owner;
-       inode->i_gid            = GLOBAL_ROOT_GID;
+       inode->i_gid            = vnode->status.group;
        inode->i_size           = vnode->status.size;
-       inode->i_ctime.tv_sec   = vnode->status.mtime_server;
+       inode->i_ctime.tv_sec   = vnode->status.mtime_client;
        inode->i_ctime.tv_nsec  = 0;
        inode->i_atime          = inode->i_mtime = inode->i_ctime;
        inode->i_blocks         = 0;
        inode->i_generation     = vnode->fid.unique;
        inode->i_version        = vnode->status.data_version;
        inode->i_mapping->a_ops = &afs_fs_aops;
-
-       /* check to see whether a symbolic link is really a mountpoint */
-       if (vnode->status.type == AFS_FTYPE_SYMLINK) {
-               afs_mntpt_check_symlink(vnode, key);
-
-               if (test_bit(AFS_VNODE_MOUNTPOINT, &vnode->flags)) {
-                       inode->i_mode   = S_IFDIR | vnode->status.mode;
-                       inode->i_op     = &afs_mntpt_inode_operations;
-                       inode->i_fop    = &afs_mntpt_file_operations;
-               }
-       }
-
        return 0;
 }
 
@@ -245,12 +246,13 @@ struct inode *afs_iget(struct super_block *sb, struct key *key,
                        vnode->cb_version = 0;
                        vnode->cb_expiry = 0;
                        vnode->cb_type = 0;
-                       vnode->cb_expires = get_seconds();
+                       vnode->cb_expires = ktime_get_real_seconds();
                } else {
                        vnode->cb_version = cb->version;
                        vnode->cb_expiry = cb->expiry;
                        vnode->cb_type = cb->type;
-                       vnode->cb_expires = vnode->cb_expiry + get_seconds();
+                       vnode->cb_expires = vnode->cb_expiry +
+                               ktime_get_real_seconds();
                }
        }
 
@@ -323,7 +325,7 @@ int afs_validate(struct afs_vnode *vnode, struct key *key)
            !test_bit(AFS_VNODE_CB_BROKEN, &vnode->flags) &&
            !test_bit(AFS_VNODE_MODIFIED, &vnode->flags) &&
            !test_bit(AFS_VNODE_ZAP_DATA, &vnode->flags)) {
-               if (vnode->cb_expires < get_seconds() + 10) {
+               if (vnode->cb_expires < ktime_get_real_seconds() + 10) {
                        _debug("callback expired");
                        set_bit(AFS_VNODE_CB_BROKEN, &vnode->flags);
                } else {
@@ -444,7 +446,7 @@ void afs_evict_inode(struct inode *inode)
 
        mutex_lock(&vnode->permits_lock);
        permits = vnode->permits;
-       rcu_assign_pointer(vnode->permits, NULL);
+       RCU_INIT_POINTER(vnode->permits, NULL);
        mutex_unlock(&vnode->permits_lock);
        if (permits)
                call_rcu(&permits->rcu, afs_zap_permits);
index 5dfa569..a690136 100644
@@ -11,6 +11,7 @@
 
 #include <linux/compiler.h>
 #include <linux/kernel.h>
+#include <linux/ktime.h>
 #include <linux/fs.h>
 #include <linux/pagemap.h>
 #include <linux/rxrpc.h>
@@ -90,7 +91,10 @@ struct afs_call {
        unsigned                request_size;   /* size of request data */
        unsigned                reply_max;      /* maximum size of reply */
        unsigned                first_offset;   /* offset into mapping[first] */
-       unsigned                last_to;        /* amount of mapping[last] */
+       union {
+               unsigned        last_to;        /* amount of mapping[last] */
+               unsigned        count2;         /* count used in unmarshalling */
+       };
        unsigned char           unmarshall;     /* unmarshalling phase */
        bool                    incoming;       /* T if incoming call */
        bool                    send_pages;     /* T if data from mapping should be sent */
@@ -127,12 +131,11 @@ struct afs_call_type {
  */
 struct afs_read {
        loff_t                  pos;            /* Where to start reading */
-       loff_t                  len;            /* How much to read */
+       loff_t                  len;            /* How much we're asking for */
        loff_t                  actual_len;     /* How much we're actually getting */
+       loff_t                  remain;         /* Amount remaining */
        atomic_t                usage;
-       unsigned int            remain;         /* Amount remaining */
        unsigned int            index;          /* Which page we're reading into */
-       unsigned int            pg_offset;      /* Offset in page we're at */
        unsigned int            nr_pages;
        void (*page_done)(struct afs_call *, struct afs_read *);
        struct page             *pages[];
@@ -247,7 +250,7 @@ struct afs_cache_vhash {
  */
 struct afs_vlocation {
        atomic_t                usage;
-       time_t                  time_of_death;  /* time at which put reduced usage to 0 */
+       time64_t                time_of_death;  /* time at which put reduced usage to 0 */
        struct list_head        link;           /* link in cell volume location list */
        struct list_head        grave;          /* link in master graveyard list */
        struct list_head        update;         /* link in master update list */
@@ -258,7 +261,7 @@ struct afs_vlocation {
        struct afs_cache_vlocation vldb;        /* volume information DB record */
        struct afs_volume       *vols[3];       /* volume access record pointer (index by type) */
        wait_queue_head_t       waitq;          /* status change waitqueue */
-       time_t                  update_at;      /* time at which record should be updated */
+       time64_t                update_at;      /* time at which record should be updated */
        spinlock_t              lock;           /* access lock */
        afs_vlocation_state_t   state;          /* volume location state */
        unsigned short          upd_rej_cnt;    /* ENOMEDIUM count during update */
@@ -271,7 +274,7 @@ struct afs_vlocation {
  */
 struct afs_server {
        atomic_t                usage;
-       time_t                  time_of_death;  /* time at which put reduced usage to 0 */
+       time64_t                time_of_death;  /* time at which put reduced usage to 0 */
        struct in_addr          addr;           /* server address */
        struct afs_cell         *cell;          /* cell in which server resides */
        struct list_head        link;           /* link in cell's server list */
@@ -374,8 +377,8 @@ struct afs_vnode {
        struct rb_node          server_rb;      /* link in server->fs_vnodes */
        struct rb_node          cb_promise;     /* link in server->cb_promises */
        struct work_struct      cb_broken_work; /* work to be done on callback break */
-       time_t                  cb_expires;     /* time at which callback expires */
-       time_t                  cb_expires_at;  /* time used to order cb_promise */
+       time64_t                cb_expires;     /* time at which callback expires */
+       time64_t                cb_expires_at;  /* time used to order cb_promise */
        unsigned                cb_version;     /* callback version */
        unsigned                cb_expiry;      /* callback expiry time */
        afs_callback_type_t     cb_type;        /* type of callback */
@@ -557,7 +560,6 @@ extern const struct inode_operations afs_autocell_inode_operations;
 extern const struct file_operations afs_mntpt_file_operations;
 
 extern struct vfsmount *afs_d_automount(struct path *);
-extern int afs_mntpt_check_symlink(struct afs_vnode *, struct key *);
 extern void afs_mntpt_kill_timer(void);
 
 /*
@@ -718,6 +720,7 @@ extern int afs_writepages(struct address_space *, struct writeback_control *);
 extern void afs_pages_written_back(struct afs_vnode *, struct afs_call *);
 extern ssize_t afs_file_write(struct kiocb *, struct iov_iter *);
 extern int afs_writeback_all(struct afs_vnode *);
+extern int afs_flush(struct file *, fl_owner_t);
 extern int afs_fsync(struct file *, loff_t, loff_t, int);
 
 
index 91ea1aa..100b207 100644
@@ -84,6 +84,8 @@ int afs_abort_to_error(u32 abort_code)
        case RXKADDATALEN:      return -EKEYREJECTED;
        case RXKADILLEGALLEVEL: return -EKEYREJECTED;
 
+       case RXGEN_OPCODE:      return -ENOTSUPP;
+
        default:                return -EREMOTEIO;
        }
 }
index d4fb0af..bd3b65c 100644
@@ -46,59 +46,6 @@ static DECLARE_DELAYED_WORK(afs_mntpt_expiry_timer, afs_mntpt_expiry_timed_out);
 
 static unsigned long afs_mntpt_expiry_timeout = 10 * 60;
 
-/*
- * check a symbolic link to see whether it actually encodes a mountpoint
- * - sets the AFS_VNODE_MOUNTPOINT flag on the vnode appropriately
- */
-int afs_mntpt_check_symlink(struct afs_vnode *vnode, struct key *key)
-{
-       struct page *page;
-       size_t size;
-       char *buf;
-       int ret;
-
-       _enter("{%x:%u,%u}",
-              vnode->fid.vid, vnode->fid.vnode, vnode->fid.unique);
-
-       /* read the contents of the symlink into the pagecache */
-       page = read_cache_page(AFS_VNODE_TO_I(vnode)->i_mapping, 0,
-                              afs_page_filler, key);
-       if (IS_ERR(page)) {
-               ret = PTR_ERR(page);
-               goto out;
-       }
-
-       ret = -EIO;
-       if (PageError(page))
-               goto out_free;
-
-       buf = kmap(page);
-
-       /* examine the symlink's contents */
-       size = vnode->status.size;
-       _debug("symlink to %*.*s", (int) size, (int) size, buf);
-
-       if (size > 2 &&
-           (buf[0] == '%' || buf[0] == '#') &&
-           buf[size - 1] == '.'
-           ) {
-               _debug("symlink is a mountpoint");
-               spin_lock(&vnode->lock);
-               set_bit(AFS_VNODE_MOUNTPOINT, &vnode->flags);
-               vnode->vfs_inode.i_flags |= S_AUTOMOUNT;
-               spin_unlock(&vnode->lock);
-       }
-
-       ret = 0;
-
-       kunmap(page);
-out_free:
-       put_page(page);
-out:
-       _leave(" = %d", ret);
-       return ret;
-}
-
 /*
  * no valid lookup procedure on this sort of dir
  */
index 419ef05..8f76b13 100644
@@ -259,67 +259,74 @@ void afs_flat_call_destructor(struct afs_call *call)
        call->buffer = NULL;
 }
 
+#define AFS_BVEC_MAX 8
+
+/*
+ * Load the given bvec with the next few pages.
+ */
+static void afs_load_bvec(struct afs_call *call, struct msghdr *msg,
+                         struct bio_vec *bv, pgoff_t first, pgoff_t last,
+                         unsigned offset)
+{
+       struct page *pages[AFS_BVEC_MAX];
+       unsigned int nr, n, i, to, bytes = 0;
+
+       nr = min_t(pgoff_t, last - first + 1, AFS_BVEC_MAX);
+       n = find_get_pages_contig(call->mapping, first, nr, pages);
+       ASSERTCMP(n, ==, nr);
+
+       msg->msg_flags |= MSG_MORE;
+       for (i = 0; i < nr; i++) {
+               to = PAGE_SIZE;
+               if (first + i >= last) {
+                       to = call->last_to;
+                       msg->msg_flags &= ~MSG_MORE;
+               }
+               bv[i].bv_page = pages[i];
+               bv[i].bv_len = to - offset;
+               bv[i].bv_offset = offset;
+               bytes += to - offset;
+               offset = 0;
+       }
+
+       iov_iter_bvec(&msg->msg_iter, WRITE | ITER_BVEC, bv, nr, bytes);
+}
+
 /*
  * attach the data from a bunch of pages on an inode to a call
  */
 static int afs_send_pages(struct afs_call *call, struct msghdr *msg)
 {
-       struct page *pages[8];
-       unsigned count, n, loop, offset, to;
+       struct bio_vec bv[AFS_BVEC_MAX];
+       unsigned int bytes, nr, loop, offset;
        pgoff_t first = call->first, last = call->last;
        int ret;
 
-       _enter("");
-
        offset = call->first_offset;
        call->first_offset = 0;
 
        do {
-               _debug("attach %lx-%lx", first, last);
-
-               count = last - first + 1;
-               if (count > ARRAY_SIZE(pages))
-                       count = ARRAY_SIZE(pages);
-               n = find_get_pages_contig(call->mapping, first, count, pages);
-               ASSERTCMP(n, ==, count);
-
-               loop = 0;
-               do {
-                       struct bio_vec bvec = {.bv_page = pages[loop],
-                                              .bv_offset = offset};
-                       msg->msg_flags = 0;
-                       to = PAGE_SIZE;
-                       if (first + loop >= last)
-                               to = call->last_to;
-                       else
-                               msg->msg_flags = MSG_MORE;
-                       bvec.bv_len = to - offset;
-                       offset = 0;
-
-                       _debug("- range %u-%u%s",
-                              offset, to, msg->msg_flags ? " [more]" : "");
-                       iov_iter_bvec(&msg->msg_iter, WRITE | ITER_BVEC,
-                                     &bvec, 1, to - offset);
-
-                       /* have to change the state *before* sending the last
-                        * packet as RxRPC might give us the reply before it
-                        * returns from sending the request */
-                       if (first + loop >= last)
-                               call->state = AFS_CALL_AWAIT_REPLY;
-                       ret = rxrpc_kernel_send_data(afs_socket, call->rxcall,
-                                                    msg, to - offset);
-                       if (ret < 0)
-                               break;
-               } while (++loop < count);
-               first += count;
-
-               for (loop = 0; loop < count; loop++)
-                       put_page(pages[loop]);
+               afs_load_bvec(call, msg, bv, first, last, offset);
+               offset = 0;
+               bytes = msg->msg_iter.count;
+               nr = msg->msg_iter.nr_segs;
+
+               /* Have to change the state *before* sending the last
+                * packet as RxRPC might give us the reply before it
+                * returns from sending the request.
+                */
+               if (first + nr - 1 >= last)
+                       call->state = AFS_CALL_AWAIT_REPLY;
+               ret = rxrpc_kernel_send_data(afs_socket, call->rxcall,
+                                            msg, bytes);
+               for (loop = 0; loop < nr; loop++)
+                       put_page(bv[loop].bv_page);
                if (ret < 0)
                        break;
+
+               first += nr;
        } while (first <= last);
 
-       _leave(" = %d", ret);
        return ret;
 }
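
afs_load_bvec() batches up to eight contiguous pages into one bio_vec array so rxrpc sees fewer, larger sends; only the first segment carries an offset and only the final page may be short. Computing those (offset, length) segments is independent of the kernel types, as this userspace sketch shows:

    #include <stdio.h>

    #define PAGE_SZ  4096u
    #define BVEC_MAX 8

    struct seg { unsigned int offset, len; };

    /* Split a byte range that starts at `offset` within page `first` and
     * ends at byte `last_to` of page `last` into at most BVEC_MAX segments.
     * Returns the number of segments filled. */
    static unsigned int load_segs(struct seg *seg, unsigned long first,
                                  unsigned long last, unsigned int offset,
                                  unsigned int last_to)
    {
            unsigned long count = last - first + 1;
            unsigned int nr = count < BVEC_MAX ? (unsigned int)count : BVEC_MAX;
            unsigned int i;

            for (i = 0; i < nr; i++) {
                    unsigned int to = (first + i >= last) ? last_to : PAGE_SZ;

                    seg[i].offset = offset;
                    seg[i].len = to - offset;
                    offset = 0;     /* only the first page is offset */
            }
            return nr;
    }

    int main(void)
    {
            struct seg seg[BVEC_MAX];
            unsigned int i, nr = load_segs(seg, 10, 12, 100, 300);

            for (i = 0; i < nr; i++)
                    printf("seg %u: off=%u len=%u\n",
                           i, seg[i].offset, seg[i].len);
            return 0;
    }
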
 
@@ -333,6 +340,8 @@ int afs_make_call(struct in_addr *addr, struct afs_call *call, gfp_t gfp,
        struct rxrpc_call *rxcall;
        struct msghdr msg;
        struct kvec iov[1];
+       size_t offset;
+       u32 abort_code;
        int ret;
 
        _enter("%x,{%d},", addr->s_addr, ntohs(call->port));
@@ -381,9 +390,11 @@ int afs_make_call(struct in_addr *addr, struct afs_call *call, gfp_t gfp,
        msg.msg_controllen      = 0;
        msg.msg_flags           = (call->send_pages ? MSG_MORE : 0);
 
-       /* have to change the state *before* sending the last packet as RxRPC
-        * might give us the reply before it returns from sending the
-        * request */
+       /* We have to change the state *before* sending the last packet as
+        * rxrpc might give us the reply before it returns from sending the
+        * request.  Further, if the send fails, we may already have been given
+        * a notification and may have collected it.
+        */
        if (!call->send_pages)
                call->state = AFS_CALL_AWAIT_REPLY;
        ret = rxrpc_kernel_send_data(afs_socket, rxcall,
@@ -405,7 +416,17 @@ int afs_make_call(struct in_addr *addr, struct afs_call *call, gfp_t gfp,
        return afs_wait_for_call_to_complete(call);
 
 error_do_abort:
-       rxrpc_kernel_abort_call(afs_socket, rxcall, RX_USER_ABORT, -ret, "KSD");
+       call->state = AFS_CALL_COMPLETE;
+       if (ret != -ECONNABORTED) {
+               rxrpc_kernel_abort_call(afs_socket, rxcall, RX_USER_ABORT,
+                                       -ret, "KSD");
+       } else {
+               abort_code = 0;
+               offset = 0;
+               rxrpc_kernel_recv_data(afs_socket, rxcall, NULL, 0, &offset,
+                                      false, &abort_code);
+               ret = call->type->abort_to_error(abort_code);
+       }
 error_kill_call:
        afs_put_call(call);
        _leave(" = %d", ret);
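
On -ECONNABORTED the peer's abort has already terminated the call, so instead of sending an abort of our own, the new path collects the remote abort code with a zero-length rxrpc_kernel_recv_data() and translates it through the per-call-type ->abort_to_error() hook. A hedged sketch of such a translator (the cases use RX constants that appear elsewhere in this diff; the real mapping tables are larger and live with each call type):

	static int example_abort_to_error(u32 abort_code)
	{
		switch (abort_code) {
		case RX_CALL_DEAD:	return -ECONNRESET;
		case RX_USER_ABORT:	return -ECONNABORTED;
		case RXGEN_OPCODE:	return -ENOTSUPP;
		default:		return -EREMOTEIO;
		}
	}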
@@ -452,16 +473,18 @@ static void afs_deliver_to_call(struct afs_call *call)
                case -EINPROGRESS:
                case -EAGAIN:
                        goto out;
+               case -ECONNABORTED:
+                       goto call_complete;
                case -ENOTCONN:
                        abort_code = RX_CALL_DEAD;
                        rxrpc_kernel_abort_call(afs_socket, call->rxcall,
                                                abort_code, -ret, "KNC");
-                       goto do_abort;
+                       goto save_error;
                case -ENOTSUPP:
-                       abort_code = RX_INVALID_OPERATION;
+                       abort_code = RXGEN_OPCODE;
                        rxrpc_kernel_abort_call(afs_socket, call->rxcall,
                                                abort_code, -ret, "KIV");
-                       goto do_abort;
+                       goto save_error;
                case -ENODATA:
                case -EBADMSG:
                case -EMSGSIZE:
@@ -471,7 +494,7 @@ static void afs_deliver_to_call(struct afs_call *call)
                                abort_code = RXGEN_SS_UNMARSHAL;
                        rxrpc_kernel_abort_call(afs_socket, call->rxcall,
                                                abort_code, EBADMSG, "KUM");
-                       goto do_abort;
+                       goto save_error;
                }
        }
 
@@ -482,8 +505,9 @@ out:
        _leave("");
        return;
 
-do_abort:
+save_error:
        call->error = ret;
+call_complete:
        call->state = AFS_CALL_COMPLETE;
        goto done;
 }
@@ -493,7 +517,6 @@ do_abort:
  */
 static int afs_wait_for_call_to_complete(struct afs_call *call)
 {
-       const char *abort_why;
        int ret;
 
        DECLARE_WAITQUEUE(myself, current);
@@ -512,13 +535,8 @@ static int afs_wait_for_call_to_complete(struct afs_call *call)
                        continue;
                }
 
-               abort_why = "KWC";
-               ret = call->error;
-               if (call->state == AFS_CALL_COMPLETE)
-                       break;
-               abort_why = "KWI";
-               ret = -EINTR;
-               if (signal_pending(current))
+               if (call->state == AFS_CALL_COMPLETE ||
+                   signal_pending(current))
                        break;
                schedule();
        }
@@ -526,13 +544,14 @@ static int afs_wait_for_call_to_complete(struct afs_call *call)
        remove_wait_queue(&call->waitq, &myself);
        __set_current_state(TASK_RUNNING);
 
-       /* kill the call */
+       /* Kill off the call if it's still live. */
        if (call->state < AFS_CALL_COMPLETE) {
-               _debug("call incomplete");
+               _debug("call interrupted");
                rxrpc_kernel_abort_call(afs_socket, call->rxcall,
-                                       RX_CALL_DEAD, -ret, abort_why);
+                                       RX_USER_ABORT, -EINTR, "KWI");
        }
 
+       ret = call->error;
        _debug("call complete");
        afs_put_call(call);
        _leave(" = %d", ret);
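
Stripped of the packet-delivery branch, the loop above is the canonical wait-queue idiom, shown here for reference; the behavioural change is that an interrupted call is now aborted with RX_USER_ABORT/-EINTR and the returned error is always read from call->error afterwards:

	DECLARE_WAITQUEUE(myself, current);

	add_wait_queue(&call->waitq, &myself);
	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (call->state == AFS_CALL_COMPLETE ||
		    signal_pending(current))
			break;		/* finished, or interrupted */
		schedule();		/* sleep until woken */
	}
	remove_wait_queue(&call->waitq, &myself);
	__set_current_state(TASK_RUNNING);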
index 8d01042..ecb86a6 100644
@@ -114,7 +114,7 @@ void afs_clear_permits(struct afs_vnode *vnode)
 
        mutex_lock(&vnode->permits_lock);
        permits = vnode->permits;
-       rcu_assign_pointer(vnode->permits, NULL);
+       RCU_INIT_POINTER(vnode->permits, NULL);
        mutex_unlock(&vnode->permits_lock);
 
        if (permits)
@@ -340,17 +340,22 @@ int afs_permission(struct inode *inode, int mask)
        } else {
                if (!(access & AFS_ACE_LOOKUP))
                        goto permission_denied;
+               if ((mask & MAY_EXEC) && !(inode->i_mode & S_IXUSR))
+                       goto permission_denied;
                if (mask & (MAY_EXEC | MAY_READ)) {
                        if (!(access & AFS_ACE_READ))
                                goto permission_denied;
+                       if (!(inode->i_mode & S_IRUSR))
+                               goto permission_denied;
                } else if (mask & MAY_WRITE) {
                        if (!(access & AFS_ACE_WRITE))
                                goto permission_denied;
+                       if (!(inode->i_mode & S_IWUSR))
+                               goto permission_denied;
                }
        }
 
        key_put(key);
-       ret = generic_permission(inode, mask);
        _leave(" = %d", ret);
        return ret;
 
index d4066ab..c001b1f 100644
@@ -242,7 +242,7 @@ void afs_put_server(struct afs_server *server)
        spin_lock(&afs_server_graveyard_lock);
        if (atomic_read(&server->usage) == 0) {
                list_move_tail(&server->grave, &afs_server_graveyard);
-               server->time_of_death = get_seconds();
+               server->time_of_death = ktime_get_real_seconds();
                queue_delayed_work(afs_wq, &afs_server_reaper,
                                   afs_server_timeout * HZ);
        }
@@ -277,9 +277,9 @@ static void afs_reap_server(struct work_struct *work)
        LIST_HEAD(corpses);
        struct afs_server *server;
        unsigned long delay, expiry;
-       time_t now;
+       time64_t now;
 
-       now = get_seconds();
+       now = ktime_get_real_seconds();
        spin_lock(&afs_server_graveyard_lock);
 
        while (!list_empty(&afs_server_graveyard)) {
index d7d8dd8..37b7c3b 100644
@@ -340,7 +340,8 @@ static void afs_vlocation_queue_for_updates(struct afs_vlocation *vl)
        struct afs_vlocation *xvl;
 
        /* wait at least 10 minutes before updating... */
-       vl->update_at = get_seconds() + afs_vlocation_update_timeout;
+       vl->update_at = ktime_get_real_seconds() +
+                       afs_vlocation_update_timeout;
 
        spin_lock(&afs_vlocation_updates_lock);
 
@@ -506,7 +507,7 @@ void afs_put_vlocation(struct afs_vlocation *vl)
        if (atomic_read(&vl->usage) == 0) {
                _debug("buried");
                list_move_tail(&vl->grave, &afs_vlocation_graveyard);
-               vl->time_of_death = get_seconds();
+               vl->time_of_death = ktime_get_real_seconds();
                queue_delayed_work(afs_wq, &afs_vlocation_reap,
                                   afs_vlocation_timeout * HZ);
 
@@ -543,11 +544,11 @@ static void afs_vlocation_reaper(struct work_struct *work)
        LIST_HEAD(corpses);
        struct afs_vlocation *vl;
        unsigned long delay, expiry;
-       time_t now;
+       time64_t now;
 
        _enter("");
 
-       now = get_seconds();
+       now = ktime_get_real_seconds();
        spin_lock(&afs_vlocation_graveyard_lock);
 
        while (!list_empty(&afs_vlocation_graveyard)) {
@@ -622,13 +623,13 @@ static void afs_vlocation_updater(struct work_struct *work)
 {
        struct afs_cache_vlocation vldb;
        struct afs_vlocation *vl, *xvl;
-       time_t now;
+       time64_t now;
        long timeout;
        int ret;
 
        _enter("");
 
-       now = get_seconds();
+       now = ktime_get_real_seconds();
 
        /* find a record to update */
        spin_lock(&afs_vlocation_updates_lock);
@@ -684,7 +685,8 @@ static void afs_vlocation_updater(struct work_struct *work)
 
        /* and then reschedule */
        _debug("reschedule");
-       vl->update_at = get_seconds() + afs_vlocation_update_timeout;
+       vl->update_at = ktime_get_real_seconds() +
+                       afs_vlocation_update_timeout;
 
        spin_lock(&afs_vlocation_updates_lock);
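
Every get_seconds() to ktime_get_real_seconds() conversion in these hunks follows the same pattern: keep "now" and the stored timestamps in time64_t so the expiry arithmetic cannot wrap in 2038 on 32-bit systems. Schematically (names taken from the hunks above, reaping logic elided):

	time64_t now, expiry;

	now = ktime_get_real_seconds();	/* 64-bit on all architectures */
	expiry = vl->time_of_death + afs_vlocation_timeout;
	if (now >= expiry) {
		/* move the record onto the corpses list for reaping */
	}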
 
index c83c1a0..2d2fccd 100644
@@ -84,10 +84,9 @@ void afs_put_writeback(struct afs_writeback *wb)
  * partly or wholly fill a page that's under preparation for writing
  */
 static int afs_fill_page(struct afs_vnode *vnode, struct key *key,
-                        loff_t pos, struct page *page)
+                        loff_t pos, unsigned int len, struct page *page)
 {
        struct afs_read *req;
-       loff_t i_size;
        int ret;
 
        _enter(",,%llu", (unsigned long long)pos);
@@ -99,14 +98,10 @@ static int afs_fill_page(struct afs_vnode *vnode, struct key *key,
 
        atomic_set(&req->usage, 1);
        req->pos = pos;
+       req->len = len;
        req->nr_pages = 1;
        req->pages[0] = page;
-
-       i_size = i_size_read(&vnode->vfs_inode);
-       if (pos + PAGE_SIZE > i_size)
-               req->len = i_size - pos;
-       else
-               req->len = PAGE_SIZE;
+       get_page(page);
 
        ret = afs_vnode_fetch_data(vnode, key, req);
        afs_put_read(req);
@@ -159,12 +154,12 @@ int afs_write_begin(struct file *file, struct address_space *mapping,
                kfree(candidate);
                return -ENOMEM;
        }
-       *pagep = page;
-       /* page won't leak in error case: it eventually gets cleaned off LRU */
 
        if (!PageUptodate(page) && len != PAGE_SIZE) {
-               ret = afs_fill_page(vnode, key, index << PAGE_SHIFT, page);
+               ret = afs_fill_page(vnode, key, pos & PAGE_MASK, PAGE_SIZE, page);
                if (ret < 0) {
+                       unlock_page(page);
+                       put_page(page);
                        kfree(candidate);
                        _leave(" = %d [prep]", ret);
                        return ret;
@@ -172,6 +167,9 @@ int afs_write_begin(struct file *file, struct address_space *mapping,
                SetPageUptodate(page);
        }
 
+       /* page won't leak in error case: it eventually gets cleaned off LRU */
+       *pagep = page;
+
 try_again:
        spin_lock(&vnode->writeback_lock);
 
@@ -233,7 +231,7 @@ flush_conflicting_wb:
        if (wb->state == AFS_WBACK_PENDING)
                wb->state = AFS_WBACK_CONFLICTING;
        spin_unlock(&vnode->writeback_lock);
-       if (PageDirty(page)) {
+       if (clear_page_dirty_for_io(page)) {
                ret = afs_write_back_from_locked_page(wb, page);
                if (ret < 0) {
                        afs_put_writeback(candidate);
@@ -257,7 +255,9 @@ int afs_write_end(struct file *file, struct address_space *mapping,
                  struct page *page, void *fsdata)
 {
        struct afs_vnode *vnode = AFS_FS_I(file_inode(file));
+       struct key *key = file->private_data;
        loff_t i_size, maybe_i_size;
+       int ret;
 
        _enter("{%x:%u},{%lx}",
               vnode->fid.vid, vnode->fid.vnode, page->index);
@@ -273,6 +273,20 @@ int afs_write_end(struct file *file, struct address_space *mapping,
                spin_unlock(&vnode->writeback_lock);
        }
 
+       if (!PageUptodate(page)) {
+               if (copied < len) {
+                       /* Try and load any missing data from the server.  The
+                        * unmarshalling routine will take care of clearing any
+                        * bits that are beyond the EOF.
+                        */
+                       ret = afs_fill_page(vnode, key, pos + copied,
+                                           len - copied, page);
+                       if (ret < 0)
+                               return ret;
+               }
+               SetPageUptodate(page);
+       }
+
        set_page_dirty(page);
        if (PageDirty(page))
                _debug("dirtied");
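
Concretely: if write_begin handed back a page that was not yet uptodate and the copy came up short (say len == 4096 but copied == 512 because copy_from_user() faulted), bytes [pos + 512, pos + 4096) of the page are undefined, so they must be fetched from the server before SetPageUptodate() may be called:

	/* e.g. len = 4096, copied = 512: fetch the missing 3584 bytes */
	ret = afs_fill_page(vnode, key, pos + copied, len - copied, page);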
@@ -307,10 +321,14 @@ static void afs_kill_pages(struct afs_vnode *vnode, bool error,
                ASSERTCMP(pv.nr, ==, count);
 
                for (loop = 0; loop < count; loop++) {
-                       ClearPageUptodate(pv.pages[loop]);
+                       struct page *page = pv.pages[loop];
+                       ClearPageUptodate(page);
                        if (error)
-                               SetPageError(pv.pages[loop]);
-                       end_page_writeback(pv.pages[loop]);
+                               SetPageError(page);
+                       if (PageWriteback(page))
+                               end_page_writeback(page);
+                       if (page->index >= first)
+                               first = page->index + 1;
                }
 
                __pagevec_release(&pv);
@@ -335,8 +353,6 @@ static int afs_write_back_from_locked_page(struct afs_writeback *wb,
        _enter(",%lx", primary_page->index);
 
        count = 1;
-       if (!clear_page_dirty_for_io(primary_page))
-               BUG();
        if (test_set_page_writeback(primary_page))
                BUG();
 
@@ -502,17 +518,17 @@ static int afs_writepages_region(struct address_space *mapping,
                 */
                lock_page(page);
 
-               if (page->mapping != mapping) {
+               if (page->mapping != mapping || !PageDirty(page)) {
                        unlock_page(page);
                        put_page(page);
                        continue;
                }
 
-               if (wbc->sync_mode != WB_SYNC_NONE)
-                       wait_on_page_writeback(page);
-
-               if (PageWriteback(page) || !PageDirty(page)) {
+               if (PageWriteback(page)) {
                        unlock_page(page);
+                       if (wbc->sync_mode != WB_SYNC_NONE)
+                               wait_on_page_writeback(page);
+                       put_page(page);
                        continue;
                }
 
@@ -523,6 +539,8 @@ static int afs_writepages_region(struct address_space *mapping,
                wb->state = AFS_WBACK_WRITING;
                spin_unlock(&wb->vnode->writeback_lock);
 
+               if (!clear_page_dirty_for_io(page))
+                       BUG();
                ret = afs_write_back_from_locked_page(wb, page);
                unlock_page(page);
                put_page(page);
@@ -745,6 +763,20 @@ out:
        return ret;
 }
 
+/*
+ * Flush out all outstanding writes on a file opened for writing when it is
+ * closed.
+ */
+int afs_flush(struct file *file, fl_owner_t id)
+{
+       _enter("");
+
+       if ((file->f_mode & FMODE_WRITE) == 0)
+               return 0;
+
+       return vfs_fsync(file, 0);
+}
+
 /*
  * notification that a previously read-only page is about to become writable
  * - if it returns an error, the caller will deliver a bus error signal
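
afs_flush() only takes effect once installed as the ->flush handler, which the VFS invokes on every close(2) of the descriptor. Presumably the same patch wires it into the AFS file operations along these lines (the table below is an assumed sketch; the other handlers shown are the pre-existing AFS ones):

	const struct file_operations afs_file_operations = {
		.open		= afs_open,
		.release	= afs_release,
		.read_iter	= generic_file_read_iter,
		.write_iter	= afs_file_write,
		.fsync		= afs_fsync,
		.flush		= afs_flush,	/* new: write back on close */
	};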
index 28e8192..8df7974 100644
@@ -1714,7 +1714,8 @@ static int __process_pages_contig(struct address_space *mapping,
                         * can we find nothing at @index.
                         */
                        ASSERT(page_ops & PAGE_LOCK);
-                       return ret;
+                       err = -EAGAIN;
+                       goto out;
                }
 
                for (i = 0; i < ret; i++) {
index c40060c..2315039 100644
@@ -6709,6 +6709,20 @@ static noinline int uncompress_inline(struct btrfs_path *path,
        max_size = min_t(unsigned long, PAGE_SIZE, max_size);
        ret = btrfs_decompress(compress_type, tmp, page,
                               extent_offset, inline_size, max_size);
+
+       /*
+        * decompression code contains a memset to fill in any space between the end
+        * of the uncompressed data and the end of max_size in case the decompressed
+        * data ends up shorter than ram_bytes.  That doesn't cover the hole between
+        * the end of an inline extent and the beginning of the next block, so we
+        * cover that region here.
+        */
+
+       if (max_size + pg_offset < PAGE_SIZE) {
+               char *map = kmap(page);
+               memset(map + pg_offset + max_size, 0, PAGE_SIZE - max_size - pg_offset);
+               kunmap(page);
+       }
        kfree(tmp);
        return ret;
 }
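
A worked instance of the hole being plugged: with PAGE_SIZE == 4096, pg_offset == 0 and an inline extent that decompresses to max_size == 1000 bytes, the decompressor's own memset stops at byte 1000, so bytes 1000..4095 would otherwise keep stale contents. The new block zeroes exactly that tail:

	/* pg_offset = 0, max_size = 1000 */
	char *map = kmap(page);
	memset(map + 0 + 1000, 0, 4096 - 1000 - 0);	/* 3096 bytes */
	kunmap(page);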
index a77df37..ee2d0a4 100644
@@ -196,6 +196,7 @@ static void update_mem_info(struct f2fs_sb_info *sbi)
        si->base_mem += (NM_I(sbi)->nat_bits_blocks << F2FS_BLKSIZE_BITS);
        si->base_mem += NM_I(sbi)->nat_blocks * NAT_ENTRY_BITMAP_SIZE;
        si->base_mem += NM_I(sbi)->nat_blocks / 8;
+       si->base_mem += NM_I(sbi)->nat_blocks * sizeof(unsigned short);
 
 get_cache:
        si->cache_mem = 0;
index 4650c9b..8d5c62b 100644
@@ -750,7 +750,7 @@ void f2fs_delete_entry(struct f2fs_dir_entry *dentry, struct page *page,
        dentry_blk = page_address(page);
        bit_pos = dentry - dentry_blk->dentry;
        for (i = 0; i < slots; i++)
-               clear_bit_le(bit_pos + i, &dentry_blk->dentry_bitmap);
+               __clear_bit_le(bit_pos + i, &dentry_blk->dentry_bitmap);
 
        /* Let's check and deallocate this dentry page */
        bit_pos = find_next_bit_le(&dentry_blk->dentry_bitmap,
index e849f83..0a6e115 100644
@@ -561,6 +561,8 @@ struct f2fs_nm_info {
        struct mutex build_lock;        /* lock for build free nids */
        unsigned char (*free_nid_bitmap)[NAT_ENTRY_BITMAP_SIZE];
        unsigned char *nat_block_bitmap;
+       unsigned short *free_nid_count; /* free nid count of NAT block */
+       spinlock_t free_nid_lock;       /* protect updating of nid count */
 
        /* for checkpoint */
        char *nat_bitmap;               /* NAT bitmap pointer */
index 9496717..481aa8d 100644
@@ -338,9 +338,6 @@ static void set_node_addr(struct f2fs_sb_info *sbi, struct node_info *ni,
                set_nat_flag(e, IS_CHECKPOINTED, false);
        __set_nat_cache_dirty(nm_i, e);
 
-       if (enabled_nat_bits(sbi, NULL) && new_blkaddr == NEW_ADDR)
-               clear_bit_le(NAT_BLOCK_OFFSET(ni->nid), nm_i->empty_nat_bits);
-
        /* update fsync_mark if its inode nat entry is still alive */
        if (ni->nid != ni->ino)
                e = __lookup_nat_cache(nm_i, ni->ino);
@@ -1823,7 +1820,8 @@ static void remove_free_nid(struct f2fs_sb_info *sbi, nid_t nid)
                kmem_cache_free(free_nid_slab, i);
 }
 
-void update_free_nid_bitmap(struct f2fs_sb_info *sbi, nid_t nid, bool set)
+static void update_free_nid_bitmap(struct f2fs_sb_info *sbi, nid_t nid,
+                       bool set, bool build, bool locked)
 {
        struct f2fs_nm_info *nm_i = NM_I(sbi);
        unsigned int nat_ofs = NAT_BLOCK_OFFSET(nid);
@@ -1833,9 +1831,18 @@ void update_free_nid_bitmap(struct f2fs_sb_info *sbi, nid_t nid, bool set)
                return;
 
        if (set)
-               set_bit_le(nid_ofs, nm_i->free_nid_bitmap[nat_ofs]);
+               __set_bit_le(nid_ofs, nm_i->free_nid_bitmap[nat_ofs]);
        else
-               clear_bit_le(nid_ofs, nm_i->free_nid_bitmap[nat_ofs]);
+               __clear_bit_le(nid_ofs, nm_i->free_nid_bitmap[nat_ofs]);
+
+       if (!locked)
+               spin_lock(&nm_i->free_nid_lock);
+       if (set)
+               nm_i->free_nid_count[nat_ofs]++;
+       else if (!build)
+               nm_i->free_nid_count[nat_ofs]--;
+       if (!locked)
+               spin_unlock(&nm_i->free_nid_lock);
 }
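
The new locked flag lets the bulk loader added further down (load_free_nid_bitmap(), which takes free_nid_lock once around a whole NAT block's worth of nids) skip the per-call locking. For an ordinary caller the function behaves like this sketch; the bitmap write itself is assumed to be serialized by the caller's nid_list_lock, which is why the non-atomic __set_bit_le()/__clear_bit_le() variants are safe here:

	__set_bit_le(nid_ofs, nm_i->free_nid_bitmap[nat_ofs]);
	spin_lock(&nm_i->free_nid_lock);
	nm_i->free_nid_count[nat_ofs]++;	/* count guarded by the lock */
	spin_unlock(&nm_i->free_nid_lock);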
 
 static void scan_nat_page(struct f2fs_sb_info *sbi,
@@ -1847,7 +1854,10 @@ static void scan_nat_page(struct f2fs_sb_info *sbi,
        unsigned int nat_ofs = NAT_BLOCK_OFFSET(start_nid);
        int i;
 
-       set_bit_le(nat_ofs, nm_i->nat_block_bitmap);
+       if (test_bit_le(nat_ofs, nm_i->nat_block_bitmap))
+               return;
+
+       __set_bit_le(nat_ofs, nm_i->nat_block_bitmap);
 
        i = start_nid % NAT_ENTRY_PER_BLOCK;
 
@@ -1861,7 +1871,7 @@ static void scan_nat_page(struct f2fs_sb_info *sbi,
                f2fs_bug_on(sbi, blk_addr == NEW_ADDR);
                if (blk_addr == NULL_ADDR)
                        freed = add_free_nid(sbi, start_nid, true);
-               update_free_nid_bitmap(sbi, start_nid, freed);
+               update_free_nid_bitmap(sbi, start_nid, freed, true, false);
        }
 }
 
@@ -1877,6 +1887,8 @@ static void scan_free_nid_bits(struct f2fs_sb_info *sbi)
        for (i = 0; i < nm_i->nat_blocks; i++) {
                if (!test_bit_le(i, nm_i->nat_block_bitmap))
                        continue;
+               if (!nm_i->free_nid_count[i])
+                       continue;
                for (idx = 0; idx < NAT_ENTRY_PER_BLOCK; idx++) {
                        nid_t nid;
 
@@ -1907,58 +1919,6 @@ out:
        up_read(&nm_i->nat_tree_lock);
 }
 
-static int scan_nat_bits(struct f2fs_sb_info *sbi)
-{
-       struct f2fs_nm_info *nm_i = NM_I(sbi);
-       struct page *page;
-       unsigned int i = 0;
-       nid_t nid;
-
-       if (!enabled_nat_bits(sbi, NULL))
-               return -EAGAIN;
-
-       down_read(&nm_i->nat_tree_lock);
-check_empty:
-       i = find_next_bit_le(nm_i->empty_nat_bits, nm_i->nat_blocks, i);
-       if (i >= nm_i->nat_blocks) {
-               i = 0;
-               goto check_partial;
-       }
-
-       for (nid = i * NAT_ENTRY_PER_BLOCK; nid < (i + 1) * NAT_ENTRY_PER_BLOCK;
-                                                                       nid++) {
-               if (unlikely(nid >= nm_i->max_nid))
-                       break;
-               add_free_nid(sbi, nid, true);
-       }
-
-       if (nm_i->nid_cnt[FREE_NID_LIST] >= MAX_FREE_NIDS)
-               goto out;
-       i++;
-       goto check_empty;
-
-check_partial:
-       i = find_next_zero_bit_le(nm_i->full_nat_bits, nm_i->nat_blocks, i);
-       if (i >= nm_i->nat_blocks) {
-               disable_nat_bits(sbi, true);
-               up_read(&nm_i->nat_tree_lock);
-               return -EINVAL;
-       }
-
-       nid = i * NAT_ENTRY_PER_BLOCK;
-       page = get_current_nat_page(sbi, nid);
-       scan_nat_page(sbi, page, nid);
-       f2fs_put_page(page, 1);
-
-       if (nm_i->nid_cnt[FREE_NID_LIST] < MAX_FREE_NIDS) {
-               i++;
-               goto check_partial;
-       }
-out:
-       up_read(&nm_i->nat_tree_lock);
-       return 0;
-}
-
 static void __build_free_nids(struct f2fs_sb_info *sbi, bool sync, bool mount)
 {
        struct f2fs_nm_info *nm_i = NM_I(sbi);
@@ -1980,21 +1940,6 @@ static void __build_free_nids(struct f2fs_sb_info *sbi, bool sync, bool mount)
 
                if (nm_i->nid_cnt[FREE_NID_LIST])
                        return;
-
-               /* try to find free nids with nat_bits */
-               if (!scan_nat_bits(sbi) && nm_i->nid_cnt[FREE_NID_LIST])
-                       return;
-       }
-
-       /* find next valid candidate */
-       if (enabled_nat_bits(sbi, NULL)) {
-               int idx = find_next_zero_bit_le(nm_i->full_nat_bits,
-                                       nm_i->nat_blocks, 0);
-
-               if (idx >= nm_i->nat_blocks)
-                       set_sbi_flag(sbi, SBI_NEED_FSCK);
-               else
-                       nid = idx * NAT_ENTRY_PER_BLOCK;
        }
 
        /* readahead nat pages to be scanned */
@@ -2081,7 +2026,7 @@ retry:
                __insert_nid_to_list(sbi, i, ALLOC_NID_LIST, false);
                nm_i->available_nids--;
 
-               update_free_nid_bitmap(sbi, *nid, false);
+               update_free_nid_bitmap(sbi, *nid, false, false, false);
 
                spin_unlock(&nm_i->nid_list_lock);
                return true;
@@ -2137,7 +2082,7 @@ void alloc_nid_failed(struct f2fs_sb_info *sbi, nid_t nid)
 
        nm_i->available_nids++;
 
-       update_free_nid_bitmap(sbi, nid, true);
+       update_free_nid_bitmap(sbi, nid, true, false, false);
 
        spin_unlock(&nm_i->nid_list_lock);
 
@@ -2383,7 +2328,7 @@ add_out:
        list_add_tail(&nes->set_list, head);
 }
 
-void __update_nat_bits(struct f2fs_sb_info *sbi, nid_t start_nid,
+static void __update_nat_bits(struct f2fs_sb_info *sbi, nid_t start_nid,
                                                struct page *page)
 {
        struct f2fs_nm_info *nm_i = NM_I(sbi);
@@ -2402,16 +2347,16 @@ void __update_nat_bits(struct f2fs_sb_info *sbi, nid_t start_nid,
                        valid++;
        }
        if (valid == 0) {
-               set_bit_le(nat_index, nm_i->empty_nat_bits);
-               clear_bit_le(nat_index, nm_i->full_nat_bits);
+               __set_bit_le(nat_index, nm_i->empty_nat_bits);
+               __clear_bit_le(nat_index, nm_i->full_nat_bits);
                return;
        }
 
-       clear_bit_le(nat_index, nm_i->empty_nat_bits);
+       __clear_bit_le(nat_index, nm_i->empty_nat_bits);
        if (valid == NAT_ENTRY_PER_BLOCK)
-               set_bit_le(nat_index, nm_i->full_nat_bits);
+               __set_bit_le(nat_index, nm_i->full_nat_bits);
        else
-               clear_bit_le(nat_index, nm_i->full_nat_bits);
+               __clear_bit_le(nat_index, nm_i->full_nat_bits);
 }
 
 static void __flush_nat_entry_set(struct f2fs_sb_info *sbi,
@@ -2467,11 +2412,11 @@ static void __flush_nat_entry_set(struct f2fs_sb_info *sbi,
                        add_free_nid(sbi, nid, false);
                        spin_lock(&NM_I(sbi)->nid_list_lock);
                        NM_I(sbi)->available_nids++;
-                       update_free_nid_bitmap(sbi, nid, true);
+                       update_free_nid_bitmap(sbi, nid, true, false, false);
                        spin_unlock(&NM_I(sbi)->nid_list_lock);
                } else {
                        spin_lock(&NM_I(sbi)->nid_list_lock);
-                       update_free_nid_bitmap(sbi, nid, false);
+                       update_free_nid_bitmap(sbi, nid, false, false, false);
                        spin_unlock(&NM_I(sbi)->nid_list_lock);
                }
        }
@@ -2577,6 +2522,40 @@ static int __get_nat_bitmaps(struct f2fs_sb_info *sbi)
        return 0;
 }
 
+inline void load_free_nid_bitmap(struct f2fs_sb_info *sbi)
+{
+       struct f2fs_nm_info *nm_i = NM_I(sbi);
+       unsigned int i = 0;
+       nid_t nid, last_nid;
+
+       if (!enabled_nat_bits(sbi, NULL))
+               return;
+
+       for (i = 0; i < nm_i->nat_blocks; i++) {
+               i = find_next_bit_le(nm_i->empty_nat_bits, nm_i->nat_blocks, i);
+               if (i >= nm_i->nat_blocks)
+                       break;
+
+               __set_bit_le(i, nm_i->nat_block_bitmap);
+
+               nid = i * NAT_ENTRY_PER_BLOCK;
+               last_nid = (i + 1) * NAT_ENTRY_PER_BLOCK;
+
+               spin_lock(&nm_i->free_nid_lock);
+               for (; nid < last_nid; nid++)
+                       update_free_nid_bitmap(sbi, nid, true, true, true);
+               spin_unlock(&nm_i->free_nid_lock);
+       }
+
+       for (i = 0; i < nm_i->nat_blocks; i++) {
+               i = find_next_bit_le(nm_i->full_nat_bits, nm_i->nat_blocks, i);
+               if (i >= nm_i->nat_blocks)
+                       break;
+
+               __set_bit_le(i, nm_i->nat_block_bitmap);
+       }
+}
+
 static int init_node_manager(struct f2fs_sb_info *sbi)
 {
        struct f2fs_super_block *sb_raw = F2FS_RAW_SUPER(sbi);
@@ -2638,7 +2617,7 @@ static int init_node_manager(struct f2fs_sb_info *sbi)
        return 0;
 }
 
-int init_free_nid_cache(struct f2fs_sb_info *sbi)
+static int init_free_nid_cache(struct f2fs_sb_info *sbi)
 {
        struct f2fs_nm_info *nm_i = NM_I(sbi);
 
@@ -2651,6 +2630,14 @@ int init_free_nid_cache(struct f2fs_sb_info *sbi)
                                                                GFP_KERNEL);
        if (!nm_i->nat_block_bitmap)
                return -ENOMEM;
+
+       nm_i->free_nid_count = f2fs_kvzalloc(nm_i->nat_blocks *
+                                       sizeof(unsigned short), GFP_KERNEL);
+       if (!nm_i->free_nid_count)
+               return -ENOMEM;
+
+       spin_lock_init(&nm_i->free_nid_lock);
+
        return 0;
 }
 
@@ -2670,6 +2657,9 @@ int build_node_manager(struct f2fs_sb_info *sbi)
        if (err)
                return err;
 
+       /* load free nid status from nat_bits table */
+       load_free_nid_bitmap(sbi);
+
        build_free_nids(sbi, true, true);
        return 0;
 }
@@ -2730,6 +2720,7 @@ void destroy_node_manager(struct f2fs_sb_info *sbi)
 
        kvfree(nm_i->nat_block_bitmap);
        kvfree(nm_i->free_nid_bitmap);
+       kvfree(nm_i->free_nid_count);
 
        kfree(nm_i->nat_bitmap);
        kfree(nm_i->nat_bits);
index 4bd7a8b..29ef708 100644
@@ -1163,6 +1163,12 @@ static void update_sit_entry(struct f2fs_sb_info *sbi, block_t blkaddr, int del)
                if (f2fs_discard_en(sbi) &&
                        !f2fs_test_and_set_bit(offset, se->discard_map))
                        sbi->discard_blks--;
+
+               /* don't overwrite by SSR to keep node chain */
+               if (se->type == CURSEG_WARM_NODE) {
+                       if (!f2fs_test_and_set_bit(offset, se->ckpt_valid_map))
+                               se->ckpt_valid_blocks++;
+               }
        } else {
                if (!f2fs_test_and_clear_bit(offset, se->cur_valid_map)) {
 #ifdef CONFIG_F2FS_CHECK_FS
index bb79972..7737745 100644
@@ -232,12 +232,12 @@ static struct svc_serv_ops nfs41_cb_sv_ops = {
        .svo_module             = THIS_MODULE,
 };
 
-struct svc_serv_ops *nfs4_cb_sv_ops[] = {
+static struct svc_serv_ops *nfs4_cb_sv_ops[] = {
        [0] = &nfs40_cb_sv_ops,
        [1] = &nfs41_cb_sv_ops,
 };
 #else
-struct svc_serv_ops *nfs4_cb_sv_ops[] = {
+static struct svc_serv_ops *nfs4_cb_sv_ops[] = {
        [0] = &nfs40_cb_sv_ops,
        [1] = NULL,
 };
index 91a8d61..390ada8 100644
@@ -325,10 +325,33 @@ static struct nfs_client *nfs_match_client(const struct nfs_client_initdata *dat
        return NULL;
 }
 
-static bool nfs_client_init_is_complete(const struct nfs_client *clp)
+/*
+ * Return true if @clp is done initializing, false if still working on it.
+ *
+ * Use nfs_client_init_status to check if it was successful.
+ */
+bool nfs_client_init_is_complete(const struct nfs_client *clp)
 {
        return clp->cl_cons_state <= NFS_CS_READY;
 }
+EXPORT_SYMBOL_GPL(nfs_client_init_is_complete);
+
+/*
+ * Return 0 if @clp was successfully initialized, -errno otherwise.
+ *
+ * This must be called *after* nfs_client_init_is_complete() returns true,
+ * otherwise it will trigger a WARN_ON_ONCE() and return -EINVAL.
+ */
+int nfs_client_init_status(const struct nfs_client *clp)
+{
+       /* called without checking nfs_client_init_is_complete */
+       if (clp->cl_cons_state > NFS_CS_READY) {
+               WARN_ON_ONCE(1);
+               return -EINVAL;
+       }
+       return clp->cl_cons_state;
+}
+EXPORT_SYMBOL_GPL(nfs_client_init_status);
 
 int nfs_wait_client_init_complete(const struct nfs_client *clp)
 {
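
The two newly-exported helpers are meant to be used as a pair: wait until initialisation finishes, then read its result. In outline (a sketch, error handling elided):

	if (!nfs_client_init_is_complete(clp))
		nfs_wait_client_init_complete(clp);	/* sleep until done */
	err = nfs_client_init_status(clp);		/* 0 or a -errno */

nfs4_pnfs_ds_connect() further down in this diff uses exactly this check-then-status pattern on ds->ds_clp.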
index f956ca2..d913e81 100644
@@ -266,6 +266,7 @@ nfs4_fl_prepare_ds(struct pnfs_layout_segment *lseg, u32 ds_idx)
        struct nfs4_deviceid_node *devid = FILELAYOUT_DEVID_NODE(lseg);
        struct nfs4_pnfs_ds *ret = ds;
        struct nfs_server *s = NFS_SERVER(lseg->pls_layout->plh_inode);
+       int status;
 
        if (ds == NULL) {
                printk(KERN_ERR "NFS: %s: No data server for offset index %d\n",
@@ -277,9 +278,14 @@ nfs4_fl_prepare_ds(struct pnfs_layout_segment *lseg, u32 ds_idx)
        if (ds->ds_clp)
                goto out_test_devid;
 
-       nfs4_pnfs_ds_connect(s, ds, devid, dataserver_timeo,
+       status = nfs4_pnfs_ds_connect(s, ds, devid, dataserver_timeo,
                             dataserver_retrans, 4,
                             s->nfs_client->cl_minorversion);
+       if (status) {
+               nfs4_mark_deviceid_unavailable(devid);
+               ret = NULL;
+               goto out;
+       }
 
 out_test_devid:
        if (ret->ds_clp == NULL ||
index f4f39b0..98b34c9 100644
@@ -175,7 +175,19 @@ ff_layout_no_read_on_rw(struct pnfs_layout_segment *lseg)
 static inline bool
 ff_layout_test_devid_unavailable(struct nfs4_deviceid_node *node)
 {
-       return nfs4_test_deviceid_unavailable(node);
+       /*
+        * Flexfiles should never mark a DS unavailable, but if one is
+        * marked, print a (ratelimited) warning, as this can affect
+        * performance.
+        */
+       if (nfs4_test_deviceid_unavailable(node)) {
+               u32 *p = (u32 *)node->deviceid.data;
+
+               pr_warn_ratelimited("NFS: flexfiles layout referencing an "
+                               "unavailable device [%x%x%x%x]\n",
+                               p[0], p[1], p[2], p[3]);
+               return true;
+       }
+       return false;
 }
 
 static inline int
index e5a6f24..85fde93 100644
@@ -384,6 +384,7 @@ nfs4_ff_layout_prepare_ds(struct pnfs_layout_segment *lseg, u32 ds_idx,
        struct inode *ino = lseg->pls_layout->plh_inode;
        struct nfs_server *s = NFS_SERVER(ino);
        unsigned int max_payload;
+       int status;
 
        if (!ff_layout_mirror_valid(lseg, mirror, true)) {
                pr_err_ratelimited("NFS: %s: No data server for offset index %d\n",
@@ -404,7 +405,7 @@ nfs4_ff_layout_prepare_ds(struct pnfs_layout_segment *lseg, u32 ds_idx,
        /* FIXME: For now we assume the server sent only one version of NFS
         * to use for the DS.
         */
-       nfs4_pnfs_ds_connect(s, ds, devid, dataserver_timeo,
+       status = nfs4_pnfs_ds_connect(s, ds, devid, dataserver_timeo,
                             dataserver_retrans,
                             mirror->mirror_ds->ds_versions[0].version,
                             mirror->mirror_ds->ds_versions[0].minor_version);
@@ -420,11 +421,11 @@ nfs4_ff_layout_prepare_ds(struct pnfs_layout_segment *lseg, u32 ds_idx,
                        mirror->mirror_ds->ds_versions[0].wsize = max_payload;
                goto out;
        }
+out_fail:
        ff_layout_track_ds_error(FF_LAYOUT_FROM_HDR(lseg->pls_layout),
                                 mirror, lseg->pls_range.offset,
                                 lseg->pls_range.length, NFS4ERR_NXIO,
                                 OP_ILLEGAL, GFP_NOIO);
-out_fail:
        if (fail_return || !ff_layout_has_available_ds(lseg))
                pnfs_error_mark_layout_for_return(ino, lseg);
        ds = NULL;
index 09ca509..7b38fed 100644
@@ -186,6 +186,8 @@ extern struct nfs_server *nfs_clone_server(struct nfs_server *,
                                           struct nfs_fh *,
                                           struct nfs_fattr *,
                                           rpc_authflavor_t);
+extern bool nfs_client_init_is_complete(const struct nfs_client *clp);
+extern int nfs_client_init_status(const struct nfs_client *clp);
 extern int nfs_wait_client_init_complete(const struct nfs_client *clp);
 extern void nfs_mark_client_ready(struct nfs_client *clp, int state);
 extern struct nfs_client *nfs4_set_ds_client(struct nfs_server *mds_srv,
index 5ae9d64..8346ccb 100644
@@ -1023,9 +1023,9 @@ static void nfs4_session_set_rwsize(struct nfs_server *server)
        server_resp_sz = sess->fc_attrs.max_resp_sz - nfs41_maxread_overhead;
        server_rqst_sz = sess->fc_attrs.max_rqst_sz - nfs41_maxwrite_overhead;
 
-       if (server->rsize > server_resp_sz)
+       if (!server->rsize || server->rsize > server_resp_sz)
                server->rsize = server_resp_sz;
-       if (server->wsize > server_rqst_sz)
+       if (!server->wsize || server->wsize > server_rqst_sz)
                server->wsize = server_rqst_sz;
 #endif /* CONFIG_NFS_V4_1 */
 }
index 1b18368..c780d98 100644
@@ -2258,8 +2258,6 @@ static int nfs4_opendata_access(struct rpc_cred *cred,
        if ((mask & ~cache.mask & (MAY_READ | MAY_EXEC)) == 0)
                return 0;
 
-       /* even though OPEN succeeded, access is denied. Close the file */
-       nfs4_close_state(state, fmode);
        return -EACCES;
 }
 
@@ -7427,11 +7425,11 @@ static void nfs4_exchange_id_release(void *data)
        struct nfs41_exchange_id_data *cdata =
                                        (struct nfs41_exchange_id_data *)data;
 
-       nfs_put_client(cdata->args.client);
        if (cdata->xprt) {
                xprt_put(cdata->xprt);
                rpc_clnt_xprt_switch_put(cdata->args.client->cl_rpcclient);
        }
+       nfs_put_client(cdata->args.client);
        kfree(cdata->res.impl_id);
        kfree(cdata->res.server_scope);
        kfree(cdata->res.server_owner);
@@ -7538,10 +7536,8 @@ static int _nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred,
        task_setup_data.callback_data = calldata;
 
        task = rpc_run_task(&task_setup_data);
-       if (IS_ERR(task)) {
-       status = PTR_ERR(task);
-               goto out_impl_id;
-       }
+       if (IS_ERR(task))
+               return PTR_ERR(task);
 
        if (!xprt) {
                status = rpc_wait_for_completion_task(task);
@@ -7569,6 +7565,7 @@ out_server_owner:
        kfree(calldata->res.server_owner);
 out_calldata:
        kfree(calldata);
+       nfs_put_client(clp);
        goto out;
 }
 
index f0369e3..80ce289 100644
@@ -3942,7 +3942,7 @@ static int decode_attr_group(struct xdr_stream *xdr, uint32_t *bitmap,
                if (len <= 0)
                        goto out;
                dprintk("%s: name=%s\n", __func__, group_name->data);
-               return NFS_ATTR_FATTR_OWNER_NAME;
+               return NFS_ATTR_FATTR_GROUP_NAME;
        } else {
                len = xdr_stream_decode_opaque_inline(xdr, (void **)&p,
                                XDR_MAX_NETOBJ);
index 63f77b4..590e1e3 100644
@@ -367,7 +367,7 @@ void nfs4_pnfs_ds_put(struct nfs4_pnfs_ds *ds);
 struct nfs4_pnfs_ds *nfs4_pnfs_ds_add(struct list_head *dsaddrs,
                                      gfp_t gfp_flags);
 void nfs4_pnfs_v3_ds_connect_unload(void);
-void nfs4_pnfs_ds_connect(struct nfs_server *mds_srv, struct nfs4_pnfs_ds *ds,
+int nfs4_pnfs_ds_connect(struct nfs_server *mds_srv, struct nfs4_pnfs_ds *ds,
                          struct nfs4_deviceid_node *devid, unsigned int timeo,
                          unsigned int retrans, u32 version, u32 minor_version);
 struct nfs4_pnfs_ds_addr *nfs4_decode_mp_ds_addr(struct net *net,
index 9414b49..7250b95 100644
@@ -745,15 +745,17 @@ out:
 /*
  * Create an rpc connection to the nfs4_pnfs_ds data server.
  * Currently only supports IPv4 and IPv6 addresses.
- * If connection fails, make devid unavailable.
+ * If connection fails, make devid unavailable and return a -errno.
  */
-void nfs4_pnfs_ds_connect(struct nfs_server *mds_srv, struct nfs4_pnfs_ds *ds,
+int nfs4_pnfs_ds_connect(struct nfs_server *mds_srv, struct nfs4_pnfs_ds *ds,
                          struct nfs4_deviceid_node *devid, unsigned int timeo,
                          unsigned int retrans, u32 version, u32 minor_version)
 {
-       if (test_and_set_bit(NFS4DS_CONNECTING, &ds->ds_state) == 0) {
-               int err = 0;
+       int err;
 
+again:
+       err = 0;
+       if (test_and_set_bit(NFS4DS_CONNECTING, &ds->ds_state) == 0) {
                if (version == 3) {
                        err = _nfs4_pnfs_v3_ds_connect(mds_srv, ds, timeo,
                                                       retrans);
@@ -766,12 +768,29 @@ void nfs4_pnfs_ds_connect(struct nfs_server *mds_srv, struct nfs4_pnfs_ds *ds,
                        err = -EPROTONOSUPPORT;
                }
 
-               if (err)
-                       nfs4_mark_deviceid_unavailable(devid);
                nfs4_clear_ds_conn_bit(ds);
        } else {
                nfs4_wait_ds_connect(ds);
+
+               /* what was waited on didn't connect AND didn't mark unavail */
+               if (!ds->ds_clp && !nfs4_test_deviceid_unavailable(devid))
+                       goto again;
        }
+
+       /*
+        * At this point the ds->ds_clp should be ready, but it might have
+        * hit an error.
+        */
+       if (!err) {
+               if (!ds->ds_clp || !nfs_client_init_is_complete(ds->ds_clp)) {
+                       WARN_ON_ONCE(ds->ds_clp ||
+                               !nfs4_test_deviceid_unavailable(devid));
+                       return -EINVAL;
+               }
+               err = nfs_client_init_status(ds->ds_clp);
+       }
+
+       return err;
 }
 EXPORT_SYMBOL_GPL(nfs4_pnfs_ds_connect);
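
The flow above is a connect-once gate: the first caller wins the NFS4DS_CONNECTING bit and performs the connect, everyone else waits on it, and a waiter that wakes to find neither a client nor an unavailable devid loops back to try to become the connector itself. Reduced to a skeleton (do_connect() is a hypothetical stand-in for the version-specific connect calls):

	again:
		if (test_and_set_bit(NFS4DS_CONNECTING, &ds->ds_state) == 0) {
			err = do_connect(ds);		/* hypothetical helper */
			nfs4_clear_ds_conn_bit(ds);	/* also wakes waiters */
		} else {
			nfs4_wait_ds_connect(ds);
			if (!ds->ds_clp && !nfs4_test_deviceid_unavailable(devid))
				goto again;	/* lost the race, try to connect */
		}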
 
index e75b056..abb2c8a 100644
@@ -1784,7 +1784,8 @@ static void nfs_commit_release_pages(struct nfs_commit_data *data)
                        (long long)req_offset(req));
                if (status < 0) {
                        nfs_context_set_write_error(req->wb_context, status);
-                       nfs_inode_remove_request(req);
+                       if (req->wb_page)
+                               nfs_inode_remove_request(req);
                        dprintk_cont(", error = %d\n", status);
                        goto next;
                }
@@ -1793,7 +1794,8 @@ static void nfs_commit_release_pages(struct nfs_commit_data *data)
                 * returned by the server against all stored verfs. */
                if (!nfs_write_verifier_cmp(&req->wb_verf, &data->verf.verifier)) {
                        /* We have a match */
-                       nfs_inode_remove_request(req);
+                       if (req->wb_page)
+                               nfs_inode_remove_request(req);
                        dprintk_cont(" OK\n");
                        goto next;
                }
index d04547f..eb00bc1 100644
@@ -125,6 +125,8 @@ extern int xfs_dir2_sf_create(struct xfs_da_args *args, xfs_ino_t pino);
 extern int xfs_dir2_sf_lookup(struct xfs_da_args *args);
 extern int xfs_dir2_sf_removename(struct xfs_da_args *args);
 extern int xfs_dir2_sf_replace(struct xfs_da_args *args);
+extern int xfs_dir2_sf_verify(struct xfs_mount *mp, struct xfs_dir2_sf_hdr *sfp,
+               int size);
 
 /* xfs_dir2_readdir.c */
 extern int xfs_readdir(struct xfs_inode *dp, struct dir_context *ctx,
index c6809ff..96b45cd 100644
@@ -629,6 +629,93 @@ xfs_dir2_sf_check(
 }
 #endif /* DEBUG */
 
+/* Verify the consistency of an inline directory. */
+int
+xfs_dir2_sf_verify(
+       struct xfs_mount                *mp,
+       struct xfs_dir2_sf_hdr          *sfp,
+       int                             size)
+{
+       struct xfs_dir2_sf_entry        *sfep;
+       struct xfs_dir2_sf_entry        *next_sfep;
+       char                            *endp;
+       const struct xfs_dir_ops        *dops;
+       xfs_ino_t                       ino;
+       int                             i;
+       int                             i8count;
+       int                             offset;
+       __uint8_t                       filetype;
+
+       dops = xfs_dir_get_ops(mp, NULL);
+
+       /*
+        * Give up if the directory is way too short.
+        */
+       XFS_WANT_CORRUPTED_RETURN(mp, size >
+                       offsetof(struct xfs_dir2_sf_hdr, parent));
+       XFS_WANT_CORRUPTED_RETURN(mp, size >=
+                       xfs_dir2_sf_hdr_size(sfp->i8count));
+
+       endp = (char *)sfp + size;
+
+       /* Check .. entry */
+       ino = dops->sf_get_parent_ino(sfp);
+       i8count = ino > XFS_DIR2_MAX_SHORT_INUM;
+       XFS_WANT_CORRUPTED_RETURN(mp, !xfs_dir_ino_validate(mp, ino));
+       offset = dops->data_first_offset;
+
+       /* Check all reported entries */
+       sfep = xfs_dir2_sf_firstentry(sfp);
+       for (i = 0; i < sfp->count; i++) {
+               /*
+                * struct xfs_dir2_sf_entry has a variable length.
+                * Check the fixed-offset parts of the structure are
+                * within the data buffer.
+                */
+               XFS_WANT_CORRUPTED_RETURN(mp,
+                               ((char *)sfep + sizeof(*sfep)) < endp);
+
+               /* Don't allow names with known bad length. */
+               XFS_WANT_CORRUPTED_RETURN(mp, sfep->namelen > 0);
+               XFS_WANT_CORRUPTED_RETURN(mp, sfep->namelen < MAXNAMELEN);
+
+               /*
+                * Check that the variable-length part of the structure is
+                * within the data buffer.  The next entry starts after the
+                * name component, so nextentry is an acceptable test.
+                */
+               next_sfep = dops->sf_nextentry(sfp, sfep);
+               XFS_WANT_CORRUPTED_RETURN(mp, endp >= (char *)next_sfep);
+
+               /* Check that the offsets always increase. */
+               XFS_WANT_CORRUPTED_RETURN(mp,
+                               xfs_dir2_sf_get_offset(sfep) >= offset);
+
+               /* Check the inode number. */
+               ino = dops->sf_get_ino(sfp, sfep);
+               i8count += ino > XFS_DIR2_MAX_SHORT_INUM;
+               XFS_WANT_CORRUPTED_RETURN(mp, !xfs_dir_ino_validate(mp, ino));
+
+               /* Check the file type. */
+               filetype = dops->sf_get_ftype(sfep);
+               XFS_WANT_CORRUPTED_RETURN(mp, filetype < XFS_DIR3_FT_MAX);
+
+               offset = xfs_dir2_sf_get_offset(sfep) +
+                               dops->data_entsize(sfep->namelen);
+
+               sfep = next_sfep;
+       }
+       XFS_WANT_CORRUPTED_RETURN(mp, i8count == sfp->i8count);
+       XFS_WANT_CORRUPTED_RETURN(mp, (void *)sfep == (void *)endp);
+
+       /* Make sure this whole thing ought to be in local format. */
+       XFS_WANT_CORRUPTED_RETURN(mp, offset +
+              (sfp->count + 2) * (uint)sizeof(xfs_dir2_leaf_entry_t) +
+              (uint)sizeof(xfs_dir2_block_tail_t) <= mp->m_dir_geo->blksize);
+
+       return 0;
+}
+
 /*
  * Create a new (shortform) directory.
  */
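
Each XFS_WANT_CORRUPTED_RETURN(mp, expr) in the verifier above is one invariant check. As far as I recall the macro, it expands to roughly the following, so the verifier reports and bails with -EFSCORRUPTED at the first violated invariant (approximate expansion, not the literal macro text):

	if (!(expr)) {
		XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, mp);
		return -EFSCORRUPTED;
	}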
index 25c1e07..9653e96 100644
@@ -33,6 +33,8 @@
 #include "xfs_trace.h"
 #include "xfs_attr_sf.h"
 #include "xfs_da_format.h"
+#include "xfs_da_btree.h"
+#include "xfs_dir2_priv.h"
 
 kmem_zone_t *xfs_ifork_zone;
 
@@ -320,6 +322,7 @@ xfs_iformat_local(
        int             whichfork,
        int             size)
 {
+       int             error;
 
        /*
         * If the size is unreasonable, then something
@@ -336,6 +339,14 @@ xfs_iformat_local(
                return -EFSCORRUPTED;
        }
 
+       if (S_ISDIR(VFS_I(ip)->i_mode) && whichfork == XFS_DATA_FORK) {
+               error = xfs_dir2_sf_verify(ip->i_mount,
+                               (struct xfs_dir2_sf_hdr *)XFS_DFORK_DPTR(dip),
+                               size);
+               if (error)
+                       return error;
+       }
+
        xfs_init_local_fork(ip, whichfork, XFS_DFORK_PTR(dip, whichfork), size);
        return 0;
 }
@@ -856,7 +867,7 @@ xfs_iextents_copy(
  * In these cases, the format always takes precedence, because the
  * format indicates the current state of the fork.
  */
-void
+int
 xfs_iflush_fork(
        xfs_inode_t             *ip,
        xfs_dinode_t            *dip,
@@ -866,6 +877,7 @@ xfs_iflush_fork(
        char                    *cp;
        xfs_ifork_t             *ifp;
        xfs_mount_t             *mp;
+       int                     error;
        static const short      brootflag[2] =
                { XFS_ILOG_DBROOT, XFS_ILOG_ABROOT };
        static const short      dataflag[2] =
@@ -874,7 +886,7 @@ xfs_iflush_fork(
                { XFS_ILOG_DEXT, XFS_ILOG_AEXT };
 
        if (!iip)
-               return;
+               return 0;
        ifp = XFS_IFORK_PTR(ip, whichfork);
        /*
         * This can happen if we gave up in iformat in an error path,
@@ -882,12 +894,19 @@ xfs_iflush_fork(
         */
        if (!ifp) {
                ASSERT(whichfork == XFS_ATTR_FORK);
-               return;
+               return 0;
        }
        cp = XFS_DFORK_PTR(dip, whichfork);
        mp = ip->i_mount;
        switch (XFS_IFORK_FORMAT(ip, whichfork)) {
        case XFS_DINODE_FMT_LOCAL:
+               if (S_ISDIR(VFS_I(ip)->i_mode) && whichfork == XFS_DATA_FORK) {
+                       error = xfs_dir2_sf_verify(mp,
+                                       (struct xfs_dir2_sf_hdr *)ifp->if_u1.if_data,
+                                       ifp->if_bytes);
+                       if (error)
+                               return error;
+               }
                if ((iip->ili_fields & dataflag[whichfork]) &&
                    (ifp->if_bytes > 0)) {
                        ASSERT(ifp->if_u1.if_data != NULL);
@@ -940,6 +959,7 @@ xfs_iflush_fork(
                ASSERT(0);
                break;
        }
+       return 0;
 }
 
 /*
index 7fb8365..132dc59 100644
@@ -140,7 +140,7 @@ typedef struct xfs_ifork {
 struct xfs_ifork *xfs_iext_state_to_fork(struct xfs_inode *ip, int state);
 
 int            xfs_iformat_fork(struct xfs_inode *, struct xfs_dinode *);
-void           xfs_iflush_fork(struct xfs_inode *, struct xfs_dinode *,
+int            xfs_iflush_fork(struct xfs_inode *, struct xfs_dinode *,
                                struct xfs_inode_log_item *, int);
 void           xfs_idestroy_fork(struct xfs_inode *, int);
 void           xfs_idata_realloc(struct xfs_inode *, int, int);
index 003a99b..ad9396e 100644
@@ -71,22 +71,11 @@ xfs_dir2_sf_getdents(
        struct xfs_da_geometry  *geo = args->geo;
 
        ASSERT(dp->i_df.if_flags & XFS_IFINLINE);
-       /*
-        * Give up if the directory is way too short.
-        */
-       if (dp->i_d.di_size < offsetof(xfs_dir2_sf_hdr_t, parent)) {
-               ASSERT(XFS_FORCED_SHUTDOWN(dp->i_mount));
-               return -EIO;
-       }
-
        ASSERT(dp->i_df.if_bytes == dp->i_d.di_size);
        ASSERT(dp->i_df.if_u1.if_data != NULL);
 
        sfp = (xfs_dir2_sf_hdr_t *)dp->i_df.if_u1.if_data;
 
-       if (dp->i_d.di_size < xfs_dir2_sf_hdr_size(sfp->i8count))
-               return -EFSCORRUPTED;
-
        /*
         * If the block number in the offset is out of range, we're done.
         */
index 7eaf1ef..c7fe2c2 100644
@@ -3475,6 +3475,7 @@ xfs_iflush_int(
        struct xfs_inode_log_item *iip = ip->i_itemp;
        struct xfs_dinode       *dip;
        struct xfs_mount        *mp = ip->i_mount;
+       int                     error;
 
        ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
        ASSERT(xfs_isiflocked(ip));
@@ -3557,9 +3558,14 @@ xfs_iflush_int(
        if (ip->i_d.di_flushiter == DI_MAX_FLUSH)
                ip->i_d.di_flushiter = 0;
 
-       xfs_iflush_fork(ip, dip, iip, XFS_DATA_FORK);
-       if (XFS_IFORK_Q(ip))
-               xfs_iflush_fork(ip, dip, iip, XFS_ATTR_FORK);
+       error = xfs_iflush_fork(ip, dip, iip, XFS_DATA_FORK);
+       if (error)
+               return error;
+       if (XFS_IFORK_Q(ip)) {
+               error = xfs_iflush_fork(ip, dip, iip, XFS_ATTR_FORK);
+               if (error)
+                       return error;
+       }
        xfs_inobp_check(mp, bp);
 
        /*
index 673acda..9b05886 100644
@@ -287,18 +287,15 @@ static inline bool invalid_phys_cpuid(phys_cpuid_t phys_id)
 }
 
 /* Validate the processor object's proc_id */
-bool acpi_processor_validate_proc_id(int proc_id);
+bool acpi_duplicate_processor_id(int proc_id);
 
 #ifdef CONFIG_ACPI_HOTPLUG_CPU
 /* Arch dependent functions for cpu hotplug support */
 int acpi_map_cpu(acpi_handle handle, phys_cpuid_t physid, u32 acpi_id,
                 int *pcpu);
 int acpi_unmap_cpu(int cpu);
-int acpi_map_cpu2node(acpi_handle handle, int cpu, int physid);
 #endif /* CONFIG_ACPI_HOTPLUG_CPU */
 
-void acpi_set_processor_mapping(void);
-
 #ifdef CONFIG_ACPI_HOTPLUG_IOAPIC
 int acpi_get_ioapic_id(acpi_handle handle, u32 gsi_base, u64 *phys_addr);
 #endif
index c71dd8f..c41b8d9 100644
@@ -556,7 +556,7 @@ enum ccp_engine {
  * struct ccp_cmd - CCP operation request
  * @entry: list element (ccp driver use only)
  * @work: work element used for callbacks (ccp driver use only)
- * @ccp: CCP device to be run on (ccp driver use only)
+ * @ccp: CCP device to be run on
  * @ret: operation return code (ccp driver use only)
  * @flags: cmd processing flags
  * @engine: CCP operation to perform
index 30c4570..9ef518a 100644
@@ -1140,7 +1140,6 @@ static inline bool device_supports_offline(struct device *dev)
 extern void lock_device_hotplug(void);
 extern void unlock_device_hotplug(void);
 extern int lock_device_hotplug_sysfs(void);
-void assert_held_device_hotplug(void);
 extern int device_offline(struct device *dev);
 extern int device_online(struct device *dev);
 extern void set_primary_fwnode(struct device *dev, struct fwnode_handle *fwnode);
index 9ca23fc..6fdfc88 100644
@@ -20,6 +20,8 @@ struct sock_exterr_skb {
        struct sock_extended_err        ee;
        u16                             addr_offset;
        __be16                          port;
+       u8                              opt_stats:1,
+                                       unused:7;
 };
 
 #endif
index 2484b2f..933d936 100644
@@ -143,15 +143,6 @@ struct gpio_desc *devm_fwnode_get_index_gpiod_from_child(struct device *dev,
                                                struct fwnode_handle *child,
                                                enum gpiod_flags flags,
                                                const char *label);
-/* FIXME: delete this helper when users are switched over */
-static inline struct gpio_desc *devm_get_gpiod_from_child(struct device *dev,
-                         const char *con_id, struct fwnode_handle *child)
-{
-       return devm_fwnode_get_index_gpiod_from_child(dev, con_id,
-                                                     0, child,
-                                                     GPIOD_ASIS,
-                                                     "?");
-}
 
 #else /* CONFIG_GPIOLIB */
 
@@ -444,13 +435,6 @@ struct gpio_desc *devm_fwnode_get_index_gpiod_from_child(struct device *dev,
        return ERR_PTR(-ENOSYS);
 }
 
-/* FIXME: delete this when all users are switched over */
-static inline struct gpio_desc *devm_get_gpiod_from_child(struct device *dev,
-                         const char *con_id, struct fwnode_handle *child)
-{
-       return ERR_PTR(-ENOSYS);
-}
-
 #endif /* CONFIG_GPIOLIB */
 
 static inline
index 6a6de18..2e4de0d 100644
@@ -125,9 +125,16 @@ enum iommu_attr {
 };
 
 /* These are the possible reserved region types */
-#define IOMMU_RESV_DIRECT      (1 << 0)
-#define IOMMU_RESV_RESERVED    (1 << 1)
-#define IOMMU_RESV_MSI         (1 << 2)
+enum iommu_resv_type {
+       /* Memory regions which must be mapped 1:1 at all times */
+       IOMMU_RESV_DIRECT,
+       /* Arbitrary "never map this or give it to a device" address ranges */
+       IOMMU_RESV_RESERVED,
+       /* Hardware MSI region (untranslated) */
+       IOMMU_RESV_MSI,
+       /* Software-managed MSI translation window */
+       IOMMU_RESV_SW_MSI,
+};
 
 /**
  * struct iommu_resv_region - descriptor for a reserved memory region
@@ -142,7 +149,7 @@ struct iommu_resv_region {
        phys_addr_t             start;
        size_t                  length;
        int                     prot;
-       int                     type;
+       enum iommu_resv_type    type;
 };
 
 #ifdef CONFIG_IOMMU_API
@@ -288,7 +295,8 @@ extern void iommu_get_resv_regions(struct device *dev, struct list_head *list);
 extern void iommu_put_resv_regions(struct device *dev, struct list_head *list);
 extern int iommu_request_dm_for_dev(struct device *dev);
 extern struct iommu_resv_region *
-iommu_alloc_resv_region(phys_addr_t start, size_t length, int prot, int type);
+iommu_alloc_resv_region(phys_addr_t start, size_t length, int prot,
+                       enum iommu_resv_type type);
 extern int iommu_get_group_resv_regions(struct iommu_group *group,
                                        struct list_head *head);
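
With the int flags replaced by an enum, a driver registers a reserved region like this sketch (MSI_IOVA_BASE, MSI_IOVA_LENGTH and the prot flags are illustrative values of the sort an IOMMU driver would choose; the list member of iommu_resv_region is assumed from its use with iommu_get_resv_regions()):

	struct iommu_resv_region *region;

	region = iommu_alloc_resv_region(MSI_IOVA_BASE, MSI_IOVA_LENGTH,
					 IOMMU_READ | IOMMU_WRITE,
					 IOMMU_RESV_SW_MSI);
	if (!region)
		return -ENOMEM;
	list_add_tail(&region->list, head);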
 
index 1c823be..5734480 100644
@@ -6,6 +6,7 @@
 struct kmem_cache;
 struct page;
 struct vm_struct;
+struct task_struct;
 
 #ifdef CONFIG_KASAN
 
index 7e66e4f..1beb1ec 100644
@@ -476,6 +476,7 @@ enum {
 enum {
        MLX4_INTERFACE_STATE_UP         = 1 << 0,
        MLX4_INTERFACE_STATE_DELETION   = 1 << 1,
+       MLX4_INTERFACE_STATE_NOWAIT     = 1 << 2,
 };
 
 #define MSTR_SM_CHANGE_MASK (MLX4_EQ_PORT_INFO_MSTR_SM_SL_CHANGE_MASK | \
index 35d0fd7..fd0de00 100644
@@ -76,22 +76,12 @@ struct gpmc_timings;
 struct omap_nand_platform_data;
 struct omap_onenand_platform_data;
 
-#if IS_ENABLED(CONFIG_MTD_NAND_OMAP2)
-extern int gpmc_nand_init(struct omap_nand_platform_data *d,
-                         struct gpmc_timings *gpmc_t);
-#else
-static inline int gpmc_nand_init(struct omap_nand_platform_data *d,
-                                struct gpmc_timings *gpmc_t)
-{
-       return 0;
-}
-#endif
-
 #if IS_ENABLED(CONFIG_MTD_ONENAND_OMAP2)
-extern void gpmc_onenand_init(struct omap_onenand_platform_data *d);
+extern int gpmc_onenand_init(struct omap_onenand_platform_data *d);
 #else
 #define board_onenand_data     NULL
-static inline void gpmc_onenand_init(struct omap_onenand_platform_data *d)
+static inline int gpmc_onenand_init(struct omap_onenand_platform_data *d)
 {
+       return 0;
 }
 #endif
index 86b4ed7..96fb139 100644 (file)
@@ -31,31 +31,26 @@ static inline int device_reset_optional(struct device *dev)
 
 static inline int reset_control_reset(struct reset_control *rstc)
 {
-       WARN_ON(1);
        return 0;
 }
 
 static inline int reset_control_assert(struct reset_control *rstc)
 {
-       WARN_ON(1);
        return 0;
 }
 
 static inline int reset_control_deassert(struct reset_control *rstc)
 {
-       WARN_ON(1);
        return 0;
 }
 
 static inline int reset_control_status(struct reset_control *rstc)
 {
-       WARN_ON(1);
        return 0;
 }
 
 static inline void reset_control_put(struct reset_control *rstc)
 {
-       WARN_ON(1);
 }
 
 static inline int __must_check device_reset(struct device *dev)
@@ -74,14 +69,14 @@ static inline struct reset_control *__of_reset_control_get(
                                        const char *id, int index, bool shared,
                                        bool optional)
 {
-       return ERR_PTR(-ENOTSUPP);
+       return optional ? NULL : ERR_PTR(-ENOTSUPP);
 }
 
 static inline struct reset_control *__devm_reset_control_get(
                                        struct device *dev, const char *id,
                                        int index, bool shared, bool optional)
 {
-       return ERR_PTR(-ENOTSUPP);
+       return optional ? NULL : ERR_PTR(-ENOTSUPP);
 }
 
 #endif /* CONFIG_RESET_CONTROLLER */
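
The practical effect: a driver with an optional reset line no longer needs CONFIG_RESET_CONTROLLER ifdefs, because the stubbed optional lookup now yields NULL and the reset_control_*() ops treat a NULL control as a no-op. A hedged consumer sketch, assuming a platform driver with &pdev->dev in scope:

    struct reset_control *rstc;

    rstc = devm_reset_control_get_optional(&pdev->dev, NULL);
    if (IS_ERR(rstc))                 /* a real failure, e.g. -EPROBE_DEFER */
            return PTR_ERR(rstc);

    reset_control_assert(rstc);       /* no-op when rstc == NULL */
    usleep_range(10, 20);
    reset_control_deassert(rstc);
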
index 9638bfe..584f9a6 100644 (file)
@@ -48,6 +48,8 @@ struct virtio_vsock_pkt {
        struct virtio_vsock_hdr hdr;
        struct work_struct work;
        struct list_head list;
+       /* socket refcnt not held, only use for cancellation */
+       struct vsock_sock *vsk;
        void *buf;
        u32 len;
        u32 off;
@@ -56,6 +58,7 @@ struct virtio_vsock_pkt {
 
 struct virtio_vsock_pkt_info {
        u32 remote_cid, remote_port;
+       struct vsock_sock *vsk;
        struct msghdr *msg;
        u32 pkt_len;
        u16 type;
index f275896..f32ed9a 100644 (file)
@@ -100,6 +100,9 @@ struct vsock_transport {
        void (*destruct)(struct vsock_sock *);
        void (*release)(struct vsock_sock *);
 
+       /* Cancel all pending packets sent on vsock. */
+       int (*cancel_pkt)(struct vsock_sock *vsk);
+
        /* Connections. */
        int (*connect)(struct vsock_sock *);
 
index f540f9a..1960587 100644 (file)
@@ -244,7 +244,7 @@ extern s32 (*nf_ct_nat_offset)(const struct nf_conn *ct,
                               u32 seq);
 
 /* Fake conntrack entry for untracked connections */
-DECLARE_PER_CPU(struct nf_conn, nf_conntrack_untracked);
+DECLARE_PER_CPU_ALIGNED(struct nf_conn, nf_conntrack_untracked);
 static inline struct nf_conn *nf_ct_untracked_get(void)
 {
        return raw_cpu_ptr(&nf_conntrack_untracked);
index 2aa8a9d..0136028 100644 (file)
@@ -103,6 +103,35 @@ struct nft_regs {
        };
 };
 
+/* Store/load a u16 or u8 integer to/from the u32 data register.
+ *
+ * Note, when using concatenations, register allocation happens at the
+ * 32-bit level, so the store helpers must pad the remaining bytes with
+ * zeroes to avoid garbage values.
+ */
+
+static inline void nft_reg_store16(u32 *dreg, u16 val)
+{
+       *dreg = 0;
+       *(u16 *)dreg = val;
+}
+
+static inline void nft_reg_store8(u32 *dreg, u8 val)
+{
+       *dreg = 0;
+       *(u8 *)dreg = val;
+}
+
+static inline u16 nft_reg_load16(u32 *sreg)
+{
+       return *(u16 *)sreg;
+}
+
+static inline u8 nft_reg_load8(u32 *sreg)
+{
+       return *(u8 *)sreg;
+}
+
 static inline void nft_data_copy(u32 *dst, const struct nft_data *src,
                                 unsigned int len)
 {
@@ -203,7 +232,6 @@ struct nft_set_elem {
 struct nft_set;
 struct nft_set_iter {
        u8              genmask;
-       bool            flush;
        unsigned int    count;
        unsigned int    skip;
        int             err;
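
For illustration, an expression's eval step moving a 16-bit port through a register might use the new helpers like this (a sketch; the priv/regs names follow nft conventions but are invented here):

    u32 *dreg = &regs->data[priv->dreg];

    /* store: zeroes the whole 32-bit register, then writes 16 bits */
    nft_reg_store16(dreg, tcp_hdr(skb)->dest);

    /* load back: reads just the low 16 bits */
    u16 port = nft_reg_load16(dreg);
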
index d150b50..97983d1 100644 (file)
@@ -9,12 +9,13 @@ nft_set_pktinfo_ipv6(struct nft_pktinfo *pkt,
                     struct sk_buff *skb,
                     const struct nf_hook_state *state)
 {
+       unsigned int flags = IP6_FH_F_AUTH;
        int protohdr, thoff = 0;
        unsigned short frag_off;
 
        nft_set_pktinfo(pkt, skb, state);
 
-       protohdr = ipv6_find_hdr(pkt->skb, &thoff, -1, &frag_off, NULL);
+       protohdr = ipv6_find_hdr(pkt->skb, &thoff, -1, &frag_off, &flags);
        if (protohdr < 0) {
                nft_set_pktinfo_proto_unspec(pkt, skb);
                return;
@@ -32,6 +33,7 @@ __nft_set_pktinfo_ipv6_validate(struct nft_pktinfo *pkt,
                                const struct nf_hook_state *state)
 {
 #if IS_ENABLED(CONFIG_IPV6)
+       unsigned int flags = IP6_FH_F_AUTH;
        struct ipv6hdr *ip6h, _ip6h;
        unsigned int thoff = 0;
        unsigned short frag_off;
@@ -50,7 +52,7 @@ __nft_set_pktinfo_ipv6_validate(struct nft_pktinfo *pkt,
        if (pkt_len + sizeof(*ip6h) > skb->len)
                return -1;
 
-       protohdr = ipv6_find_hdr(pkt->skb, &thoff, -1, &frag_off, NULL);
+       protohdr = ipv6_find_hdr(pkt->skb, &thoff, -1, &frag_off, &flags);
        if (protohdr < 0)
                return -1;
 
index 07a0b12..592dece 100644 (file)
@@ -83,6 +83,7 @@ struct sctp_bind_addr;
 struct sctp_ulpq;
 struct sctp_ep_common;
 struct crypto_shash;
+struct sctp_stream;
 
 
 #include <net/sctp/tsnmap.h>
@@ -753,6 +754,8 @@ struct sctp_transport {
                /* Is the Path MTU update pending on this transport */
                pmtu_pending:1,
 
+               dst_pending_confirm:1,  /* need to confirm neighbour */
+
                /* Has this transport moved the ctsn since we last sacked */
                sack_generation:1;
        u32 dst_cookie;
@@ -806,8 +809,6 @@ struct sctp_transport {
 
        __u32 burst_limited;    /* Holds old cwnd when max.burst is applied */
 
-       __u32 dst_pending_confirm;      /* need to confirm neighbour */
-
        /* Destination */
        struct dst_entry *dst;
        /* Source address. */
index b54b98d..1b0f447 100644 (file)
@@ -4,7 +4,12 @@
 #include <linux/types.h>
 #include <target/target_core_base.h>
 
-#define TRANSPORT_FLAG_PASSTHROUGH             1
+#define TRANSPORT_FLAG_PASSTHROUGH             0x1
+/*
+ * ALUA commands, state checks and setup operations are handled by the
+ * backend module.
+ */
+#define TRANSPORT_FLAG_PASSTHROUGH_ALUA                0x2
 
 struct request_queue;
 struct scatterlist;
index 37c274e..4b784b6 100644 (file)
@@ -299,7 +299,7 @@ struct t10_alua_tg_pt_gp {
        struct list_head tg_pt_gp_lun_list;
        struct se_lun *tg_pt_gp_alua_lun;
        struct se_node_acl *tg_pt_gp_alua_nacl;
-       struct delayed_work tg_pt_gp_transition_work;
+       struct work_struct tg_pt_gp_transition_work;
        struct completion *tg_pt_gp_transition_complete;
 };
 
index 9b1462e..a076cf1 100644 (file)
@@ -730,9 +730,11 @@ __SYSCALL(__NR_pkey_mprotect, sys_pkey_mprotect)
 __SYSCALL(__NR_pkey_alloc,    sys_pkey_alloc)
 #define __NR_pkey_free 290
 __SYSCALL(__NR_pkey_free,     sys_pkey_free)
+#define __NR_statx 291
+__SYSCALL(__NR_statx,     sys_statx)
 
 #undef __NR_syscalls
-#define __NR_syscalls 291
+#define __NR_syscalls 292
 
 /*
  * All syscalls below here should go away really,
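
Until libc grows a wrapper, the freshly wired syscall can be exercised with syscall(2). A minimal sketch, assuming kernel uapi headers of at least this release so that __NR_statx and struct statx are visible (the path is invented; header interactions may need adjusting per libc):

    #include <fcntl.h>            /* AT_FDCWD */
    #include <stdio.h>
    #include <unistd.h>
    #include <sys/syscall.h>      /* __NR_statx with new-enough headers */
    #include <linux/stat.h>       /* struct statx, STATX_BTIME */

    int main(void)
    {
            struct statx stx;

            /* no glibc wrapper exists yet at this point; call it raw */
            long ret = syscall(__NR_statx, AT_FDCWD, "/etc/hostname",
                               0, STATX_BTIME, &stx);

            if (ret == 0 && (stx.stx_mask & STATX_BTIME))
                    printf("birth time: %lld\n",
                           (long long)stx.stx_btime.tv_sec);
            return 0;
    }
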
index 407cb55..7fb9786 100644 (file)
@@ -33,8 +33,8 @@ extern "C" {
 #define OMAP_PARAM_CHIPSET_ID  1       /* ie. 0x3430, 0x4430, etc */
 
 struct drm_omap_param {
-       uint64_t param;                 /* in */
-       uint64_t value;                 /* in (set_param), out (get_param) */
+       __u64 param;                    /* in */
+       __u64 value;                    /* in (set_param), out (get_param) */
 };
 
 #define OMAP_BO_SCANOUT                0x00000001      /* scanout capable (phys contiguous) */
@@ -53,18 +53,18 @@ struct drm_omap_param {
 #define OMAP_BO_TILED          (OMAP_BO_TILED_8 | OMAP_BO_TILED_16 | OMAP_BO_TILED_32)
 
 union omap_gem_size {
-       uint32_t bytes;         /* (for non-tiled formats) */
+       __u32 bytes;            /* (for non-tiled formats) */
        struct {
-               uint16_t width;
-               uint16_t height;
+               __u16 width;
+               __u16 height;
        } tiled;                /* (for tiled formats) */
 };
 
 struct drm_omap_gem_new {
        union omap_gem_size size;       /* in */
-       uint32_t flags;                 /* in */
-       uint32_t handle;                /* out */
-       uint32_t __pad;
+       __u32 flags;                    /* in */
+       __u32 handle;                   /* out */
+       __u32 __pad;
 };
 
 /* mask of operations: */
@@ -74,33 +74,33 @@ enum omap_gem_op {
 };
 
 struct drm_omap_gem_cpu_prep {
-       uint32_t handle;                /* buffer handle (in) */
-       uint32_t op;                    /* mask of omap_gem_op (in) */
+       __u32 handle;                   /* buffer handle (in) */
+       __u32 op;                       /* mask of omap_gem_op (in) */
 };
 
 struct drm_omap_gem_cpu_fini {
-       uint32_t handle;                /* buffer handle (in) */
-       uint32_t op;                    /* mask of omap_gem_op (in) */
+       __u32 handle;                   /* buffer handle (in) */
+       __u32 op;                       /* mask of omap_gem_op (in) */
        /* TODO maybe here we pass down info about what regions are touched
         * by sw so we can be clever about cache ops?  For now a placeholder,
         * set to zero and we just do full buffer flush..
         */
-       uint32_t nregions;
-       uint32_t __pad;
+       __u32 nregions;
+       __u32 __pad;
 };
 
 struct drm_omap_gem_info {
-       uint32_t handle;                /* buffer handle (in) */
-       uint32_t pad;
-       uint64_t offset;                /* mmap offset (out) */
+       __u32 handle;                   /* buffer handle (in) */
+       __u32 pad;
+       __u64 offset;                   /* mmap offset (out) */
        /* note: in case of tiled buffers, the user virtual size can be
         * different from the physical size (ie. how many pages are needed
         * to back the object) which is returned in DRM_IOCTL_GEM_OPEN..
         * This size here is the one that should be used if you want to
         * mmap() the buffer:
         */
-       uint32_t size;                  /* virtual size for mmap'ing (out) */
-       uint32_t __pad;
+       __u32 size;                     /* virtual size for mmap'ing (out) */
+       __u32 __pad;
 };
 
 #define DRM_OMAP_GET_PARAM             0x00
index db4c253..dcfc3a5 100644 (file)
@@ -713,33 +713,6 @@ enum btrfs_err_code {
        BTRFS_ERROR_DEV_ONLY_WRITABLE,
        BTRFS_ERROR_DEV_EXCL_RUN_IN_PROGRESS
 };
-/* An error code to error string mapping for the kernel
-*  error codes
-*/
-static inline char *btrfs_err_str(enum btrfs_err_code err_code)
-{
-       switch (err_code) {
-               case BTRFS_ERROR_DEV_RAID1_MIN_NOT_MET:
-                       return "unable to go below two devices on raid1";
-               case BTRFS_ERROR_DEV_RAID10_MIN_NOT_MET:
-                       return "unable to go below four devices on raid10";
-               case BTRFS_ERROR_DEV_RAID5_MIN_NOT_MET:
-                       return "unable to go below two devices on raid5";
-               case BTRFS_ERROR_DEV_RAID6_MIN_NOT_MET:
-                       return "unable to go below three devices on raid6";
-               case BTRFS_ERROR_DEV_TGT_REPLACE:
-                       return "unable to remove the dev_replace target dev";
-               case BTRFS_ERROR_DEV_MISSING_NOT_FOUND:
-                       return "no missing devices found to remove";
-               case BTRFS_ERROR_DEV_ONLY_WRITABLE:
-                       return "unable to remove the only writeable device";
-               case BTRFS_ERROR_DEV_EXCL_RUN_IN_PROGRESS:
-                       return "add/delete/balance/replace/resize operation "\
-                               "in progress";
-               default:
-                       return NULL;
-       }
-}
 
 #define BTRFS_IOC_SNAP_CREATE _IOW(BTRFS_IOCTL_MAGIC, 1, \
                                   struct btrfs_ioctl_vol_args)
index ef8e2a8..6b083d3 100644 (file)
@@ -46,6 +46,7 @@
 #define DECON_FRAMEFIFO_STATUS         0x0524
 #define DECON_CMU                      0x1404
 #define DECON_UPDATE                   0x1410
+#define DECON_CRFMID                   0x1414
 #define DECON_UPDATE_SCHEME            0x1438
 #define DECON_VIDCON1                  0x2000
 #define DECON_VIDCON2                  0x2004
 
 /* VIDINTCON0 */
 #define VIDINTCON0_FRAMEDONE           (1 << 17)
+#define VIDINTCON0_FRAMESEL_BP         (0 << 15)
+#define VIDINTCON0_FRAMESEL_VS         (1 << 15)
+#define VIDINTCON0_FRAMESEL_AC         (2 << 15)
+#define VIDINTCON0_FRAMESEL_FP         (3 << 15)
 #define VIDINTCON0_INTFRMEN            (1 << 12)
 #define VIDINTCON0_INTEN               (1 << 0)
 
 #define STANDALONE_UPDATE_F            (1 << 0)
 
 /* DECON_VIDCON1 */
+#define VIDCON1_LINECNT_MASK           (0x0fff << 16)
+#define VIDCON1_I80_ACTIVE             (1 << 15)
+#define VIDCON1_VSTATUS_MASK           (0x3 << 13)
+#define VIDCON1_VSTATUS_VS             (0 << 13)
+#define VIDCON1_VSTATUS_BP             (1 << 13)
+#define VIDCON1_VSTATUS_AC             (2 << 13)
+#define VIDCON1_VSTATUS_FP             (3 << 13)
 #define VIDCON1_VCLK_MASK              (0x3 << 9)
 #define VIDCON1_VCLK_RUN_VDEN_DISABLE  (0x3 << 9)
 #define VIDCON1_VCLK_HOLD              (0x0 << 9)
index afe5bab..361a69d 100644 (file)
@@ -30,18 +30,12 @@ struct bpf_htab {
                struct pcpu_freelist freelist;
                struct bpf_lru lru;
        };
-       void __percpu *extra_elems;
+       struct htab_elem *__percpu *extra_elems;
        atomic_t count; /* number of elements in this hashtable */
        u32 n_buckets;  /* number of hash buckets */
        u32 elem_size;  /* size of each element in bytes */
 };
 
-enum extra_elem_state {
-       HTAB_NOT_AN_EXTRA_ELEM = 0,
-       HTAB_EXTRA_ELEM_FREE,
-       HTAB_EXTRA_ELEM_USED
-};
-
 /* each htab element is struct htab_elem + key + value */
 struct htab_elem {
        union {
@@ -56,7 +50,6 @@ struct htab_elem {
        };
        union {
                struct rcu_head rcu;
-               enum extra_elem_state state;
                struct bpf_lru_node lru_node;
        };
        u32 hash;
@@ -77,6 +70,11 @@ static bool htab_is_percpu(const struct bpf_htab *htab)
                htab->map.map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH;
 }
 
+static bool htab_is_prealloc(const struct bpf_htab *htab)
+{
+       return !(htab->map.map_flags & BPF_F_NO_PREALLOC);
+}
+
 static inline void htab_elem_set_ptr(struct htab_elem *l, u32 key_size,
                                     void __percpu *pptr)
 {
@@ -128,17 +126,20 @@ static struct htab_elem *prealloc_lru_pop(struct bpf_htab *htab, void *key,
 
 static int prealloc_init(struct bpf_htab *htab)
 {
+       u32 num_entries = htab->map.max_entries;
        int err = -ENOMEM, i;
 
-       htab->elems = bpf_map_area_alloc(htab->elem_size *
-                                        htab->map.max_entries);
+       if (!htab_is_percpu(htab) && !htab_is_lru(htab))
+               num_entries += num_possible_cpus();
+
+       htab->elems = bpf_map_area_alloc(htab->elem_size * num_entries);
        if (!htab->elems)
                return -ENOMEM;
 
        if (!htab_is_percpu(htab))
                goto skip_percpu_elems;
 
-       for (i = 0; i < htab->map.max_entries; i++) {
+       for (i = 0; i < num_entries; i++) {
                u32 size = round_up(htab->map.value_size, 8);
                void __percpu *pptr;
 
@@ -166,11 +167,11 @@ skip_percpu_elems:
        if (htab_is_lru(htab))
                bpf_lru_populate(&htab->lru, htab->elems,
                                 offsetof(struct htab_elem, lru_node),
-                                htab->elem_size, htab->map.max_entries);
+                                htab->elem_size, num_entries);
        else
                pcpu_freelist_populate(&htab->freelist,
                                       htab->elems + offsetof(struct htab_elem, fnode),
-                                      htab->elem_size, htab->map.max_entries);
+                                      htab->elem_size, num_entries);
 
        return 0;
 
@@ -191,16 +192,22 @@ static void prealloc_destroy(struct bpf_htab *htab)
 
 static int alloc_extra_elems(struct bpf_htab *htab)
 {
-       void __percpu *pptr;
+       struct htab_elem *__percpu *pptr, *l_new;
+       struct pcpu_freelist_node *l;
        int cpu;
 
-       pptr = __alloc_percpu_gfp(htab->elem_size, 8, GFP_USER | __GFP_NOWARN);
+       pptr = __alloc_percpu_gfp(sizeof(struct htab_elem *), 8,
+                                 GFP_USER | __GFP_NOWARN);
        if (!pptr)
                return -ENOMEM;
 
        for_each_possible_cpu(cpu) {
-               ((struct htab_elem *)per_cpu_ptr(pptr, cpu))->state =
-                       HTAB_EXTRA_ELEM_FREE;
+               l = pcpu_freelist_pop(&htab->freelist);
+               /* pop will succeed, since prealloc_init()
+                * preallocated extra num_possible_cpus elements
+                */
+               l_new = container_of(l, struct htab_elem, fnode);
+               *per_cpu_ptr(pptr, cpu) = l_new;
        }
        htab->extra_elems = pptr;
        return 0;
@@ -342,25 +349,25 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
                raw_spin_lock_init(&htab->buckets[i].lock);
        }
 
-       if (!percpu && !lru) {
-               /* lru itself can remove the least used element, so
-                * there is no need for an extra elem during map_update.
-                */
-               err = alloc_extra_elems(htab);
-               if (err)
-                       goto free_buckets;
-       }
-
        if (prealloc) {
                err = prealloc_init(htab);
                if (err)
-                       goto free_extra_elems;
+                       goto free_buckets;
+
+               if (!percpu && !lru) {
+                       /* lru itself can remove the least used element, so
+                        * there is no need for an extra elem during map_update.
+                        */
+                       err = alloc_extra_elems(htab);
+                       if (err)
+                               goto free_prealloc;
+               }
        }
 
        return &htab->map;
 
-free_extra_elems:
-       free_percpu(htab->extra_elems);
+free_prealloc:
+       prealloc_destroy(htab);
 free_buckets:
        bpf_map_area_free(htab->buckets);
 free_htab:
@@ -575,12 +582,7 @@ static void htab_elem_free_rcu(struct rcu_head *head)
 
 static void free_htab_elem(struct bpf_htab *htab, struct htab_elem *l)
 {
-       if (l->state == HTAB_EXTRA_ELEM_USED) {
-               l->state = HTAB_EXTRA_ELEM_FREE;
-               return;
-       }
-
-       if (!(htab->map.map_flags & BPF_F_NO_PREALLOC)) {
+       if (htab_is_prealloc(htab)) {
                pcpu_freelist_push(&htab->freelist, &l->fnode);
        } else {
                atomic_dec(&htab->count);
@@ -610,47 +612,43 @@ static void pcpu_copy_value(struct bpf_htab *htab, void __percpu *pptr,
 static struct htab_elem *alloc_htab_elem(struct bpf_htab *htab, void *key,
                                         void *value, u32 key_size, u32 hash,
                                         bool percpu, bool onallcpus,
-                                        bool old_elem_exists)
+                                        struct htab_elem *old_elem)
 {
        u32 size = htab->map.value_size;
-       bool prealloc = !(htab->map.map_flags & BPF_F_NO_PREALLOC);
-       struct htab_elem *l_new;
+       bool prealloc = htab_is_prealloc(htab);
+       struct htab_elem *l_new, **pl_new;
        void __percpu *pptr;
-       int err = 0;
 
        if (prealloc) {
-               struct pcpu_freelist_node *l;
+               if (old_elem) {
+                       /* if we're updating the existing element,
+                        * use per-cpu extra elems to avoid freelist_pop/push
+                        */
+                       pl_new = this_cpu_ptr(htab->extra_elems);
+                       l_new = *pl_new;
+                       *pl_new = old_elem;
+               } else {
+                       struct pcpu_freelist_node *l;
 
-               l = pcpu_freelist_pop(&htab->freelist);
-               if (!l)
-                       err = -E2BIG;
-               else
+                       l = pcpu_freelist_pop(&htab->freelist);
+                       if (!l)
+                               return ERR_PTR(-E2BIG);
                        l_new = container_of(l, struct htab_elem, fnode);
-       } else {
-               if (atomic_inc_return(&htab->count) > htab->map.max_entries) {
-                       atomic_dec(&htab->count);
-                       err = -E2BIG;
-               } else {
-                       l_new = kmalloc(htab->elem_size,
-                                       GFP_ATOMIC | __GFP_NOWARN);
-                       if (!l_new)
-                               return ERR_PTR(-ENOMEM);
                }
-       }
-
-       if (err) {
-               if (!old_elem_exists)
-                       return ERR_PTR(err);
-
-               /* if we're updating the existing element and the hash table
-                * is full, use per-cpu extra elems
-                */
-               l_new = this_cpu_ptr(htab->extra_elems);
-               if (l_new->state != HTAB_EXTRA_ELEM_FREE)
-                       return ERR_PTR(-E2BIG);
-               l_new->state = HTAB_EXTRA_ELEM_USED;
        } else {
-               l_new->state = HTAB_NOT_AN_EXTRA_ELEM;
+               if (atomic_inc_return(&htab->count) > htab->map.max_entries)
+                       if (!old_elem) {
+                               /* when map is full and update() is replacing
+                                * old element, it's ok to allocate, since
+                                * old element will be freed immediately.
+                                * Otherwise return an error
+                                */
+                               atomic_dec(&htab->count);
+                               return ERR_PTR(-E2BIG);
+                       }
+               l_new = kmalloc(htab->elem_size, GFP_ATOMIC | __GFP_NOWARN);
+               if (!l_new)
+                       return ERR_PTR(-ENOMEM);
        }
 
        memcpy(l_new->key, key, key_size);
@@ -731,7 +729,7 @@ static int htab_map_update_elem(struct bpf_map *map, void *key, void *value,
                goto err;
 
        l_new = alloc_htab_elem(htab, key, value, key_size, hash, false, false,
-                               !!l_old);
+                               l_old);
        if (IS_ERR(l_new)) {
                /* all pre-allocated elements are in use or memory exhausted */
                ret = PTR_ERR(l_new);
@@ -744,7 +742,8 @@ static int htab_map_update_elem(struct bpf_map *map, void *key, void *value,
        hlist_nulls_add_head_rcu(&l_new->hash_node, head);
        if (l_old) {
                hlist_nulls_del_rcu(&l_old->hash_node);
-               free_htab_elem(htab, l_old);
+               if (!htab_is_prealloc(htab))
+                       free_htab_elem(htab, l_old);
        }
        ret = 0;
 err:
@@ -856,7 +855,7 @@ static int __htab_percpu_map_update_elem(struct bpf_map *map, void *key,
                                value, onallcpus);
        } else {
                l_new = alloc_htab_elem(htab, key, value, key_size,
-                                       hash, true, onallcpus, false);
+                                       hash, true, onallcpus, NULL);
                if (IS_ERR(l_new)) {
                        ret = PTR_ERR(l_new);
                        goto err;
@@ -1024,8 +1023,7 @@ static void delete_all_elements(struct bpf_htab *htab)
 
                hlist_nulls_for_each_entry_safe(l, n, head, hash_node) {
                        hlist_nulls_del_rcu(&l->hash_node);
-                       if (l->state != HTAB_EXTRA_ELEM_USED)
-                               htab_elem_free(htab, l);
+                       htab_elem_free(htab, l);
                }
        }
 }
@@ -1045,7 +1043,7 @@ static void htab_map_free(struct bpf_map *map)
         * not have executed. Wait for them.
         */
        rcu_barrier();
-       if (htab->map.map_flags & BPF_F_NO_PREALLOC)
+       if (!htab_is_prealloc(htab))
                delete_all_elements(htab);
        else
                prealloc_destroy(htab);
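
Two invariants make the new extra_elems scheme work. First, prealloc_init() now sizes the pool as max_entries + num_possible_cpus() for plain hash maps, so the pcpu_freelist_pop() in alloc_extra_elems() cannot fail: with invented numbers, max_entries = 1024 on an 8-CPU machine yields 1032 preallocated elements. Second, replacing an existing key becomes a pointer swap with the current CPU's spare, executed under the bucket lock. A sketch of the swap:

    /* the spare becomes the new element and the displaced element
     * becomes the next spare, so the freelist is never touched on
     * an in-place update
     */
    struct htab_elem **pl_new = this_cpu_ptr(htab->extra_elems);
    struct htab_elem *l_new = *pl_new;

    *pl_new = old_elem;
    /* l_new is then filled in and linked in place of old_elem */
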
index f7c0632..37b223e 100644 (file)
@@ -1335,26 +1335,21 @@ static int cpuhp_store_callbacks(enum cpuhp_state state, const char *name,
        struct cpuhp_step *sp;
        int ret = 0;
 
-       mutex_lock(&cpuhp_state_mutex);
-
        if (state == CPUHP_AP_ONLINE_DYN || state == CPUHP_BP_PREPARE_DYN) {
                ret = cpuhp_reserve_state(state);
                if (ret < 0)
-                       goto out;
+                       return ret;
                state = ret;
        }
        sp = cpuhp_get_step(state);
-       if (name && sp->name) {
-               ret = -EBUSY;
-               goto out;
-       }
+       if (name && sp->name)
+               return -EBUSY;
+
        sp->startup.single = startup;
        sp->teardown.single = teardown;
        sp->name = name;
        sp->multi_instance = multi_instance;
        INIT_HLIST_HEAD(&sp->list);
-out:
-       mutex_unlock(&cpuhp_state_mutex);
        return ret;
 }
 
@@ -1428,6 +1423,7 @@ int __cpuhp_state_add_instance(enum cpuhp_state state, struct hlist_node *node,
                return -EINVAL;
 
        get_online_cpus();
+       mutex_lock(&cpuhp_state_mutex);
 
        if (!invoke || !sp->startup.multi)
                goto add_node;
@@ -1447,16 +1443,14 @@ int __cpuhp_state_add_instance(enum cpuhp_state state, struct hlist_node *node,
                if (ret) {
                        if (sp->teardown.multi)
                                cpuhp_rollback_install(cpu, state, node);
-                       goto err;
+                       goto unlock;
                }
        }
 add_node:
        ret = 0;
-       mutex_lock(&cpuhp_state_mutex);
        hlist_add_head(node, &sp->list);
+unlock:
        mutex_unlock(&cpuhp_state_mutex);
-
-err:
        put_online_cpus();
        return ret;
 }
@@ -1491,6 +1485,7 @@ int __cpuhp_setup_state(enum cpuhp_state state,
                return -EINVAL;
 
        get_online_cpus();
+       mutex_lock(&cpuhp_state_mutex);
 
        ret = cpuhp_store_callbacks(state, name, startup, teardown,
                                    multi_instance);
@@ -1524,6 +1519,7 @@ int __cpuhp_setup_state(enum cpuhp_state state,
                }
        }
 out:
+       mutex_unlock(&cpuhp_state_mutex);
        put_online_cpus();
        /*
         * If the requested state is CPUHP_AP_ONLINE_DYN, return the
@@ -1547,6 +1543,8 @@ int __cpuhp_state_remove_instance(enum cpuhp_state state,
                return -EINVAL;
 
        get_online_cpus();
+       mutex_lock(&cpuhp_state_mutex);
+
        if (!invoke || !cpuhp_get_teardown_cb(state))
                goto remove;
        /*
@@ -1563,7 +1561,6 @@ int __cpuhp_state_remove_instance(enum cpuhp_state state,
        }
 
 remove:
-       mutex_lock(&cpuhp_state_mutex);
        hlist_del(node);
        mutex_unlock(&cpuhp_state_mutex);
        put_online_cpus();
@@ -1571,6 +1568,7 @@ remove:
        return 0;
 }
 EXPORT_SYMBOL_GPL(__cpuhp_state_remove_instance);
+
 /**
  * __cpuhp_remove_state - Remove the callbacks for a hotplug machine state
  * @state:     The state to remove
@@ -1589,6 +1587,7 @@ void __cpuhp_remove_state(enum cpuhp_state state, bool invoke)
 
        get_online_cpus();
 
+       mutex_lock(&cpuhp_state_mutex);
        if (sp->multi_instance) {
                WARN(!hlist_empty(&sp->list),
                     "Error: Removing state %d which has instances left.\n",
@@ -1613,6 +1612,7 @@ void __cpuhp_remove_state(enum cpuhp_state state, bool invoke)
        }
 remove:
        cpuhp_store_callbacks(state, NULL, NULL, NULL, false);
+       mutex_unlock(&cpuhp_state_mutex);
        put_online_cpus();
 }
 EXPORT_SYMBOL(__cpuhp_remove_state);
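
After this rework every entry point follows one lock order, a sketch of which is below: take the CPU read lock first, then the state mutex, so callback storage and the startup/teardown invocations share a single critical section instead of dropping the mutex between them.

    get_online_cpus();
    mutex_lock(&cpuhp_state_mutex);
    /* ... reserve the state, store callbacks, run startup on each CPU ... */
    mutex_unlock(&cpuhp_state_mutex);
    put_online_cpus();
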
index a17ed56..ff01cba 100644 (file)
@@ -4256,7 +4256,7 @@ int perf_event_release_kernel(struct perf_event *event)
 
        raw_spin_lock_irq(&ctx->lock);
        /*
-        * Mark this even as STATE_DEAD, there is no external reference to it
+        * Mark this event as STATE_DEAD, there is no external reference to it
         * anymore.
         *
         * Anybody acquiring event->child_mutex after the below loop _must_
@@ -10417,21 +10417,22 @@ void perf_event_free_task(struct task_struct *task)
                        continue;
 
                mutex_lock(&ctx->mutex);
-again:
-               list_for_each_entry_safe(event, tmp, &ctx->pinned_groups,
-                               group_entry)
-                       perf_free_event(event, ctx);
+               raw_spin_lock_irq(&ctx->lock);
+               /*
+                * Destroy the task <-> ctx relation and mark the context dead.
+                *
+                * This is important because even though the task hasn't been
+                * exposed yet the context has been (through child_list).
+                */
+               RCU_INIT_POINTER(task->perf_event_ctxp[ctxn], NULL);
+               WRITE_ONCE(ctx->task, TASK_TOMBSTONE);
+               put_task_struct(task); /* cannot be last */
+               raw_spin_unlock_irq(&ctx->lock);
 
-               list_for_each_entry_safe(event, tmp, &ctx->flexible_groups,
-                               group_entry)
+               list_for_each_entry_safe(event, tmp, &ctx->event_list, event_entry)
                        perf_free_event(event, ctx);
 
-               if (!list_empty(&ctx->pinned_groups) ||
-                               !list_empty(&ctx->flexible_groups))
-                       goto again;
-
                mutex_unlock(&ctx->mutex);
-
                put_ctx(ctx);
        }
 }
@@ -10469,7 +10470,12 @@ const struct perf_event_attr *perf_event_attrs(struct perf_event *event)
 }
 
 /*
- * inherit a event from parent task to child task:
+ * Inherit an event from parent task to child task.
+ *
+ * Returns:
+ *  - valid pointer on success
+ *  - NULL for orphaned events
+ *  - IS_ERR() on error
  */
 static struct perf_event *
 inherit_event(struct perf_event *parent_event,
@@ -10563,6 +10569,16 @@ inherit_event(struct perf_event *parent_event,
        return child_event;
 }
 
+/*
+ * Inherits an event group.
+ *
+ * This will quietly suppress orphaned events; !inherit_event() is not an error.
+ * This matches perf_event_release_kernel() removing all child events.
+ *
+ * Returns:
+ *  - 0 on success
+ *  - <0 on error
+ */
 static int inherit_group(struct perf_event *parent_event,
              struct task_struct *parent,
              struct perf_event_context *parent_ctx,
@@ -10577,6 +10593,11 @@ static int inherit_group(struct perf_event *parent_event,
                                 child, NULL, child_ctx);
        if (IS_ERR(leader))
                return PTR_ERR(leader);
+       /*
+        * @leader can be NULL here because of is_orphaned_event(). In this
+        * case inherit_event() will create individual events, similar to what
+        * perf_group_detach() would do anyway.
+        */
        list_for_each_entry(sub, &parent_event->sibling_list, group_entry) {
                child_ctr = inherit_event(sub, parent, parent_ctx,
                                            child, leader, child_ctx);
@@ -10586,6 +10607,17 @@ static int inherit_group(struct perf_event *parent_event,
        return 0;
 }
 
+/*
+ * Creates the child task context and tries to inherit the event-group.
+ *
+ * Clears @inherited_all on !attr.inherited or error. Note that we'll leave
+ * inherited_all set when we 'fail' to inherit an orphaned event; this is
+ * consistent with perf_event_release_kernel() removing all child events.
+ *
+ * Returns:
+ *  - 0 on success
+ *  - <0 on error
+ */
 static int
 inherit_task_group(struct perf_event *event, struct task_struct *parent,
                   struct perf_event_context *parent_ctx,
@@ -10608,7 +10640,6 @@ inherit_task_group(struct perf_event *event, struct task_struct *parent,
                 * First allocate and initialize a context for the
                 * child.
                 */
-
                child_ctx = alloc_perf_context(parent_ctx->pmu, child);
                if (!child_ctx)
                        return -ENOMEM;
@@ -10670,7 +10701,7 @@ static int perf_event_init_context(struct task_struct *child, int ctxn)
                ret = inherit_task_group(event, parent, parent_ctx,
                                         child, ctxn, &inherited_all);
                if (ret)
-                       break;
+                       goto out_unlock;
        }
 
        /*
@@ -10686,7 +10717,7 @@ static int perf_event_init_context(struct task_struct *child, int ctxn)
                ret = inherit_task_group(event, parent, parent_ctx,
                                         child, ctxn, &inherited_all);
                if (ret)
-                       break;
+                       goto out_unlock;
        }
 
        raw_spin_lock_irqsave(&parent_ctx->lock, flags);
@@ -10714,6 +10745,7 @@ static int perf_event_init_context(struct task_struct *child, int ctxn)
        }
 
        raw_spin_unlock_irqrestore(&parent_ctx->lock, flags);
+out_unlock:
        mutex_unlock(&parent_ctx->mutex);
 
        perf_unpin_context(parent_ctx);
index 229a744..45858ec 100644 (file)
@@ -2815,7 +2815,6 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
 {
        struct hrtimer_sleeper timeout, *to = NULL;
        struct rt_mutex_waiter rt_waiter;
-       struct rt_mutex *pi_mutex = NULL;
        struct futex_hash_bucket *hb;
        union futex_key key2 = FUTEX_KEY_INIT;
        struct futex_q q = futex_q_init;
@@ -2899,6 +2898,8 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
                if (q.pi_state && (q.pi_state->owner != current)) {
                        spin_lock(q.lock_ptr);
                        ret = fixup_pi_state_owner(uaddr2, &q, current);
+                       if (ret && rt_mutex_owner(&q.pi_state->pi_mutex) == current)
+                               rt_mutex_unlock(&q.pi_state->pi_mutex);
                        /*
                         * Drop the reference to the pi state which
                         * the requeue_pi() code acquired for us.
@@ -2907,6 +2908,8 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
                        spin_unlock(q.lock_ptr);
                }
        } else {
+               struct rt_mutex *pi_mutex;
+
                /*
                 * We have been woken up by futex_unlock_pi(), a timeout, or a
                 * signal.  futex_unlock_pi() will not destroy the lock_ptr nor
@@ -2930,18 +2933,19 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
                if (res)
                        ret = (res < 0) ? res : 0;
 
+               /*
+                * If fixup_pi_state_owner() faulted and was unable to handle
+                * the fault, unlock the rt_mutex and return the fault to
+                * userspace.
+                */
+               if (ret && rt_mutex_owner(pi_mutex) == current)
+                       rt_mutex_unlock(pi_mutex);
+
                /* Unqueue and drop the lock. */
                unqueue_me_pi(&q);
        }
 
-       /*
-        * If fixup_pi_state_owner() faulted and was unable to handle the
-        * fault, unlock the rt_mutex and return the fault to userspace.
-        */
-       if (ret == -EFAULT) {
-               if (pi_mutex && rt_mutex_owner(pi_mutex) == current)
-                       rt_mutex_unlock(pi_mutex);
-       } else if (ret == -EINTR) {
+       if (ret == -EINTR) {
                /*
                 * We've already been requeued, but cannot restart by calling
                 * futex_lock_pi() directly. We could restart this syscall, but
index 7bc24d4..c65f798 100644 (file)
@@ -213,10 +213,9 @@ int __sched __down_write_common(struct rw_semaphore *sem, int state)
                 */
                if (sem->count == 0)
                        break;
-               if (signal_pending_state(state, current)) {
-                       ret = -EINTR;
-                       goto out;
-               }
+               if (signal_pending_state(state, current))
+                       goto out_nolock;
+
                set_current_state(state);
                raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
                schedule();
@@ -224,12 +223,19 @@ int __sched __down_write_common(struct rw_semaphore *sem, int state)
        }
        /* got the lock */
        sem->count = -1;
-out:
        list_del(&waiter.list);
 
        raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
 
        return ret;
+
+out_nolock:
+       list_del(&waiter.list);
+       if (!list_empty(&sem->wait_list))
+               __rwsem_do_wake(sem, 1);
+       raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
+
+       return -EINTR;
 }
 
 void __sched __down_write(struct rw_semaphore *sem)
index 0612323..07e85e5 100644 (file)
@@ -247,11 +247,9 @@ static void devm_memremap_pages_release(struct device *dev, void *data)
        align_start = res->start & ~(SECTION_SIZE - 1);
        align_size = ALIGN(resource_size(res), SECTION_SIZE);
 
-       lock_device_hotplug();
        mem_hotplug_begin();
        arch_remove_memory(align_start, align_size);
        mem_hotplug_done();
-       unlock_device_hotplug();
 
        untrack_pfn(NULL, PHYS_PFN(align_start), align_size);
        pgmap_radix_release(res);
@@ -364,11 +362,9 @@ void *devm_memremap_pages(struct device *dev, struct resource *res,
        if (error)
                goto err_pfn_remap;
 
-       lock_device_hotplug();
        mem_hotplug_begin();
        error = arch_add_memory(nid, align_start, align_size, true);
        mem_hotplug_done();
-       unlock_device_hotplug();
        if (error)
                goto err_add_memory;
 
index cd7cd48..54c5775 100644 (file)
@@ -584,20 +584,14 @@ static int sugov_start(struct cpufreq_policy *policy)
        for_each_cpu(cpu, policy->cpus) {
                struct sugov_cpu *sg_cpu = &per_cpu(sugov_cpu, cpu);
 
+               memset(sg_cpu, 0, sizeof(*sg_cpu));
                sg_cpu->sg_policy = sg_policy;
-               if (policy_is_shared(policy)) {
-                       sg_cpu->util = 0;
-                       sg_cpu->max = 0;
-                       sg_cpu->flags = SCHED_CPUFREQ_RT;
-                       sg_cpu->last_update = 0;
-                       sg_cpu->iowait_boost = 0;
-                       sg_cpu->iowait_boost_max = policy->cpuinfo.max_freq;
-                       cpufreq_add_update_util_hook(cpu, &sg_cpu->update_util,
-                                                    sugov_update_shared);
-               } else {
-                       cpufreq_add_update_util_hook(cpu, &sg_cpu->update_util,
-                                                    sugov_update_single);
-               }
+               sg_cpu->flags = SCHED_CPUFREQ_RT;
+               sg_cpu->iowait_boost_max = policy->cpuinfo.max_freq;
+               cpufreq_add_update_util_hook(cpu, &sg_cpu->update_util,
+                                            policy_is_shared(policy) ?
+                                                       sugov_update_shared :
+                                                       sugov_update_single);
        }
        return 0;
 }
index 99b2c33..a2ce590 100644 (file)
@@ -445,13 +445,13 @@ static void replenish_dl_entity(struct sched_dl_entity *dl_se,
  *
  * This function returns true if:
  *
- *   runtime / (deadline - t) > dl_runtime / dl_period ,
+ *   runtime / (deadline - t) > dl_runtime / dl_deadline ,
  *
  * IOW we can't recycle current parameters.
  *
- * Notice that the bandwidth check is done against the period. For
+ * Notice that the bandwidth check is done against the deadline. For
  * task with deadline equal to period this is the same of using
- * dl_deadline instead of dl_period in the equation above.
+ * dl_period instead of dl_deadline in the equation above.
  */
 static bool dl_entity_overflow(struct sched_dl_entity *dl_se,
                               struct sched_dl_entity *pi_se, u64 t)
@@ -476,7 +476,7 @@ static bool dl_entity_overflow(struct sched_dl_entity *dl_se,
         * of anything below microseconds resolution is actually fiction
         * (but still we want to give the user that illusion >;).
         */
-       left = (pi_se->dl_period >> DL_SCALE) * (dl_se->runtime >> DL_SCALE);
+       left = (pi_se->dl_deadline >> DL_SCALE) * (dl_se->runtime >> DL_SCALE);
        right = ((dl_se->deadline - t) >> DL_SCALE) *
                (pi_se->dl_runtime >> DL_SCALE);
 
@@ -505,10 +505,15 @@ static void update_dl_entity(struct sched_dl_entity *dl_se,
        }
 }
 
+static inline u64 dl_next_period(struct sched_dl_entity *dl_se)
+{
+       return dl_se->deadline - dl_se->dl_deadline + dl_se->dl_period;
+}
+
 /*
  * If the entity depleted all its runtime, and if we want it to sleep
  * while waiting for some new execution time to become available, we
- * set the bandwidth enforcement timer to the replenishment instant
+ * set the bandwidth replenishment timer to the replenishment instant
  * and try to activate it.
  *
  * Notice that it is important for the caller to know if the timer
@@ -530,7 +535,7 @@ static int start_dl_timer(struct task_struct *p)
         * that it is actually coming from rq->clock and not from
         * hrtimer's time base reading.
         */
-       act = ns_to_ktime(dl_se->deadline);
+       act = ns_to_ktime(dl_next_period(dl_se));
        now = hrtimer_cb_get_time(timer);
        delta = ktime_to_ns(now) - rq_clock(rq);
        act = ktime_add_ns(act, delta);
@@ -638,6 +643,7 @@ static enum hrtimer_restart dl_task_timer(struct hrtimer *timer)
                lockdep_unpin_lock(&rq->lock, rf.cookie);
                rq = dl_task_offline_migration(rq, p);
                rf.cookie = lockdep_pin_lock(&rq->lock);
+               update_rq_clock(rq);
 
                /*
                 * Now that the task has been migrated to the new RQ and we
@@ -689,6 +695,37 @@ void init_dl_task_timer(struct sched_dl_entity *dl_se)
        timer->function = dl_task_timer;
 }
 
+/*
+ * During the activation, CBS checks if it can reuse the current task's
+ * runtime and period. If the deadline of the task is in the past, CBS
+ * cannot use the runtime, and so it replenishes the task. This rule
+ * works fine for implicit deadline tasks (deadline == period), and the
+ * CBS was designed for implicit deadline tasks. However, a task with
+ * constrained deadline (deadline < period) might be awakened after the
+ * deadline, but before the next period. In this case, replenishing the
+ * task would allow it to run for runtime / deadline. As in this case
+ * deadline < period, CBS enables a task to run for more than the
+ * runtime / period. In a very loaded system, this can cause a domino
+ * effect, making other tasks miss their deadlines.
+ *
+ * To avoid this problem, in the activation of a constrained deadline
+ * task after the deadline but before the next period, throttle the
+ * task and set the replenishing timer to the beginning of the next period,
+ * unless it is boosted.
+ */
+static inline void dl_check_constrained_dl(struct sched_dl_entity *dl_se)
+{
+       struct task_struct *p = dl_task_of(dl_se);
+       struct rq *rq = rq_of_dl_rq(dl_rq_of_se(dl_se));
+
+       if (dl_time_before(dl_se->deadline, rq_clock(rq)) &&
+           dl_time_before(rq_clock(rq), dl_next_period(dl_se))) {
+               if (unlikely(dl_se->dl_boosted || !start_dl_timer(p)))
+                       return;
+               dl_se->dl_throttled = 1;
+       }
+}
+
 static
 int dl_runtime_exceeded(struct sched_dl_entity *dl_se)
 {
@@ -922,6 +959,11 @@ static void dequeue_dl_entity(struct sched_dl_entity *dl_se)
        __dequeue_dl_entity(dl_se);
 }
 
+static inline bool dl_is_constrained(struct sched_dl_entity *dl_se)
+{
+       return dl_se->dl_deadline < dl_se->dl_period;
+}
+
 static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags)
 {
        struct task_struct *pi_task = rt_mutex_get_top_task(p);
@@ -947,6 +989,15 @@ static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags)
                return;
        }
 
+       /*
+        * Check if a constrained deadline task was activated
+        * after the deadline but before the next period.
+        * If that is the case, the task will be throttled and
+        * the replenishment timer will be set to the next period.
+        */
+       if (!p->dl.dl_throttled && dl_is_constrained(&p->dl))
+               dl_check_constrained_dl(&p->dl);
+
        /*
         * If p is throttled, we do nothing. In fact, if it exhausted
         * its budget it needs a replenishment and, since it now is on
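
To see the throttling rule with concrete (invented) numbers: take a constrained task with dl_runtime = 5ms, dl_deadline = 10ms, dl_period = 20ms whose last absolute deadline fell at t = 30ms. If it wakes at t = 35ms, past the deadline but before the period boundary, replenishing would grant 5ms of budget over the next 10ms window, well above the reserved 5ms-per-20ms bandwidth. Instead the task is throttled and its timer is armed at

    dl_next_period = deadline - dl_deadline + dl_period
                   = 30ms - 10ms + 20ms
                   = 40ms

so the replenishment happens at the start of the next period, as the comment above describes.
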
index 7296b73..f15fb2b 100644 (file)
@@ -169,7 +169,7 @@ static inline int calc_load_write_idx(void)
         * If the folding window started, make sure we start writing in the
         * next idle-delta.
         */
-       if (!time_before(jiffies, calc_load_update))
+       if (!time_before(jiffies, READ_ONCE(calc_load_update)))
                idx++;
 
        return idx & 1;
@@ -202,8 +202,9 @@ void calc_load_exit_idle(void)
        struct rq *this_rq = this_rq();
 
        /*
-        * If we're still before the sample window, we're done.
+        * If we're still before the pending sample window, we're done.
         */
+       this_rq->calc_load_update = READ_ONCE(calc_load_update);
        if (time_before(jiffies, this_rq->calc_load_update))
                return;
 
@@ -212,7 +213,6 @@ void calc_load_exit_idle(void)
         * accounted through the nohz accounting, so skip the entire deal and
         * sync up for the next window.
         */
-       this_rq->calc_load_update = calc_load_update;
        if (time_before(jiffies, this_rq->calc_load_update + 10))
                this_rq->calc_load_update += LOAD_FREQ;
 }
@@ -308,13 +308,15 @@ calc_load_n(unsigned long load, unsigned long exp,
  */
 static void calc_global_nohz(void)
 {
+       unsigned long sample_window;
        long delta, active, n;
 
-       if (!time_before(jiffies, calc_load_update + 10)) {
+       sample_window = READ_ONCE(calc_load_update);
+       if (!time_before(jiffies, sample_window + 10)) {
                /*
                 * Catch-up, fold however many we are behind still
                 */
-               delta = jiffies - calc_load_update - 10;
+               delta = jiffies - sample_window - 10;
                n = 1 + (delta / LOAD_FREQ);
 
                active = atomic_long_read(&calc_load_tasks);
@@ -324,7 +326,7 @@ static void calc_global_nohz(void)
                avenrun[1] = calc_load_n(avenrun[1], EXP_5, active, n);
                avenrun[2] = calc_load_n(avenrun[2], EXP_15, active, n);
 
-               calc_load_update += n * LOAD_FREQ;
+               WRITE_ONCE(calc_load_update, sample_window + n * LOAD_FREQ);
        }
 
        /*
@@ -352,9 +354,11 @@ static inline void calc_global_nohz(void) { }
  */
 void calc_global_load(unsigned long ticks)
 {
+       unsigned long sample_window;
        long active, delta;
 
-       if (time_before(jiffies, calc_load_update + 10))
+       sample_window = READ_ONCE(calc_load_update);
+       if (time_before(jiffies, sample_window + 10))
                return;
 
        /*
@@ -371,7 +375,7 @@ void calc_global_load(unsigned long ticks)
        avenrun[1] = calc_load(avenrun[1], EXP_5, active);
        avenrun[2] = calc_load(avenrun[2], EXP_15, active);
 
-       calc_load_update += LOAD_FREQ;
+       WRITE_ONCE(calc_load_update, sample_window + LOAD_FREQ);
 
        /*
         * In case we idled for multiple LOAD_FREQ intervals, catch up in bulk.
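
The generic form of this fix is snapshot-then-use: read the shared word exactly once into a local, do every comparison against that local, and publish updates with WRITE_ONCE() so concurrent readers never observe a torn or refetched value. A sketch of the pattern as it appears here:

    unsigned long sample_window = READ_ONCE(calc_load_update);

    if (!time_before(jiffies, sample_window + 10)) {
            /* ... fold samples against the one snapshot ... */
            WRITE_ONCE(calc_load_update, sample_window + LOAD_FREQ);
    }
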
index 295479b..6fa7208 100644 (file)
@@ -125,9 +125,12 @@ void put_online_mems(void)
 
 }
 
+/* Serializes write accesses to mem_hotplug.active_writer. */
+static DEFINE_MUTEX(memory_add_remove_lock);
+
 void mem_hotplug_begin(void)
 {
-       assert_held_device_hotplug();
+       mutex_lock(&memory_add_remove_lock);
 
        mem_hotplug.active_writer = current;
 
@@ -147,6 +150,7 @@ void mem_hotplug_done(void)
        mem_hotplug.active_writer = NULL;
        mutex_unlock(&mem_hotplug.lock);
        memhp_lock_release();
+       mutex_unlock(&memory_add_remove_lock);
 }
 
 /* add this memory to iomem resource */
index 9b5bc86..b1ccb58 100644 (file)
@@ -267,8 +267,6 @@ int free_swap_slot(swp_entry_t entry)
 {
        struct swap_slots_cache *cache;
 
-       BUG_ON(!swap_slot_cache_initialized);
-
        cache = &get_cpu_var(swp_slots);
        if (use_swap_slot_cache && cache->slots_ret) {
                spin_lock_irq(&cache->free_lock);
index 0dd8022..0b05762 100644 (file)
@@ -1683,7 +1683,7 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
 
                if (fatal_signal_pending(current)) {
                        area->nr_pages = i;
-                       goto fail;
+                       goto fail_no_warn;
                }
 
                if (node == NUMA_NO_NODE)
@@ -1709,6 +1709,7 @@ fail:
        warn_alloc(gfp_mask, NULL,
                          "vmalloc: allocation failure, allocated %ld of %ld bytes",
                          (area->nr_pages*PAGE_SIZE), area->size);
+fail_no_warn:
        vfree(area->addr);
        return NULL;
 }
index 8970a2f..f9492bc 100644 (file)
@@ -667,6 +667,7 @@ next:
                        z3fold_page_unlock(zhdr);
                        spin_lock(&pool->lock);
                        if (kref_put(&zhdr->refcount, release_z3fold_page)) {
+                               spin_unlock(&pool->lock);
                                atomic64_dec(&pool->pages_nr);
                                return 0;
                        }
index 7c3d994..71343d0 100644 (file)
@@ -2477,6 +2477,16 @@ static void batadv_iv_iface_activate(struct batadv_hard_iface *hard_iface)
        batadv_iv_ogm_schedule(hard_iface);
 }
 
+/**
+ * batadv_iv_init_sel_class - initialize GW selection class
+ * @bat_priv: the bat priv with all the soft interface information
+ */
+static void batadv_iv_init_sel_class(struct batadv_priv *bat_priv)
+{
+       /* set default TQ difference threshold to 20 */
+       atomic_set(&bat_priv->gw.sel_class, 20);
+}
+
 static struct batadv_gw_node *
 batadv_iv_gw_get_best_gw_node(struct batadv_priv *bat_priv)
 {
@@ -2823,6 +2833,7 @@ static struct batadv_algo_ops batadv_batman_iv __read_mostly = {
                .del_if = batadv_iv_ogm_orig_del_if,
        },
        .gw = {
+               .init_sel_class = batadv_iv_init_sel_class,
                .get_best_gw_node = batadv_iv_gw_get_best_gw_node,
                .is_eligible = batadv_iv_gw_is_eligible,
 #ifdef CONFIG_BATMAN_ADV_DEBUGFS
index 0acd081..a36c8e7 100644 (file)
@@ -668,6 +668,16 @@ err_ifinfo1:
        return ret;
 }
 
+/**
+ * batadv_v_init_sel_class - initialize GW selection class
+ * @bat_priv: the bat priv with all the soft interface information
+ */
+static void batadv_v_init_sel_class(struct batadv_priv *bat_priv)
+{
+       /* set default throughput difference threshold to 5Mbps */
+       atomic_set(&bat_priv->gw.sel_class, 50);
+}
+
 static ssize_t batadv_v_store_sel_class(struct batadv_priv *bat_priv,
                                        char *buff, size_t count)
 {
@@ -1052,6 +1062,7 @@ static struct batadv_algo_ops batadv_batman_v __read_mostly = {
                .dump = batadv_v_orig_dump,
        },
        .gw = {
+               .init_sel_class = batadv_v_init_sel_class,
                .store_sel_class = batadv_v_store_sel_class,
                .show_sel_class = batadv_v_show_sel_class,
                .get_best_gw_node = batadv_v_gw_get_best_gw_node,
@@ -1092,9 +1103,6 @@ int batadv_v_mesh_init(struct batadv_priv *bat_priv)
        if (ret < 0)
                return ret;
 
-       /* set default throughput difference threshold to 5Mbps */
-       atomic_set(&bat_priv->gw.sel_class, 50);
-
        return 0;
 }
 
index 11a23fd..8f964be 100644 (file)
@@ -404,7 +404,7 @@ out:
  * batadv_frag_create - create a fragment from skb
  * @skb: skb to create fragment from
  * @frag_head: header to use in new fragment
- * @mtu: size of new fragment
+ * @fragment_size: size of new fragment
  *
  * Split the passed skb into two fragments: A new one with size matching the
  * passed mtu and the old one with the rest. The new skb contains data from the
@@ -414,11 +414,11 @@ out:
  */
 static struct sk_buff *batadv_frag_create(struct sk_buff *skb,
                                          struct batadv_frag_packet *frag_head,
-                                         unsigned int mtu)
+                                         unsigned int fragment_size)
 {
        struct sk_buff *skb_fragment;
        unsigned int header_size = sizeof(*frag_head);
-       unsigned int fragment_size = mtu - header_size;
+       unsigned int mtu = fragment_size + header_size;
 
        skb_fragment = netdev_alloc_skb(NULL, mtu + ETH_HLEN);
        if (!skb_fragment)
@@ -456,7 +456,7 @@ int batadv_frag_send_packet(struct sk_buff *skb,
        struct sk_buff *skb_fragment;
        unsigned int mtu = neigh_node->if_incoming->net_dev->mtu;
        unsigned int header_size = sizeof(frag_header);
-       unsigned int max_fragment_size, max_packet_size;
+       unsigned int max_fragment_size, num_fragments;
        int ret;
 
        /* To avoid merge and refragmentation at next-hops we never send
@@ -464,10 +464,15 @@ int batadv_frag_send_packet(struct sk_buff *skb,
         */
        mtu = min_t(unsigned int, mtu, BATADV_FRAG_MAX_FRAG_SIZE);
        max_fragment_size = mtu - header_size;
-       max_packet_size = max_fragment_size * BATADV_FRAG_MAX_FRAGMENTS;
+
+       if (skb->len == 0 || max_fragment_size == 0)
+               return -EINVAL;
+
+       num_fragments = (skb->len - 1) / max_fragment_size + 1;
+       max_fragment_size = (skb->len - 1) / num_fragments + 1;
 
        /* Don't even try to fragment, if we need more than 16 fragments */
-       if (skb->len > max_packet_size) {
+       if (num_fragments > BATADV_FRAG_MAX_FRAGMENTS) {
                ret = -EAGAIN;
                goto free_skb;
        }
@@ -507,7 +512,8 @@ int batadv_frag_send_packet(struct sk_buff *skb,
                        goto put_primary_if;
                }
 
-               skb_fragment = batadv_frag_create(skb, &frag_header, mtu);
+               skb_fragment = batadv_frag_create(skb, &frag_header,
+                                                 max_fragment_size);
                if (!skb_fragment) {
                        ret = -ENOMEM;
                        goto put_primary_if;
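
The two-step computation spreads the payload evenly across the minimum number of fragments. With invented numbers: suppose the MTU caps max_fragment_size at 1380 bytes and skb->len = 2000. Then

    num_fragments     = (2000 - 1) / 1380 + 1 = 2
    max_fragment_size = (2000 - 1) / 2 + 1    = 1000

so the packet goes out as two 1000-byte fragments rather than 1380 + 620, keeping every fragment as small as the fragment count allows.
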
index 5db2e43..33940c5 100644 (file)
@@ -253,6 +253,11 @@ static void batadv_gw_tvlv_ogm_handler_v1(struct batadv_priv *bat_priv,
  */
 void batadv_gw_init(struct batadv_priv *bat_priv)
 {
+       if (bat_priv->algo_ops->gw.init_sel_class)
+               bat_priv->algo_ops->gw.init_sel_class(bat_priv);
+       else
+               atomic_set(&bat_priv->gw.sel_class, 1);
+
        batadv_tvlv_handler_register(bat_priv, batadv_gw_tvlv_ogm_handler_v1,
                                     NULL, BATADV_TVLV_GW, 1,
                                     BATADV_TVLV_HANDLER_OGM_CIFNOTFND);
index 5d099b2..d042c99 100644 (file)
@@ -819,7 +819,6 @@ static int batadv_softif_init_late(struct net_device *dev)
        atomic_set(&bat_priv->mcast.num_want_all_ipv6, 0);
 #endif
        atomic_set(&bat_priv->gw.mode, BATADV_GW_MODE_OFF);
-       atomic_set(&bat_priv->gw.sel_class, 20);
        atomic_set(&bat_priv->gw.bandwidth_down, 100);
        atomic_set(&bat_priv->gw.bandwidth_up, 20);
        atomic_set(&bat_priv->orig_interval, 1000);
index 66b25e4..246f21b 100644 (file)
@@ -1489,6 +1489,7 @@ struct batadv_algo_orig_ops {
 
 /**
  * struct batadv_algo_gw_ops - mesh algorithm callbacks (GW specific)
+ * @init_sel_class: initialize GW selection class (optional)
  * @store_sel_class: parse and stores a new GW selection class (optional)
  * @show_sel_class: prints the current GW selection class (optional)
  * @get_best_gw_node: select the best GW from the list of available nodes
@@ -1499,6 +1500,7 @@ struct batadv_algo_orig_ops {
  * @dump: dump gateways to a netlink socket (optional)
  */
 struct batadv_algo_gw_ops {
+       void (*init_sel_class)(struct batadv_priv *bat_priv);
        ssize_t (*store_sel_class)(struct batadv_priv *bat_priv, char *buff,
                                   size_t count);
        ssize_t (*show_sel_class)(struct batadv_priv *bat_priv, char *buff);
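
Since init_sel_class is optional, a routing algorithm can install its own
default selection class, and batadv_gw_init() above falls back to 1 only
when the callback is absent. A hedged sketch of how an algorithm might
wire the hook; the my_algo_* names are hypothetical:

/* hypothetical algorithm supplying the optional init_sel_class hook */
static void my_algo_gw_init_sel_class(struct batadv_priv *bat_priv)
{
        /* algorithm-specific default instead of the core fallback of 1 */
        atomic_set(&bat_priv->gw.sel_class, 20);
}

static struct batadv_algo_ops my_algo_ops = {
        .name = "MY_ALGO",
        .gw = {
                .init_sel_class = my_algo_gw_init_sel_class,
        },
};
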
index 4f598dc..6e08b71 100644 (file)
@@ -106,7 +106,7 @@ static struct net_bridge_fdb_entry *br_fdb_find(struct net_bridge *br,
        struct hlist_head *head = &br->hash[br_mac_hash(addr, vid)];
        struct net_bridge_fdb_entry *fdb;
 
-       WARN_ON_ONCE(!br_hash_lock_held(br));
+       lockdep_assert_held_once(&br->hash_lock);
 
        rcu_read_lock();
        fdb = fdb_find_rcu(head, addr, vid);
index fa87fbd..1f1e620 100644 (file)
@@ -706,18 +706,20 @@ static unsigned int nf_bridge_mtu_reduction(const struct sk_buff *skb)
 
 static int br_nf_dev_queue_xmit(struct net *net, struct sock *sk, struct sk_buff *skb)
 {
-       struct nf_bridge_info *nf_bridge;
-       unsigned int mtu_reserved;
+       struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb);
+       unsigned int mtu, mtu_reserved;
 
        mtu_reserved = nf_bridge_mtu_reduction(skb);
+       mtu = skb->dev->mtu;
+
+       if (nf_bridge->frag_max_size && nf_bridge->frag_max_size < mtu)
+               mtu = nf_bridge->frag_max_size;
 
-       if (skb_is_gso(skb) || skb->len + mtu_reserved <= skb->dev->mtu) {
+       if (skb_is_gso(skb) || skb->len + mtu_reserved <= mtu) {
                nf_bridge_info_free(skb);
                return br_dev_queue_push_xmit(net, sk, skb);
        }
 
-       nf_bridge = nf_bridge_info_get(skb);
-
        /* This is wrong! We should preserve the original fragment
         * boundaries by preserving frag_list rather than refragmenting.
         */
index 2288fca..6136818 100644 (file)
@@ -531,15 +531,6 @@ int br_fdb_external_learn_add(struct net_bridge *br, struct net_bridge_port *p,
 int br_fdb_external_learn_del(struct net_bridge *br, struct net_bridge_port *p,
                              const unsigned char *addr, u16 vid);
 
-static inline bool br_hash_lock_held(struct net_bridge *br)
-{
-#ifdef CONFIG_LOCKDEP
-       return lockdep_is_held(&br->hash_lock);
-#else
-       return true;
-#endif
-}
-
 /* br_forward.c */
 enum br_pkt_type {
        BR_PKT_UNICAST,
index 38dcf1e..f76bb33 100644 (file)
@@ -7,6 +7,7 @@
 #include <linux/kthread.h>
 #include <linux/net.h>
 #include <linux/nsproxy.h>
+#include <linux/sched/mm.h>
 #include <linux/slab.h>
 #include <linux/socket.h>
 #include <linux/string.h>
@@ -469,11 +470,16 @@ static int ceph_tcp_connect(struct ceph_connection *con)
 {
        struct sockaddr_storage *paddr = &con->peer_addr.in_addr;
        struct socket *sock;
+       unsigned int noio_flag;
        int ret;
 
        BUG_ON(con->sock);
+
+       /* sock_create_kern() allocates with GFP_KERNEL */
+       noio_flag = memalloc_noio_save();
        ret = sock_create_kern(read_pnet(&con->msgr->net), paddr->ss_family,
                               SOCK_STREAM, IPPROTO_TCP, &sock);
+       memalloc_noio_restore(noio_flag);
        if (ret)
                return ret;
        sock->sk->sk_allocation = GFP_NOFS;
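
memalloc_noio_save()/memalloc_noio_restore() scope the constraint to the
current task: any GFP_KERNEL allocation inside the window, here the one
buried in sock_create_kern(), is implicitly degraded to GFP_NOIO, so
direct reclaim cannot recurse into the filesystem this connection backs.
A hedged sketch of the general pattern, not specific to ceph:

#include <linux/sched/mm.h>
#include <linux/slab.h>

static void *alloc_in_noio_scope(size_t size)
{
        unsigned int noio_flag;
        void *p;

        noio_flag = memalloc_noio_save();       /* enter NOIO scope */
        p = kmalloc(size, GFP_KERNEL);          /* behaves as GFP_NOIO here */
        memalloc_noio_restore(noio_flag);       /* leave NOIO scope */
        return p;
}
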
index 6ae5603..029a61a 100644 (file)
@@ -71,27 +71,17 @@ static int update_classid_sock(const void *v, struct file *file, unsigned n)
        return 0;
 }
 
-static void update_classid(struct cgroup_subsys_state *css, void *v)
+static void cgrp_attach(struct cgroup_taskset *tset)
 {
-       struct css_task_iter it;
+       struct cgroup_subsys_state *css;
        struct task_struct *p;
 
-       css_task_iter_start(css, &it);
-       while ((p = css_task_iter_next(&it))) {
+       cgroup_taskset_for_each(p, css, tset) {
                task_lock(p);
-               iterate_fd(p->files, 0, update_classid_sock, v);
+               iterate_fd(p->files, 0, update_classid_sock,
+                          (void *)(unsigned long)css_cls_state(css)->classid);
                task_unlock(p);
        }
-       css_task_iter_end(&it);
-}
-
-static void cgrp_attach(struct cgroup_taskset *tset)
-{
-       struct cgroup_subsys_state *css;
-
-       cgroup_taskset_first(tset, &css);
-       update_classid(css,
-                      (void *)(unsigned long)css_cls_state(css)->classid);
 }
 
 static u64 read_classid(struct cgroup_subsys_state *css, struct cftype *cft)
@@ -103,12 +93,22 @@ static int write_classid(struct cgroup_subsys_state *css, struct cftype *cft,
                         u64 value)
 {
        struct cgroup_cls_state *cs = css_cls_state(css);
+       struct css_task_iter it;
+       struct task_struct *p;
 
        cgroup_sk_alloc_disable();
 
        cs->classid = (u32)value;
 
-       update_classid(css, (void *)(unsigned long)cs->classid);
+       css_task_iter_start(css, &it);
+       while ((p = css_task_iter_next(&it))) {
+               task_lock(p);
+               iterate_fd(p->files, 0, update_classid_sock,
+                          (void *)(unsigned long)cs->classid);
+               task_unlock(p);
+       }
+       css_task_iter_end(&it);
+
        return 0;
 }
 
index cd4ba8c..9f78109 100644 (file)
@@ -3694,6 +3694,15 @@ static void sock_rmem_free(struct sk_buff *skb)
        atomic_sub(skb->truesize, &sk->sk_rmem_alloc);
 }
 
+static void skb_set_err_queue(struct sk_buff *skb)
+{
+       /* pkt_type of skbs received on local sockets is never PACKET_OUTGOING.
+        * So, it is safe to (mis)use it to mark skbs on the error queue.
+        */
+       skb->pkt_type = PACKET_OUTGOING;
+       BUILD_BUG_ON(PACKET_OUTGOING == 0);
+}
+
 /*
  * Note: We don't mem charge error packets (no sk_forward_alloc changes)
  */
@@ -3707,6 +3716,7 @@ int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb)
        skb->sk = sk;
        skb->destructor = sock_rmem_free;
        atomic_add(skb->truesize, &sk->sk_rmem_alloc);
+       skb_set_err_queue(skb);
 
        /* before exiting rcu section, make sure dst is refcounted */
        skb_dst_force(skb);
@@ -3783,16 +3793,20 @@ EXPORT_SYMBOL(skb_clone_sk);
 
 static void __skb_complete_tx_timestamp(struct sk_buff *skb,
                                        struct sock *sk,
-                                       int tstype)
+                                       int tstype,
+                                       bool opt_stats)
 {
        struct sock_exterr_skb *serr;
        int err;
 
+       BUILD_BUG_ON(sizeof(struct sock_exterr_skb) > sizeof(skb->cb));
+
        serr = SKB_EXT_ERR(skb);
        memset(serr, 0, sizeof(*serr));
        serr->ee.ee_errno = ENOMSG;
        serr->ee.ee_origin = SO_EE_ORIGIN_TIMESTAMPING;
        serr->ee.ee_info = tstype;
+       serr->opt_stats = opt_stats;
        if (sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID) {
                serr->ee.ee_data = skb_shinfo(skb)->tskey;
                if (sk->sk_protocol == IPPROTO_TCP &&
@@ -3833,7 +3847,7 @@ void skb_complete_tx_timestamp(struct sk_buff *skb,
         */
        if (likely(atomic_inc_not_zero(&sk->sk_refcnt))) {
                *skb_hwtstamps(skb) = *hwtstamps;
-               __skb_complete_tx_timestamp(skb, sk, SCM_TSTAMP_SND);
+               __skb_complete_tx_timestamp(skb, sk, SCM_TSTAMP_SND, false);
                sock_put(sk);
        }
 }
@@ -3844,7 +3858,7 @@ void __skb_tstamp_tx(struct sk_buff *orig_skb,
                     struct sock *sk, int tstype)
 {
        struct sk_buff *skb;
-       bool tsonly;
+       bool tsonly, opt_stats = false;
 
        if (!sk)
                return;
@@ -3857,9 +3871,10 @@ void __skb_tstamp_tx(struct sk_buff *orig_skb,
 #ifdef CONFIG_INET
                if ((sk->sk_tsflags & SOF_TIMESTAMPING_OPT_STATS) &&
                    sk->sk_protocol == IPPROTO_TCP &&
-                   sk->sk_type == SOCK_STREAM)
+                   sk->sk_type == SOCK_STREAM) {
                        skb = tcp_get_timestamping_opt_stats(sk);
-               else
+                       opt_stats = true;
+               } else
 #endif
                        skb = alloc_skb(0, GFP_ATOMIC);
        } else {
@@ -3878,7 +3893,7 @@ void __skb_tstamp_tx(struct sk_buff *orig_skb,
        else
                skb->tstamp = ktime_get_real();
 
-       __skb_complete_tx_timestamp(skb, sk, tstype);
+       __skb_complete_tx_timestamp(skb, sk, tstype, opt_stats);
 }
 EXPORT_SYMBOL_GPL(__skb_tstamp_tx);
 
index a96d5f7..2c4f574 100644 (file)
@@ -1442,6 +1442,11 @@ static void __sk_destruct(struct rcu_head *head)
                pr_debug("%s: optmem leakage (%d bytes) detected\n",
                         __func__, atomic_read(&sk->sk_omem_alloc));
 
+       if (sk->sk_frag.page) {
+               put_page(sk->sk_frag.page);
+               sk->sk_frag.page = NULL;
+       }
+
        if (sk->sk_peer_cred)
                put_cred(sk->sk_peer_cred);
        put_pid(sk->sk_peer_pid);
@@ -1539,6 +1544,12 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
                        is_charged = sk_filter_charge(newsk, filter);
 
                if (unlikely(!is_charged || xfrm_sk_clone_policy(newsk, sk))) {
+                       /* We need to make sure that we don't uncharge the new
+                        * socket if we couldn't charge it in the first place
+                        * as otherwise we uncharge the parent's filter.
+                        */
+                       if (!is_charged)
+                               RCU_INIT_POINTER(newsk->sk_filter, NULL);
                        sk_free_unlock_clone(newsk);
                        newsk = NULL;
                        goto out;
@@ -2787,11 +2798,6 @@ void sk_common_release(struct sock *sk)
 
        sk_refcnt_debug_release(sk);
 
-       if (sk->sk_frag.page) {
-               put_page(sk->sk_frag.page);
-               sk->sk_frag.page = NULL;
-       }
-
        sock_put(sk);
 }
 EXPORT_SYMBOL(sk_common_release);
index 42bfd08..8f2133f 100644 (file)
@@ -1083,7 +1083,8 @@ static void nl_fib_input(struct sk_buff *skb)
 
        net = sock_net(skb->sk);
        nlh = nlmsg_hdr(skb);
-       if (skb->len < NLMSG_HDRLEN || skb->len < nlh->nlmsg_len ||
+       if (skb->len < nlmsg_total_size(sizeof(*frn)) ||
+           skb->len < nlh->nlmsg_len ||
            nlmsg_len(nlh) < sizeof(*frn))
                return;
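
The old test only guaranteed a bare netlink header even though the handler
goes on to read a struct fib_result_nl payload. nlmsg_total_size(n) is
NLMSG_ALIGN(NLMSG_HDRLEN + n), so the first comparison now guarantees the
header plus an aligned payload in one go. A hedged restatement of the
three checks; the helper name is hypothetical:

#include <net/netlink.h>

static bool frn_msg_ok(const struct sk_buff *skb, const struct nlmsghdr *nlh,
                       size_t payload)
{
        return skb->len >= nlmsg_total_size(payload) && /* hdr + payload */
               skb->len >= nlh->nlmsg_len &&            /* claimed length */
               nlmsg_len(nlh) >= payload;               /* payload bytes */
}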
 
index bbe7f72..b3cdeec 100644 (file)
@@ -198,6 +198,7 @@ static void ip_expire(unsigned long arg)
        qp = container_of((struct inet_frag_queue *) arg, struct ipq, q);
        net = container_of(qp->q.net, struct net, ipv4.frags);
 
+       rcu_read_lock();
        spin_lock(&qp->q.lock);
 
        if (qp->q.flags & INET_FRAG_COMPLETE)
@@ -207,7 +208,7 @@ static void ip_expire(unsigned long arg)
        __IP_INC_STATS(net, IPSTATS_MIB_REASMFAILS);
 
        if (!inet_frag_evicting(&qp->q)) {
-               struct sk_buff *head = qp->q.fragments;
+               struct sk_buff *clone, *head = qp->q.fragments;
                const struct iphdr *iph;
                int err;
 
@@ -216,32 +217,40 @@ static void ip_expire(unsigned long arg)
                if (!(qp->q.flags & INET_FRAG_FIRST_IN) || !qp->q.fragments)
                        goto out;
 
-               rcu_read_lock();
                head->dev = dev_get_by_index_rcu(net, qp->iif);
                if (!head->dev)
-                       goto out_rcu_unlock;
+                       goto out;
 
                /* skb has no dst, perform route lookup again */
                iph = ip_hdr(head);
                err = ip_route_input_noref(head, iph->daddr, iph->saddr,
                                           iph->tos, head->dev);
                if (err)
-                       goto out_rcu_unlock;
+                       goto out;
 
                /* Only an end host needs to send an ICMP
                 * "Fragment Reassembly Timeout" message, per RFC792.
                 */
                if (frag_expire_skip_icmp(qp->user) &&
                    (skb_rtable(head)->rt_type != RTN_LOCAL))
-                       goto out_rcu_unlock;
+                       goto out;
+
+               clone = skb_clone(head, GFP_ATOMIC);
 
                /* Send an ICMP "Fragment Reassembly Timeout" message. */
-               icmp_send(head, ICMP_TIME_EXCEEDED, ICMP_EXC_FRAGTIME, 0);
-out_rcu_unlock:
-               rcu_read_unlock();
+               if (clone) {
+                       spin_unlock(&qp->q.lock);
+                       icmp_send(clone, ICMP_TIME_EXCEEDED,
+                                 ICMP_EXC_FRAGTIME, 0);
+                       consume_skb(clone);
+                       goto out_rcu_unlock;
+               }
        }
 out:
        spin_unlock(&qp->q.lock);
+out_rcu_unlock:
+       rcu_read_unlock();
        ipq_put(qp);
 }
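
icmp_send() may perform a route lookup and take locks of its own, so it
must not run under qp->q.lock; the rewrite therefore snapshots the head
skb with skb_clone() while still protected, drops the queue lock, and
sends the ICMP from the clone. A hedged sketch of the generic pattern,
where report() is a hypothetical stand-in for icmp_send():

#include <linux/skbuff.h>
#include <linux/spinlock.h>

extern void report(struct sk_buff *skb);        /* stand-in, hypothetical */

static void report_outside_lock(spinlock_t *lock, struct sk_buff *head)
{
        struct sk_buff *clone;

        spin_lock(lock);
        clone = skb_clone(head, GFP_ATOMIC);    /* snapshot under the lock */
        spin_unlock(lock);

        if (clone) {
                report(clone);          /* safe: queue lock not held here */
                consume_skb(clone);
        }
}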
 
index bc1486f..2e14ed1 100644 (file)
@@ -165,6 +165,10 @@ static unsigned int ipv4_conntrack_local(void *priv,
        if (skb->len < sizeof(struct iphdr) ||
            ip_hdrlen(skb) < sizeof(struct iphdr))
                return NF_ACCEPT;
+
+       if (ip_is_fragment(ip_hdr(skb))) /* IP_NODEFRAG setsockopt set */
+               return NF_ACCEPT;
+
        return nf_conntrack_in(state->net, PF_INET, state->hook, skb);
 }
 
index f8aad03..6f5e8d0 100644 (file)
@@ -255,11 +255,6 @@ nf_nat_ipv4_fn(void *priv, struct sk_buff *skb,
        /* maniptype == SRC for postrouting. */
        enum nf_nat_manip_type maniptype = HOOK2MANIP(state->hook);
 
-       /* We never see fragments: conntrack defrags on pre-routing
-        * and local-out, and nf_nat_out protects post-routing.
-        */
-       NF_CT_ASSERT(!ip_is_fragment(ip_hdr(skb)));
-
        ct = nf_ct_get(skb, &ctinfo);
        /* Can't track?  It's not due to stress, or conntrack would
         * have dropped it.  Hence it's the user's responsibility to
index a0ea8aa..f186772 100644 (file)
@@ -26,10 +26,10 @@ static void nft_masq_ipv4_eval(const struct nft_expr *expr,
        memset(&range, 0, sizeof(range));
        range.flags = priv->flags;
        if (priv->sreg_proto_min) {
-               range.min_proto.all =
-                       *(__be16 *)&regs->data[priv->sreg_proto_min];
-               range.max_proto.all =
-                       *(__be16 *)&regs->data[priv->sreg_proto_max];
+               range.min_proto.all = (__force __be16)nft_reg_load16(
+                       &regs->data[priv->sreg_proto_min]);
+               range.max_proto.all = (__force __be16)nft_reg_load16(
+                       &regs->data[priv->sreg_proto_max]);
        }
        regs->verdict.code = nf_nat_masquerade_ipv4(pkt->skb, nft_hook(pkt),
                                                    &range, nft_out(pkt));
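
nft_reg_load16() and friends zero-extend through the full 32-bit register:
the store helpers clear the register before writing the narrow value, and
the load helpers read only the low bytes, so sub-register values compare
equally on little- and big-endian machines, which the open-coded
*(__be16 *)&regs->data[...] casts did not. A hedged approximation of such
helpers (suffixed _sketch; see include/net/netfilter/nf_tables.h for the
authoritative definitions):

static inline void nft_reg_store16_sketch(u32 *dreg, u16 val)
{
        *dreg = 0;              /* clear the whole 32-bit register first */
        *(u16 *)dreg = val;     /* then store the narrow value */
}

static inline u16 nft_reg_load16_sketch(const u32 *sreg)
{
        return *(const u16 *)sreg;
}
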
index 1650ed2..5120be1 100644 (file)
@@ -26,10 +26,10 @@ static void nft_redir_ipv4_eval(const struct nft_expr *expr,
 
        memset(&mr, 0, sizeof(mr));
        if (priv->sreg_proto_min) {
-               mr.range[0].min.all =
-                       *(__be16 *)&regs->data[priv->sreg_proto_min];
-               mr.range[0].max.all =
-                       *(__be16 *)&regs->data[priv->sreg_proto_max];
+               mr.range[0].min.all = (__force __be16)nft_reg_load16(
+                       &regs->data[priv->sreg_proto_min]);
+               mr.range[0].max.all = (__force __be16)nft_reg_load16(
+                       &regs->data[priv->sreg_proto_max]);
                mr.range[0].flags |= NF_NAT_RANGE_PROTO_SPECIFIED;
        }
 
index cf45555..1e319a5 100644 (file)
@@ -2770,7 +2770,7 @@ void tcp_get_info(struct sock *sk, struct tcp_info *info)
 {
        const struct tcp_sock *tp = tcp_sk(sk); /* iff sk_type == SOCK_STREAM */
        const struct inet_connection_sock *icsk = inet_csk(sk);
-       u32 now = tcp_time_stamp, intv;
+       u32 now, intv;
        u64 rate64;
        bool slow;
        u32 rate;
@@ -2839,6 +2839,7 @@ void tcp_get_info(struct sock *sk, struct tcp_info *info)
        info->tcpi_retrans = tp->retrans_out;
        info->tcpi_fackets = tp->fackets_out;
 
+       now = tcp_time_stamp;
        info->tcpi_last_data_sent = jiffies_to_msecs(now - tp->lsndtime);
        info->tcpi_last_data_recv = jiffies_to_msecs(now - icsk->icsk_ack.lrcvtime);
        info->tcpi_last_ack_recv = jiffies_to_msecs(now - tp->rcv_tstamp);
index 39c393c..c431197 100644 (file)
@@ -5541,6 +5541,7 @@ void tcp_finish_connect(struct sock *sk, struct sk_buff *skb)
        struct inet_connection_sock *icsk = inet_csk(sk);
 
        tcp_set_state(sk, TCP_ESTABLISHED);
+       icsk->icsk_ack.lrcvtime = tcp_time_stamp;
 
        if (skb) {
                icsk->icsk_af_ops->sk_rx_dst_set(sk, skb);
@@ -5759,7 +5760,6 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
                         * to stand against the temptation 8)     --ANK
                         */
                        inet_csk_schedule_ack(sk);
-                       icsk->icsk_ack.lrcvtime = tcp_time_stamp;
                        tcp_enter_quickack_mode(sk);
                        inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
                                                  TCP_DELACK_MAX, TCP_RTO_MAX);
index 7e16243..65c0f3d 100644 (file)
@@ -460,6 +460,7 @@ struct sock *tcp_create_openreq_child(const struct sock *sk,
                newtp->mdev_us = jiffies_to_usecs(TCP_TIMEOUT_INIT);
                minmax_reset(&newtp->rtt_min, tcp_time_stamp, ~0U);
                newicsk->icsk_rto = TCP_TIMEOUT_INIT;
+               newicsk->icsk_ack.lrcvtime = tcp_time_stamp;
 
                newtp->packets_out = 0;
                newtp->retrans_out = 0;
index 6c5b5b1..4146536 100644 (file)
@@ -27,10 +27,10 @@ static void nft_masq_ipv6_eval(const struct nft_expr *expr,
        memset(&range, 0, sizeof(range));
        range.flags = priv->flags;
        if (priv->sreg_proto_min) {
-               range.min_proto.all =
-                       *(__be16 *)&regs->data[priv->sreg_proto_min];
-               range.max_proto.all =
-                       *(__be16 *)&regs->data[priv->sreg_proto_max];
+               range.min_proto.all = (__force __be16)nft_reg_load16(
+                       &regs->data[priv->sreg_proto_min]);
+               range.max_proto.all = (__force __be16)nft_reg_load16(
+                       &regs->data[priv->sreg_proto_max]);
        }
        regs->verdict.code = nf_nat_masquerade_ipv6(pkt->skb, &range,
                                                    nft_out(pkt));
index f5ac080..a27e424 100644 (file)
@@ -26,10 +26,10 @@ static void nft_redir_ipv6_eval(const struct nft_expr *expr,
 
        memset(&range, 0, sizeof(range));
        if (priv->sreg_proto_min) {
-               range.min_proto.all =
-                       *(__be16 *)&regs->data[priv->sreg_proto_min],
-               range.max_proto.all =
-                       *(__be16 *)&regs->data[priv->sreg_proto_max],
+               range.min_proto.all = (__force __be16)nft_reg_load16(
+                       &regs->data[priv->sreg_proto_min]);
+               range.max_proto.all = (__force __be16)nft_reg_load16(
+                       &regs->data[priv->sreg_proto_max]);
                range.flags |= NF_NAT_RANGE_PROTO_SPECIFIED;
        }
 
index 35c58b6..9db1418 100644 (file)
@@ -3423,6 +3423,8 @@ static int rt6_fill_node(struct net *net,
        }
        else if (rt->rt6i_flags & RTF_LOCAL)
                rtm->rtm_type = RTN_LOCAL;
+       else if (rt->rt6i_flags & RTF_ANYCAST)
+               rtm->rtm_type = RTN_ANYCAST;
        else if (rt->dst.dev && (rt->dst.dev->flags & IFF_LOOPBACK))
                rtm->rtm_type = RTN_LOCAL;
        else
index 4e4c401..e28082f 100644 (file)
@@ -1035,6 +1035,7 @@ int udpv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
        ipc6.hlimit = -1;
        ipc6.tclass = -1;
        ipc6.dontfrag = -1;
+       sockc.tsflags = sk->sk_tsflags;
 
        /* destination address check */
        if (sin6) {
@@ -1159,7 +1160,6 @@ do_udp_sendmsg:
 
        fl6.flowi6_mark = sk->sk_mark;
        fl6.flowi6_uid = sk->sk_uid;
-       sockc.tsflags = sk->sk_tsflags;
 
        if (msg->msg_controllen) {
                opt = &opt_space;
index 33211f9..6414079 100644 (file)
@@ -1269,6 +1269,8 @@ static void mpls_ifdown(struct net_device *dev, int event)
 {
        struct mpls_route __rcu **platform_label;
        struct net *net = dev_net(dev);
+       unsigned int nh_flags = RTNH_F_DEAD | RTNH_F_LINKDOWN;
+       unsigned int alive;
        unsigned index;
 
        platform_label = rtnl_dereference(net->mpls.platform_label);
@@ -1278,9 +1280,11 @@ static void mpls_ifdown(struct net_device *dev, int event)
                if (!rt)
                        continue;
 
+               alive = 0;
                change_nexthops(rt) {
                        if (rtnl_dereference(nh->nh_dev) != dev)
-                               continue;
+                               goto next;
+
                        switch (event) {
                        case NETDEV_DOWN:
                        case NETDEV_UNREGISTER:
@@ -1288,13 +1292,16 @@ static void mpls_ifdown(struct net_device *dev, int event)
                                /* fall through */
                        case NETDEV_CHANGE:
                                nh->nh_flags |= RTNH_F_LINKDOWN;
-                               if (event != NETDEV_UNREGISTER)
-                                       ACCESS_ONCE(rt->rt_nhn_alive) = rt->rt_nhn_alive - 1;
                                break;
                        }
                        if (event == NETDEV_UNREGISTER)
                                RCU_INIT_POINTER(nh->nh_dev, NULL);
+next:
+                       if (!(nh->nh_flags & nh_flags))
+                               alive++;
                } endfor_nexthops(rt);
+
+               WRITE_ONCE(rt->rt_nhn_alive, alive);
        }
 }
 
index 071b97f..ffb78e5 100644 (file)
@@ -181,7 +181,11 @@ EXPORT_SYMBOL_GPL(nf_conntrack_htable_size);
 unsigned int nf_conntrack_max __read_mostly;
 seqcount_t nf_conntrack_generation __read_mostly;
 
-DEFINE_PER_CPU(struct nf_conn, nf_conntrack_untracked);
+/* nf_conn must be 8 bytes aligned, as the 3 LSB bits are used
+ * for the nfctinfo. We cheat by (ab)using the PER CPU cache line
+ * alignment to enforce this.
+ */
+DEFINE_PER_CPU_ALIGNED(struct nf_conn, nf_conntrack_untracked);
 EXPORT_PER_CPU_SYMBOL(nf_conntrack_untracked);
 
 static unsigned int nf_conntrack_hash_rnd __read_mostly;
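
The alignment matters because conntrack packs the nf_conn pointer and the
packet's 3-bit ctinfo into a single word (skb->_nfct), so every nf_conn,
including these per-cpu untracked ones, must sit on an 8-byte boundary. A
hedged sketch of the tagging scheme (names suffixed _sketch; the real mask
and accessors live in the skbuff/conntrack headers):

#define NFCT_INFOMASK_SKETCH    7UL     /* low 3 bits carry the ctinfo */

static unsigned long pack_nfct_sketch(struct nf_conn *ct, unsigned int ctinfo)
{
        return (unsigned long)ct | ctinfo;      /* needs 8-byte-aligned ct */
}

static struct nf_conn *unpack_nfct_sketch(unsigned long nfct)
{
        return (struct nf_conn *)(nfct & ~NFCT_INFOMASK_SKETCH);
}
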
index 31d3586..804e8a0 100644 (file)
@@ -33,8 +33,16 @@ sctp_manip_pkt(struct sk_buff *skb,
               enum nf_nat_manip_type maniptype)
 {
        sctp_sctphdr_t *hdr;
+       int hdrsize = 8;
 
-       if (!skb_make_writable(skb, hdroff + sizeof(*hdr)))
+       /* This could be an inner header returned in an icmp packet; in such
+        * cases we cannot update the checksum field since it is outside
+        * of the 8 bytes of transport layer headers we are guaranteed.
+        */
+       if (skb->len >= hdroff + sizeof(*hdr))
+               hdrsize = sizeof(*hdr);
+
+       if (!skb_make_writable(skb, hdroff + hdrsize))
                return false;
 
        hdr = (struct sctphdr *)(skb->data + hdroff);
@@ -47,6 +55,9 @@ sctp_manip_pkt(struct sk_buff *skb,
                hdr->dest = tuple->dst.u.sctp.port;
        }
 
+       if (hdrsize < sizeof(*hdr))
+               return true;
+
        if (skb->ip_summed != CHECKSUM_PARTIAL) {
                hdr->checksum = sctp_compute_cksum(skb, hdroff);
                skb->ip_summed = CHECKSUM_NONE;
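
An ICMP error only quotes the offending IP header plus the first 8 bytes
of the transport header. For SCTP those 8 bytes cover the ports and the
verification tag but not the checksum, which is why the code rewrites the
ports unconditionally yet skips the checksum update when only 8 bytes are
writable. A layout sketch mirroring struct sctphdr (see linux/sctp.h for
the real definition):

struct sctphdr_layout_sketch {
        __be16 source;          /* offset 0: within the guaranteed bytes */
        __be16 dest;            /* offset 2: within the guaranteed bytes */
        __be32 vtag;            /* offset 4: within the guaranteed bytes */
        __le32 checksum;        /* offset 8: may be absent in icmp errors */
};
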
index 5e0ccfd..434c739 100644 (file)
@@ -3145,7 +3145,6 @@ int nf_tables_bind_set(const struct nft_ctx *ctx, struct nft_set *set,
                iter.count      = 0;
                iter.err        = 0;
                iter.fn         = nf_tables_bind_check_setelem;
-               iter.flush      = false;
 
                set->ops->walk(ctx, set, &iter);
                if (iter.err < 0)
@@ -3399,7 +3398,6 @@ static int nf_tables_dump_set(struct sk_buff *skb, struct netlink_callback *cb)
        args.iter.count         = 0;
        args.iter.err           = 0;
        args.iter.fn            = nf_tables_dump_setelem;
-       args.iter.flush         = false;
        set->ops->walk(&ctx, set, &args.iter);
 
        nla_nest_end(skb, nest);
@@ -3963,7 +3961,6 @@ static int nf_tables_delsetelem(struct net *net, struct sock *nlsk,
                struct nft_set_iter iter = {
                        .genmask        = genmask,
                        .fn             = nft_flush_set,
-                       .flush          = true,
                };
                set->ops->walk(&ctx, set, &iter);
 
@@ -5114,7 +5111,6 @@ static int nf_tables_check_loops(const struct nft_ctx *ctx,
                        iter.count      = 0;
                        iter.err        = 0;
                        iter.fn         = nf_tables_loop_check_setelem;
-                       iter.flush      = false;
 
                        set->ops->walk(ctx, set, &iter);
                        if (iter.err < 0)
index bf548a7..0264258 100644 (file)
@@ -83,7 +83,7 @@ static void nft_ct_get_eval(const struct nft_expr *expr,
 
        switch (priv->key) {
        case NFT_CT_DIRECTION:
-               *dest = CTINFO2DIR(ctinfo);
+               nft_reg_store8(dest, CTINFO2DIR(ctinfo));
                return;
        case NFT_CT_STATUS:
                *dest = ct->status;
@@ -151,20 +151,22 @@ static void nft_ct_get_eval(const struct nft_expr *expr,
                return;
        }
        case NFT_CT_L3PROTOCOL:
-               *dest = nf_ct_l3num(ct);
+               nft_reg_store8(dest, nf_ct_l3num(ct));
                return;
        case NFT_CT_PROTOCOL:
-               *dest = nf_ct_protonum(ct);
+               nft_reg_store8(dest, nf_ct_protonum(ct));
                return;
 #ifdef CONFIG_NF_CONNTRACK_ZONES
        case NFT_CT_ZONE: {
                const struct nf_conntrack_zone *zone = nf_ct_zone(ct);
+               u16 zoneid;
 
                if (priv->dir < IP_CT_DIR_MAX)
-                       *dest = nf_ct_zone_id(zone, priv->dir);
+                       zoneid = nf_ct_zone_id(zone, priv->dir);
                else
-                       *dest = zone->id;
+                       zoneid = zone->id;
 
+               nft_reg_store16(dest, zoneid);
                return;
        }
 #endif
@@ -183,10 +185,10 @@ static void nft_ct_get_eval(const struct nft_expr *expr,
                       nf_ct_l3num(ct) == NFPROTO_IPV4 ? 4 : 16);
                return;
        case NFT_CT_PROTO_SRC:
-               *dest = (__force __u16)tuple->src.u.all;
+               nft_reg_store16(dest, (__force u16)tuple->src.u.all);
                return;
        case NFT_CT_PROTO_DST:
-               *dest = (__force __u16)tuple->dst.u.all;
+               nft_reg_store16(dest, (__force u16)tuple->dst.u.all);
                return;
        default:
                break;
@@ -205,7 +207,7 @@ static void nft_ct_set_zone_eval(const struct nft_expr *expr,
        const struct nft_ct *priv = nft_expr_priv(expr);
        struct sk_buff *skb = pkt->skb;
        enum ip_conntrack_info ctinfo;
-       u16 value = regs->data[priv->sreg];
+       u16 value = nft_reg_load16(&regs->data[priv->sreg]);
        struct nf_conn *ct;
 
        ct = nf_ct_get(skb, &ctinfo);
@@ -542,7 +544,8 @@ static int nft_ct_set_init(const struct nft_ctx *ctx,
                case IP_CT_DIR_REPLY:
                        break;
                default:
-                       return -EINVAL;
+                       err = -EINVAL;
+                       goto err1;
                }
        }
 
index e1f5ca9..7b60e01 100644 (file)
@@ -45,16 +45,15 @@ void nft_meta_get_eval(const struct nft_expr *expr,
                *dest = skb->len;
                break;
        case NFT_META_PROTOCOL:
-               *dest = 0;
-               *(__be16 *)dest = skb->protocol;
+               nft_reg_store16(dest, (__force u16)skb->protocol);
                break;
        case NFT_META_NFPROTO:
-               *dest = nft_pf(pkt);
+               nft_reg_store8(dest, nft_pf(pkt));
                break;
        case NFT_META_L4PROTO:
                if (!pkt->tprot_set)
                        goto err;
-               *dest = pkt->tprot;
+               nft_reg_store8(dest, pkt->tprot);
                break;
        case NFT_META_PRIORITY:
                *dest = skb->priority;
@@ -85,14 +84,12 @@ void nft_meta_get_eval(const struct nft_expr *expr,
        case NFT_META_IIFTYPE:
                if (in == NULL)
                        goto err;
-               *dest = 0;
-               *(u16 *)dest = in->type;
+               nft_reg_store16(dest, in->type);
                break;
        case NFT_META_OIFTYPE:
                if (out == NULL)
                        goto err;
-               *dest = 0;
-               *(u16 *)dest = out->type;
+               nft_reg_store16(dest, out->type);
                break;
        case NFT_META_SKUID:
                sk = skb_to_full_sk(skb);
@@ -142,19 +139,19 @@ void nft_meta_get_eval(const struct nft_expr *expr,
 #endif
        case NFT_META_PKTTYPE:
                if (skb->pkt_type != PACKET_LOOPBACK) {
-                       *dest = skb->pkt_type;
+                       nft_reg_store8(dest, skb->pkt_type);
                        break;
                }
 
                switch (nft_pf(pkt)) {
                case NFPROTO_IPV4:
                        if (ipv4_is_multicast(ip_hdr(skb)->daddr))
-                               *dest = PACKET_MULTICAST;
+                               nft_reg_store8(dest, PACKET_MULTICAST);
                        else
-                               *dest = PACKET_BROADCAST;
+                               nft_reg_store8(dest, PACKET_BROADCAST);
                        break;
                case NFPROTO_IPV6:
-                       *dest = PACKET_MULTICAST;
+                       nft_reg_store8(dest, PACKET_MULTICAST);
                        break;
                case NFPROTO_NETDEV:
                        switch (skb->protocol) {
@@ -168,14 +165,14 @@ void nft_meta_get_eval(const struct nft_expr *expr,
                                        goto err;
 
                                if (ipv4_is_multicast(iph->daddr))
-                                       *dest = PACKET_MULTICAST;
+                                       nft_reg_store8(dest, PACKET_MULTICAST);
                                else
-                                       *dest = PACKET_BROADCAST;
+                                       nft_reg_store8(dest, PACKET_BROADCAST);
 
                                break;
                        }
                        case htons(ETH_P_IPV6):
-                               *dest = PACKET_MULTICAST;
+                               nft_reg_store8(dest, PACKET_MULTICAST);
                                break;
                        default:
                                WARN_ON_ONCE(1);
@@ -230,7 +227,9 @@ void nft_meta_set_eval(const struct nft_expr *expr,
 {
        const struct nft_meta *meta = nft_expr_priv(expr);
        struct sk_buff *skb = pkt->skb;
-       u32 value = regs->data[meta->sreg];
+       u32 *sreg = &regs->data[meta->sreg];
+       u32 value = *sreg;
+       u8 pkt_type;
 
        switch (meta->key) {
        case NFT_META_MARK:
@@ -240,9 +239,12 @@ void nft_meta_set_eval(const struct nft_expr *expr,
                skb->priority = value;
                break;
        case NFT_META_PKTTYPE:
-               if (skb->pkt_type != value &&
-                   skb_pkt_type_ok(value) && skb_pkt_type_ok(skb->pkt_type))
-                       skb->pkt_type = value;
+               pkt_type = nft_reg_load8(sreg);
+
+               if (skb->pkt_type != pkt_type &&
+                   skb_pkt_type_ok(pkt_type) &&
+                   skb_pkt_type_ok(skb->pkt_type))
+                       skb->pkt_type = pkt_type;
                break;
        case NFT_META_NFTRACE:
                skb->nf_trace = !!value;
index 19a7bf3..439e0bd 100644 (file)
@@ -65,10 +65,10 @@ static void nft_nat_eval(const struct nft_expr *expr,
        }
 
        if (priv->sreg_proto_min) {
-               range.min_proto.all =
-                       *(__be16 *)&regs->data[priv->sreg_proto_min];
-               range.max_proto.all =
-                       *(__be16 *)&regs->data[priv->sreg_proto_max];
+               range.min_proto.all = (__force __be16)nft_reg_load16(
+                       &regs->data[priv->sreg_proto_min]);
+               range.max_proto.all = (__force __be16)nft_reg_load16(
+                       &regs->data[priv->sreg_proto_max]);
                range.flags |= NF_NAT_RANGE_PROTO_SPECIFIED;
        }
 
index 152d226..8ebbc29 100644 (file)
 #include <linux/netfilter/nf_tables.h>
 #include <net/netfilter/nf_tables.h>
 
+struct nft_bitmap_elem {
+       struct list_head        head;
+       struct nft_set_ext      ext;
+};
+
 /* This bitmap uses two bits to represent one element. These two bits determine
  * the element state in the current and the future generation.
  *
  *      restore its previous state.
  */
 struct nft_bitmap {
-       u16     bitmap_size;
-       u8      bitmap[];
+       struct  list_head       list;
+       u16                     bitmap_size;
+       u8                      bitmap[];
 };
 
-static inline void nft_bitmap_location(u32 key, u32 *idx, u32 *off)
+static inline void nft_bitmap_location(const struct nft_set *set,
+                                      const void *key,
+                                      u32 *idx, u32 *off)
 {
-       u32 k = (key << 1);
+       u32 k;
+
+       if (set->klen == 2)
+               k = *(u16 *)key;
+       else
+               k = *(u8 *)key;
+       k <<= 1;
 
        *idx = k / BITS_PER_BYTE;
        *off = k % BITS_PER_BYTE;
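
Each element occupies two adjacent bits (its state in the current and the
next generation), so the key is doubled and then split into a byte index
and a bit offset. A runnable userspace demo of the addressing, with an
assumed one-byte key:

#include <stdio.h>

#define BITS_PER_BYTE 8

int main(void)
{
        unsigned int key = 5;           /* assumed one-byte set key */
        unsigned int k = key << 1;      /* two bitmap bits per element */
        unsigned int idx = k / BITS_PER_BYTE;
        unsigned int off = k % BITS_PER_BYTE;

        /* prints "key 5 -> bitmap[1], bits 2-3" */
        printf("key %u -> bitmap[%u], bits %u-%u\n", key, idx, off, off + 1);
        return 0;
}
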
@@ -69,26 +83,48 @@ static bool nft_bitmap_lookup(const struct net *net, const struct nft_set *set,
        u8 genmask = nft_genmask_cur(net);
        u32 idx, off;
 
-       nft_bitmap_location(*key, &idx, &off);
+       nft_bitmap_location(set, key, &idx, &off);
 
        return nft_bitmap_active(priv->bitmap, idx, off, genmask);
 }
 
+static struct nft_bitmap_elem *
+nft_bitmap_elem_find(const struct nft_set *set, struct nft_bitmap_elem *this,
+                    u8 genmask)
+{
+       const struct nft_bitmap *priv = nft_set_priv(set);
+       struct nft_bitmap_elem *be;
+
+       list_for_each_entry_rcu(be, &priv->list, head) {
+               if (memcmp(nft_set_ext_key(&be->ext),
+                          nft_set_ext_key(&this->ext), set->klen) ||
+                   !nft_set_elem_active(&be->ext, genmask))
+                       continue;
+
+               return be;
+       }
+       return NULL;
+}
+
 static int nft_bitmap_insert(const struct net *net, const struct nft_set *set,
                             const struct nft_set_elem *elem,
-                            struct nft_set_ext **_ext)
+                            struct nft_set_ext **ext)
 {
        struct nft_bitmap *priv = nft_set_priv(set);
-       struct nft_set_ext *ext = elem->priv;
+       struct nft_bitmap_elem *new = elem->priv, *be;
        u8 genmask = nft_genmask_next(net);
        u32 idx, off;
 
-       nft_bitmap_location(nft_set_ext_key(ext)->data[0], &idx, &off);
-       if (nft_bitmap_active(priv->bitmap, idx, off, genmask))
+       be = nft_bitmap_elem_find(set, new, genmask);
+       if (be) {
+               *ext = &be->ext;
                return -EEXIST;
+       }
 
+       nft_bitmap_location(set, nft_set_ext_key(&new->ext), &idx, &off);
        /* Enter 01 state. */
        priv->bitmap[idx] |= (genmask << off);
+       list_add_tail_rcu(&new->head, &priv->list);
 
        return 0;
 }
@@ -98,13 +134,14 @@ static void nft_bitmap_remove(const struct net *net,
                              const struct nft_set_elem *elem)
 {
        struct nft_bitmap *priv = nft_set_priv(set);
-       struct nft_set_ext *ext = elem->priv;
+       struct nft_bitmap_elem *be = elem->priv;
        u8 genmask = nft_genmask_next(net);
        u32 idx, off;
 
-       nft_bitmap_location(nft_set_ext_key(ext)->data[0], &idx, &off);
+       nft_bitmap_location(set, nft_set_ext_key(&be->ext), &idx, &off);
        /* Enter 00 state. */
        priv->bitmap[idx] &= ~(genmask << off);
+       list_del_rcu(&be->head);
 }
 
 static void nft_bitmap_activate(const struct net *net,
@@ -112,74 +149,52 @@ static void nft_bitmap_activate(const struct net *net,
                                const struct nft_set_elem *elem)
 {
        struct nft_bitmap *priv = nft_set_priv(set);
-       struct nft_set_ext *ext = elem->priv;
+       struct nft_bitmap_elem *be = elem->priv;
        u8 genmask = nft_genmask_next(net);
        u32 idx, off;
 
-       nft_bitmap_location(nft_set_ext_key(ext)->data[0], &idx, &off);
+       nft_bitmap_location(set, nft_set_ext_key(&be->ext), &idx, &off);
        /* Enter 11 state. */
        priv->bitmap[idx] |= (genmask << off);
+       nft_set_elem_change_active(net, set, &be->ext);
 }
 
 static bool nft_bitmap_flush(const struct net *net,
-                            const struct nft_set *set, void *ext)
+                            const struct nft_set *set, void *_be)
 {
        struct nft_bitmap *priv = nft_set_priv(set);
        u8 genmask = nft_genmask_next(net);
+       struct nft_bitmap_elem *be = _be;
        u32 idx, off;
 
-       nft_bitmap_location(nft_set_ext_key(ext)->data[0], &idx, &off);
+       nft_bitmap_location(set, nft_set_ext_key(&be->ext), &idx, &off);
        /* Enter 10 state, similar to deactivation. */
        priv->bitmap[idx] &= ~(genmask << off);
+       nft_set_elem_change_active(net, set, &be->ext);
 
        return true;
 }
 
-static struct nft_set_ext *nft_bitmap_ext_alloc(const struct nft_set *set,
-                                               const struct nft_set_elem *elem)
-{
-       struct nft_set_ext_tmpl tmpl;
-       struct nft_set_ext *ext;
-
-       nft_set_ext_prepare(&tmpl);
-       nft_set_ext_add_length(&tmpl, NFT_SET_EXT_KEY, set->klen);
-
-       ext = kzalloc(tmpl.len, GFP_KERNEL);
-       if (!ext)
-               return NULL;
-
-       nft_set_ext_init(ext, &tmpl);
-       memcpy(nft_set_ext_key(ext), elem->key.val.data, set->klen);
-
-       return ext;
-}
-
 static void *nft_bitmap_deactivate(const struct net *net,
                                   const struct nft_set *set,
                                   const struct nft_set_elem *elem)
 {
        struct nft_bitmap *priv = nft_set_priv(set);
+       struct nft_bitmap_elem *this = elem->priv, *be;
        u8 genmask = nft_genmask_next(net);
-       struct nft_set_ext *ext;
-       u32 idx, off, key = 0;
-
-       memcpy(&key, elem->key.val.data, set->klen);
-       nft_bitmap_location(key, &idx, &off);
+       u32 idx, off;
 
-       if (!nft_bitmap_active(priv->bitmap, idx, off, genmask))
-               return NULL;
+       nft_bitmap_location(set, elem->key.val.data, &idx, &off);
 
-       /* We have no real set extension since this is a bitmap, allocate this
-        * dummy object that is released from the commit/abort path.
-        */
-       ext = nft_bitmap_ext_alloc(set, elem);
-       if (!ext)
+       be = nft_bitmap_elem_find(set, this, genmask);
+       if (!be)
                return NULL;
 
        /* Enter 10 state. */
        priv->bitmap[idx] &= ~(genmask << off);
+       nft_set_elem_change_active(net, set, &be->ext);
 
-       return ext;
+       return be;
 }
 
 static void nft_bitmap_walk(const struct nft_ctx *ctx,
@@ -187,47 +202,23 @@ static void nft_bitmap_walk(const struct nft_ctx *ctx,
                            struct nft_set_iter *iter)
 {
        const struct nft_bitmap *priv = nft_set_priv(set);
-       struct nft_set_ext_tmpl tmpl;
+       struct nft_bitmap_elem *be;
        struct nft_set_elem elem;
-       struct nft_set_ext *ext;
-       int idx, off;
-       u16 key;
-
-       nft_set_ext_prepare(&tmpl);
-       nft_set_ext_add_length(&tmpl, NFT_SET_EXT_KEY, set->klen);
-
-       for (idx = 0; idx < priv->bitmap_size; idx++) {
-               for (off = 0; off < BITS_PER_BYTE; off += 2) {
-                       if (iter->count < iter->skip)
-                               goto cont;
-
-                       if (!nft_bitmap_active(priv->bitmap, idx, off,
-                                              iter->genmask))
-                               goto cont;
-
-                       ext = kzalloc(tmpl.len, GFP_KERNEL);
-                       if (!ext) {
-                               iter->err = -ENOMEM;
-                               return;
-                       }
-                       nft_set_ext_init(ext, &tmpl);
-                       key = ((idx * BITS_PER_BYTE) + off) >> 1;
-                       memcpy(nft_set_ext_key(ext), &key, set->klen);
-
-                       elem.priv = ext;
-                       iter->err = iter->fn(ctx, set, iter, &elem);
-
-                       /* On set flush, this dummy extension object is released
-                        * from the commit/abort path.
-                        */
-                       if (!iter->flush)
-                               kfree(ext);
-
-                       if (iter->err < 0)
-                               return;
+
+       list_for_each_entry_rcu(be, &priv->list, head) {
+               if (iter->count < iter->skip)
+                       goto cont;
+               if (!nft_set_elem_active(&be->ext, iter->genmask))
+                       goto cont;
+
+               elem.priv = be;
+
+               iter->err = iter->fn(ctx, set, iter, &elem);
+
+               if (iter->err < 0)
+                       return;
 cont:
-                       iter->count++;
-               }
+               iter->count++;
        }
 }
 
@@ -258,6 +249,7 @@ static int nft_bitmap_init(const struct nft_set *set,
 {
        struct nft_bitmap *priv = nft_set_priv(set);
 
+       INIT_LIST_HEAD(&priv->list);
        priv->bitmap_size = nft_bitmap_size(set->klen);
 
        return 0;
@@ -283,6 +275,7 @@ static bool nft_bitmap_estimate(const struct nft_set_desc *desc, u32 features,
 
 static struct nft_set_ops nft_bitmap_ops __read_mostly = {
        .privsize       = nft_bitmap_privsize,
+       .elemsize       = offsetof(struct nft_bitmap_elem, ext),
        .estimate       = nft_bitmap_estimate,
        .init           = nft_bitmap_init,
        .destroy        = nft_bitmap_destroy,
index 7b73c7c..596eaff 100644 (file)
@@ -96,6 +96,44 @@ EXPORT_SYMBOL_GPL(nl_table);
 
 static DECLARE_WAIT_QUEUE_HEAD(nl_table_wait);
 
+static struct lock_class_key nlk_cb_mutex_keys[MAX_LINKS];
+
+static const char *const nlk_cb_mutex_key_strings[MAX_LINKS + 1] = {
+       "nlk_cb_mutex-ROUTE",
+       "nlk_cb_mutex-1",
+       "nlk_cb_mutex-USERSOCK",
+       "nlk_cb_mutex-FIREWALL",
+       "nlk_cb_mutex-SOCK_DIAG",
+       "nlk_cb_mutex-NFLOG",
+       "nlk_cb_mutex-XFRM",
+       "nlk_cb_mutex-SELINUX",
+       "nlk_cb_mutex-ISCSI",
+       "nlk_cb_mutex-AUDIT",
+       "nlk_cb_mutex-FIB_LOOKUP",
+       "nlk_cb_mutex-CONNECTOR",
+       "nlk_cb_mutex-NETFILTER",
+       "nlk_cb_mutex-IP6_FW",
+       "nlk_cb_mutex-DNRTMSG",
+       "nlk_cb_mutex-KOBJECT_UEVENT",
+       "nlk_cb_mutex-GENERIC",
+       "nlk_cb_mutex-17",
+       "nlk_cb_mutex-SCSITRANSPORT",
+       "nlk_cb_mutex-ECRYPTFS",
+       "nlk_cb_mutex-RDMA",
+       "nlk_cb_mutex-CRYPTO",
+       "nlk_cb_mutex-SMC",
+       "nlk_cb_mutex-23",
+       "nlk_cb_mutex-24",
+       "nlk_cb_mutex-25",
+       "nlk_cb_mutex-26",
+       "nlk_cb_mutex-27",
+       "nlk_cb_mutex-28",
+       "nlk_cb_mutex-29",
+       "nlk_cb_mutex-30",
+       "nlk_cb_mutex-31",
+       "nlk_cb_mutex-MAX_LINKS"
+};
+
 static int netlink_dump(struct sock *sk);
 static void netlink_skb_destructor(struct sk_buff *skb);
 
@@ -585,6 +623,9 @@ static int __netlink_create(struct net *net, struct socket *sock,
        } else {
                nlk->cb_mutex = &nlk->cb_def_mutex;
                mutex_init(nlk->cb_mutex);
+               lockdep_set_class_and_name(nlk->cb_mutex,
+                                          nlk_cb_mutex_keys + protocol,
+                                          nlk_cb_mutex_key_strings[protocol]);
        }
        init_waitqueue_head(&nlk->wait);
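
Every netlink socket embeds its own cb_def_mutex, but by default all of
them share one lockdep class, so dumps on unrelated protocols can be
flagged as recursive locking. Giving each protocol its own key and name
splits the class. A hedged sketch of the per-instance-class idiom outside
netlink; the my_* names are hypothetical:

#include <linux/lockdep.h>
#include <linux/mutex.h>

static struct lock_class_key my_keys[2];
static const char *const my_key_names[2] = { "my_mutex-A", "my_mutex-B" };

static void init_instance_mutex(struct mutex *m, int kind)
{
        mutex_init(m);
        /* distinct class per kind: no false recursion between kinds */
        lockdep_set_class_and_name(m, &my_keys[kind], my_key_names[kind]);
}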
 
index fb6e10f..92e0981 100644 (file)
@@ -783,8 +783,10 @@ static int ctrl_dumpfamily(struct sk_buff *skb, struct netlink_callback *cb)
 
                if (ctrl_fill_info(rt, NETLINK_CB(cb->skb).portid,
                                   cb->nlh->nlmsg_seq, NLM_F_MULTI,
-                                  skb, CTRL_CMD_NEWFAMILY) < 0)
+                                  skb, CTRL_CMD_NEWFAMILY) < 0) {
+                       n--;
                        break;
+               }
        }
 
        cb->args[0] = n;
index 6f5fa50..1105a83 100644 (file)
@@ -604,7 +604,7 @@ static int ip_tun_from_nlattr(const struct nlattr *attr,
                        ipv4 = true;
                        break;
                case OVS_TUNNEL_KEY_ATTR_IPV6_SRC:
-                       SW_FLOW_KEY_PUT(match, tun_key.u.ipv6.dst,
+                       SW_FLOW_KEY_PUT(match, tun_key.u.ipv6.src,
                                        nla_get_in6_addr(a), is_mask);
                        ipv6 = true;
                        break;
@@ -665,6 +665,8 @@ static int ip_tun_from_nlattr(const struct nlattr *attr,
                        tun_flags |= TUNNEL_VXLAN_OPT;
                        opts_type = type;
                        break;
+               case OVS_TUNNEL_KEY_ATTR_PAD:
+                       break;
                default:
                        OVS_NLERR(log, "Unknown IP tunnel attribute %d",
                                  type);
index 3f9d8d7..b099b64 100644 (file)
@@ -275,6 +275,10 @@ static int rxrpc_process_event(struct rxrpc_connection *conn,
                rxrpc_conn_retransmit_call(conn, skb);
                return 0;
 
+       case RXRPC_PACKET_TYPE_BUSY:
+               /* Just ignore BUSY packets for now. */
+               return 0;
+
        case RXRPC_PACKET_TYPE_ABORT:
                if (skb_copy_bits(skb, sizeof(struct rxrpc_wire_header),
                                  &wtmp, sizeof(wtmp)) < 0)
index 802ac7c..5334e30 100644 (file)
@@ -201,9 +201,13 @@ static int dsmark_enqueue(struct sk_buff *skb, struct Qdisc *sch,
        pr_debug("%s(skb %p,sch %p,[qdisc %p])\n", __func__, skb, sch, p);
 
        if (p->set_tc_index) {
+               int wlen = skb_network_offset(skb);
+
                switch (tc_skb_protocol(skb)) {
                case htons(ETH_P_IP):
-                       if (skb_cow_head(skb, sizeof(struct iphdr)))
+                       wlen += sizeof(struct iphdr);
+                       if (!pskb_may_pull(skb, wlen) ||
+                           skb_try_make_writable(skb, wlen))
                                goto drop;
 
                        skb->tc_index = ipv4_get_dsfield(ip_hdr(skb))
@@ -211,7 +215,9 @@ static int dsmark_enqueue(struct sk_buff *skb, struct Qdisc *sch,
                        break;
 
                case htons(ETH_P_IPV6):
-                       if (skb_cow_head(skb, sizeof(struct ipv6hdr)))
+                       wlen += sizeof(struct ipv6hdr);
+                       if (!pskb_may_pull(skb, wlen) ||
+                           skb_try_make_writable(skb, wlen))
                                goto drop;
 
                        skb->tc_index = ipv6_get_dsfield(ipv6_hdr(skb))
index 2a6835b..0439a1a 100644 (file)
@@ -71,9 +71,8 @@ static struct sctp_association *sctp_association_init(struct sctp_association *a
 {
        struct net *net = sock_net(sk);
        struct sctp_sock *sp;
-       int i;
        sctp_paramhdr_t *p;
-       int err;
+       int i;
 
        /* Retrieve the SCTP per socket area.  */
        sp = sctp_sk((struct sock *)sk);
@@ -264,8 +263,7 @@ static struct sctp_association *sctp_association_init(struct sctp_association *a
 
        /* AUTH related initializations */
        INIT_LIST_HEAD(&asoc->endpoint_shared_keys);
-       err = sctp_auth_asoc_copy_shkeys(ep, asoc, gfp);
-       if (err)
+       if (sctp_auth_asoc_copy_shkeys(ep, asoc, gfp))
                goto fail_init;
 
        asoc->active_key_id = ep->active_key_id;
index 71ce6b9..1224421 100644 (file)
@@ -546,7 +546,6 @@ int sctp_packet_transmit(struct sctp_packet *packet, gfp_t gfp)
        struct sctp_association *asoc = tp->asoc;
        struct sctp_chunk *chunk, *tmp;
        int pkt_count, gso = 0;
-       int confirm;
        struct dst_entry *dst;
        struct sk_buff *head;
        struct sctphdr *sh;
@@ -625,13 +624,13 @@ int sctp_packet_transmit(struct sctp_packet *packet, gfp_t gfp)
                        asoc->peer.last_sent_to = tp;
        }
        head->ignore_df = packet->ipfragok;
-       confirm = tp->dst_pending_confirm;
-       if (confirm)
+       if (tp->dst_pending_confirm)
                skb_set_dst_pending_confirm(head, 1);
        /* neighbour should be confirmed on successful transmission or
         * positive error
         */
-       if (tp->af_specific->sctp_xmit(head, tp) >= 0 && confirm)
+       if (tp->af_specific->sctp_xmit(head, tp) >= 0 &&
+           tp->dst_pending_confirm)
                tp->dst_pending_confirm = 0;
 
 out:
index db352e5..025ccff 100644 (file)
@@ -382,17 +382,18 @@ static int sctp_prsctp_prune_sent(struct sctp_association *asoc,
 }
 
 static int sctp_prsctp_prune_unsent(struct sctp_association *asoc,
-                                   struct sctp_sndrcvinfo *sinfo,
-                                   struct list_head *queue, int msg_len)
+                                   struct sctp_sndrcvinfo *sinfo, int msg_len)
 {
+       struct sctp_outq *q = &asoc->outqueue;
        struct sctp_chunk *chk, *temp;
 
-       list_for_each_entry_safe(chk, temp, queue, list) {
+       list_for_each_entry_safe(chk, temp, &q->out_chunk_list, list) {
                if (!SCTP_PR_PRIO_ENABLED(chk->sinfo.sinfo_flags) ||
                    chk->sinfo.sinfo_timetolive <= sinfo->sinfo_timetolive)
                        continue;
 
                list_del_init(&chk->list);
+               q->out_qlen -= chk->skb->len;
                asoc->sent_cnt_removable--;
                asoc->abandoned_unsent[SCTP_PR_INDEX(PRIO)]++;
 
@@ -431,9 +432,7 @@ void sctp_prsctp_prune(struct sctp_association *asoc,
                        return;
        }
 
-       sctp_prsctp_prune_unsent(asoc, sinfo,
-                                &asoc->outqueue.out_chunk_list,
-                                msg_len);
+       sctp_prsctp_prune_unsent(asoc, sinfo, msg_len);
 }
 
 /* Mark all the eligible packets on a transport for retransmission.  */
index e034fe4..985ef06 100644 (file)
@@ -652,6 +652,16 @@ int kernel_sendmsg(struct socket *sock, struct msghdr *msg,
 }
 EXPORT_SYMBOL(kernel_sendmsg);
 
+static bool skb_is_err_queue(const struct sk_buff *skb)
+{
+       /* pkt_type of skbs enqueued on the error queue is set to
+        * PACKET_OUTGOING in skb_set_err_queue(). This is only safe to do
+        * in recvmsg, since skbs received on a local socket will never
+        * have a pkt_type of PACKET_OUTGOING.
+        */
+       return skb->pkt_type == PACKET_OUTGOING;
+}
+
 /*
  * called from sock_recv_timestamp() if sock_flag(sk, SOCK_RCVTSTAMP)
  */
@@ -695,7 +705,8 @@ void __sock_recv_timestamp(struct msghdr *msg, struct sock *sk,
                put_cmsg(msg, SOL_SOCKET,
                         SCM_TIMESTAMPING, sizeof(tss), &tss);
 
-               if (skb->len && (sk->sk_tsflags & SOF_TIMESTAMPING_OPT_STATS))
+               if (skb_is_err_queue(skb) && skb->len &&
+                   SKB_EXT_ERR(skb)->opt_stats)
                        put_cmsg(msg, SOL_SOCKET, SCM_TIMESTAMPING_OPT_STATS,
                                 skb->len, skb->data);
        }
index 81cd31a..3b332b3 100644 (file)
@@ -503,7 +503,8 @@ rpcrdma_ep_create(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia,
        struct ib_cq *sendcq, *recvcq;
        int rc;
 
-       max_sge = min(ia->ri_device->attrs.max_sge, RPCRDMA_MAX_SEND_SGES);
+       max_sge = min_t(unsigned int, ia->ri_device->attrs.max_sge,
+                       RPCRDMA_MAX_SEND_SGES);
        if (max_sge < RPCRDMA_MIN_SEND_SGES) {
                pr_warn("rpcrdma: HCA provides only %d send SGEs\n", max_sge);
                return -ENOMEM;
index 9d94e65..271cd66 100644 (file)
@@ -141,6 +141,11 @@ void tipc_subscrp_report_overlap(struct tipc_subscription *sub, u32 found_lower,
 static void tipc_subscrp_timeout(unsigned long data)
 {
        struct tipc_subscription *sub = (struct tipc_subscription *)data;
+       struct tipc_subscriber *subscriber = sub->subscriber;
+
+       spin_lock_bh(&subscriber->lock);
+       tipc_nametbl_unsubscribe(sub);
+       spin_unlock_bh(&subscriber->lock);
 
        /* Notify subscriber of timeout */
        tipc_subscrp_send_event(sub, sub->evt.s.seq.lower, sub->evt.s.seq.upper,
@@ -173,7 +178,6 @@ static void tipc_subscrp_kref_release(struct kref *kref)
        struct tipc_subscriber *subscriber = sub->subscriber;
 
        spin_lock_bh(&subscriber->lock);
-       tipc_nametbl_unsubscribe(sub);
        list_del(&sub->subscrp_list);
        atomic_dec(&tn->subscription_count);
        spin_unlock_bh(&subscriber->lock);
@@ -205,6 +209,7 @@ static void tipc_subscrb_subscrp_delete(struct tipc_subscriber *subscriber,
                if (s && memcmp(s, &sub->evt.s, sizeof(struct tipc_subscr)))
                        continue;
 
+               tipc_nametbl_unsubscribe(sub);
                tipc_subscrp_get(sub);
                spin_unlock_bh(&subscriber->lock);
                tipc_subscrp_delete(sub);
index 6a0d485..c36757e 100644 (file)
@@ -146,6 +146,7 @@ void unix_notinflight(struct user_struct *user, struct file *fp)
        if (s) {
                struct unix_sock *u = unix_sk(s);
 
+               BUG_ON(!atomic_long_read(&u->inflight));
                BUG_ON(list_empty(&u->link));
 
                if (atomic_long_dec_and_test(&u->inflight))
@@ -341,6 +342,14 @@ void unix_gc(void)
        }
        list_del(&cursor);
 
+       /* Now gc_candidates contains only garbage.  Restore original
+        * inflight counters for these as well, and remove the skbuffs
+        * which are creating the cycle(s).
+        */
+       skb_queue_head_init(&hitlist);
+       list_for_each_entry(u, &gc_candidates, link)
+               scan_children(&u->sk, inc_inflight, &hitlist);
+
        /* not_cycle_list contains those sockets which do not make up a
         * cycle.  Restore these to the inflight list.
         */
@@ -350,14 +359,6 @@ void unix_gc(void)
                list_move_tail(&u->link, &gc_inflight_list);
        }
 
-       /* Now gc_candidates contains only garbage.  Restore original
-        * inflight counters for these as well, and remove the skbuffs
-        * which are creating the cycle(s).
-        */
-       skb_queue_head_init(&hitlist);
-       list_for_each_entry(u, &gc_candidates, link)
-       scan_children(&u->sk, inc_inflight, &hitlist);
-
        spin_unlock(&unix_gc_lock);
 
        /* Here we are. Hitlist is filled. Die. */
index 9f770f3..6f7f675 100644 (file)
@@ -1102,10 +1102,19 @@ static const struct proto_ops vsock_dgram_ops = {
        .sendpage = sock_no_sendpage,
 };
 
+static int vsock_transport_cancel_pkt(struct vsock_sock *vsk)
+{
+       if (!transport->cancel_pkt)
+               return -EOPNOTSUPP;
+
+       return transport->cancel_pkt(vsk);
+}
+
 static void vsock_connect_timeout(struct work_struct *work)
 {
        struct sock *sk;
        struct vsock_sock *vsk;
+       int cancel = 0;
 
        vsk = container_of(work, struct vsock_sock, dwork.work);
        sk = sk_vsock(vsk);
@@ -1116,8 +1125,11 @@ static void vsock_connect_timeout(struct work_struct *work)
                sk->sk_state = SS_UNCONNECTED;
                sk->sk_err = ETIMEDOUT;
                sk->sk_error_report(sk);
+               cancel = 1;
        }
        release_sock(sk);
+       if (cancel)
+               vsock_transport_cancel_pkt(vsk);
 
        sock_put(sk);
 }
@@ -1224,11 +1236,13 @@ static int vsock_stream_connect(struct socket *sock, struct sockaddr *addr,
                        err = sock_intr_errno(timeout);
                        sk->sk_state = SS_UNCONNECTED;
                        sock->state = SS_UNCONNECTED;
+                       vsock_transport_cancel_pkt(vsk);
                        goto out_wait;
                } else if (timeout == 0) {
                        err = -ETIMEDOUT;
                        sk->sk_state = SS_UNCONNECTED;
                        sock->state = SS_UNCONNECTED;
+                       vsock_transport_cancel_pkt(vsk);
                        goto out_wait;
                }
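
These hunks add cancellation of queued connect packets: vsock_transport_cancel_pkt() is a thin wrapper around a new per-transport cancel_pkt hook, returning -EOPNOTSUPP when a transport leaves it unset, and it is invoked on each way a connect can die early: the timeout worker, a signal (the sock_intr_errno path), and plain timeout expiry. Note the shape of the timeout worker, taken from the hunk:

        release_sock(sk);
        if (cancel)
                vsock_transport_cancel_pkt(vsk);

The decision is recorded under the socket lock, but the transport call is deferred until after release_sock(), likely to keep transport callbacks out of the socket-locked section (an assumption; the diff does not state the motivation).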
 
index 9d24c0e..68675a1 100644
@@ -213,6 +213,47 @@ virtio_transport_send_pkt(struct virtio_vsock_pkt *pkt)
        return len;
 }
 
+static int
+virtio_transport_cancel_pkt(struct vsock_sock *vsk)
+{
+       struct virtio_vsock *vsock;
+       struct virtio_vsock_pkt *pkt, *n;
+       int cnt = 0;
+       LIST_HEAD(freeme);
+
+       vsock = virtio_vsock_get();
+       if (!vsock) {
+               return -ENODEV;
+       }
+
+       spin_lock_bh(&vsock->send_pkt_list_lock);
+       list_for_each_entry_safe(pkt, n, &vsock->send_pkt_list, list) {
+               if (pkt->vsk != vsk)
+                       continue;
+               list_move(&pkt->list, &freeme);
+       }
+       spin_unlock_bh(&vsock->send_pkt_list_lock);
+
+       list_for_each_entry_safe(pkt, n, &freeme, list) {
+               if (pkt->reply)
+                       cnt++;
+               list_del(&pkt->list);
+               virtio_transport_free_pkt(pkt);
+       }
+
+       if (cnt) {
+               struct virtqueue *rx_vq = vsock->vqs[VSOCK_VQ_RX];
+               int new_cnt;
+
+               new_cnt = atomic_sub_return(cnt, &vsock->queued_replies);
+               if (new_cnt + cnt >= virtqueue_get_vring_size(rx_vq) &&
+                   new_cnt < virtqueue_get_vring_size(rx_vq))
+                       queue_work(virtio_vsock_workqueue, &vsock->rx_work);
+       }
+
+       return 0;
+}
+
 static void virtio_vsock_rx_fill(struct virtio_vsock *vsock)
 {
        int buf_len = VIRTIO_VSOCK_DEFAULT_RX_BUF_SIZE;
@@ -462,6 +503,7 @@ static struct virtio_transport virtio_transport = {
                .release                  = virtio_transport_release,
                .connect                  = virtio_transport_connect,
                .shutdown                 = virtio_transport_shutdown,
+               .cancel_pkt               = virtio_transport_cancel_pkt,
 
                .dgram_bind               = virtio_transport_dgram_bind,
                .dgram_dequeue            = virtio_transport_dgram_dequeue,
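
virtio_transport_cancel_pkt() detaches the victim socket's packets from send_pkt_list while holding send_pkt_list_lock, then frees them outside the lock via the local freeme list. Reply packets are counted separately because queued_replies is used elsewhere in this file to pause rx processing once it reaches the rx vring size (that throttle is not shown in this excerpt). A worked example of the restart test, with illustrative numbers that are not from the diff:

        /* rx vring size 128; before cancel: queued_replies = 130, cnt = 5
         * new_cnt = atomic_sub_return(5, &queued_replies) = 125
         * new_cnt + cnt = 130 >= 128  -> rx was throttled before the cancel
         * new_cnt       = 125 <  128  -> throttle condition now cleared
         * so queue_work() reschedules rx_work exactly when the cancel
         * crosses the threshold, and only then. */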
index 8d592a4..af087b4 100644
@@ -58,6 +58,7 @@ virtio_transport_alloc_pkt(struct virtio_vsock_pkt_info *info,
        pkt->len                = len;
        pkt->hdr.len            = cpu_to_le32(len);
        pkt->reply              = info->reply;
+       pkt->vsk                = info->vsk;
 
        if (info->msg && len > 0) {
                pkt->buf = kmalloc(len, GFP_KERNEL);
@@ -180,6 +181,7 @@ static int virtio_transport_send_credit_update(struct vsock_sock *vsk,
        struct virtio_vsock_pkt_info info = {
                .op = VIRTIO_VSOCK_OP_CREDIT_UPDATE,
                .type = type,
+               .vsk = vsk,
        };
 
        return virtio_transport_send_pkt_info(vsk, &info);
@@ -519,6 +521,7 @@ int virtio_transport_connect(struct vsock_sock *vsk)
        struct virtio_vsock_pkt_info info = {
                .op = VIRTIO_VSOCK_OP_REQUEST,
                .type = VIRTIO_VSOCK_TYPE_STREAM,
+               .vsk = vsk,
        };
 
        return virtio_transport_send_pkt_info(vsk, &info);
@@ -534,6 +537,7 @@ int virtio_transport_shutdown(struct vsock_sock *vsk, int mode)
                          VIRTIO_VSOCK_SHUTDOWN_RCV : 0) |
                         (mode & SEND_SHUTDOWN ?
                          VIRTIO_VSOCK_SHUTDOWN_SEND : 0),
+               .vsk = vsk,
        };
 
        return virtio_transport_send_pkt_info(vsk, &info);
@@ -560,6 +564,7 @@ virtio_transport_stream_enqueue(struct vsock_sock *vsk,
                .type = VIRTIO_VSOCK_TYPE_STREAM,
                .msg = msg,
                .pkt_len = len,
+               .vsk = vsk,
        };
 
        return virtio_transport_send_pkt_info(vsk, &info);
@@ -581,6 +586,7 @@ static int virtio_transport_reset(struct vsock_sock *vsk,
                .op = VIRTIO_VSOCK_OP_RST,
                .type = VIRTIO_VSOCK_TYPE_STREAM,
                .reply = !!pkt,
+               .vsk = vsk,
        };
 
        /* Send RST only if the original pkt is not a RST pkt */
@@ -826,6 +832,7 @@ virtio_transport_send_response(struct vsock_sock *vsk,
                .remote_cid = le64_to_cpu(pkt->hdr.src_cid),
                .remote_port = le32_to_cpu(pkt->hdr.src_port),
                .reply = true,
+               .vsk = vsk,
        };
 
        return virtio_transport_send_pkt_info(vsk, &info);
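
All of these hunks exist to support cancellation: virtio_transport_alloc_pkt() now stamps each packet with its owning socket (pkt->vsk = info->vsk), and every packet-info initializer in the file gains a .vsk member so the tag is never left NULL. That is what lets virtio_transport_cancel_pkt() match packets by owner with a plain pointer compare (pkt->vsk != vsk). A hedged sketch of the relevant field, reconstructed from the assignments above rather than the full header:

        struct virtio_vsock_pkt_info {
                u32 op;
                struct vsock_sock *vsk;   /* new: owner, used by cancel_pkt matching */
                /* ... remote_cid, remote_port, msg, pkt_len, reply ... */
        };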
index d7f8be4..2312dc2 100644
@@ -545,22 +545,18 @@ static int nl80211_prepare_wdev_dump(struct sk_buff *skb,
 {
        int err;
 
-       rtnl_lock();
-
        if (!cb->args[0]) {
                err = nlmsg_parse(cb->nlh, GENL_HDRLEN + nl80211_fam.hdrsize,
                                  genl_family_attrbuf(&nl80211_fam),
                                  nl80211_fam.maxattr, nl80211_policy);
                if (err)
-                       goto out_unlock;
+                       return err;
 
                *wdev = __cfg80211_wdev_from_attrs(
                                        sock_net(skb->sk),
                                        genl_family_attrbuf(&nl80211_fam));
-               if (IS_ERR(*wdev)) {
-                       err = PTR_ERR(*wdev);
-                       goto out_unlock;
-               }
+               if (IS_ERR(*wdev))
+                       return PTR_ERR(*wdev);
                *rdev = wiphy_to_rdev((*wdev)->wiphy);
                /* 0 is the first index - add 1 to parse only once */
                cb->args[0] = (*rdev)->wiphy_idx + 1;
@@ -570,10 +566,8 @@ static int nl80211_prepare_wdev_dump(struct sk_buff *skb,
                struct wiphy *wiphy = wiphy_idx_to_wiphy(cb->args[0] - 1);
                struct wireless_dev *tmp;
 
-               if (!wiphy) {
-                       err = -ENODEV;
-                       goto out_unlock;
-               }
+               if (!wiphy)
+                       return -ENODEV;
                *rdev = wiphy_to_rdev(wiphy);
                *wdev = NULL;
 
@@ -584,21 +578,11 @@ static int nl80211_prepare_wdev_dump(struct sk_buff *skb,
                        }
                }
 
-               if (!*wdev) {
-                       err = -ENODEV;
-                       goto out_unlock;
-               }
+               if (!*wdev)
+                       return -ENODEV;
        }
 
        return 0;
- out_unlock:
-       rtnl_unlock();
-       return err;
-}
-
-static void nl80211_finish_wdev_dump(struct cfg80211_registered_device *rdev)
-{
-       rtnl_unlock();
 }
 
 /* IE validation */
@@ -2608,17 +2592,17 @@ static int nl80211_dump_interface(struct sk_buff *skb, struct netlink_callback *
        int filter_wiphy = -1;
        struct cfg80211_registered_device *rdev;
        struct wireless_dev *wdev;
+       int ret;
 
        rtnl_lock();
        if (!cb->args[2]) {
                struct nl80211_dump_wiphy_state state = {
                        .filter_wiphy = -1,
                };
-               int ret;
 
                ret = nl80211_dump_wiphy_parse(skb, cb, &state);
                if (ret)
-                       return ret;
+                       goto out_unlock;
 
                filter_wiphy = state.filter_wiphy;
 
@@ -2663,12 +2647,14 @@ static int nl80211_dump_interface(struct sk_buff *skb, struct netlink_callback *
                wp_idx++;
        }
  out:
-       rtnl_unlock();
-
        cb->args[0] = wp_idx;
        cb->args[1] = if_idx;
 
-       return skb->len;
+       ret = skb->len;
+ out_unlock:
+       rtnl_unlock();
+
+       return ret;
 }
 
 static int nl80211_get_interface(struct sk_buff *skb, struct genl_info *info)
@@ -4452,9 +4438,10 @@ static int nl80211_dump_station(struct sk_buff *skb,
        int sta_idx = cb->args[2];
        int err;
 
+       rtnl_lock();
        err = nl80211_prepare_wdev_dump(skb, cb, &rdev, &wdev);
        if (err)
-               return err;
+               goto out_err;
 
        if (!wdev->netdev) {
                err = -EINVAL;
@@ -4489,7 +4476,7 @@ static int nl80211_dump_station(struct sk_buff *skb,
        cb->args[2] = sta_idx;
        err = skb->len;
  out_err:
-       nl80211_finish_wdev_dump(rdev);
+       rtnl_unlock();
 
        return err;
 }
@@ -5275,9 +5262,10 @@ static int nl80211_dump_mpath(struct sk_buff *skb,
        int path_idx = cb->args[2];
        int err;
 
+       rtnl_lock();
        err = nl80211_prepare_wdev_dump(skb, cb, &rdev, &wdev);
        if (err)
-               return err;
+               goto out_err;
 
        if (!rdev->ops->dump_mpath) {
                err = -EOPNOTSUPP;
@@ -5310,7 +5298,7 @@ static int nl80211_dump_mpath(struct sk_buff *skb,
        cb->args[2] = path_idx;
        err = skb->len;
  out_err:
-       nl80211_finish_wdev_dump(rdev);
+       rtnl_unlock();
        return err;
 }
 
@@ -5470,9 +5458,10 @@ static int nl80211_dump_mpp(struct sk_buff *skb,
        int path_idx = cb->args[2];
        int err;
 
+       rtnl_lock();
        err = nl80211_prepare_wdev_dump(skb, cb, &rdev, &wdev);
        if (err)
-               return err;
+               goto out_err;
 
        if (!rdev->ops->dump_mpp) {
                err = -EOPNOTSUPP;
@@ -5505,7 +5494,7 @@ static int nl80211_dump_mpp(struct sk_buff *skb,
        cb->args[2] = path_idx;
        err = skb->len;
  out_err:
-       nl80211_finish_wdev_dump(rdev);
+       rtnl_unlock();
        return err;
 }
 
@@ -7674,9 +7663,12 @@ static int nl80211_dump_scan(struct sk_buff *skb, struct netlink_callback *cb)
        int start = cb->args[2], idx = 0;
        int err;
 
+       rtnl_lock();
        err = nl80211_prepare_wdev_dump(skb, cb, &rdev, &wdev);
-       if (err)
+       if (err) {
+               rtnl_unlock();
                return err;
+       }
 
        wdev_lock(wdev);
        spin_lock_bh(&rdev->bss_lock);
@@ -7699,7 +7691,7 @@ static int nl80211_dump_scan(struct sk_buff *skb, struct netlink_callback *cb)
        wdev_unlock(wdev);
 
        cb->args[2] = idx;
-       nl80211_finish_wdev_dump(rdev);
+       rtnl_unlock();
 
        return skb->len;
 }
@@ -7784,9 +7776,10 @@ static int nl80211_dump_survey(struct sk_buff *skb, struct netlink_callback *cb)
        int res;
        bool radio_stats;
 
+       rtnl_lock();
        res = nl80211_prepare_wdev_dump(skb, cb, &rdev, &wdev);
        if (res)
-               return res;
+               goto out_err;
 
        /* prepare_wdev_dump parsed the attributes */
        radio_stats = attrbuf[NL80211_ATTR_SURVEY_RADIO_STATS];
@@ -7827,7 +7820,7 @@ static int nl80211_dump_survey(struct sk_buff *skb, struct netlink_callback *cb)
        cb->args[2] = survey_idx;
        res = skb->len;
  out_err:
-       nl80211_finish_wdev_dump(rdev);
+       rtnl_unlock();
        return res;
 }
 
@@ -11508,17 +11501,13 @@ static int nl80211_prepare_vendor_dump(struct sk_buff *skb,
        void *data = NULL;
        unsigned int data_len = 0;
 
-       rtnl_lock();
-
        if (cb->args[0]) {
                /* subtract the 1 again here */
                struct wiphy *wiphy = wiphy_idx_to_wiphy(cb->args[0] - 1);
                struct wireless_dev *tmp;
 
-               if (!wiphy) {
-                       err = -ENODEV;
-                       goto out_unlock;
-               }
+               if (!wiphy)
+                       return -ENODEV;
                *rdev = wiphy_to_rdev(wiphy);
                *wdev = NULL;
 
@@ -11538,23 +11527,19 @@ static int nl80211_prepare_vendor_dump(struct sk_buff *skb,
        err = nlmsg_parse(cb->nlh, GENL_HDRLEN + nl80211_fam.hdrsize,
                          attrbuf, nl80211_fam.maxattr, nl80211_policy);
        if (err)
-               goto out_unlock;
+               return err;
 
        if (!attrbuf[NL80211_ATTR_VENDOR_ID] ||
-           !attrbuf[NL80211_ATTR_VENDOR_SUBCMD]) {
-               err = -EINVAL;
-               goto out_unlock;
-       }
+           !attrbuf[NL80211_ATTR_VENDOR_SUBCMD])
+               return -EINVAL;
 
        *wdev = __cfg80211_wdev_from_attrs(sock_net(skb->sk), attrbuf);
        if (IS_ERR(*wdev))
                *wdev = NULL;
 
        *rdev = __cfg80211_rdev_from_attrs(sock_net(skb->sk), attrbuf);
-       if (IS_ERR(*rdev)) {
-               err = PTR_ERR(*rdev);
-               goto out_unlock;
-       }
+       if (IS_ERR(*rdev))
+               return PTR_ERR(*rdev);
 
        vid = nla_get_u32(attrbuf[NL80211_ATTR_VENDOR_ID]);
        subcmd = nla_get_u32(attrbuf[NL80211_ATTR_VENDOR_SUBCMD]);
@@ -11567,19 +11552,15 @@ static int nl80211_prepare_vendor_dump(struct sk_buff *skb,
                if (vcmd->info.vendor_id != vid || vcmd->info.subcmd != subcmd)
                        continue;
 
-               if (!vcmd->dumpit) {
-                       err = -EOPNOTSUPP;
-                       goto out_unlock;
-               }
+               if (!vcmd->dumpit)
+                       return -EOPNOTSUPP;
 
                vcmd_idx = i;
                break;
        }
 
-       if (vcmd_idx < 0) {
-               err = -EOPNOTSUPP;
-               goto out_unlock;
-       }
+       if (vcmd_idx < 0)
+               return -EOPNOTSUPP;
 
        if (attrbuf[NL80211_ATTR_VENDOR_DATA]) {
                data = nla_data(attrbuf[NL80211_ATTR_VENDOR_DATA]);
@@ -11596,9 +11577,6 @@ static int nl80211_prepare_vendor_dump(struct sk_buff *skb,
 
        /* keep rtnl locked in successful case */
        return 0;
- out_unlock:
-       rtnl_unlock();
-       return err;
 }
 
 static int nl80211_vendor_cmd_dump(struct sk_buff *skb,
@@ -11613,9 +11591,10 @@ static int nl80211_vendor_cmd_dump(struct sk_buff *skb,
        int err;
        struct nlattr *vendor_data;
 
+       rtnl_lock();
        err = nl80211_prepare_vendor_dump(skb, cb, &rdev, &wdev);
        if (err)
-               return err;
+               goto out;
 
        vcmd_idx = cb->args[2];
        data = (void *)cb->args[3];
@@ -11624,15 +11603,21 @@ static int nl80211_vendor_cmd_dump(struct sk_buff *skb,
 
        if (vcmd->flags & (WIPHY_VENDOR_CMD_NEED_WDEV |
                           WIPHY_VENDOR_CMD_NEED_NETDEV)) {
-               if (!wdev)
-                       return -EINVAL;
+               if (!wdev) {
+                       err = -EINVAL;
+                       goto out;
+               }
                if (vcmd->flags & WIPHY_VENDOR_CMD_NEED_NETDEV &&
-                   !wdev->netdev)
-                       return -EINVAL;
+                   !wdev->netdev) {
+                       err = -EINVAL;
+                       goto out;
+               }
 
                if (vcmd->flags & WIPHY_VENDOR_CMD_NEED_RUNNING) {
-                       if (!wdev_running(wdev))
-                               return -ENETDOWN;
+                       if (!wdev_running(wdev)) {
+                               err = -ENETDOWN;
+                               goto out;
+                       }
                }
        }
 
index 4c93520..f3b1d7f 100644
@@ -1832,6 +1832,7 @@ static int snd_seq_ioctl_set_client_pool(struct snd_seq_client *client,
             info->output_pool != client->pool->size)) {
                if (snd_seq_write_pool_allocated(client)) {
                        /* remove all existing cells */
+                       snd_seq_pool_mark_closing(client->pool);
                        snd_seq_queue_client_leave_cells(client->number);
                        snd_seq_pool_done(client->pool);
                }
index 448efd4..33980d1 100644
@@ -72,6 +72,9 @@ void snd_seq_fifo_delete(struct snd_seq_fifo **fifo)
                return;
        *fifo = NULL;
 
+       if (f->pool)
+               snd_seq_pool_mark_closing(f->pool);
+
        snd_seq_fifo_clear(f);
 
        /* wake up clients if any */
index 1a1acf3..d4c61ec 100644
@@ -415,6 +415,18 @@ int snd_seq_pool_init(struct snd_seq_pool *pool)
        return 0;
 }
 
+/* refuse the further insertion to the pool */
+void snd_seq_pool_mark_closing(struct snd_seq_pool *pool)
+{
+       unsigned long flags;
+
+       if (snd_BUG_ON(!pool))
+               return;
+       spin_lock_irqsave(&pool->lock, flags);
+       pool->closing = 1;
+       spin_unlock_irqrestore(&pool->lock, flags);
+}
+
 /* remove events */
 int snd_seq_pool_done(struct snd_seq_pool *pool)
 {
@@ -425,10 +437,6 @@ int snd_seq_pool_done(struct snd_seq_pool *pool)
                return -EINVAL;
 
        /* wait for closing all threads */
-       spin_lock_irqsave(&pool->lock, flags);
-       pool->closing = 1;
-       spin_unlock_irqrestore(&pool->lock, flags);
-
        if (waitqueue_active(&pool->output_sleep))
                wake_up(&pool->output_sleep);
 
@@ -485,6 +493,7 @@ int snd_seq_pool_delete(struct snd_seq_pool **ppool)
        *ppool = NULL;
        if (pool == NULL)
                return 0;
+       snd_seq_pool_mark_closing(pool);
        snd_seq_pool_done(pool);
        kfree(pool);
        return 0;
index 4a2ec77..32f959c 100644
@@ -84,6 +84,7 @@ static inline int snd_seq_total_cells(struct snd_seq_pool *pool)
 int snd_seq_pool_init(struct snd_seq_pool *pool);
 
 /* done pool - free events */
+void snd_seq_pool_mark_closing(struct snd_seq_pool *pool);
 int snd_seq_pool_done(struct snd_seq_pool *pool);
 
 /* create pool */
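
The four sequencer hunks split the "refuse new insertions" step out of snd_seq_pool_done() into the new snd_seq_pool_mark_closing(), and call it earlier at each teardown site: before the leave-cells flush in snd_seq_ioctl_set_client_pool(), before snd_seq_fifo_clear() in snd_seq_fifo_delete(), and at the top of snd_seq_pool_delete(). The resulting order, assembled from the hunks:

        snd_seq_pool_mark_closing(pool);   /* writers see pool->closing and stop */
        /* ... flush the pool's current users ... */
        snd_seq_pool_done(pool);           /* now the cells can be freed safely */

Previously the closing flag was only raised inside snd_seq_pool_done() itself, so a concurrent writer could still insert cells into a pool whose memory was about to be released.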
index ab4cdab..79edd88 100644
@@ -1905,7 +1905,7 @@ static int hw_card_start(struct hw *hw)
                return err;
 
        /* Set DMA transfer mask */
-       if (dma_set_mask(&pci->dev, DMA_BIT_MASK(dma_bits))) {
+       if (!dma_set_mask(&pci->dev, DMA_BIT_MASK(dma_bits))) {
                dma_set_coherent_mask(&pci->dev, DMA_BIT_MASK(dma_bits));
        } else {
                dma_set_mask(&pci->dev, DMA_BIT_MASK(32));
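
A one-character fix for inverted logic: dma_set_mask() returns 0 on success and a negative errno on failure, so the old test took the fallback path exactly when the wide mask had been accepted, and tried to set a coherent mask with the unsupported width when it had not. Branch selection after the fix (the return values are the kernel's documented convention, not from the diff):

        /* dma_set_mask() == 0    -> !0 true      -> keep the dma_bits-wide mask
         * dma_set_mask() == -EIO -> !(-EIO) false -> fall back to 32-bit masks */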
index c15c51b..69266b8 100644
@@ -261,6 +261,7 @@ enum {
        CXT_FIXUP_HP_530,
        CXT_FIXUP_CAP_MIX_AMP_5047,
        CXT_FIXUP_MUTE_LED_EAPD,
+       CXT_FIXUP_HP_DOCK,
        CXT_FIXUP_HP_SPECTRE,
        CXT_FIXUP_HP_GATE_MIC,
 };
@@ -778,6 +779,14 @@ static const struct hda_fixup cxt_fixups[] = {
                .type = HDA_FIXUP_FUNC,
                .v.func = cxt_fixup_mute_led_eapd,
        },
+       [CXT_FIXUP_HP_DOCK] = {
+               .type = HDA_FIXUP_PINS,
+               .v.pins = (const struct hda_pintbl[]) {
+                       { 0x16, 0x21011020 }, /* line-out */
+                       { 0x18, 0x2181103f }, /* line-in */
+                       { }
+               }
+       },
        [CXT_FIXUP_HP_SPECTRE] = {
                .type = HDA_FIXUP_PINS,
                .v.pins = (const struct hda_pintbl[]) {
@@ -839,6 +848,7 @@ static const struct snd_pci_quirk cxt5066_fixups[] = {
        SND_PCI_QUIRK(0x1025, 0x0543, "Acer Aspire One 522", CXT_FIXUP_STEREO_DMIC),
        SND_PCI_QUIRK(0x1025, 0x054c, "Acer Aspire 3830TG", CXT_FIXUP_ASPIRE_DMIC),
        SND_PCI_QUIRK(0x1025, 0x054f, "Acer Aspire 4830T", CXT_FIXUP_ASPIRE_DMIC),
+       SND_PCI_QUIRK(0x103c, 0x8079, "HP EliteBook 840 G3", CXT_FIXUP_HP_DOCK),
        SND_PCI_QUIRK(0x103c, 0x8174, "HP Spectre x360", CXT_FIXUP_HP_SPECTRE),
        SND_PCI_QUIRK(0x103c, 0x8115, "HP Z1 Gen3", CXT_FIXUP_HP_GATE_MIC),
        SND_PCI_QUIRK(0x1043, 0x138d, "Asus", CXT_FIXUP_HEADPHONE_MIC_PIN),
@@ -871,6 +881,7 @@ static const struct hda_model_fixup cxt5066_fixup_models[] = {
        { .id = CXT_PINCFG_LEMOTE_A1205, .name = "lemote-a1205" },
        { .id = CXT_FIXUP_OLPC_XO, .name = "olpc-xo" },
        { .id = CXT_FIXUP_MUTE_LED_EAPD, .name = "mute-led-eapd" },
+       { .id = CXT_FIXUP_HP_DOCK, .name = "hp-dock" },
        {}
 };
 
index 4e11222..7f98989 100644
@@ -4847,6 +4847,7 @@ enum {
        ALC286_FIXUP_HP_GPIO_LED,
        ALC280_FIXUP_HP_GPIO2_MIC_HOTKEY,
        ALC280_FIXUP_HP_DOCK_PINS,
+       ALC269_FIXUP_HP_DOCK_GPIO_MIC1_LED,
        ALC280_FIXUP_HP_9480M,
        ALC288_FIXUP_DELL_HEADSET_MODE,
        ALC288_FIXUP_DELL1_MIC_NO_PRESENCE,
@@ -5388,6 +5389,16 @@ static const struct hda_fixup alc269_fixups[] = {
                .chained = true,
                .chain_id = ALC280_FIXUP_HP_GPIO4
        },
+       [ALC269_FIXUP_HP_DOCK_GPIO_MIC1_LED] = {
+               .type = HDA_FIXUP_PINS,
+               .v.pins = (const struct hda_pintbl[]) {
+                       { 0x1b, 0x21011020 }, /* line-out */
+                       { 0x18, 0x2181103f }, /* line-in */
+                       { },
+               },
+               .chained = true,
+               .chain_id = ALC269_FIXUP_HP_GPIO_MIC1_LED
+       },
        [ALC280_FIXUP_HP_9480M] = {
                .type = HDA_FIXUP_FUNC,
                .v.func = alc280_fixup_hp_9480m,
@@ -5647,7 +5658,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x103c, 0x2256, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
        SND_PCI_QUIRK(0x103c, 0x2257, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
        SND_PCI_QUIRK(0x103c, 0x2259, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
-       SND_PCI_QUIRK(0x103c, 0x225a, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
+       SND_PCI_QUIRK(0x103c, 0x225a, "HP", ALC269_FIXUP_HP_DOCK_GPIO_MIC1_LED),
        SND_PCI_QUIRK(0x103c, 0x2260, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
        SND_PCI_QUIRK(0x103c, 0x2263, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
        SND_PCI_QUIRK(0x103c, 0x2264, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
@@ -5816,6 +5827,7 @@ static const struct hda_model_fixup alc269_fixup_models[] = {
        {.id = ALC269_FIXUP_HEADSET_MODE_NO_HP_MIC, .name = "headset-mode-no-hp-mic"},
        {.id = ALC269_FIXUP_LENOVO_DOCK, .name = "lenovo-dock"},
        {.id = ALC269_FIXUP_HP_GPIO_LED, .name = "hp-gpio-led"},
+       {.id = ALC269_FIXUP_HP_DOCK_GPIO_MIC1_LED, .name = "hp-dock-gpio-mic1-led"},
        {.id = ALC269_FIXUP_DELL1_MIC_NO_PRESENCE, .name = "dell-headset-multi"},
        {.id = ALC269_FIXUP_DELL2_MIC_NO_PRESENCE, .name = "dell-headset-dock"},
        {.id = ALC283_FIXUP_CHROME_BOOK, .name = "alc283-dac-wcaps"},
@@ -6090,6 +6102,8 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
                ALC295_STANDARD_PINS,
                {0x17, 0x21014040},
                {0x18, 0x21a19050}),
+       SND_HDA_PIN_QUIRK(0x10ec0295, 0x1028, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE,
+               ALC295_STANDARD_PINS),
        SND_HDA_PIN_QUIRK(0x10ec0298, 0x1028, "Dell", ALC298_FIXUP_DELL1_MIC_NO_PRESENCE,
                ALC298_STANDARD_PINS,
                {0x17, 0x90170110}),
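
Both codec drivers gain the same style of dock fixup: a HDA_FIXUP_PINS table that configures the dock's line-out and line-in pins (NIDs 0x16/0x1b and 0x18, with the pin default configs shown in the in-table comments), a PCI quirk entry to apply it automatically, and a .name entry in the model-fixup table. The name entries mean the fixup can also be forced by hand, e.g. with the snd-hda-intel "model=hp-dock" (Conexant) or "model=hp-dock-gpio-mic1-led" (Realtek) module option, which is the usual way such quirks are verified before a PCI ID is added.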
index 84c8f8f..8adf4d1 100644
@@ -1,6 +1,7 @@
 menuconfig SND_X86
-       tristate "X86 sound devices"
+       bool "X86 sound devices"
        depends on X86
+       default y
        ---help---
          X86 sound devices that don't fall under SoC or PCI categories
 
index 70e389b..9b4d8ba 100644
@@ -202,7 +202,7 @@ void symbols__fixup_end(struct rb_root *symbols)
 
        /* Last entry */
        if (curr->end == curr->start)
-               curr->end = roundup(curr->start, 4096);
+               curr->end = roundup(curr->start, 4096) + 4096;
 }
 
 void __map_groups__fixup_end(struct map_groups *mg, enum map_type type)
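
This heuristic gives the last symbol in the table a synthetic end address, and roundup() is a no-op when the value is already aligned, which is what broke the old version. A worked case with an illustrative address:

        /* start = 0xffffffffa0001000 (already 4 KiB aligned)
         * old: end = roundup(start, 4096)        == start  -> empty symbol
         * new: end = roundup(start, 4096) + 4096 == start + 0x1000
         * so the last symbol always spans a non-zero range. */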
index 67531f4..6a1ad58 100644
@@ -1,22 +1,23 @@
 LIBDIR := ../../../lib
-BPFOBJ := $(LIBDIR)/bpf/bpf.o
+BPFDIR := $(LIBDIR)/bpf
 
-CFLAGS += -Wall -O2 -lcap -I../../../include/uapi -I$(LIBDIR) $(BPFOBJ)
+CFLAGS += -Wall -O2 -I../../../include/uapi -I$(LIBDIR)
+LDLIBS += -lcap
 
 TEST_GEN_PROGS = test_verifier test_tag test_maps test_lru_map test_lpm_map
 
 TEST_PROGS := test_kmod.sh
 
-all: $(TEST_GEN_PROGS)
+include ../lib.mk
+
+BPFOBJ := $(OUTPUT)/bpf.o
+
+$(TEST_GEN_PROGS): $(BPFOBJ)
 
-.PHONY: all clean force
+.PHONY: force
 
 # force a rebuild of BPFOBJ when its dependencies are updated
 force:
 
 $(BPFOBJ): force
-       $(MAKE) -C $(dir $(BPFOBJ))
-
-$(test_objs): $(BPFOBJ)
-
-include ../lib.mk
+       $(MAKE) -C $(BPFDIR) OUTPUT=$(OUTPUT)/
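
The Makefile rework fixes two ordering problems: ../lib.mk is now included before anything that references $(OUTPUT), so bpf.o is built into the selftest output directory instead of the libbpf source tree, and -lcap moves from CFLAGS to LDLIBS so the library lands after the objects on the link command line (buried in CFLAGS it could precede them and, with an --as-needed toolchain, be dropped entirely). The $(TEST_GEN_PROGS): $(BPFOBJ) rule replaces the stale $(test_objs) dependency, which no longer matched any variable defined here.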
index cada17a..a0aa200 100644
@@ -80,8 +80,9 @@ static void test_hashmap(int task, void *data)
        assert(bpf_map_update_elem(fd, &key, &value, BPF_EXIST) == 0);
        key = 2;
        assert(bpf_map_update_elem(fd, &key, &value, BPF_ANY) == 0);
-       key = 1;
-       assert(bpf_map_update_elem(fd, &key, &value, BPF_ANY) == 0);
+       key = 3;
+       assert(bpf_map_update_elem(fd, &key, &value, BPF_NOEXIST) == -1 &&
+              errno == E2BIG);
 
        /* Check that key = 0 doesn't exist. */
        key = 0;
@@ -110,6 +111,24 @@ static void test_hashmap(int task, void *data)
        close(fd);
 }
 
+static void test_hashmap_sizes(int task, void *data)
+{
+       int fd, i, j;
+
+       for (i = 1; i <= 512; i <<= 1)
+               for (j = 1; j <= 1 << 18; j <<= 1) {
+                       fd = bpf_create_map(BPF_MAP_TYPE_HASH, i, j,
+                                           2, map_flags);
+                       if (fd < 0) {
+                               printf("Failed to create hashmap key=%d value=%d '%s'\n",
+                                      i, j, strerror(errno));
+                               exit(1);
+                       }
+                       close(fd);
+                       usleep(10); /* give kernel time to destroy */
+               }
+}
+
 static void test_hashmap_percpu(int task, void *data)
 {
        unsigned int nr_cpus = bpf_num_possible_cpus();
@@ -317,7 +336,10 @@ static void test_arraymap_percpu(int task, void *data)
 static void test_arraymap_percpu_many_keys(void)
 {
        unsigned int nr_cpus = bpf_num_possible_cpus();
-       unsigned int nr_keys = 20000;
+       /* nr_keys is not too large otherwise the test stresses percpu
+        * allocator more than anything else
+        */
+       unsigned int nr_keys = 2000;
        long values[nr_cpus];
        int key, fd, i;
 
@@ -419,6 +441,7 @@ static void test_map_stress(void)
 {
        run_parallel(100, test_hashmap, NULL);
        run_parallel(100, test_hashmap_percpu, NULL);
+       run_parallel(100, test_hashmap_sizes, NULL);
 
        run_parallel(100, test_arraymap, NULL);
        run_parallel(100, test_arraymap_percpu, NULL);
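
Three separate hardenings of the map selftests: the capacity check in test_hashmap() now inserts a third distinct key with BPF_NOEXIST and demands -1 with errno == E2BIG, rather than overwriting an existing key with BPF_ANY, which could never fail; the new test_hashmap_sizes() sweeps key sizes 1..512 and value sizes 1..256 KiB, both by powers of two, to exercise allocator corner cases, and is run under test_map_stress() in 100 parallel tasks; and the per-cpu many-keys test drops from 20000 to 2000 keys so the stress lands on the map code rather than the percpu allocator. A sketch of the capacity expectation (max_entries == 2 is an assumption: the map is created earlier in test_hashmap(), outside this excerpt):

        key = 3;
        /* strict insert into a full two-entry hash map must fail */
        assert(bpf_map_update_elem(fd, &key, &value, BPF_NOEXIST) == -1 &&
               errno == E2BIG);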