Merge tag 'kvm-ppc-fixes-4.19-2' of git://git.kernel.org/pub/scm/linux/kernel/git...
authorPaolo Bonzini <pbonzini@redhat.com>
Tue, 18 Sep 2018 13:13:00 +0000 (15:13 +0200)
committerPaolo Bonzini <pbonzini@redhat.com>
Tue, 18 Sep 2018 13:13:00 +0000 (15:13 +0200)
Second set of PPC KVM fixes for 4.19

Two fixes for KVM on POWER machines.  Both of these relate to memory
corruption and host crashes seen when transparent huge pages are
enabled.  The first fixes a host crash that can occur when a DMA
mapping is removed by the guest and the page mapped was part of a
transparent huge page; the second fixes corruption that could occur
when a hypervisor page fault for a radix guest is being serviced at
the same time that the backing page is being collapsed or split.

597 files changed:
Documentation/ABI/stable/sysfs-devices-system-xen_memory
Documentation/admin-guide/kernel-parameters.txt
Documentation/device-mapper/dm-raid.txt
Documentation/devicetree/bindings/i2c/i2c-imx-lpi2c.txt
Documentation/devicetree/bindings/net/cpsw.txt
Documentation/devicetree/bindings/net/sh_eth.txt
Documentation/filesystems/vfs.txt
Documentation/process/changes.rst
Documentation/process/code-of-conduct.rst [new file with mode: 0644]
Documentation/process/code-of-conflict.rst [deleted file]
Documentation/process/index.rst
Documentation/scsi/scsi-parameters.txt
Documentation/virtual/kvm/api.txt
Documentation/x86/earlyprintk.txt
MAINTAINERS
Makefile
arch/arc/Kconfig
arch/arc/Makefile
arch/arc/boot/dts/axc003.dtsi
arch/arc/boot/dts/axc003_idu.dtsi
arch/arc/boot/dts/axs10x_mb.dtsi
arch/arc/boot/dts/hsdk.dts
arch/arc/configs/axs101_defconfig
arch/arc/configs/axs103_defconfig
arch/arc/configs/axs103_smp_defconfig
arch/arc/configs/haps_hs_defconfig
arch/arc/configs/haps_hs_smp_defconfig
arch/arc/configs/hsdk_defconfig
arch/arc/configs/nps_defconfig
arch/arc/configs/nsim_700_defconfig
arch/arc/configs/nsim_hs_defconfig
arch/arc/configs/nsim_hs_smp_defconfig
arch/arc/configs/nsimosci_defconfig
arch/arc/configs/nsimosci_hs_defconfig
arch/arc/configs/nsimosci_hs_smp_defconfig
arch/arc/configs/tb10x_defconfig
arch/arc/configs/vdk_hs38_defconfig
arch/arc/configs/vdk_hs38_smp_defconfig
arch/arc/include/asm/atomic.h
arch/arc/include/asm/dma-mapping.h [new file with mode: 0644]
arch/arc/kernel/troubleshoot.c
arch/arc/mm/cache.c
arch/arc/mm/dma.c
arch/arm64/boot/dts/allwinner/sun50i-h6-pine-h64.dts
arch/arm64/include/asm/jump_label.h
arch/arm64/kernel/Makefile
arch/arm64/kernel/crash_core.c [new file with mode: 0644]
arch/arm64/kernel/machine_kexec.c
arch/arm64/mm/mmu.c
arch/hexagon/include/asm/bitops.h
arch/hexagon/kernel/dma.c
arch/m68k/mm/mcfmmu.c
arch/mips/include/asm/mach-lantiq/xway/xway_dma.h
arch/mips/kernel/vdso.c
arch/mips/lantiq/xway/dma.c
arch/nds32/Kconfig
arch/nds32/Makefile
arch/nds32/include/asm/elf.h
arch/nds32/include/asm/ftrace.h [new file with mode: 0644]
arch/nds32/include/asm/nds32.h
arch/nds32/include/asm/uaccess.h
arch/nds32/kernel/Makefile
arch/nds32/kernel/atl2c.c
arch/nds32/kernel/ex-entry.S
arch/nds32/kernel/ex-exit.S
arch/nds32/kernel/ftrace.c [new file with mode: 0644]
arch/nds32/kernel/module.c
arch/nds32/kernel/stacktrace.c
arch/nds32/kernel/traps.c
arch/nds32/kernel/vmlinux.lds.S
arch/riscv/kernel/setup.c
arch/s390/crypto/paes_s390.c
arch/s390/kvm/kvm-s390.c
arch/s390/mm/gmap.c
arch/sparc/kernel/of_device_32.c
arch/sparc/kernel/of_device_64.c
arch/x86/events/intel/lbr.c
arch/x86/include/asm/atomic.h
arch/x86/include/asm/atomic64_32.h
arch/x86/include/asm/atomic64_64.h
arch/x86/include/asm/kdebug.h
arch/x86/include/asm/pgtable-2level.h
arch/x86/include/asm/pgtable.h
arch/x86/include/asm/pgtable_64.h
arch/x86/kernel/apic/vector.c
arch/x86/kernel/apm_32.c
arch/x86/kernel/cpu/microcode/amd.c
arch/x86/kernel/cpu/microcode/intel.c
arch/x86/kernel/dumpstack.c
arch/x86/kernel/eisa.c
arch/x86/kernel/process_32.c
arch/x86/kernel/process_64.c
arch/x86/kernel/topology.c
arch/x86/kernel/tsc.c
arch/x86/mm/pgtable.c
arch/x86/platform/efi/efi_32.c
arch/xtensa/Kconfig
arch/xtensa/Makefile
arch/xtensa/platforms/iss/setup.c
block/bfq-cgroup.c
block/bio.c
block/blk-cgroup.c
block/blk-core.c
block/blk-throttle.c
drivers/acpi/acpi_lpss.c
drivers/acpi/bus.c
drivers/android/binder_alloc.c
drivers/ata/libata-core.c
drivers/base/firmware_loader/main.c
drivers/base/memory.c
drivers/block/nbd.c
drivers/block/null_blk.h
drivers/block/null_blk_main.c
drivers/block/null_blk_zoned.c
drivers/block/rbd.c
drivers/char/Kconfig
drivers/char/ipmi/ipmi_bt_sm.c
drivers/char/ipmi/ipmi_msghandler.c
drivers/char/ipmi/ipmi_si_intf.c
drivers/char/ipmi/ipmi_ssif.c
drivers/char/ipmi/kcs_bmc.c
drivers/char/random.c
drivers/dax/device.c
drivers/dma/mic_x100_dma.c
drivers/firmware/arm_scmi/perf.c
drivers/fpga/dfl-fme-pr.c
drivers/gpio/gpio-adp5588.c
drivers/gpio/gpio-dwapb.c
drivers/gpio/gpiolib-acpi.c
drivers/gpio/gpiolib-of.c
drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
drivers/gpu/drm/i915/gvt/dmabuf.c
drivers/gpu/drm/i915/gvt/fb_decoder.c
drivers/gpu/drm/i915/gvt/fb_decoder.h
drivers/gpu/drm/i915/gvt/handlers.c
drivers/gpu/drm/i915/gvt/kvmgt.c
drivers/gpu/drm/i915/gvt/mmio_context.c
drivers/gpu/drm/i915/gvt/opregion.c
drivers/gpu/drm/i915/gvt/sched_policy.c
drivers/gpu/drm/i915/i915_reg.h
drivers/gpu/drm/i915/intel_ddi.c
drivers/gpu/drm/i915/intel_display.c
drivers/gpu/drm/i915/intel_dp.c
drivers/gpu/drm/i915/intel_dp_mst.c
drivers/gpu/drm/i915/intel_overlay.c
drivers/gpu/drm/nouveau/dispnv50/disp.c
drivers/gpu/drm/nouveau/nouveau_connector.c
drivers/gpu/drm/nouveau/nouveau_display.c
drivers/gpu/drm/nouveau/nouveau_display.h
drivers/gpu/drm/nouveau/nouveau_drm.c
drivers/gpu/drm/nouveau/nouveau_fbcon.c
drivers/gpu/drm/nouveau/nouveau_fbcon.h
drivers/gpu/drm/nouveau/nouveau_vga.c
drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c
drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c
drivers/gpu/drm/nouveau/nvkm/engine/disp/ior.h
drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c
drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c
drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.h
drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm200.c
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
drivers/hid/hid-apple.c
drivers/hid/hid-core.c
drivers/hid/hid-ids.h
drivers/hid/hid-input.c
drivers/hid/hid-multitouch.c
drivers/hid/hid-saitek.c
drivers/hid/hid-sensor-hub.c
drivers/hid/i2c-hid/i2c-hid.c
drivers/hid/intel-ish-hid/ipc/hw-ish.h
drivers/hid/intel-ish-hid/ipc/pci-ish.c
drivers/hv/vmbus_drv.c
drivers/hwmon/raspberrypi-hwmon.c
drivers/i2c/busses/i2c-i801.c
drivers/i2c/busses/i2c-imx-lpi2c.c
drivers/i2c/busses/i2c-uniphier-f.c
drivers/i2c/busses/i2c-uniphier.c
drivers/i2c/busses/i2c-xiic.c
drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c
drivers/iio/temperature/maxim_thermocouple.c
drivers/infiniband/core/cma.c
drivers/infiniband/core/rdma_core.c
drivers/infiniband/core/ucma.c
drivers/infiniband/core/uverbs_main.c
drivers/infiniband/hw/bnxt_re/ib_verbs.c
drivers/infiniband/hw/bnxt_re/qplib_fp.c
drivers/infiniband/hw/cxgb4/qp.c
drivers/infiniband/hw/hfi1/pcie.c
drivers/infiniband/hw/mlx4/main.c
drivers/infiniband/ulp/ipoib/ipoib_cm.c
drivers/irqchip/irq-gic-v3-its.c
drivers/md/dm-crypt.c
drivers/md/dm-integrity.c
drivers/md/dm-raid.c
drivers/md/dm-thin-metadata.c
drivers/md/dm-thin.c
drivers/md/dm-verity-target.c
drivers/md/md-cluster.c
drivers/md/raid10.c
drivers/md/raid5-log.h
drivers/md/raid5.c
drivers/memory/ti-aemif.c
drivers/misc/hmc6352.c
drivers/misc/ibmvmc.c
drivers/misc/mei/bus.c
drivers/misc/mei/client.c
drivers/misc/mei/hbm.c
drivers/mmc/host/meson-mx-sdio.c
drivers/mmc/host/omap_hsmmc.c
drivers/net/ethernet/amazon/ena/ena_com.c
drivers/net/ethernet/amazon/ena/ena_eth_com.c
drivers/net/ethernet/amazon/ena/ena_eth_com.h
drivers/net/ethernet/amazon/ena/ena_netdev.c
drivers/net/ethernet/amazon/ena/ena_netdev.h
drivers/net/ethernet/broadcom/bnxt/bnxt.c
drivers/net/ethernet/broadcom/bnxt/bnxt.h
drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h
drivers/net/ethernet/broadcom/genet/bcmgenet.h
drivers/net/ethernet/broadcom/genet/bcmmii.c
drivers/net/ethernet/cadence/macb_main.c
drivers/net/ethernet/emulex/benet/be_cmds.c
drivers/net/ethernet/hisilicon/hns/hnae.h
drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c
drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h
drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h
drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c
drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.h
drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h
drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h
drivers/net/ethernet/hisilicon/hns/hns_enet.c
drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
drivers/net/ethernet/ibm/emac/core.c
drivers/net/ethernet/ibm/ibmvnic.c
drivers/net/ethernet/lantiq_etop.c
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
drivers/net/ethernet/mellanox/mlx5/core/dev.c
drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
drivers/net/ethernet/mellanox/mlx5/core/health.c
drivers/net/ethernet/mellanox/mlx5/core/main.c
drivers/net/ethernet/mellanox/mlx5/core/wq.c
drivers/net/ethernet/mellanox/mlx5/core/wq.h
drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
drivers/net/ethernet/netronome/nfp/flower/action.c
drivers/net/ethernet/netronome/nfp/flower/main.h
drivers/net/ethernet/netronome/nfp/flower/match.c
drivers/net/ethernet/netronome/nfp/flower/offload.c
drivers/net/ethernet/netronome/nfp/nfp_net_common.c
drivers/net/ethernet/qualcomm/qca_7k.c
drivers/net/ethernet/qualcomm/qca_spi.c
drivers/net/ethernet/qualcomm/qca_spi.h
drivers/net/ethernet/realtek/r8169.c
drivers/net/ethernet/renesas/Kconfig
drivers/net/ethernet/renesas/Makefile
drivers/net/ethernet/renesas/ravb_ptp.c
drivers/net/ethernet/renesas/sh_eth.c
drivers/net/ethernet/stmicro/stmmac/Kconfig
drivers/net/ethernet/stmicro/stmmac/stmmac.h
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
drivers/net/ethernet/ti/cpsw-phy-sel.c
drivers/net/hyperv/netvsc_drv.c
drivers/net/phy/sfp.c
drivers/net/usb/qmi_wwan.c
drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
drivers/net/wireless/mac80211_hwsim.c
drivers/net/xen-netfront.c
drivers/nvme/target/rdma.c
drivers/of/base.c
drivers/of/platform.c
drivers/pci/hotplug/pciehp_hpc.c
drivers/pci/pci.c
drivers/pci/probe.c
drivers/pci/quirks.c
drivers/pci/switch/switchtec.c
drivers/pinctrl/cirrus/pinctrl-madera-core.c
drivers/pinctrl/pinctrl-ingenic.c
drivers/pinctrl/qcom/pinctrl-msm.c
drivers/s390/crypto/ap_bus.c
drivers/s390/net/qeth_core_main.c
drivers/s390/net/qeth_l2_main.c
drivers/s390/net/qeth_l3_main.c
drivers/scsi/Kconfig
drivers/scsi/aacraid/aacraid.h
drivers/scsi/csiostor/csio_hw.c
drivers/scsi/csiostor/csio_hw.h
drivers/scsi/csiostor/csio_mb.c
drivers/scsi/hosts.c
drivers/scsi/hpsa.c
drivers/scsi/lpfc/lpfc.h
drivers/scsi/lpfc/lpfc_attr.c
drivers/scsi/qedi/qedi.h
drivers/scsi/qedi/qedi_main.c
drivers/scsi/scsi_lib.c
drivers/staging/erofs/Kconfig
drivers/staging/erofs/super.c
drivers/staging/fbtft/TODO
drivers/staging/gasket/TODO
drivers/staging/vboxvideo/vbox_drv.c
drivers/staging/vboxvideo/vbox_mode.c
drivers/staging/wilc1000/Makefile
drivers/staging/wilc1000/linux_wlan.c
drivers/staging/wilc1000/wilc_debugfs.c
drivers/staging/wilc1000/wilc_wlan.c
drivers/staging/wilc1000/wilc_wlan_if.h
drivers/target/iscsi/cxgbit/cxgbit_ddp.c
drivers/target/iscsi/iscsi_target.c
drivers/target/iscsi/iscsi_target_login.c
drivers/target/iscsi/iscsi_target_login.h
drivers/tty/hvc/hvc_console.c
drivers/usb/class/cdc-acm.c
drivers/usb/class/cdc-acm.h
drivers/usb/class/cdc-wdm.c
drivers/usb/common/common.c
drivers/usb/core/hcd-pci.c
drivers/usb/core/message.c
drivers/usb/core/of.c
drivers/usb/core/quirks.c
drivers/usb/dwc2/platform.c
drivers/usb/dwc3/dwc3-of-simple.c
drivers/usb/dwc3/dwc3-pci.c
drivers/usb/dwc3/gadget.c
drivers/usb/gadget/udc/fotg210-udc.c
drivers/usb/gadget/udc/net2280.c
drivers/usb/gadget/udc/renesas_usb3.c
drivers/usb/host/u132-hcd.c
drivers/usb/host/xhci-mem.c
drivers/usb/host/xhci-plat.c
drivers/usb/host/xhci.c
drivers/usb/misc/uss720.c
drivers/usb/misc/yurex.c
drivers/usb/mtu3/mtu3_core.c
drivers/usb/mtu3/mtu3_hw_regs.h
drivers/usb/serial/io_ti.h
drivers/usb/serial/ti_usb_3410_5052.c
drivers/usb/storage/scsiglue.c
drivers/usb/storage/uas.c
drivers/usb/storage/unusual_devs.h
drivers/usb/typec/bus.c
drivers/usb/typec/class.c
drivers/xen/Kconfig
drivers/xen/cpu_hotplug.c
drivers/xen/events/events_base.c
drivers/xen/gntdev.c
drivers/xen/manage.c
drivers/xen/mem-reservation.c
drivers/xen/xen-balloon.c
fs/afs/proc.c
fs/btrfs/ctree.h
fs/btrfs/disk-io.c
fs/btrfs/extent-tree.c
fs/btrfs/inode.c
fs/btrfs/ioctl.c
fs/btrfs/qgroup.c
fs/btrfs/tree-log.c
fs/btrfs/tree-log.h
fs/btrfs/volumes.c
fs/ceph/super.c
fs/cifs/Kconfig
fs/cifs/cifs_unicode.c
fs/cifs/cifssmb.c
fs/cifs/connect.c
fs/cifs/inode.c
fs/cifs/misc.c
fs/cifs/readdir.c
fs/cifs/smb2misc.c
fs/cifs/smb2ops.c
fs/cifs/smb2pdu.c
fs/nfs/nfs4proc.c
fs/nfs/nfs4state.c
fs/nfs/nfs4trace.h
fs/nfs/pnfs.c
fs/nilfs2/alloc.c
fs/nilfs2/alloc.h
fs/nilfs2/bmap.c
fs/nilfs2/bmap.h
fs/nilfs2/btnode.c
fs/nilfs2/btnode.h
fs/nilfs2/btree.c
fs/nilfs2/btree.h
fs/nilfs2/cpfile.c
fs/nilfs2/cpfile.h
fs/nilfs2/dat.c
fs/nilfs2/dat.h
fs/nilfs2/dir.c
fs/nilfs2/direct.c
fs/nilfs2/direct.h
fs/nilfs2/file.c
fs/nilfs2/gcinode.c
fs/nilfs2/ifile.c
fs/nilfs2/ifile.h
fs/nilfs2/inode.c
fs/nilfs2/ioctl.c
fs/nilfs2/mdt.c
fs/nilfs2/mdt.h
fs/nilfs2/namei.c
fs/nilfs2/nilfs.h
fs/nilfs2/page.c
fs/nilfs2/page.h
fs/nilfs2/recovery.c
fs/nilfs2/segbuf.c
fs/nilfs2/segbuf.h
fs/nilfs2/segment.c
fs/nilfs2/segment.h
fs/nilfs2/sufile.c
fs/nilfs2/sufile.h
fs/nilfs2/super.c
fs/nilfs2/sysfs.c
fs/nilfs2/sysfs.h
fs/nilfs2/the_nilfs.c
fs/nilfs2/the_nilfs.h
fs/notify/fsnotify.c
fs/overlayfs/file.c
fs/overlayfs/inode.c
fs/overlayfs/super.c
fs/pstore/ram_core.c
include/asm-generic/io.h
include/linux/blk-cgroup.h
include/linux/blkdev.h
include/linux/fs.h
include/linux/hid.h
include/linux/mlx5/driver.h
include/linux/mm_types.h
include/linux/mm_types_task.h
include/linux/mod_devicetable.h
include/linux/pci.h
include/linux/pci_ids.h
include/linux/timekeeping.h
include/linux/tracepoint.h
include/linux/vm_event_item.h
include/linux/vmacache.h
include/net/cfg80211.h
include/net/netfilter/nf_conntrack_timeout.h
include/net/regulatory.h
include/uapi/linux/keyctl.h
include/uapi/linux/perf_event.h
include/uapi/linux/rds.h
include/uapi/linux/vhost.h
include/xen/mem-reservation.h
ipc/shm.c
kernel/bpf/sockmap.c
kernel/cpu.c
kernel/dma/direct.c
kernel/events/core.c
kernel/events/hw_breakpoint.c
kernel/fork.c
kernel/jump_label.c
kernel/locking/lockdep.c
kernel/locking/mutex.c
kernel/locking/test-ww_mutex.c
kernel/printk/printk.c
kernel/printk/printk_safe.c
kernel/sched/debug.c
kernel/sched/fair.c
kernel/sched/topology.c
kernel/time/clocksource.c
lib/Kconfig.debug
mm/Makefile
mm/backing-dev.c
mm/debug.c
mm/fadvise.c
mm/huge_memory.c
mm/kmemleak.c
mm/memcontrol.c
mm/memory_hotplug.c
mm/oom_kill.c
mm/page_alloc.c
mm/readahead.c
mm/util.c
mm/vmacache.c
net/core/filter.c
net/core/rtnetlink.c
net/core/skbuff.c
net/dsa/dsa.c
net/ipv4/igmp.c
net/ipv4/ip_fragment.c
net/ipv4/ip_gre.c
net/ipv4/netfilter/Kconfig
net/ipv4/tcp.c
net/ipv4/tcp_input.c
net/ipv4/tcp_minisocks.c
net/ipv6/af_inet6.c
net/ipv6/ip6_fib.c
net/ipv6/ip6_gre.c
net/ipv6/ip6_tunnel.c
net/ipv6/ip6_vti.c
net/ipv6/netfilter/nf_conntrack_reasm.c
net/ipv6/route.c
net/iucv/af_iucv.c
net/iucv/iucv.c
net/mac80211/ibss.c
net/mac80211/main.c
net/mac80211/mesh_hwmp.c
net/mac80211/mlme.c
net/mac80211/rx.c
net/mac80211/tx.c
net/mac80211/util.c
net/netfilter/Kconfig
net/netfilter/nf_conntrack_proto.c
net/netfilter/nf_conntrack_proto_dccp.c
net/netfilter/nf_conntrack_proto_generic.c
net/netfilter/nf_conntrack_proto_gre.c
net/netfilter/nf_conntrack_proto_icmp.c
net/netfilter/nf_conntrack_proto_icmpv6.c
net/netfilter/nf_conntrack_proto_sctp.c
net/netfilter/nf_conntrack_proto_tcp.c
net/netfilter/nf_conntrack_proto_udp.c
net/netfilter/nf_tables_api.c
net/netfilter/nfnetlink_cttimeout.c
net/netfilter/nfnetlink_queue.c
net/netfilter/nft_ct.c
net/netfilter/xt_CHECKSUM.c
net/netfilter/xt_cluster.c
net/netfilter/xt_hashlimit.c
net/packet/af_packet.c
net/packet/internal.h
net/rds/Kconfig
net/rds/bind.c
net/rds/ib.c
net/rfkill/rfkill-gpio.c
net/sched/act_api.c
net/sched/act_ife.c
net/sched/act_pedit.c
net/sched/act_tunnel_key.c
net/sched/cls_api.c
net/sctp/proc.c
net/sctp/socket.c
net/tipc/bcast.c
net/tipc/diag.c
net/tipc/name_table.c
net/tipc/name_table.h
net/tipc/netlink.c
net/tipc/netlink_compat.c
net/tipc/socket.c
net/tipc/socket.h
net/tipc/topsrv.c
net/tls/tls_sw.c
net/wireless/nl80211.c
net/wireless/reg.c
net/wireless/util.c
scripts/checkpatch.pl
scripts/depmod.sh
scripts/kconfig/Makefile
scripts/kconfig/check-pkgconfig.sh [deleted file]
scripts/kconfig/gconf-cfg.sh
scripts/kconfig/mconf-cfg.sh
scripts/kconfig/mconf.c
scripts/kconfig/nconf-cfg.sh
scripts/kconfig/qconf-cfg.sh
scripts/recordmcount.pl
scripts/setlocalversion
security/Kconfig
security/apparmor/secid.c
security/keys/dh.c
sound/core/rawmidi.c
sound/hda/ext/hdac_ext_stream.c
sound/pci/hda/hda_codec.c
tools/arch/arm/include/uapi/asm/kvm.h
tools/arch/arm64/include/uapi/asm/kvm.h
tools/arch/s390/include/uapi/asm/kvm.h
tools/arch/x86/include/uapi/asm/kvm.h
tools/bpf/bpftool/map.c
tools/hv/hv_kvp_daemon.c
tools/include/linux/lockdep.h
tools/include/linux/nmi.h [new file with mode: 0644]
tools/include/uapi/asm-generic/unistd.h
tools/include/uapi/drm/drm.h
tools/include/uapi/linux/if_link.h
tools/include/uapi/linux/kvm.h
tools/include/uapi/linux/perf_event.h
tools/include/uapi/linux/vhost.h
tools/perf/Makefile.perf
tools/perf/arch/arm64/Makefile
tools/perf/arch/arm64/entry/syscalls/mksyscalltbl
tools/perf/arch/powerpc/util/sym-handling.c
tools/perf/arch/x86/include/arch-tests.h
tools/perf/arch/x86/tests/Build
tools/perf/arch/x86/tests/arch-tests.c
tools/perf/arch/x86/tests/bp-modify.c [new file with mode: 0644]
tools/perf/util/annotate.c
tools/perf/util/annotate.h
tools/perf/util/evsel.c
tools/perf/util/map.c
tools/perf/util/trace-event-info.c
tools/perf/util/trace-event-parse.c
tools/testing/selftests/net/pmtu.sh
tools/testing/selftests/tc-testing/tc-tests/actions/police.json
tools/vm/page-types.c
tools/vm/slabinfo.c

index caa311d..6d83f95 100644 (file)
@@ -75,3 +75,12 @@ Contact:     Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
 Description:
                Amount (in KiB) of low (or normal) memory in the
                balloon.
+
+What:          /sys/devices/system/xen_memory/xen_memory0/scrub_pages
+Date:          September 2018
+KernelVersion: 4.20
+Contact:       xen-devel@lists.xenproject.org
+Description:
+               Control scrubbing pages before returning them to Xen for other
+               domains' use. Can be set with xen_scrub_pages cmdline
+               parameter. Default value controlled with CONFIG_XEN_SCRUB_PAGES_DEFAULT.
index 9871e64..92eb1f4 100644 (file)
        ramdisk_size=   [RAM] Sizes of RAM disks in kilobytes
                        See Documentation/blockdev/ramdisk.txt.
 
+       random.trust_cpu={on,off}
+                       [KNL] Enable or disable trusting the use of the
+                       CPU's random number generator (if available) to
+                       fully seed the kernel's CRNG. Default is controlled
+                       by CONFIG_RANDOM_TRUST_CPU.
+
        ras=option[,option,...] [KNL] RAS-specific options
 
                cec_disable     [X86]
                        Disables the PV optimizations forcing the HVM guest to
                        run as generic HVM guest with no PV drivers.
 
+       xen_scrub_pages=        [XEN]
+                       Boolean option to control scrubbing pages before giving them back
+                       to Xen, for use by other domains. Can be also changed at runtime
+                       with /sys/devices/system/xen_memory/xen_memory0/scrub_pages.
+                       Default value controlled with CONFIG_XEN_SCRUB_PAGES_DEFAULT.
+
        xirc2ps_cs=     [NET,PCMCIA]
                        Format:
                        <irq>,<irq_mask>,<io>,<full_duplex>,<do_sound>,<lockup_hack>[,<irq2>[,<irq3>[,<irq4>]]]
index 390c145..52a719b 100644 (file)
@@ -348,3 +348,7 @@ Version History
 1.13.1  Fix deadlock caused by early md_stop_writes().  Also fix size and
        state races.
 1.13.2  Fix raid redundancy validation and avoid keeping raid set frozen
+1.14.0  Fix reshape race on small devices.  Fix stripe adding reshape
+       deadlock/potential data corruption.  Update superblock when
+       specific devices are requested via rebuild.  Fix RAID leg
+       rebuild errors.
index 00e4365..091c8df 100644 (file)
@@ -3,7 +3,6 @@
 Required properties:
 - compatible :
   - "fsl,imx7ulp-lpi2c" for LPI2C compatible with the one integrated on i.MX7ULP soc
-  - "fsl,imx8dv-lpi2c" for LPI2C compatible with the one integrated on i.MX8DV soc
 - reg : address and length of the lpi2c master registers
 - interrupts : lpi2c interrupt
 - clocks : lpi2c clock specifier
@@ -11,7 +10,7 @@ Required properties:
 Examples:
 
 lpi2c7: lpi2c7@40a50000 {
-       compatible = "fsl,imx8dv-lpi2c";
+       compatible = "fsl,imx7ulp-lpi2c";
        reg = <0x40A50000 0x10000>;
        interrupt-parent = <&intc>;
        interrupts = <GIC_SPI 37 IRQ_TYPE_LEVEL_HIGH>;
index 4108936..b3acebe 100644 (file)
@@ -19,6 +19,10 @@ Required properties:
 - slaves               : Specifies number for slaves
 - active_slave         : Specifies the slave to use for time stamping,
                          ethtool and SIOCGMIIPHY
+- cpsw-phy-sel         : Specifies the phandle to the CPSW phy mode selection
+                         device. See also cpsw-phy-sel.txt for its binding.
+                         Note that in legacy cases cpsw-phy-sel may be
+                         a child device instead of a phandle.
 
 Optional properties:
 - ti,hwmods            : Must be "cpgmac0"
@@ -75,6 +79,7 @@ Examples:
                cpts_clock_mult = <0x80000000>;
                cpts_clock_shift = <29>;
                syscon = <&cm>;
+               cpsw-phy-sel = <&phy_sel>;
                cpsw_emac0: slave@0 {
                        phy_id = <&davinci_mdio>, <0>;
                        phy-mode = "rgmii-txid";
@@ -103,6 +108,7 @@ Examples:
                cpts_clock_mult = <0x80000000>;
                cpts_clock_shift = <29>;
                syscon = <&cm>;
+               cpsw-phy-sel = <&phy_sel>;
                cpsw_emac0: slave@0 {
                        phy_id = <&davinci_mdio>, <0>;
                        phy-mode = "rgmii-txid";
index 76db9f1..abc3627 100644 (file)
@@ -16,6 +16,7 @@ Required properties:
              "renesas,ether-r8a7794"  if the device is a part of R8A7794 SoC.
              "renesas,gether-r8a77980" if the device is a part of R8A77980 SoC.
              "renesas,ether-r7s72100" if the device is a part of R7S72100 SoC.
+             "renesas,ether-r7s9210" if the device is a part of R7S9210 SoC.
              "renesas,rcar-gen1-ether" for a generic R-Car Gen1 device.
              "renesas,rcar-gen2-ether" for a generic R-Car Gen2 or RZ/G1
                                        device.
index 4b2084d..a6c6a8a 100644 (file)
@@ -848,7 +848,7 @@ struct file_operations
 ----------------------
 
 This describes how the VFS can manipulate an open file. As of kernel
-4.1, the following members are defined:
+4.18, the following members are defined:
 
 struct file_operations {
        struct module *owner;
@@ -858,11 +858,11 @@ struct file_operations {
        ssize_t (*read_iter) (struct kiocb *, struct iov_iter *);
        ssize_t (*write_iter) (struct kiocb *, struct iov_iter *);
        int (*iterate) (struct file *, struct dir_context *);
+       int (*iterate_shared) (struct file *, struct dir_context *);
        __poll_t (*poll) (struct file *, struct poll_table_struct *);
        long (*unlocked_ioctl) (struct file *, unsigned int, unsigned long);
        long (*compat_ioctl) (struct file *, unsigned int, unsigned long);
        int (*mmap) (struct file *, struct vm_area_struct *);
-       int (*mremap)(struct file *, struct vm_area_struct *);
        int (*open) (struct inode *, struct file *);
        int (*flush) (struct file *, fl_owner_t id);
        int (*release) (struct inode *, struct file *);
@@ -882,6 +882,10 @@ struct file_operations {
 #ifndef CONFIG_MMU
        unsigned (*mmap_capabilities)(struct file *);
 #endif
+       ssize_t (*copy_file_range)(struct file *, loff_t, struct file *, loff_t, size_t, unsigned int);
+       int (*clone_file_range)(struct file *, loff_t, struct file *, loff_t, u64);
+       int (*dedupe_file_range)(struct file *, loff_t, struct file *, loff_t, u64);
+       int (*fadvise)(struct file *, loff_t, loff_t, int);
 };
 
 Again, all methods are called without any locks being held, unless
@@ -899,6 +903,9 @@ otherwise noted.
 
   iterate: called when the VFS needs to read the directory contents
 
+  iterate_shared: called when the VFS needs to read the directory contents
+       when filesystem supports concurrent dir iterators
+
   poll: called by the VFS when a process wants to check if there is
        activity on this file and (optionally) go to sleep until there
        is activity. Called by the select(2) and poll(2) system calls
@@ -951,6 +958,16 @@ otherwise noted.
 
   fallocate: called by the VFS to preallocate blocks or punch a hole.
 
+  copy_file_range: called by the copy_file_range(2) system call.
+
+  clone_file_range: called by the ioctl(2) system call for FICLONERANGE and
+       FICLONE commands.
+
+  dedupe_file_range: called by the ioctl(2) system call for FIDEDUPERANGE
+       command.
+
+  fadvise: possibly called by the fadvise64() system call.
+
 Note that the file operations are implemented by the specific
 filesystem in which the inode resides. When opening a device node
 (character or block special) most filesystems will call special
index 61f918b..d1bf143 100644 (file)
@@ -86,7 +86,7 @@ pkg-config
 
 The build system, as of 4.18, requires pkg-config to check for installed
 kconfig tools and to determine flags settings for use in
-'make {menu,n,g,x}config'.  Previously pkg-config was being used but not
+'make {g,x}config'.  Previously pkg-config was being used but not
 verified or documented.
 
 Flex
diff --git a/Documentation/process/code-of-conduct.rst b/Documentation/process/code-of-conduct.rst
new file mode 100644 (file)
index 0000000..ab7c24b
--- /dev/null
@@ -0,0 +1,81 @@
+Contributor Covenant Code of Conduct
+++++++++++++++++++++++++++++++++++++
+
+Our Pledge
+==========
+
+In the interest of fostering an open and welcoming environment, we as
+contributors and maintainers pledge to making participation in our project and
+our community a harassment-free experience for everyone, regardless of age, body
+size, disability, ethnicity, sex characteristics, gender identity and
+expression, level of experience, education, socio-economic status, nationality,
+personal appearance, race, religion, or sexual identity and orientation.
+
+Our Standards
+=============
+
+Examples of behavior that contributes to creating a positive environment
+include:
+
+* Using welcoming and inclusive language
+* Being respectful of differing viewpoints and experiences
+* Gracefully accepting constructive criticism
+* Focusing on what is best for the community
+* Showing empathy towards other community members
+
+
+Examples of unacceptable behavior by participants include:
+
+* The use of sexualized language or imagery and unwelcome sexual attention or
+  advances
+* Trolling, insulting/derogatory comments, and personal or political attacks
+* Public or private harassment
+* Publishing others’ private information, such as a physical or electronic
+  address, without explicit permission
+* Other conduct which could reasonably be considered inappropriate in a
+  professional setting
+
+
+Our Responsibilities
+====================
+
+Maintainers are responsible for clarifying the standards of acceptable behavior
+and are expected to take appropriate and fair corrective action in response to
+any instances of unacceptable behavior.
+
+Maintainers have the right and responsibility to remove, edit, or reject
+comments, commits, code, wiki edits, issues, and other contributions that are
+not aligned to this Code of Conduct, or to ban temporarily or permanently any
+contributor for other behaviors that they deem inappropriate, threatening,
+offensive, or harmful.
+
+Scope
+=====
+
+This Code of Conduct applies both within project spaces and in public spaces
+when an individual is representing the project or its community. Examples of
+representing a project or community include using an official project e-mail
+address, posting via an official social media account, or acting as an appointed
+representative at an online or offline event. Representation of a project may be
+further defined and clarified by project maintainers.
+
+Enforcement
+===========
+
+Instances of abusive, harassing, or otherwise unacceptable behavior may be
+reported by contacting the Technical Advisory Board (TAB) at
+<tab@lists.linux-foundation.org>. All complaints will be reviewed and
+investigated and will result in a response that is deemed necessary and
+appropriate to the circumstances. The TAB is obligated to maintain
+confidentiality with regard to the reporter of an incident.  Further details of
+specific enforcement policies may be posted separately.
+
+Maintainers who do not follow or enforce the Code of Conduct in good faith may
+face temporary or permanent repercussions as determined by other members of the
+project’s leadership.
+
+Attribution
+===========
+
+This Code of Conduct is adapted from the Contributor Covenant, version 1.4,
+available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html
diff --git a/Documentation/process/code-of-conflict.rst b/Documentation/process/code-of-conflict.rst
deleted file mode 100644 (file)
index 47b6de7..0000000
+++ /dev/null
@@ -1,28 +0,0 @@
-Code of Conflict
-----------------
-
-The Linux kernel development effort is a very personal process compared
-to "traditional" ways of developing software.  Your code and ideas
-behind it will be carefully reviewed, often resulting in critique and
-criticism.  The review will almost always require improvements to the
-code before it can be included in the kernel.  Know that this happens
-because everyone involved wants to see the best possible solution for
-the overall success of Linux.  This development process has been proven
-to create the most robust operating system kernel ever, and we do not
-want to do anything to cause the quality of submission and eventual
-result to ever decrease.
-
-If however, anyone feels personally abused, threatened, or otherwise
-uncomfortable due to this process, that is not acceptable.  If so,
-please contact the Linux Foundation's Technical Advisory Board at
-<tab@lists.linux-foundation.org>, or the individual members, and they
-will work to resolve the issue to the best of their ability.  For more
-information on who is on the Technical Advisory Board and what their
-role is, please see:
-
-       - http://www.linuxfoundation.org/projects/linux/tab
-
-As a reviewer of code, please strive to keep things civil and focused on
-the technical issues involved.  We are all humans, and frustrations can
-be high on both sides of the process.  Try to keep in mind the immortal
-words of Bill and Ted, "Be excellent to each other."
index 37bd062..9ae3e31 100644 (file)
@@ -20,7 +20,7 @@ Below are the essential guides that every developer should read.
    :maxdepth: 1
 
    howto
-   code-of-conflict
+   code-of-conduct
    development-process
    submitting-patches
    coding-style
index 25a4b4c..92999d4 100644 (file)
@@ -97,6 +97,11 @@ parameters may be changed at runtime by the command
                        allowing boot to proceed.  none ignores them, expecting
                        user space to do the scan.
 
+       scsi_mod.use_blk_mq=
+                       [SCSI] use blk-mq I/O path by default
+                       See SCSI_MQ_DEFAULT in drivers/scsi/Kconfig.
+                       Format: <y/n>
+
        sim710=         [SCSI,HW]
                        See header of drivers/scsi/sim710.c.
 
index c664064..8d8a372 100644 (file)
@@ -4510,7 +4510,8 @@ Do not enable KVM_FEATURE_PV_UNHALT if you disable HLT exits.
 Architectures: s390
 Parameters: none
 Returns: 0 on success, -EINVAL if hpage module parameter was not set
-        or cmma is enabled
+        or cmma is enabled, or the VM has the KVM_VM_S390_UCONTROL
+        flag set
 
 With this capability the KVM support for memory backing with 1m pages
 through hugetlbfs can be enabled for a VM. After the capability is
index 688e3ee..46933e0 100644 (file)
@@ -35,25 +35,25 @@ and two USB cables, connected like this:
 ( If your system does not list a debug port capability then you probably
   won't be able to use the USB debug key. )
 
- b.) You also need a Netchip USB debug cable/key:
+ b.) You also need a NetChip USB debug cable/key:
 
         http://www.plxtech.com/products/NET2000/NET20DC/default.asp
 
-     This is a small blue plastic connector with two USB connections,
+     This is a small blue plastic connector with two USB connections;
      it draws power from its USB connections.
 
  c.) You need a second client/console system with a high speed USB 2.0
      port.
 
- d.) The Netchip device must be plugged directly into the physical
+ d.) The NetChip device must be plugged directly into the physical
      debug port on the "host/target" system.  You cannot use a USB hub in
      between the physical debug port and the "host/target" system.
 
      The EHCI debug controller is bound to a specific physical USB
-     port and the Netchip device will only work as an early printk
+     port and the NetChip device will only work as an early printk
      device in this port.  The EHCI host controllers are electrically
      wired such that the EHCI debug controller is hooked up to the
-     first physical and there is no way to change this via software.
+     first physical port and there is no way to change this via software.
      You can find the physical port through experimentation by trying
      each physical port on the system and rebooting.  Or you can try
      and use lsusb or look at the kernel info messages emitted by the
@@ -65,9 +65,9 @@ and two USB cables, connected like this:
      to the hardware vendor, because there is no reason not to wire
      this port into one of the physically accessible ports.
 
- e.) It is also important to note, that many versions of the Netchip
+ e.) It is also important to note, that many versions of the NetChip
      device require the "client/console" system to be plugged into the
-     right and side of the device (with the product logo facing up and
+     right hand side of the device (with the product logo facing up and
      readable left to right).  The reason being is that the 5 volt
      power supply is taken from only one side of the device and it
      must be the side that does not get rebooted.
@@ -81,13 +81,18 @@ and two USB cables, connected like this:
       CONFIG_EARLY_PRINTK_DBGP=y
 
     And you need to add the boot command line: "earlyprintk=dbgp".
+
     (If you are using Grub, append it to the 'kernel' line in
-     /etc/grub.conf)
+     /etc/grub.conf.  If you are using Grub2 on a BIOS firmware system,
+     append it to the 'linux' line in /boot/grub2/grub.cfg. If you are
+     using Grub2 on an EFI firmware system, append it to the 'linux'
+     or 'linuxefi' line in /boot/grub2/grub.cfg or
+     /boot/efi/EFI/<distro>/grub.cfg.)
 
     On systems with more than one EHCI debug controller you must
     specify the correct EHCI debug controller number.  The ordering
     comes from the PCI bus enumeration of the EHCI controllers.  The
-    default with no number argument is "0" the first EHCI debug
+    default with no number argument is "0" or the first EHCI debug
     controller.  To use the second EHCI debug controller, you would
     use the command line: "earlyprintk=dbgp1"
 
@@ -111,7 +116,7 @@ and two USB cables, connected like this:
     see the raw output.
 
  c.) On Nvidia Southbridge based systems: the kernel will try to probe
-     and find out which port has debug device connected.
+     and find out which port has debug device connected.
 
 3. Testing that it works fine:
 
index 9ad052a..4ece30f 100644 (file)
@@ -2311,6 +2311,7 @@ F:        drivers/clocksource/cadence_ttc_timer.c
 F:     drivers/i2c/busses/i2c-cadence.c
 F:     drivers/mmc/host/sdhci-of-arasan.c
 F:     drivers/edac/synopsys_edac.c
+F:     drivers/i2c/busses/i2c-xiic.c
 
 ARM64 PORT (AARCH64 ARCHITECTURE)
 M:     Catalin Marinas <catalin.marinas@arm.com>
@@ -5624,6 +5625,8 @@ F:        lib/fault-inject.c
 
 FBTFT Framebuffer drivers
 M:     Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
+L:     dri-devel@lists.freedesktop.org
+L:     linux-fbdev@vger.kernel.org
 S:     Maintained
 F:     drivers/staging/fbtft/
 
@@ -6059,7 +6062,7 @@ F:        Documentation/gcc-plugins.txt
 
 GASKET DRIVER FRAMEWORK
 M:     Rob Springer <rspringer@google.com>
-M:     John Joseph <jnjoseph@google.com>
+M:     Todd Poynor <toddpoynor@google.com>
 M:     Ben Chan <benchan@chromium.org>
 S:     Maintained
 F:     drivers/staging/gasket/
@@ -7015,6 +7018,20 @@ F:       drivers/crypto/vmx/aes*
 F:     drivers/crypto/vmx/ghash*
 F:     drivers/crypto/vmx/ppc-xlate.pl
 
+IBM Power PCI Hotplug Driver for RPA-compliant PPC64 platform
+M:     Tyrel Datwyler <tyreld@linux.vnet.ibm.com>
+L:     linux-pci@vger.kernel.org
+L:     linuxppc-dev@lists.ozlabs.org
+S:     Supported
+F:     drivers/pci/hotplug/rpaphp*
+
+IBM Power IO DLPAR Driver for RPA-compliant PPC64 platform
+M:     Tyrel Datwyler <tyreld@linux.vnet.ibm.com>
+L:     linux-pci@vger.kernel.org
+L:     linuxppc-dev@lists.ozlabs.org
+S:     Supported
+F:     drivers/pci/hotplug/rpadlpar*
+
 IBM ServeRAID RAID DRIVER
 S:     Orphan
 F:     drivers/scsi/ips.*
@@ -8299,7 +8316,7 @@ F:        include/linux/libata.h
 F:     Documentation/devicetree/bindings/ata/
 
 LIBLOCKDEP
-M:     Sasha Levin <alexander.levin@verizon.com>
+M:     Sasha Levin <alexander.levin@microsoft.com>
 S:     Maintained
 F:     tools/lib/lockdep/
 
@@ -11153,7 +11170,7 @@ F:      drivers/pci/controller/dwc/pci-exynos.c
 
 PCI DRIVER FOR SYNOPSYS DESIGNWARE
 M:     Jingoo Han <jingoohan1@gmail.com>
-M:     Joao Pinto <Joao.Pinto@synopsys.com>
+M:     Gustavo Pimentel <gustavo.pimentel@synopsys.com>
 L:     linux-pci@vger.kernel.org
 S:     Maintained
 F:     Documentation/devicetree/bindings/pci/designware-pcie.txt
@@ -11345,10 +11362,10 @@ S:    Maintained
 F:     drivers/platform/x86/peaq-wmi.c
 
 PER-CPU MEMORY ALLOCATOR
+M:     Dennis Zhou <dennis@kernel.org>
 M:     Tejun Heo <tj@kernel.org>
 M:     Christoph Lameter <cl@linux.com>
-M:     Dennis Zhou <dennisszhou@gmail.com>
-T:     git git://git.kernel.org/pub/scm/linux/kernel/git/tj/percpu.git
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/dennis/percpu.git
 S:     Maintained
 F:     include/linux/percpu*.h
 F:     mm/percpu*.c
index 19948e5..83a03fa 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
 VERSION = 4
 PATCHLEVEL = 19
 SUBLEVEL = 0
-EXTRAVERSION = -rc2
+EXTRAVERSION = -rc4
 NAME = Merciless Moray
 
 # *DOCUMENTATION*
@@ -616,6 +616,11 @@ CFLAGS_GCOV        := -fprofile-arcs -ftest-coverage \
        $(call cc-disable-warning,maybe-uninitialized,)
 export CFLAGS_GCOV
 
+# The arch Makefiles can override CC_FLAGS_FTRACE. We may also append it later.
+ifdef CONFIG_FUNCTION_TRACER
+  CC_FLAGS_FTRACE := -pg
+endif
+
 # The arch Makefile can set ARCH_{CPP,A,C}FLAGS to override the default
 # values of the respective KBUILD_* variables
 ARCH_CPPFLAGS :=
@@ -755,9 +760,6 @@ KBUILD_CFLAGS       += $(call cc-option, -femit-struct-debug-baseonly) \
 endif
 
 ifdef CONFIG_FUNCTION_TRACER
-ifndef CC_FLAGS_FTRACE
-CC_FLAGS_FTRACE := -pg
-endif
 ifdef CONFIG_FTRACE_MCOUNT_RECORD
   # gcc 5 supports generating the mcount tables directly
   ifeq ($(call cc-option-yn,-mrecord-mcount),y)
index 6d5eb82..b4441b0 100644 (file)
@@ -9,6 +9,7 @@
 config ARC
        def_bool y
        select ARC_TIMERS
+       select ARCH_HAS_PTE_SPECIAL
        select ARCH_HAS_SYNC_DMA_FOR_CPU
        select ARCH_HAS_SYNC_DMA_FOR_DEVICE
        select ARCH_HAS_SG_CHAIN
@@ -28,8 +29,12 @@ config ARC
        select GENERIC_SMP_IDLE_THREAD
        select HAVE_ARCH_KGDB
        select HAVE_ARCH_TRACEHOOK
+       select HAVE_DEBUG_STACKOVERFLOW
        select HAVE_FUTEX_CMPXCHG if FUTEX
+       select HAVE_GENERIC_DMA_COHERENT
        select HAVE_IOREMAP_PROT
+       select HAVE_KERNEL_GZIP
+       select HAVE_KERNEL_LZMA
        select HAVE_KPROBES
        select HAVE_KRETPROBES
        select HAVE_MEMBLOCK
@@ -44,11 +49,6 @@ config ARC
        select OF_EARLY_FLATTREE
        select OF_RESERVED_MEM
        select PERF_USE_VMALLOC if ARC_CACHE_VIPT_ALIASING
-       select HAVE_DEBUG_STACKOVERFLOW
-       select HAVE_GENERIC_DMA_COHERENT
-       select HAVE_KERNEL_GZIP
-       select HAVE_KERNEL_LZMA
-       select ARCH_HAS_PTE_SPECIAL
 
 config ARCH_HAS_CACHE_LINE_SIZE
        def_bool y
index fb02619..99cce77 100644 (file)
@@ -43,10 +43,7 @@ ifdef CONFIG_ARC_CURR_IN_REG
 LINUXINCLUDE   +=  -include ${src}/arch/arc/include/asm/current.h
 endif
 
-upto_gcc44    :=  $(call cc-ifversion, -le, 0404, y)
-atleast_gcc44 :=  $(call cc-ifversion, -ge, 0404, y)
-
-cflags-$(atleast_gcc44)                        += -fsection-anchors
+cflags-y                               += -fsection-anchors
 
 cflags-$(CONFIG_ARC_HAS_LLSC)          += -mlock
 cflags-$(CONFIG_ARC_HAS_SWAPE)         += -mswape
@@ -82,11 +79,6 @@ cflags-$(disable_small_data)         += -mno-sdata -fcall-used-gp
 cflags-$(CONFIG_CPU_BIG_ENDIAN)                += -mbig-endian
 ldflags-$(CONFIG_CPU_BIG_ENDIAN)       += -EB
 
-# STAR 9000518362: (fixed with binutils shipping with gcc 4.8)
-# arc-linux-uclibc-ld (buildroot) or arceb-elf32-ld (EZChip) don't accept
-# --build-id w/o "-marclinux". Default arc-elf32-ld is OK
-ldflags-$(upto_gcc44)                  += -marclinux
-
 LIBGCC := $(shell $(CC) $(cflags-y) --print-libgcc-file-name)
 
 # Modules with short calls might break for calls into builtin-kernel
index dc91c66..d75d65d 100644 (file)
                };
        };
 
+       /*
+        * Mark DMA peripherals connected via IOC port as dma-coherent. We do
+        * it via overlay because peripherals defined in axs10x_mb.dtsi are
+        * used for both AXS101 and AXS103 boards and only AXS103 has IOC (so
+        * only AXS103 board has HW-coherent DMA peripherals)
+        * We don't need to mark pgu@17000 as dma-coherent because it uses
+        * external DMA buffer located outside of IOC aperture.
+        */
+       axs10x_mb {
+               ethernet@0x18000 {
+                       dma-coherent;
+               };
+
+               ehci@0x40000 {
+                       dma-coherent;
+               };
+
+               ohci@0x60000 {
+                       dma-coherent;
+               };
+
+               mmc@0x15000 {
+                       dma-coherent;
+               };
+       };
+
        /*
         * The DW APB ICTL intc on MB is connected to CPU intc via a
         * DT "invisible" DW APB GPIO block, configured to simply pass thru
index 69ff489..a05bb73 100644 (file)
                };
        };
 
+       /*
+        * Mark DMA peripherals connected via IOC port as dma-coherent. We do
+        * it via overlay because peripherals defined in axs10x_mb.dtsi are
+        * used for both AXS101 and AXS103 boards and only AXS103 has IOC (so
+        * only AXS103 board has HW-coherent DMA peripherals)
+        * We don't need to mark pgu@17000 as dma-coherent because it uses
+        * external DMA buffer located outside of IOC aperture.
+        */
+       axs10x_mb {
+               ethernet@0x18000 {
+                       dma-coherent;
+               };
+
+               ehci@0x40000 {
+                       dma-coherent;
+               };
+
+               ohci@0x60000 {
+                       dma-coherent;
+               };
+
+               mmc@0x15000 {
+                       dma-coherent;
+               };
+       };
+
        /*
         * This INTC is actually connected to DW APB GPIO
         * which acts as a wire between MB INTC and CPU INTC.
index 47b74fb..37bafd4 100644 (file)
@@ -9,6 +9,10 @@
  */
 
 / {
+       aliases {
+               ethernet = &gmac;
+       };
+
        axs10x_mb {
                compatible = "simple-bus";
                #address-cells = <1>;
@@ -68,7 +72,7 @@
                        };
                };
 
-               ethernet@0x18000 {
+               gmac: ethernet@0x18000 {
                        #interrupt-cells = <1>;
                        compatible = "snps,dwmac";
                        reg = < 0x18000 0x2000 >;
@@ -81,6 +85,7 @@
                        max-speed = <100>;
                        resets = <&creg_rst 5>;
                        reset-names = "stmmaceth";
+                       mac-address = [00 00 00 00 00 00]; /* Filled in by U-Boot */
                };
 
                ehci@0x40000 {
index 006aa3d..ef149f5 100644 (file)
                bootargs = "earlycon=uart8250,mmio32,0xf0005000,115200n8 console=ttyS0,115200n8 debug print-fatal-signals=1";
        };
 
+       aliases {
+               ethernet = &gmac;
+       };
+
        cpus {
                #address-cells = <1>;
                #size-cells = <0>;
                        #clock-cells = <0>;
                };
 
-               ethernet@8000 {
+               gmac: ethernet@8000 {
                        #interrupt-cells = <1>;
                        compatible = "snps,dwmac";
                        reg = <0x8000 0x2000>;
                        phy-handle = <&phy0>;
                        resets = <&cgu_rst HSDK_ETH_RESET>;
                        reset-names = "stmmaceth";
+                       mac-address = [00 00 00 00 00 00]; /* Filled in by U-Boot */
+                       dma-coherent;
 
                        mdio {
                                #address-cells = <1>;
                        compatible = "snps,hsdk-v1.0-ohci", "generic-ohci";
                        reg = <0x60000 0x100>;
                        interrupts = <15>;
+                       dma-coherent;
                };
 
                ehci@40000 {
                        compatible = "snps,hsdk-v1.0-ehci", "generic-ehci";
                        reg = <0x40000 0x100>;
                        interrupts = <15>;
+                       dma-coherent;
                };
 
                mmc@a000 {
                        clock-names = "biu", "ciu";
                        interrupts = <12>;
                        bus-width = <4>;
+                       dma-coherent;
                };
        };
 
index a635ea9..41bc08b 100644 (file)
@@ -1,5 +1,3 @@
-CONFIG_DEFAULT_HOSTNAME="ARCLinux"
-# CONFIG_SWAP is not set
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
 # CONFIG_CROSS_MEMORY_ATTACH is not set
@@ -63,7 +61,6 @@ CONFIG_MOUSE_PS2_TOUCHKIT=y
 CONFIG_MOUSE_SERIAL=y
 CONFIG_MOUSE_SYNAPTICS_USB=y
 # CONFIG_LEGACY_PTYS is not set
-# CONFIG_DEVKMEM is not set
 CONFIG_SERIAL_8250=y
 CONFIG_SERIAL_8250_CONSOLE=y
 CONFIG_SERIAL_8250_DW=y
index aa507e4..1e1c4a8 100644 (file)
@@ -1,5 +1,3 @@
-CONFIG_DEFAULT_HOSTNAME="ARCLinux"
-# CONFIG_SWAP is not set
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
 # CONFIG_CROSS_MEMORY_ATTACH is not set
@@ -64,7 +62,6 @@ CONFIG_MOUSE_PS2_TOUCHKIT=y
 CONFIG_MOUSE_SERIAL=y
 CONFIG_MOUSE_SYNAPTICS_USB=y
 # CONFIG_LEGACY_PTYS is not set
-# CONFIG_DEVKMEM is not set
 CONFIG_SERIAL_8250=y
 CONFIG_SERIAL_8250_CONSOLE=y
 CONFIG_SERIAL_8250_DW=y
index eba07f4..6b0c0cf 100644 (file)
@@ -1,5 +1,3 @@
-CONFIG_DEFAULT_HOSTNAME="ARCLinux"
-# CONFIG_SWAP is not set
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
 # CONFIG_CROSS_MEMORY_ATTACH is not set
@@ -65,7 +63,6 @@ CONFIG_MOUSE_PS2_TOUCHKIT=y
 CONFIG_MOUSE_SERIAL=y
 CONFIG_MOUSE_SYNAPTICS_USB=y
 # CONFIG_LEGACY_PTYS is not set
-# CONFIG_DEVKMEM is not set
 CONFIG_SERIAL_8250=y
 CONFIG_SERIAL_8250_CONSOLE=y
 CONFIG_SERIAL_8250_DW=y
index 098b19f..240dd2c 100644 (file)
@@ -1,4 +1,3 @@
-CONFIG_DEFAULT_HOSTNAME="ARCLinux"
 # CONFIG_SWAP is not set
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
@@ -57,7 +56,6 @@ CONFIG_MOUSE_PS2_TOUCHKIT=y
 # CONFIG_SERIO_SERPORT is not set
 CONFIG_SERIO_ARC_PS2=y
 # CONFIG_LEGACY_PTYS is not set
-# CONFIG_DEVKMEM is not set
 CONFIG_SERIAL_8250=y
 CONFIG_SERIAL_8250_CONSOLE=y
 CONFIG_SERIAL_8250_NR_UARTS=1
index 0104c40..14ae7e5 100644 (file)
@@ -1,4 +1,3 @@
-CONFIG_DEFAULT_HOSTNAME="ARCLinux"
 # CONFIG_SWAP is not set
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
@@ -60,7 +59,6 @@ CONFIG_MOUSE_PS2_TOUCHKIT=y
 # CONFIG_SERIO_SERPORT is not set
 CONFIG_SERIO_ARC_PS2=y
 # CONFIG_LEGACY_PTYS is not set
-# CONFIG_DEVKMEM is not set
 CONFIG_SERIAL_8250=y
 CONFIG_SERIAL_8250_CONSOLE=y
 CONFIG_SERIAL_8250_NR_UARTS=1
index 6491be0..1dec2b4 100644 (file)
@@ -1,4 +1,3 @@
-CONFIG_DEFAULT_HOSTNAME="ARCLinux"
 CONFIG_SYSVIPC=y
 # CONFIG_CROSS_MEMORY_ATTACH is not set
 CONFIG_NO_HZ_IDLE=y
index 7c9c706..31ba224 100644 (file)
@@ -59,7 +59,6 @@ CONFIG_NETCONSOLE=y
 # CONFIG_INPUT_MOUSE is not set
 # CONFIG_SERIO is not set
 # CONFIG_LEGACY_PTYS is not set
-# CONFIG_DEVKMEM is not set
 CONFIG_SERIAL_8250=y
 CONFIG_SERIAL_8250_CONSOLE=y
 CONFIG_SERIAL_8250_NR_UARTS=1
index 99e05cf..8e0b8b1 100644 (file)
@@ -1,5 +1,4 @@
 # CONFIG_LOCALVERSION_AUTO is not set
-CONFIG_DEFAULT_HOSTNAME="ARCLinux"
 # CONFIG_SWAP is not set
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
@@ -44,7 +43,6 @@ CONFIG_LXT_PHY=y
 # CONFIG_INPUT_MOUSE is not set
 # CONFIG_SERIO is not set
 # CONFIG_LEGACY_PTYS is not set
-# CONFIG_DEVKMEM is not set
 CONFIG_SERIAL_ARC=y
 CONFIG_SERIAL_ARC_CONSOLE=y
 # CONFIG_HW_RANDOM is not set
index 0dc4f9b..739b90e 100644 (file)
@@ -1,5 +1,4 @@
 # CONFIG_LOCALVERSION_AUTO is not set
-CONFIG_DEFAULT_HOSTNAME="ARCLinux"
 # CONFIG_SWAP is not set
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
@@ -45,7 +44,6 @@ CONFIG_DEVTMPFS=y
 # CONFIG_INPUT_MOUSE is not set
 # CONFIG_SERIO is not set
 # CONFIG_LEGACY_PTYS is not set
-# CONFIG_DEVKMEM is not set
 CONFIG_SERIAL_ARC=y
 CONFIG_SERIAL_ARC_CONSOLE=y
 # CONFIG_HW_RANDOM is not set
index be3c30a..b5895bd 100644 (file)
@@ -1,5 +1,4 @@
 # CONFIG_LOCALVERSION_AUTO is not set
-CONFIG_DEFAULT_HOSTNAME="ARCLinux"
 # CONFIG_SWAP is not set
 # CONFIG_CROSS_MEMORY_ATTACH is not set
 CONFIG_HIGH_RES_TIMERS=y
@@ -44,7 +43,6 @@ CONFIG_DEVTMPFS=y
 # CONFIG_INPUT_MOUSE is not set
 # CONFIG_SERIO is not set
 # CONFIG_LEGACY_PTYS is not set
-# CONFIG_DEVKMEM is not set
 CONFIG_SERIAL_ARC=y
 CONFIG_SERIAL_ARC_CONSOLE=y
 # CONFIG_HW_RANDOM is not set
index 3a74b9b..f14eeff 100644 (file)
@@ -1,5 +1,4 @@
 # CONFIG_LOCALVERSION_AUTO is not set
-CONFIG_DEFAULT_HOSTNAME="ARCLinux"
 # CONFIG_SWAP is not set
 CONFIG_SYSVIPC=y
 # CONFIG_CROSS_MEMORY_ATTACH is not set
@@ -48,7 +47,6 @@ CONFIG_MOUSE_PS2_TOUCHKIT=y
 # CONFIG_SERIO_SERPORT is not set
 CONFIG_SERIO_ARC_PS2=y
 # CONFIG_LEGACY_PTYS is not set
-# CONFIG_DEVKMEM is not set
 CONFIG_SERIAL_8250=y
 CONFIG_SERIAL_8250_CONSOLE=y
 CONFIG_SERIAL_8250_NR_UARTS=1
index ea2834b..025298a 100644 (file)
@@ -1,5 +1,4 @@
 # CONFIG_LOCALVERSION_AUTO is not set
-CONFIG_DEFAULT_HOSTNAME="ARCLinux"
 # CONFIG_SWAP is not set
 CONFIG_SYSVIPC=y
 # CONFIG_CROSS_MEMORY_ATTACH is not set
@@ -47,7 +46,6 @@ CONFIG_MOUSE_PS2_TOUCHKIT=y
 # CONFIG_SERIO_SERPORT is not set
 CONFIG_SERIO_ARC_PS2=y
 # CONFIG_LEGACY_PTYS is not set
-# CONFIG_DEVKMEM is not set
 CONFIG_SERIAL_8250=y
 CONFIG_SERIAL_8250_CONSOLE=y
 CONFIG_SERIAL_8250_NR_UARTS=1
index 80a5a1b..df7b77b 100644 (file)
@@ -1,4 +1,3 @@
-CONFIG_DEFAULT_HOSTNAME="ARCLinux"
 # CONFIG_SWAP is not set
 CONFIG_SYSVIPC=y
 # CONFIG_CROSS_MEMORY_ATTACH is not set
@@ -58,7 +57,6 @@ CONFIG_MOUSE_PS2_TOUCHKIT=y
 # CONFIG_SERIO_SERPORT is not set
 CONFIG_SERIO_ARC_PS2=y
 # CONFIG_LEGACY_PTYS is not set
-# CONFIG_DEVKMEM is not set
 CONFIG_SERIAL_8250=y
 CONFIG_SERIAL_8250_CONSOLE=y
 CONFIG_SERIAL_8250_NR_UARTS=1
index 2cc87f9..a7f6531 100644 (file)
@@ -57,7 +57,6 @@ CONFIG_STMMAC_ETH=y
 # CONFIG_SERIO is not set
 # CONFIG_VT is not set
 # CONFIG_LEGACY_PTYS is not set
-# CONFIG_DEVKMEM is not set
 CONFIG_SERIAL_8250=y
 CONFIG_SERIAL_8250_CONSOLE=y
 CONFIG_SERIAL_8250_NR_UARTS=1
index f629493..db47c35 100644 (file)
@@ -1,5 +1,4 @@
 # CONFIG_LOCALVERSION_AUTO is not set
-CONFIG_DEFAULT_HOSTNAME="ARCLinux"
 # CONFIG_CROSS_MEMORY_ATTACH is not set
 CONFIG_HIGH_RES_TIMERS=y
 CONFIG_IKCONFIG=y
@@ -53,7 +52,6 @@ CONFIG_NATIONAL_PHY=y
 CONFIG_MOUSE_PS2_TOUCHKIT=y
 CONFIG_SERIO_ARC_PS2=y
 # CONFIG_LEGACY_PTYS is not set
-# CONFIG_DEVKMEM is not set
 CONFIG_SERIAL_8250=y
 CONFIG_SERIAL_8250_CONSOLE=y
 CONFIG_SERIAL_8250_DW=y
index 21f0ca2..a8ac5e9 100644 (file)
@@ -1,5 +1,4 @@
 # CONFIG_LOCALVERSION_AUTO is not set
-CONFIG_DEFAULT_HOSTNAME="ARCLinux"
 # CONFIG_CROSS_MEMORY_ATTACH is not set
 CONFIG_HIGH_RES_TIMERS=y
 CONFIG_IKCONFIG=y
index 4e00727..158af07 100644 (file)
@@ -84,7 +84,7 @@ static inline int atomic_fetch_##op(int i, atomic_t *v)                       \
        "1:     llock   %[orig], [%[ctr]]               \n"             \
        "       " #asm_op " %[val], %[orig], %[i]       \n"             \
        "       scond   %[val], [%[ctr]]                \n"             \
-       "                                               \n"             \
+       "       bnz     1b                              \n"             \
        : [val] "=&r"   (val),                                          \
          [orig] "=&r" (orig)                                           \
        : [ctr] "r"     (&v->counter),                                  \
diff --git a/arch/arc/include/asm/dma-mapping.h b/arch/arc/include/asm/dma-mapping.h
new file mode 100644 (file)
index 0000000..c946c0a
--- /dev/null
@@ -0,0 +1,13 @@
+// SPDX-License-Identifier:  GPL-2.0
+// (C) 2018 Synopsys, Inc. (www.synopsys.com)
+
+#ifndef ASM_ARC_DMA_MAPPING_H
+#define ASM_ARC_DMA_MAPPING_H
+
+#include <asm-generic/dma-mapping.h>
+
+void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
+                       const struct iommu_ops *iommu, bool coherent);
+#define arch_setup_dma_ops arch_setup_dma_ops
+
+#endif
index 783b203..e8d9fb4 100644 (file)
@@ -83,9 +83,6 @@ done:
 static void show_faulting_vma(unsigned long address, char *buf)
 {
        struct vm_area_struct *vma;
-       struct inode *inode;
-       unsigned long ino = 0;
-       dev_t dev = 0;
        char *nm = buf;
        struct mm_struct *active_mm = current->active_mm;
 
@@ -99,12 +96,10 @@ static void show_faulting_vma(unsigned long address, char *buf)
         * if the container VMA is not found
         */
        if (vma && (vma->vm_start <= address)) {
-               struct file *file = vma->vm_file;
-               if (file) {
-                       nm = file_path(file, buf, PAGE_SIZE - 1);
-                       inode = file_inode(vma->vm_file);
-                       dev = inode->i_sb->s_dev;
-                       ino = inode->i_ino;
+               if (vma->vm_file) {
+                       nm = file_path(vma->vm_file, buf, PAGE_SIZE - 1);
+                       if (IS_ERR(nm))
+                               nm = "?";
                }
                pr_info("    @off 0x%lx in [%s]\n"
                        "    VMA: 0x%08lx to 0x%08lx\n",
index 25c6319..f2701c1 100644 (file)
@@ -65,7 +65,7 @@ char *arc_cache_mumbojumbo(int c, char *buf, int len)
 
        n += scnprintf(buf + n, len - n, "Peripherals\t: %#lx%s%s\n",
                       perip_base,
-                      IS_AVAIL3(ioc_exists, ioc_enable, ", IO-Coherency "));
+                      IS_AVAIL3(ioc_exists, ioc_enable, ", IO-Coherency (per-device) "));
 
        return buf;
 }
@@ -896,15 +896,6 @@ static void __dma_cache_wback_slc(phys_addr_t start, unsigned long sz)
        slc_op(start, sz, OP_FLUSH);
 }
 
-/*
- * DMA ops for systems with IOC
- * IOC hardware snoops all DMA traffic keeping the caches consistent with
- * memory - eliding need for any explicit cache maintenance of DMA buffers
- */
-static void __dma_cache_wback_inv_ioc(phys_addr_t start, unsigned long sz) {}
-static void __dma_cache_inv_ioc(phys_addr_t start, unsigned long sz) {}
-static void __dma_cache_wback_ioc(phys_addr_t start, unsigned long sz) {}
-
 /*
  * Exported DMA API
  */
@@ -1153,6 +1144,19 @@ noinline void __init arc_ioc_setup(void)
 {
        unsigned int ioc_base, mem_sz;
 
+       /*
+        * As for today we don't support both IOC and ZONE_HIGHMEM enabled
+        * simultaneously. This happens because as of today IOC aperture covers
+        * only ZONE_NORMAL (low mem) and any dma transactions outside this
+        * region won't be HW coherent.
+        * If we want to use both IOC and ZONE_HIGHMEM we can use
+        * bounce_buffer to handle dma transactions to HIGHMEM.
+        * Also it is possible to modify dma_direct cache ops or increase IOC
+        * aperture size if we are planning to use HIGHMEM without PAE.
+        */
+       if (IS_ENABLED(CONFIG_HIGHMEM))
+               panic("IOC and HIGHMEM can't be used simultaneously");
+
        /* Flush + invalidate + disable L1 dcache */
        __dc_disable();
 
@@ -1264,11 +1268,7 @@ void __init arc_cache_init_master(void)
        if (is_isa_arcv2() && ioc_enable)
                arc_ioc_setup();
 
-       if (is_isa_arcv2() && ioc_enable) {
-               __dma_cache_wback_inv = __dma_cache_wback_inv_ioc;
-               __dma_cache_inv = __dma_cache_inv_ioc;
-               __dma_cache_wback = __dma_cache_wback_ioc;
-       } else if (is_isa_arcv2() && l2_line_sz && slc_enable) {
+       if (is_isa_arcv2() && l2_line_sz && slc_enable) {
                __dma_cache_wback_inv = __dma_cache_wback_inv_slc;
                __dma_cache_inv = __dma_cache_inv_slc;
                __dma_cache_wback = __dma_cache_wback_slc;
@@ -1277,6 +1277,12 @@ void __init arc_cache_init_master(void)
                __dma_cache_inv = __dma_cache_inv_l1;
                __dma_cache_wback = __dma_cache_wback_l1;
        }
+       /*
+        * In case of IOC (say IOC+SLC case), pointers above could still be set
+        * but end up not being relevant as the first function in chain is not
+        * called at all for @dma_direct_ops
+        *     arch_sync_dma_for_cpu() -> dma_cache_*() -> __dma_cache_*()
+        */
 }
 
 void __ref arc_cache_init(void)
index ec47e60..c75d5c3 100644 (file)
@@ -6,20 +6,17 @@
  * published by the Free Software Foundation.
  */
 
-/*
- * DMA Coherent API Notes
- *
- * I/O is inherently non-coherent on ARC. So a coherent DMA buffer is
- * implemented by accessing it using a kernel virtual address, with
- * Cache bit off in the TLB entry.
- *
- * The default DMA address == Phy address which is 0x8000_0000 based.
- */
-
 #include <linux/dma-noncoherent.h>
 #include <asm/cache.h>
 #include <asm/cacheflush.h>
 
+/*
+ * ARCH specific callbacks for generic noncoherent DMA ops (dma/noncoherent.c)
+ *  - hardware IOC not available (or "dma-coherent" not set for device in DT)
+ *  - But still handle both coherent and non-coherent requests from caller
+ *
+ * For DMA coherent hardware (IOC) generic code suffices
+ */
 void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
                gfp_t gfp, unsigned long attrs)
 {
@@ -27,42 +24,29 @@ void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
        struct page *page;
        phys_addr_t paddr;
        void *kvaddr;
-       int need_coh = 1, need_kvaddr = 0;
-
-       page = alloc_pages(gfp, order);
-       if (!page)
-               return NULL;
+       bool need_coh = !(attrs & DMA_ATTR_NON_CONSISTENT);
 
        /*
-        * IOC relies on all data (even coherent DMA data) being in cache
-        * Thus allocate normal cached memory
-        *
-        * The gains with IOC are two pronged:
-        *   -For streaming data, elides need for cache maintenance, saving
-        *    cycles in flush code, and bus bandwidth as all the lines of a
-        *    buffer need to be flushed out to memory
-        *   -For coherent data, Read/Write to buffers terminate early in cache
-        *   (vs. always going to memory - thus are faster)
+        * __GFP_HIGHMEM flag is cleared by upper layer functions
+        * (in include/linux/dma-mapping.h) so we should never get a
+        * __GFP_HIGHMEM here.
         */
-       if ((is_isa_arcv2() && ioc_enable) ||
-           (attrs & DMA_ATTR_NON_CONSISTENT))
-               need_coh = 0;
+       BUG_ON(gfp & __GFP_HIGHMEM);
 
-       /*
-        * - A coherent buffer needs MMU mapping to enforce non-cachability
-        * - A highmem page needs a virtual handle (hence MMU mapping)
-        *   independent of cachability
-        */
-       if (PageHighMem(page) || need_coh)
-               need_kvaddr = 1;
+       page = alloc_pages(gfp, order);
+       if (!page)
+               return NULL;
 
        /* This is linear addr (0x8000_0000 based) */
        paddr = page_to_phys(page);
 
        *dma_handle = paddr;
 
-       /* This is kernel Virtual address (0x7000_0000 based) */
-       if (need_kvaddr) {
+       /*
+        * A coherent buffer needs MMU mapping to enforce non-cachability.
+        * kvaddr is kernel Virtual address (0x7000_0000 based).
+        */
+       if (need_coh) {
                kvaddr = ioremap_nocache(paddr, size);
                if (kvaddr == NULL) {
                        __free_pages(page, order);
@@ -93,12 +77,8 @@ void arch_dma_free(struct device *dev, size_t size, void *vaddr,
 {
        phys_addr_t paddr = dma_handle;
        struct page *page = virt_to_page(paddr);
-       int is_non_coh = 1;
-
-       is_non_coh = (attrs & DMA_ATTR_NON_CONSISTENT) ||
-                       (is_isa_arcv2() && ioc_enable);
 
-       if (PageHighMem(page) || !is_non_coh)
+       if (!(attrs & DMA_ATTR_NON_CONSISTENT))
                iounmap((void __force __iomem *)vaddr);
 
        __free_pages(page, get_order(size));
@@ -185,3 +165,23 @@ void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
                break;
        }
 }
+
+/*
+ * Plug in coherent or noncoherent dma ops
+ */
+void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
+                       const struct iommu_ops *iommu, bool coherent)
+{
+       /*
+        * IOC hardware snoops all DMA traffic keeping the caches consistent
+        * with memory - eliding need for any explicit cache maintenance of
+        * DMA buffers - so we can use dma_direct cache ops.
+        */
+       if (is_isa_arcv2() && ioc_enable && coherent) {
+               set_dma_ops(dev, &dma_direct_ops);
+               dev_info(dev, "use dma_direct_ops cache ops\n");
+       } else {
+               set_dma_ops(dev, &dma_noncoherent_ops);
+               dev_info(dev, "use dma_noncoherent_ops cache ops\n");
+       }
+}
index ceffc40..48daec7 100644 (file)
@@ -46,6 +46,7 @@
        pinctrl-0 = <&mmc0_pins>;
        vmmc-supply = <&reg_cldo1>;
        cd-gpios = <&pio 5 6 GPIO_ACTIVE_LOW>;
+       bus-width = <4>;
        status = "okay";
 };
 
@@ -56,6 +57,7 @@
        vqmmc-supply = <&reg_bldo2>;
        non-removable;
        cap-mmc-hw-reset;
+       bus-width = <8>;
        status = "okay";
 };
 
index 1b5e0e8..7e2b3e3 100644 (file)
@@ -28,7 +28,7 @@
 
 static __always_inline bool arch_static_branch(struct static_key *key, bool branch)
 {
-       asm goto("1: nop\n\t"
+       asm_volatile_goto("1: nop\n\t"
                 ".pushsection __jump_table,  \"aw\"\n\t"
                 ".align 3\n\t"
                 ".quad 1b, %l[l_yes], %c0\n\t"
@@ -42,7 +42,7 @@ l_yes:
 
 static __always_inline bool arch_static_branch_jump(struct static_key *key, bool branch)
 {
-       asm goto("1: b %l[l_yes]\n\t"
+       asm_volatile_goto("1: b %l[l_yes]\n\t"
                 ".pushsection __jump_table,  \"aw\"\n\t"
                 ".align 3\n\t"
                 ".quad 1b, %l[l_yes], %c0\n\t"
index 95ac737..4c8b13b 100644 (file)
@@ -54,6 +54,7 @@ arm64-obj-$(CONFIG_KEXEC)             += machine_kexec.o relocate_kernel.o    \
 arm64-obj-$(CONFIG_ARM64_RELOC_TEST)   += arm64-reloc-test.o
 arm64-reloc-test-y := reloc_test_core.o reloc_test_syms.o
 arm64-obj-$(CONFIG_CRASH_DUMP)         += crash_dump.o
+arm64-obj-$(CONFIG_CRASH_CORE)         += crash_core.o
 arm64-obj-$(CONFIG_ARM_SDE_INTERFACE)  += sdei.o
 arm64-obj-$(CONFIG_ARM64_SSBD)         += ssbd.o
 
diff --git a/arch/arm64/kernel/crash_core.c b/arch/arm64/kernel/crash_core.c
new file mode 100644 (file)
index 0000000..ca4c3e1
--- /dev/null
@@ -0,0 +1,19 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) Linaro.
+ * Copyright (C) Huawei Futurewei Technologies.
+ */
+
+#include <linux/crash_core.h>
+#include <asm/memory.h>
+
+void arch_crash_save_vmcoreinfo(void)
+{
+       VMCOREINFO_NUMBER(VA_BITS);
+       /* Please note VMCOREINFO_NUMBER() uses "%d", not "%x" */
+       vmcoreinfo_append_str("NUMBER(kimage_voffset)=0x%llx\n",
+                                               kimage_voffset);
+       vmcoreinfo_append_str("NUMBER(PHYS_OFFSET)=0x%llx\n",
+                                               PHYS_OFFSET);
+       vmcoreinfo_append_str("KERNELOFFSET=%lx\n", kaslr_offset());
+}
index f6a5c6b..922add8 100644 (file)
@@ -358,14 +358,3 @@ void crash_free_reserved_phys_range(unsigned long begin, unsigned long end)
        }
 }
 #endif /* CONFIG_HIBERNATION */
-
-void arch_crash_save_vmcoreinfo(void)
-{
-       VMCOREINFO_NUMBER(VA_BITS);
-       /* Please note VMCOREINFO_NUMBER() uses "%d", not "%x" */
-       vmcoreinfo_append_str("NUMBER(kimage_voffset)=0x%llx\n",
-                                               kimage_voffset);
-       vmcoreinfo_append_str("NUMBER(PHYS_OFFSET)=0x%llx\n",
-                                               PHYS_OFFSET);
-       vmcoreinfo_append_str("KERNELOFFSET=%lx\n", kaslr_offset());
-}
index 65f8627..8080c9f 100644 (file)
@@ -985,8 +985,9 @@ int pmd_free_pte_page(pmd_t *pmdp, unsigned long addr)
 
        pmd = READ_ONCE(*pmdp);
 
-       /* No-op for empty entry and WARN_ON for valid entry */
-       if (!pmd_present(pmd) || !pmd_table(pmd)) {
+       if (!pmd_present(pmd))
+               return 1;
+       if (!pmd_table(pmd)) {
                VM_WARN_ON(!pmd_table(pmd));
                return 1;
        }
@@ -1007,8 +1008,9 @@ int pud_free_pmd_page(pud_t *pudp, unsigned long addr)
 
        pud = READ_ONCE(*pudp);
 
-       /* No-op for empty entry and WARN_ON for valid entry */
-       if (!pud_present(pud) || !pud_table(pud)) {
+       if (!pud_present(pud))
+               return 1;
+       if (!pud_table(pud)) {
                VM_WARN_ON(!pud_table(pud));
                return 1;
        }
index 5e4a59b..2691a18 100644 (file)
@@ -211,7 +211,7 @@ static inline long ffz(int x)
  * This is defined the same way as ffs.
  * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
  */
-static inline long fls(int x)
+static inline int fls(int x)
 {
        int r;
 
@@ -232,7 +232,7 @@ static inline long fls(int x)
  * the libc and compiler builtin ffs routines, therefore
  * differs in spirit from the above ffz (man ffs).
  */
-static inline long ffs(int x)
+static inline int ffs(int x)
 {
        int r;
 
index 77459df..7ebe7ad 100644 (file)
@@ -60,7 +60,7 @@ static void *hexagon_dma_alloc_coherent(struct device *dev, size_t size,
                        panic("Can't create %s() memory pool!", __func__);
                else
                        gen_pool_add(coherent_pool,
-                               pfn_to_virt(max_low_pfn),
+                               (unsigned long)pfn_to_virt(max_low_pfn),
                                hexagon_coherent_pool_size, -1);
        }
 
index 70dde04..f5453d9 100644 (file)
@@ -172,7 +172,7 @@ void __init cf_bootmem_alloc(void)
        high_memory = (void *)_ramend;
 
        /* Reserve kernel text/data/bss */
-       memblock_reserve(memstart, memstart - _rambase);
+       memblock_reserve(_rambase, memstart - _rambase);
 
        m68k_virt_to_node_shift = fls(_ramend - 1) - 6;
        module_fixup(NULL, __start_fixup, __stop_fixup);
index 4901833..8441b26 100644 (file)
@@ -40,6 +40,7 @@ struct ltq_dma_channel {
        int desc;                       /* the current descriptor */
        struct ltq_dma_desc *desc_base; /* the descriptor base */
        int phys;                       /* physical addr */
+       struct device *dev;
 };
 
 enum {
index 019035d..8f845f6 100644 (file)
@@ -13,6 +13,7 @@
 #include <linux/err.h>
 #include <linux/init.h>
 #include <linux/ioport.h>
+#include <linux/kernel.h>
 #include <linux/mm.h>
 #include <linux/sched.h>
 #include <linux/slab.h>
@@ -20,6 +21,7 @@
 
 #include <asm/abi.h>
 #include <asm/mips-cps.h>
+#include <asm/page.h>
 #include <asm/vdso.h>
 
 /* Kernel-provided data used by the VDSO. */
@@ -128,12 +130,30 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
        vvar_size = gic_size + PAGE_SIZE;
        size = vvar_size + image->size;
 
+       /*
+        * Find a region that's large enough for us to perform the
+        * colour-matching alignment below.
+        */
+       if (cpu_has_dc_aliases)
+               size += shm_align_mask + 1;
+
        base = get_unmapped_area(NULL, 0, size, 0, 0);
        if (IS_ERR_VALUE(base)) {
                ret = base;
                goto out;
        }
 
+       /*
+        * If we suffer from dcache aliasing, ensure that the VDSO data page
+        * mapping is coloured the same as the kernel's mapping of that memory.
+        * This ensures that when the kernel updates the VDSO data userland
+        * will observe it without requiring cache invalidations.
+        */
+       if (cpu_has_dc_aliases) {
+               base = __ALIGN_MASK(base, shm_align_mask);
+               base += ((unsigned long)&vdso_data - gic_size) & shm_align_mask;
+       }
+
        data_addr = base + gic_size;
        vdso_addr = data_addr + PAGE_SIZE;
 
index 4b9fbb6..664f2f7 100644 (file)
@@ -130,7 +130,7 @@ ltq_dma_alloc(struct ltq_dma_channel *ch)
        unsigned long flags;
 
        ch->desc = 0;
-       ch->desc_base = dma_zalloc_coherent(NULL,
+       ch->desc_base = dma_zalloc_coherent(ch->dev,
                                LTQ_DESC_NUM * LTQ_DESC_SIZE,
                                &ch->phys, GFP_ATOMIC);
 
@@ -182,7 +182,7 @@ ltq_dma_free(struct ltq_dma_channel *ch)
        if (!ch->desc_base)
                return;
        ltq_dma_close(ch);
-       dma_free_coherent(NULL, LTQ_DESC_NUM * LTQ_DESC_SIZE,
+       dma_free_coherent(ch->dev, LTQ_DESC_NUM * LTQ_DESC_SIZE,
                ch->desc_base, ch->phys);
 }
 EXPORT_SYMBOL_GPL(ltq_dma_free);
index 1d4248f..7068f34 100644 (file)
@@ -40,6 +40,10 @@ config NDS32
        select NO_IOPORT_MAP
        select RTC_LIB
        select THREAD_INFO_IN_TASK
+       select HAVE_FUNCTION_TRACER
+       select HAVE_FUNCTION_GRAPH_TRACER
+       select HAVE_FTRACE_MCOUNT_RECORD
+       select HAVE_DYNAMIC_FTRACE
        help
          Andes(nds32) Linux support.
 
index 63f4f17..3509fac 100644 (file)
@@ -5,6 +5,10 @@ KBUILD_DEFCONFIG := defconfig
 
 comma = ,
 
+ifdef CONFIG_FUNCTION_TRACER
+arch-y += -malways-save-lp -mno-relax
+endif
+
 KBUILD_CFLAGS  += $(call cc-option, -mno-sched-prolog-epilog)
 KBUILD_CFLAGS  += -mcmodel=large
 
index 56c4790..f5f9cf7 100644 (file)
@@ -121,9 +121,9 @@ struct elf32_hdr;
  */
 #define ELF_CLASS      ELFCLASS32
 #ifdef __NDS32_EB__
-#define ELF_DATA       ELFDATA2MSB;
+#define ELF_DATA       ELFDATA2MSB
 #else
-#define ELF_DATA       ELFDATA2LSB;
+#define ELF_DATA       ELFDATA2LSB
 #endif
 #define ELF_ARCH       EM_NDS32
 #define USE_ELF_CORE_DUMP
diff --git a/arch/nds32/include/asm/ftrace.h b/arch/nds32/include/asm/ftrace.h
new file mode 100644 (file)
index 0000000..2f96cc9
--- /dev/null
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __ASM_NDS32_FTRACE_H
+#define __ASM_NDS32_FTRACE_H
+
+#ifdef CONFIG_FUNCTION_TRACER
+
+#define HAVE_FUNCTION_GRAPH_FP_TEST
+
+#define MCOUNT_ADDR ((unsigned long)(_mcount))
+/* mcount call is composed of three instructions:
+ * sethi + ori + jral
+ */
+#define MCOUNT_INSN_SIZE 12
+
+extern void _mcount(unsigned long parent_ip);
+
+#ifdef CONFIG_DYNAMIC_FTRACE
+
+#define FTRACE_ADDR ((unsigned long)_ftrace_caller)
+
+#ifdef __NDS32_EL__
+#define INSN_NOP               0x09000040
+#define INSN_SIZE(insn)                (((insn & 0x00000080) == 0) ? 4 : 2)
+#define IS_SETHI(insn)         ((insn & 0x000000fe) == 0x00000046)
+#define ENDIAN_CONVERT(insn)   be32_to_cpu(insn)
+#else /* __NDS32_EB__ */
+#define INSN_NOP               0x40000009
+#define INSN_SIZE(insn)                (((insn & 0x80000000) == 0) ? 4 : 2)
+#define IS_SETHI(insn)         ((insn & 0xfe000000) == 0x46000000)
+#define ENDIAN_CONVERT(insn)   (insn)
+#endif
+
+extern void _ftrace_caller(unsigned long parent_ip);
+static inline unsigned long ftrace_call_adjust(unsigned long addr)
+{
+       return addr;
+}
+struct dyn_arch_ftrace {
+};
+
+#endif /* CONFIG_DYNAMIC_FTRACE */
+
+#endif /* CONFIG_FUNCTION_TRACER */
+
+#endif /* __ASM_NDS32_FTRACE_H */
index 19b1939..68c3815 100644 (file)
@@ -17,6 +17,7 @@
 #else
 #define FP_OFFSET (-2)
 #endif
+#define LP_OFFSET (-1)
 
 extern void __init early_trap_init(void);
 static inline void GIE_ENABLE(void)
index 18a009f..362a32d 100644 (file)
@@ -38,7 +38,7 @@ struct exception_table_entry {
 extern int fixup_exception(struct pt_regs *regs);
 
 #define KERNEL_DS      ((mm_segment_t) { ~0UL })
-#define USER_DS        ((mm_segment_t) {TASK_SIZE - 1})
+#define USER_DS                ((mm_segment_t) {TASK_SIZE - 1})
 
 #define get_ds()       (KERNEL_DS)
 #define get_fs()       (current_thread_info()->addr_limit)
@@ -49,11 +49,11 @@ static inline void set_fs(mm_segment_t fs)
        current_thread_info()->addr_limit = fs;
 }
 
-#define segment_eq(a, b)    ((a) == (b))
+#define segment_eq(a, b)       ((a) == (b))
 
 #define __range_ok(addr, size) (size <= get_fs() && addr <= (get_fs() -size))
 
-#define access_ok(type, addr, size)                 \
+#define access_ok(type, addr, size)    \
        __range_ok((unsigned long)addr, (unsigned long)size)
 /*
  * Single-value transfer routines.  They automatically use the right
@@ -75,70 +75,73 @@ static inline void set_fs(mm_segment_t fs)
  * versions are void (ie, don't return a value as such).
  */
 
-#define get_user(x,p)                                                  \
-({                                                                     \
-       long __e = -EFAULT;                                             \
-       if(likely(access_ok(VERIFY_READ,  p, sizeof(*p)))) {            \
-               __e = __get_user(x,p);                                  \
-       } else                                                          \
-               x = 0;                                                  \
-       __e;                                                            \
-})
-#define __get_user(x,ptr)                                              \
+#define get_user       __get_user                                      \
+
+#define __get_user(x, ptr)                                             \
 ({                                                                     \
        long __gu_err = 0;                                              \
-       __get_user_err((x),(ptr),__gu_err);                             \
+       __get_user_check((x), (ptr), __gu_err);                         \
        __gu_err;                                                       \
 })
 
-#define __get_user_error(x,ptr,err)                                    \
+#define __get_user_error(x, ptr, err)                                  \
 ({                                                                     \
-       __get_user_err((x),(ptr),err);                                  \
-       (void) 0;                                                       \
+       __get_user_check((x), (ptr), (err));                            \
+       (void)0;                                                        \
 })
 
-#define __get_user_err(x,ptr,err)                                      \
+#define __get_user_check(x, ptr, err)                                  \
+({                                                                     \
+       const __typeof__(*(ptr)) __user *__p = (ptr);                   \
+       might_fault();                                                  \
+       if (access_ok(VERIFY_READ, __p, sizeof(*__p))) {                \
+               __get_user_err((x), __p, (err));                        \
+       } else {                                                        \
+               (x) = 0; (err) = -EFAULT;                               \
+       }                                                               \
+})
+
+#define __get_user_err(x, ptr, err)                                    \
 do {                                                                   \
-       unsigned long __gu_addr = (unsigned long)(ptr);                 \
        unsigned long __gu_val;                                         \
        __chk_user_ptr(ptr);                                            \
        switch (sizeof(*(ptr))) {                                       \
        case 1:                                                         \
-               __get_user_asm("lbi",__gu_val,__gu_addr,err);           \
+               __get_user_asm("lbi", __gu_val, (ptr), (err));          \
                break;                                                  \
        case 2:                                                         \
-               __get_user_asm("lhi",__gu_val,__gu_addr,err);           \
+               __get_user_asm("lhi", __gu_val, (ptr), (err));          \
                break;                                                  \
        case 4:                                                         \
-               __get_user_asm("lwi",__gu_val,__gu_addr,err);           \
+               __get_user_asm("lwi", __gu_val, (ptr), (err));          \
                break;                                                  \
        case 8:                                                         \
-               __get_user_asm_dword(__gu_val,__gu_addr,err);           \
+               __get_user_asm_dword(__gu_val, (ptr), (err));           \
                break;                                                  \
        default:                                                        \
                BUILD_BUG();                                            \
                break;                                                  \
        }                                                               \
-       (x) = (__typeof__(*(ptr)))__gu_val;                             \
+       (x) = (__force __typeof__(*(ptr)))__gu_val;                     \
 } while (0)
 
-#define __get_user_asm(inst,x,addr,err)                                        \
-       asm volatile(                                                   \
-       "1:     "inst"  %1,[%2]\n"                                      \
-       "2:\n"                                                          \
-       "       .section .fixup,\"ax\"\n"                               \
-       "       .align  2\n"                                            \
-       "3:     move %0, %3\n"                                          \
-       "       move %1, #0\n"                                          \
-       "       b       2b\n"                                           \
-       "       .previous\n"                                            \
-       "       .section __ex_table,\"a\"\n"                            \
-       "       .align  3\n"                                            \
-       "       .long   1b, 3b\n"                                       \
-       "       .previous"                                              \
-       : "+r" (err), "=&r" (x)                                         \
-       : "r" (addr), "i" (-EFAULT)                                     \
-       : "cc")
+#define __get_user_asm(inst, x, addr, err)                             \
+       __asm__ __volatile__ (                                          \
+               "1:     "inst"  %1,[%2]\n"                              \
+               "2:\n"                                                  \
+               "       .section .fixup,\"ax\"\n"                       \
+               "       .align  2\n"                                    \
+               "3:     move %0, %3\n"                                  \
+               "       move %1, #0\n"                                  \
+               "       b       2b\n"                                   \
+               "       .previous\n"                                    \
+               "       .section __ex_table,\"a\"\n"                    \
+               "       .align  3\n"                                    \
+               "       .long   1b, 3b\n"                               \
+               "       .previous"                                      \
+               : "+r" (err), "=&r" (x)                                 \
+               : "r" (addr), "i" (-EFAULT)                             \
+               : "cc")
 
 #ifdef __NDS32_EB__
 #define __gu_reg_oper0 "%H1"
@@ -149,61 +152,66 @@ do {                                                                      \
 #endif
 
 #define __get_user_asm_dword(x, addr, err)                             \
-       asm volatile(                                                   \
-       "\n1:\tlwi " __gu_reg_oper0 ",[%2]\n"                           \
-       "\n2:\tlwi " __gu_reg_oper1 ",[%2+4]\n"                         \
-       "3:\n"                                                          \
-       "       .section .fixup,\"ax\"\n"                               \
-       "       .align  2\n"                                            \
-       "4:     move    %0, %3\n"                                       \
-       "       b       3b\n"                                           \
-       "       .previous\n"                                            \
-       "       .section __ex_table,\"a\"\n"                            \
-       "       .align  3\n"                                            \
-       "       .long   1b, 4b\n"                                       \
-       "       .long   2b, 4b\n"                                       \
-       "       .previous"                                              \
-       : "+r"(err), "=&r"(x)                                           \
-       : "r"(addr), "i"(-EFAULT)                                       \
-       : "cc")
-#define put_user(x,p)                                                  \
-({                                                                     \
-       long __e = -EFAULT;                                             \
-       if(likely(access_ok(VERIFY_WRITE,  p, sizeof(*p)))) {           \
-               __e = __put_user(x,p);                                  \
-       }                                                               \
-       __e;                                                            \
-})
-#define __put_user(x,ptr)                                              \
+       __asm__ __volatile__ (                                          \
+               "\n1:\tlwi " __gu_reg_oper0 ",[%2]\n"                   \
+               "\n2:\tlwi " __gu_reg_oper1 ",[%2+4]\n"                 \
+               "3:\n"                                                  \
+               "       .section .fixup,\"ax\"\n"                       \
+               "       .align  2\n"                                    \
+               "4:     move    %0, %3\n"                               \
+               "       b       3b\n"                                   \
+               "       .previous\n"                                    \
+               "       .section __ex_table,\"a\"\n"                    \
+               "       .align  3\n"                                    \
+               "       .long   1b, 4b\n"                               \
+               "       .long   2b, 4b\n"                               \
+               "       .previous"                                      \
+               : "+r"(err), "=&r"(x)                                   \
+               : "r"(addr), "i"(-EFAULT)                               \
+               : "cc")
+
+#define put_user       __put_user                                      \
+
+#define __put_user(x, ptr)                                             \
 ({                                                                     \
        long __pu_err = 0;                                              \
-       __put_user_err((x),(ptr),__pu_err);                             \
+       __put_user_err((x), (ptr), __pu_err);                           \
        __pu_err;                                                       \
 })
 
-#define __put_user_error(x,ptr,err)                                    \
+#define __put_user_error(x, ptr, err)                                  \
+({                                                                     \
+       __put_user_err((x), (ptr), (err));                              \
+       (void)0;                                                        \
+})
+
+#define __put_user_check(x, ptr, err)                                  \
 ({                                                                     \
-       __put_user_err((x),(ptr),err);                                  \
-       (void) 0;                                                       \
+       __typeof__(*(ptr)) __user *__p = (ptr);                         \
+       might_fault();                                                  \
+       if (access_ok(VERIFY_WRITE, __p, sizeof(*__p))) {               \
+               __put_user_err((x), __p, (err));                        \
+       } else  {                                                       \
+               (err) = -EFAULT;                                        \
+       }                                                               \
 })
 
-#define __put_user_err(x,ptr,err)                                      \
+#define __put_user_err(x, ptr, err)                                    \
 do {                                                                   \
-       unsigned long __pu_addr = (unsigned long)(ptr);                 \
        __typeof__(*(ptr)) __pu_val = (x);                              \
        __chk_user_ptr(ptr);                                            \
        switch (sizeof(*(ptr))) {                                       \
        case 1:                                                         \
-               __put_user_asm("sbi",__pu_val,__pu_addr,err);           \
+               __put_user_asm("sbi", __pu_val, (ptr), (err));          \
                break;                                                  \
        case 2:                                                         \
-               __put_user_asm("shi",__pu_val,__pu_addr,err);           \
+               __put_user_asm("shi", __pu_val, (ptr), (err));          \
                break;                                                  \
        case 4:                                                         \
-               __put_user_asm("swi",__pu_val,__pu_addr,err);           \
+               __put_user_asm("swi", __pu_val, (ptr), (err));          \
                break;                                                  \
        case 8:                                                         \
-               __put_user_asm_dword(__pu_val,__pu_addr,err);           \
+               __put_user_asm_dword(__pu_val, (ptr), (err));           \
                break;                                                  \
        default:                                                        \
                BUILD_BUG();                                            \
@@ -211,22 +219,22 @@ do {                                                                      \
        }                                                               \
 } while (0)
 
-#define __put_user_asm(inst,x,addr,err)                                        \
-       asm volatile(                                                   \
-       "1:     "inst"  %1,[%2]\n"                                      \
-       "2:\n"                                                          \
-       "       .section .fixup,\"ax\"\n"                               \
-       "       .align  2\n"                                            \
-       "3:     move    %0, %3\n"                                       \
-       "       b       2b\n"                                           \
-       "       .previous\n"                                            \
-       "       .section __ex_table,\"a\"\n"                            \
-       "       .align  3\n"                                            \
-       "       .long   1b, 3b\n"                                       \
-       "       .previous"                                              \
-       : "+r" (err)                                                    \
-       : "r" (x), "r" (addr), "i" (-EFAULT)                            \
-       : "cc")
+#define __put_user_asm(inst, x, addr, err)                             \
+       __asm__ __volatile__ (                                          \
+               "1:     "inst"  %1,[%2]\n"                              \
+               "2:\n"                                                  \
+               "       .section .fixup,\"ax\"\n"                       \
+               "       .align  2\n"                                    \
+               "3:     move    %0, %3\n"                               \
+               "       b       2b\n"                                   \
+               "       .previous\n"                                    \
+               "       .section __ex_table,\"a\"\n"                    \
+               "       .align  3\n"                                    \
+               "       .long   1b, 3b\n"                               \
+               "       .previous"                                      \
+               : "+r" (err)                                            \
+               : "r" (x), "r" (addr), "i" (-EFAULT)                    \
+               : "cc")
 
 #ifdef __NDS32_EB__
 #define __pu_reg_oper0 "%H2"
@@ -237,23 +245,24 @@ do {                                                                      \
 #endif
 
 #define __put_user_asm_dword(x, addr, err)                             \
-       asm volatile(                                                   \
-       "\n1:\tswi " __pu_reg_oper0 ",[%1]\n"                           \
-       "\n2:\tswi " __pu_reg_oper1 ",[%1+4]\n"                         \
-       "3:\n"                                                          \
-       "       .section .fixup,\"ax\"\n"                               \
-       "       .align  2\n"                                            \
-       "4:     move    %0, %3\n"                                       \
-       "       b       3b\n"                                           \
-       "       .previous\n"                                            \
-       "       .section __ex_table,\"a\"\n"                            \
-       "       .align  3\n"                                            \
-       "       .long   1b, 4b\n"                                       \
-       "       .long   2b, 4b\n"                                       \
-       "       .previous"                                              \
-       : "+r"(err)                                                     \
-       : "r"(addr), "r"(x), "i"(-EFAULT)                               \
-       : "cc")
+       __asm__ __volatile__ (                                          \
+               "\n1:\tswi " __pu_reg_oper0 ",[%1]\n"                   \
+               "\n2:\tswi " __pu_reg_oper1 ",[%1+4]\n"                 \
+               "3:\n"                                                  \
+               "       .section .fixup,\"ax\"\n"                       \
+               "       .align  2\n"                                    \
+               "4:     move    %0, %3\n"                               \
+               "       b       3b\n"                                   \
+               "       .previous\n"                                    \
+               "       .section __ex_table,\"a\"\n"                    \
+               "       .align  3\n"                                    \
+               "       .long   1b, 4b\n"                               \
+               "       .long   2b, 4b\n"                               \
+               "       .previous"                                      \
+               : "+r"(err)                                             \
+               : "r"(addr), "r"(x), "i"(-EFAULT)                       \
+               : "cc")
+
 extern unsigned long __arch_clear_user(void __user * addr, unsigned long n);
 extern long strncpy_from_user(char *dest, const char __user * src, long count);
 extern __must_check long strlen_user(const char __user * str);
index 4279274..27cded3 100644 (file)
@@ -21,3 +21,9 @@ extra-y := head.o vmlinux.lds
 
 
 obj-y                          += vdso/
+
+obj-$(CONFIG_FUNCTION_TRACER)   += ftrace.o
+
+ifdef CONFIG_FUNCTION_TRACER
+CFLAGS_REMOVE_ftrace.o = $(CC_FLAGS_FTRACE)
+endif
index 0c6d031..0c5386e 100644 (file)
@@ -9,7 +9,8 @@
 
 void __iomem *atl2c_base;
 static const struct of_device_id atl2c_ids[] __initconst = {
-       {.compatible = "andestech,atl2c",}
+       {.compatible = "andestech,atl2c",},
+       {}
 };
 
 static int __init atl2c_of_init(void)
index b8ae4e9..21a1440 100644 (file)
@@ -118,7 +118,7 @@ common_exception_handler:
        /* interrupt */
 2:
 #ifdef CONFIG_TRACE_IRQFLAGS
-       jal     trace_hardirqs_off
+       jal     __trace_hardirqs_off
 #endif
        move    $r0, $sp
        sethi   $lp, hi20(ret_from_intr)
index 03e4f77..f00af92 100644 (file)
@@ -138,8 +138,8 @@ no_work_pending:
 #ifdef CONFIG_TRACE_IRQFLAGS
        lwi     $p0, [$sp+(#IPSW_OFFSET)]
        andi    $p0, $p0, #0x1
-       la      $r10, trace_hardirqs_off
-       la      $r9, trace_hardirqs_on
+       la      $r10, __trace_hardirqs_off
+       la      $r9, __trace_hardirqs_on
        cmovz   $r9, $p0, $r10
        jral    $r9
 #endif
diff --git a/arch/nds32/kernel/ftrace.c b/arch/nds32/kernel/ftrace.c
new file mode 100644 (file)
index 0000000..a0a9679
--- /dev/null
@@ -0,0 +1,309 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/ftrace.h>
+#include <linux/uaccess.h>
+#include <asm/cacheflush.h>
+
+#ifndef CONFIG_DYNAMIC_FTRACE
+extern void (*ftrace_trace_function)(unsigned long, unsigned long,
+                                    struct ftrace_ops*, struct pt_regs*);
+extern int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace);
+extern void ftrace_graph_caller(void);
+
+noinline void __naked ftrace_stub(unsigned long ip, unsigned long parent_ip,
+                                 struct ftrace_ops *op, struct pt_regs *regs)
+{
+       __asm__ ("");  /* prevent the compiler from optimizing this out as a pure function */
+}
+
+noinline void _mcount(unsigned long parent_ip)
+{
+       /* save all state by the compiler prologue */
+
+       unsigned long ip = (unsigned long)__builtin_return_address(0);
+
+       if (ftrace_trace_function != ftrace_stub)
+               ftrace_trace_function(ip - MCOUNT_INSN_SIZE, parent_ip,
+                                     NULL, NULL);
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+       if (ftrace_graph_return != (trace_func_graph_ret_t)ftrace_stub
+           || ftrace_graph_entry != ftrace_graph_entry_stub)
+               ftrace_graph_caller();
+#endif
+
+       /* restore all state by the compiler epilogue */
+}
+EXPORT_SYMBOL(_mcount);
+
+#else /* CONFIG_DYNAMIC_FTRACE */
+
+noinline void __naked ftrace_stub(unsigned long ip, unsigned long parent_ip,
+                                 struct ftrace_ops *op, struct pt_regs *regs)
+{
+       __asm__ ("");  /* prevent the compiler from optimizing this out as a pure function */
+}
+
+noinline void __naked _mcount(unsigned long parent_ip)
+{
+       __asm__ ("");  /* prevent the compiler from optimizing this out as a pure function */
+}
+EXPORT_SYMBOL(_mcount);
+
+#define XSTR(s) STR(s)
+#define STR(s) #s
+void _ftrace_caller(unsigned long parent_ip)
+{
+       /* save all state needed by the compiler prologue */
+
+       /*
+        * prepare arguments for real tracing function
+        * first  arg : __builtin_return_address(0) - MCOUNT_INSN_SIZE
+        * second arg : parent_ip
+        */
+       __asm__ __volatile__ (
+               "move $r1, %0                              \n\t"
+               "addi $r0, %1, #-" XSTR(MCOUNT_INSN_SIZE) "\n\t"
+               :
+               : "r" (parent_ip), "r" (__builtin_return_address(0)));
+
+       /* a placeholder for the call to a real tracing function */
+       __asm__ __volatile__ (
+               "ftrace_call:           \n\t"
+               "nop                    \n\t"
+               "nop                    \n\t"
+               "nop                    \n\t");
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+       /* a placeholder for the call to ftrace_graph_caller */
+       __asm__ __volatile__ (
+               "ftrace_graph_call:     \n\t"
+               "nop                    \n\t"
+               "nop                    \n\t"
+               "nop                    \n\t");
+#endif
+       /* restore all state needed by the compiler epilogue */
+}
+
+int __init ftrace_dyn_arch_init(void)
+{
+       return 0;
+}
+
+int ftrace_arch_code_modify_prepare(void)
+{
+       set_all_modules_text_rw();
+       return 0;
+}
+
+int ftrace_arch_code_modify_post_process(void)
+{
+       set_all_modules_text_ro();
+       return 0;
+}
+
+static unsigned long gen_sethi_insn(unsigned long addr)
+{
+       unsigned long opcode = 0x46000000;
+       unsigned long imm = addr >> 12;
+       unsigned long rt_num = 0xf << 20;
+
+       return ENDIAN_CONVERT(opcode | rt_num | imm);
+}
+
+static unsigned long gen_ori_insn(unsigned long addr)
+{
+       unsigned long opcode = 0x58000000;
+       unsigned long imm = addr & 0x0000fff;
+       unsigned long rt_num = 0xf << 20;
+       unsigned long ra_num = 0xf << 15;
+
+       return ENDIAN_CONVERT(opcode | rt_num | ra_num | imm);
+}
+
+static unsigned long gen_jral_insn(unsigned long addr)
+{
+       unsigned long opcode = 0x4a000001;
+       unsigned long rt_num = 0x1e << 20;
+       unsigned long rb_num = 0xf << 10;
+
+       return ENDIAN_CONVERT(opcode | rt_num | rb_num);
+}
+
+static void ftrace_gen_call_insn(unsigned long *call_insns,
+                                unsigned long addr)
+{
+       call_insns[0] = gen_sethi_insn(addr); /* sethi $r15, imm20u       */
+       call_insns[1] = gen_ori_insn(addr);   /* ori   $r15, $r15, imm15u */
+       call_insns[2] = gen_jral_insn(addr);  /* jral  $lp,  $r15         */
+}
+
+static int __ftrace_modify_code(unsigned long pc, unsigned long *old_insn,
+                               unsigned long *new_insn, bool validate)
+{
+       unsigned long orig_insn[3];
+
+       if (validate) {
+               if (probe_kernel_read(orig_insn, (void *)pc, MCOUNT_INSN_SIZE))
+                       return -EFAULT;
+               if (memcmp(orig_insn, old_insn, MCOUNT_INSN_SIZE))
+                       return -EINVAL;
+       }
+
+       if (probe_kernel_write((void *)pc, new_insn, MCOUNT_INSN_SIZE))
+               return -EPERM;
+
+       return 0;
+}
+
+static int ftrace_modify_code(unsigned long pc, unsigned long *old_insn,
+                             unsigned long *new_insn, bool validate)
+{
+       int ret;
+
+       ret = __ftrace_modify_code(pc, old_insn, new_insn, validate);
+       if (ret)
+               return ret;
+
+       flush_icache_range(pc, pc + MCOUNT_INSN_SIZE);
+
+       return ret;
+}
+
+int ftrace_update_ftrace_func(ftrace_func_t func)
+{
+       unsigned long pc = (unsigned long)&ftrace_call;
+       unsigned long old_insn[3] = {INSN_NOP, INSN_NOP, INSN_NOP};
+       unsigned long new_insn[3] = {INSN_NOP, INSN_NOP, INSN_NOP};
+
+       if (func != ftrace_stub)
+               ftrace_gen_call_insn(new_insn, (unsigned long)func);
+
+       return ftrace_modify_code(pc, old_insn, new_insn, false);
+}
+
+int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
+{
+       unsigned long pc = rec->ip;
+       unsigned long nop_insn[3] = {INSN_NOP, INSN_NOP, INSN_NOP};
+       unsigned long call_insn[3] = {INSN_NOP, INSN_NOP, INSN_NOP};
+
+       ftrace_gen_call_insn(call_insn, addr);
+
+       return ftrace_modify_code(pc, nop_insn, call_insn, true);
+}
+
+int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
+                   unsigned long addr)
+{
+       unsigned long pc = rec->ip;
+       unsigned long nop_insn[3] = {INSN_NOP, INSN_NOP, INSN_NOP};
+       unsigned long call_insn[3] = {INSN_NOP, INSN_NOP, INSN_NOP};
+
+       ftrace_gen_call_insn(call_insn, addr);
+
+       return ftrace_modify_code(pc, call_insn, nop_insn, true);
+}
+#endif /* CONFIG_DYNAMIC_FTRACE */
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
+                          unsigned long frame_pointer)
+{
+       unsigned long return_hooker = (unsigned long)&return_to_handler;
+       struct ftrace_graph_ent trace;
+       unsigned long old;
+       int err;
+
+       if (unlikely(atomic_read(&current->tracing_graph_pause)))
+               return;
+
+       old = *parent;
+
+       trace.func = self_addr;
+       trace.depth = current->curr_ret_stack + 1;
+
+       /* Only trace if the calling function expects to */
+       if (!ftrace_graph_entry(&trace))
+               return;
+
+       err = ftrace_push_return_trace(old, self_addr, &trace.depth,
+                                      frame_pointer, NULL);
+
+       if (err == -EBUSY)
+               return;
+
+       *parent = return_hooker;
+}
+
+noinline void ftrace_graph_caller(void)
+{
+       unsigned long *parent_ip =
+               (unsigned long *)(__builtin_frame_address(2) - 4);
+
+       unsigned long selfpc =
+               (unsigned long)(__builtin_return_address(1) - MCOUNT_INSN_SIZE);
+
+       unsigned long frame_pointer =
+               (unsigned long)__builtin_frame_address(3);
+
+       prepare_ftrace_return(parent_ip, selfpc, frame_pointer);
+}
+
+extern unsigned long ftrace_return_to_handler(unsigned long frame_pointer);
+void __naked return_to_handler(void)
+{
+       __asm__ __volatile__ (
+               /* save state needed by the ABI     */
+               "smw.adm $r0,[$sp],$r1,#0x0  \n\t"
+
+               /* get original return address      */
+               "move $r0, $fp               \n\t"
+               "bal ftrace_return_to_handler\n\t"
+               "move $lp, $r0               \n\t"
+
+               /* restore state needed by the ABI   */
+               "lmw.bim $r0,[$sp],$r1,#0x0  \n\t");
+}
+
+#ifdef CONFIG_DYNAMIC_FTRACE
+extern unsigned long ftrace_graph_call;
+
+static int ftrace_modify_graph_caller(bool enable)
+{
+       unsigned long pc = (unsigned long)&ftrace_graph_call;
+       unsigned long nop_insn[3] = {INSN_NOP, INSN_NOP, INSN_NOP};
+       unsigned long call_insn[3] = {INSN_NOP, INSN_NOP, INSN_NOP};
+
+       ftrace_gen_call_insn(call_insn, (unsigned long)ftrace_graph_caller);
+
+       if (enable)
+               return ftrace_modify_code(pc, nop_insn, call_insn, true);
+       else
+               return ftrace_modify_code(pc, call_insn, nop_insn, true);
+}
+
+int ftrace_enable_ftrace_graph_caller(void)
+{
+       return ftrace_modify_graph_caller(true);
+}
+
+int ftrace_disable_ftrace_graph_caller(void)
+{
+       return ftrace_modify_graph_caller(false);
+}
+#endif /* CONFIG_DYNAMIC_FTRACE */
+
+#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
+
+
+#ifdef CONFIG_TRACE_IRQFLAGS
+noinline void __trace_hardirqs_off(void)
+{
+       trace_hardirqs_off();
+}
+noinline void __trace_hardirqs_on(void)
+{
+       trace_hardirqs_on();
+}
+#endif /* CONFIG_TRACE_IRQFLAGS */
index 4167283..1e31829 100644 (file)
@@ -40,7 +40,7 @@ void do_reloc16(unsigned int val, unsigned int *loc, unsigned int val_mask,
 
        tmp2 = tmp & loc_mask;
        if (partial_in_place) {
-               tmp &= (!loc_mask);
+               tmp &= (~loc_mask);
                tmp =
                    tmp2 | ((tmp + ((val & val_mask) >> val_shift)) & val_mask);
        } else {
@@ -70,7 +70,7 @@ void do_reloc32(unsigned int val, unsigned int *loc, unsigned int val_mask,
 
        tmp2 = tmp & loc_mask;
        if (partial_in_place) {
-               tmp &= (!loc_mask);
+               tmp &= (~loc_mask);
                tmp =
                    tmp2 | ((tmp + ((val & val_mask) >> val_shift)) & val_mask);
        } else {
index 8b231e9..d974c0c 100644 (file)
@@ -4,6 +4,7 @@
 #include <linux/sched/debug.h>
 #include <linux/sched/task_stack.h>
 #include <linux/stacktrace.h>
+#include <linux/ftrace.h>
 
 void save_stack_trace(struct stack_trace *trace)
 {
@@ -16,6 +17,7 @@ void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
        unsigned long *fpn;
        int skip = trace->skip;
        int savesched;
+       int graph_idx = 0;
 
        if (tsk == current) {
                __asm__ __volatile__("\tori\t%0, $fp, #0\n":"=r"(fpn));
@@ -29,10 +31,12 @@ void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
               && (fpn >= (unsigned long *)TASK_SIZE)) {
                unsigned long lpp, fpp;
 
-               lpp = fpn[-1];
+               lpp = fpn[LP_OFFSET];
                fpp = fpn[FP_OFFSET];
                if (!__kernel_text_address(lpp))
                        break;
+               else
+                       lpp = ftrace_graph_ret_addr(tsk, &graph_idx, lpp, NULL);
 
                if (savesched || !in_sched_functions(lpp)) {
                        if (skip) {
index a6205fd..1496aab 100644 (file)
@@ -8,6 +8,7 @@
 #include <linux/kdebug.h>
 #include <linux/sched/task_stack.h>
 #include <linux/uaccess.h>
+#include <linux/ftrace.h>
 
 #include <asm/proc-fns.h>
 #include <asm/unistd.h>
@@ -94,28 +95,6 @@ static void dump_instr(struct pt_regs *regs)
        set_fs(fs);
 }
 
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
-#include <linux/ftrace.h>
-static void
-get_real_ret_addr(unsigned long *addr, struct task_struct *tsk, int *graph)
-{
-       if (*addr == (unsigned long)return_to_handler) {
-               int index = tsk->curr_ret_stack;
-
-               if (tsk->ret_stack && index >= *graph) {
-                       index -= *graph;
-                       *addr = tsk->ret_stack[index].ret;
-                       (*graph)++;
-               }
-       }
-}
-#else
-static inline void
-get_real_ret_addr(unsigned long *addr, struct task_struct *tsk, int *graph)
-{
-}
-#endif
-
 #define LOOP_TIMES (100)
 static void __dump(struct task_struct *tsk, unsigned long *base_reg)
 {
@@ -126,7 +105,8 @@ static void __dump(struct task_struct *tsk, unsigned long *base_reg)
                while (!kstack_end(base_reg)) {
                        ret_addr = *base_reg++;
                        if (__kernel_text_address(ret_addr)) {
-                               get_real_ret_addr(&ret_addr, tsk, &graph);
+                               ret_addr = ftrace_graph_ret_addr(
+                                               tsk, &graph, ret_addr, NULL);
                                print_ip_sym(ret_addr);
                        }
                        if (--cnt < 0)
@@ -137,15 +117,12 @@ static void __dump(struct task_struct *tsk, unsigned long *base_reg)
                       !((unsigned long)base_reg & 0x3) &&
                       ((unsigned long)base_reg >= TASK_SIZE)) {
                        unsigned long next_fp;
-#if !defined(NDS32_ABI_2)
-                       ret_addr = base_reg[0];
-                       next_fp = base_reg[1];
-#else
-                       ret_addr = base_reg[-1];
+                       ret_addr = base_reg[LP_OFFSET];
                        next_fp = base_reg[FP_OFFSET];
-#endif
                        if (__kernel_text_address(ret_addr)) {
-                               get_real_ret_addr(&ret_addr, tsk, &graph);
+
+                               ret_addr = ftrace_graph_ret_addr(
+                                               tsk, &graph, ret_addr, NULL);
                                print_ip_sym(ret_addr);
                        }
                        if (--cnt < 0)
@@ -196,11 +173,10 @@ void die(const char *str, struct pt_regs *regs, int err)
        pr_emerg("CPU: %i\n", smp_processor_id());
        show_regs(regs);
        pr_emerg("Process %s (pid: %d, stack limit = 0x%p)\n",
-                tsk->comm, tsk->pid, task_thread_info(tsk) + 1);
+                tsk->comm, tsk->pid, end_of_stack(tsk));
 
        if (!user_mode(regs) || in_interrupt()) {
-               dump_mem("Stack: ", regs->sp,
-                        THREAD_SIZE + (unsigned long)task_thread_info(tsk));
+               dump_mem("Stack: ", regs->sp, (regs->sp + PAGE_SIZE) & PAGE_MASK);
                dump_instr(regs);
                dump_stack();
        }
index 288313b..9e90f30 100644 (file)
@@ -13,14 +13,26 @@ OUTPUT_ARCH(nds32)
 ENTRY(_stext_lma)
 jiffies = jiffies_64;
 
+#if defined(CONFIG_GCOV_KERNEL)
+#define NDS32_EXIT_KEEP(x)     x
+#else
+#define NDS32_EXIT_KEEP(x)
+#endif
+
 SECTIONS
 {
        _stext_lma = TEXTADDR - LOAD_OFFSET;
        . = TEXTADDR;
        __init_begin = .;
        HEAD_TEXT_SECTION
+       .exit.text : {
+               NDS32_EXIT_KEEP(EXIT_TEXT)
+       }
        INIT_TEXT_SECTION(PAGE_SIZE)
        INIT_DATA_SECTION(16)
+       .exit.data : {
+               NDS32_EXIT_KEEP(EXIT_DATA)
+       }
        PERCPU_SECTION(L1_CACHE_BYTES)
        __init_end = .;
 
index db20dc6..aee6031 100644 (file)
@@ -85,15 +85,8 @@ atomic_t hart_lottery;
 #ifdef CONFIG_BLK_DEV_INITRD
 static void __init setup_initrd(void)
 {
-       extern char __initramfs_start[];
-       extern unsigned long __initramfs_size;
        unsigned long size;
 
-       if (__initramfs_size > 0) {
-               initrd_start = (unsigned long)(&__initramfs_start);
-               initrd_end = initrd_start + __initramfs_size;
-       }
-
        if (initrd_start >= initrd_end) {
                printk(KERN_INFO "initrd not found or empty");
                goto disable;
index 80b2729..ab9a0eb 100644 (file)
@@ -208,7 +208,7 @@ static int cbc_paes_crypt(struct blkcipher_desc *desc, unsigned long modifier,
                              walk->dst.virt.addr, walk->src.virt.addr, n);
                if (k)
                        ret = blkcipher_walk_done(desc, walk, nbytes - k);
-               if (n < k) {
+               if (k < n) {
                        if (__cbc_paes_set_key(ctx) != 0)
                                return blkcipher_walk_done(desc, walk, -EIO);
                        memcpy(param.key, ctx->pk.protkey, MAXPROTKEYSIZE);
index f69333f..ac5da6b 100644 (file)
@@ -481,7 +481,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
                break;
        case KVM_CAP_S390_HPAGE_1M:
                r = 0;
-               if (hpage)
+               if (hpage && !kvm_is_ucontrol(kvm))
                        r = 1;
                break;
        case KVM_CAP_S390_MEM_OP:
@@ -691,7 +691,7 @@ static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
                mutex_lock(&kvm->lock);
                if (kvm->created_vcpus)
                        r = -EBUSY;
-               else if (!hpage || kvm->arch.use_cmma)
+               else if (!hpage || kvm->arch.use_cmma || kvm_is_ucontrol(kvm))
                        r = -EINVAL;
                else {
                        r = 0;
index bb44990..911c7de 100644 (file)
@@ -708,11 +708,13 @@ void gmap_discard(struct gmap *gmap, unsigned long from, unsigned long to)
                vmaddr |= gaddr & ~PMD_MASK;
                /* Find vma in the parent mm */
                vma = find_vma(gmap->mm, vmaddr);
+               if (!vma)
+                       continue;
                /*
                 * We do not discard pages that are backed by
                 * hugetlbfs, so we don't have to refault them.
                 */
-               if (vma && is_vm_hugetlb_page(vma))
+               if (is_vm_hugetlb_page(vma))
                        continue;
                size = min(to - gaddr, PMD_SIZE - (gaddr & ~PMD_MASK));
                zap_page_range(vma, vmaddr, size);
index 3641a29..e4abe9b 100644 (file)
@@ -9,6 +9,7 @@
 #include <linux/irq.h>
 #include <linux/of_device.h>
 #include <linux/of_platform.h>
+#include <linux/dma-mapping.h>
 #include <asm/leon.h>
 #include <asm/leon_amba.h>
 
@@ -381,6 +382,9 @@ static struct platform_device * __init scan_one_device(struct device_node *dp,
        else
                dev_set_name(&op->dev, "%08x", dp->phandle);
 
+       op->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+       op->dev.dma_mask = &op->dev.coherent_dma_mask;
+
        if (of_device_register(op)) {
                printk("%s: Could not register of device.\n",
                       dp->full_name);
index 44e4d44..6df6086 100644 (file)
@@ -2,6 +2,7 @@
 #include <linux/string.h>
 #include <linux/kernel.h>
 #include <linux/of.h>
+#include <linux/dma-mapping.h>
 #include <linux/init.h>
 #include <linux/export.h>
 #include <linux/mod_devicetable.h>
@@ -675,6 +676,8 @@ static struct platform_device * __init scan_one_device(struct device_node *dp,
                dev_set_name(&op->dev, "root");
        else
                dev_set_name(&op->dev, "%08x", dp->phandle);
+       op->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+       op->dev.dma_mask = &op->dev.coherent_dma_mask;
 
        if (of_device_register(op)) {
                printk("%s: Could not register of device.\n",
index f3e006b..c88ed39 100644 (file)
@@ -1272,4 +1272,8 @@ void intel_pmu_lbr_init_knl(void)
 
        x86_pmu.lbr_sel_mask = LBR_SEL_MASK;
        x86_pmu.lbr_sel_map  = snb_lbr_sel_map;
+
+       /* Knights Landing does have MISPREDICT bit */
+       if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_LIP)
+               x86_pmu.intel_cap.lbr_format = LBR_FORMAT_EIP_FLAGS;
 }
index b143717..ce84388 100644 (file)
@@ -80,11 +80,11 @@ static __always_inline void arch_atomic_sub(int i, atomic_t *v)
  * true if the result is zero, or false for all
  * other cases.
  */
-#define arch_atomic_sub_and_test arch_atomic_sub_and_test
 static __always_inline bool arch_atomic_sub_and_test(int i, atomic_t *v)
 {
        GEN_BINARY_RMWcc(LOCK_PREFIX "subl", v->counter, "er", i, "%0", e);
 }
+#define arch_atomic_sub_and_test arch_atomic_sub_and_test
 
 /**
  * arch_atomic_inc - increment atomic variable
@@ -92,12 +92,12 @@ static __always_inline bool arch_atomic_sub_and_test(int i, atomic_t *v)
  *
  * Atomically increments @v by 1.
  */
-#define arch_atomic_inc arch_atomic_inc
 static __always_inline void arch_atomic_inc(atomic_t *v)
 {
        asm volatile(LOCK_PREFIX "incl %0"
                     : "+m" (v->counter));
 }
+#define arch_atomic_inc arch_atomic_inc
 
 /**
  * arch_atomic_dec - decrement atomic variable
@@ -105,12 +105,12 @@ static __always_inline void arch_atomic_inc(atomic_t *v)
  *
  * Atomically decrements @v by 1.
  */
-#define arch_atomic_dec arch_atomic_dec
 static __always_inline void arch_atomic_dec(atomic_t *v)
 {
        asm volatile(LOCK_PREFIX "decl %0"
                     : "+m" (v->counter));
 }
+#define arch_atomic_dec arch_atomic_dec
 
 /**
  * arch_atomic_dec_and_test - decrement and test
@@ -120,11 +120,11 @@ static __always_inline void arch_atomic_dec(atomic_t *v)
  * returns true if the result is 0, or false for all other
  * cases.
  */
-#define arch_atomic_dec_and_test arch_atomic_dec_and_test
 static __always_inline bool arch_atomic_dec_and_test(atomic_t *v)
 {
        GEN_UNARY_RMWcc(LOCK_PREFIX "decl", v->counter, "%0", e);
 }
+#define arch_atomic_dec_and_test arch_atomic_dec_and_test
 
 /**
  * arch_atomic_inc_and_test - increment and test
@@ -134,11 +134,11 @@ static __always_inline bool arch_atomic_dec_and_test(atomic_t *v)
  * and returns true if the result is zero, or false for all
  * other cases.
  */
-#define arch_atomic_inc_and_test arch_atomic_inc_and_test
 static __always_inline bool arch_atomic_inc_and_test(atomic_t *v)
 {
        GEN_UNARY_RMWcc(LOCK_PREFIX "incl", v->counter, "%0", e);
 }
+#define arch_atomic_inc_and_test arch_atomic_inc_and_test
 
 /**
  * arch_atomic_add_negative - add and test if negative
@@ -149,11 +149,11 @@ static __always_inline bool arch_atomic_inc_and_test(atomic_t *v)
  * if the result is negative, or false when
  * result is greater than or equal to zero.
  */
-#define arch_atomic_add_negative arch_atomic_add_negative
 static __always_inline bool arch_atomic_add_negative(int i, atomic_t *v)
 {
        GEN_BINARY_RMWcc(LOCK_PREFIX "addl", v->counter, "er", i, "%0", s);
 }
+#define arch_atomic_add_negative arch_atomic_add_negative
 
 /**
  * arch_atomic_add_return - add integer and return
index ef959f0..6a5b0ec 100644 (file)
@@ -205,12 +205,12 @@ static inline long long arch_atomic64_sub(long long i, atomic64_t *v)
  *
  * Atomically increments @v by 1.
  */
-#define arch_atomic64_inc arch_atomic64_inc
 static inline void arch_atomic64_inc(atomic64_t *v)
 {
        __alternative_atomic64(inc, inc_return, /* no output */,
                               "S" (v) : "memory", "eax", "ecx", "edx");
 }
+#define arch_atomic64_inc arch_atomic64_inc
 
 /**
  * arch_atomic64_dec - decrement atomic64 variable
@@ -218,12 +218,12 @@ static inline void arch_atomic64_inc(atomic64_t *v)
  *
  * Atomically decrements @v by 1.
  */
-#define arch_atomic64_dec arch_atomic64_dec
 static inline void arch_atomic64_dec(atomic64_t *v)
 {
        __alternative_atomic64(dec, dec_return, /* no output */,
                               "S" (v) : "memory", "eax", "ecx", "edx");
 }
+#define arch_atomic64_dec arch_atomic64_dec
 
 /**
  * arch_atomic64_add_unless - add unless the number is a given value
@@ -245,7 +245,6 @@ static inline int arch_atomic64_add_unless(atomic64_t *v, long long a,
        return (int)a;
 }
 
-#define arch_atomic64_inc_not_zero arch_atomic64_inc_not_zero
 static inline int arch_atomic64_inc_not_zero(atomic64_t *v)
 {
        int r;
@@ -253,8 +252,8 @@ static inline int arch_atomic64_inc_not_zero(atomic64_t *v)
                             "S" (v) : "ecx", "edx", "memory");
        return r;
 }
+#define arch_atomic64_inc_not_zero arch_atomic64_inc_not_zero
 
-#define arch_atomic64_dec_if_positive arch_atomic64_dec_if_positive
 static inline long long arch_atomic64_dec_if_positive(atomic64_t *v)
 {
        long long r;
@@ -262,6 +261,7 @@ static inline long long arch_atomic64_dec_if_positive(atomic64_t *v)
                             "S" (v) : "ecx", "memory");
        return r;
 }
+#define arch_atomic64_dec_if_positive arch_atomic64_dec_if_positive
 
 #undef alternative_atomic64
 #undef __alternative_atomic64
index 4343d9b..5f851d9 100644 (file)
@@ -71,11 +71,11 @@ static inline void arch_atomic64_sub(long i, atomic64_t *v)
  * true if the result is zero, or false for all
  * other cases.
  */
-#define arch_atomic64_sub_and_test arch_atomic64_sub_and_test
 static inline bool arch_atomic64_sub_and_test(long i, atomic64_t *v)
 {
        GEN_BINARY_RMWcc(LOCK_PREFIX "subq", v->counter, "er", i, "%0", e);
 }
+#define arch_atomic64_sub_and_test arch_atomic64_sub_and_test
 
 /**
  * arch_atomic64_inc - increment atomic64 variable
@@ -83,13 +83,13 @@ static inline bool arch_atomic64_sub_and_test(long i, atomic64_t *v)
  *
  * Atomically increments @v by 1.
  */
-#define arch_atomic64_inc arch_atomic64_inc
 static __always_inline void arch_atomic64_inc(atomic64_t *v)
 {
        asm volatile(LOCK_PREFIX "incq %0"
                     : "=m" (v->counter)
                     : "m" (v->counter));
 }
+#define arch_atomic64_inc arch_atomic64_inc
 
 /**
  * arch_atomic64_dec - decrement atomic64 variable
@@ -97,13 +97,13 @@ static __always_inline void arch_atomic64_inc(atomic64_t *v)
  *
  * Atomically decrements @v by 1.
  */
-#define arch_atomic64_dec arch_atomic64_dec
 static __always_inline void arch_atomic64_dec(atomic64_t *v)
 {
        asm volatile(LOCK_PREFIX "decq %0"
                     : "=m" (v->counter)
                     : "m" (v->counter));
 }
+#define arch_atomic64_dec arch_atomic64_dec
 
 /**
  * arch_atomic64_dec_and_test - decrement and test
@@ -113,11 +113,11 @@ static __always_inline void arch_atomic64_dec(atomic64_t *v)
  * returns true if the result is 0, or false for all other
  * cases.
  */
-#define arch_atomic64_dec_and_test arch_atomic64_dec_and_test
 static inline bool arch_atomic64_dec_and_test(atomic64_t *v)
 {
        GEN_UNARY_RMWcc(LOCK_PREFIX "decq", v->counter, "%0", e);
 }
+#define arch_atomic64_dec_and_test arch_atomic64_dec_and_test
 
 /**
  * arch_atomic64_inc_and_test - increment and test
@@ -127,11 +127,11 @@ static inline bool arch_atomic64_dec_and_test(atomic64_t *v)
  * and returns true if the result is zero, or false for all
  * other cases.
  */
-#define arch_atomic64_inc_and_test arch_atomic64_inc_and_test
 static inline bool arch_atomic64_inc_and_test(atomic64_t *v)
 {
        GEN_UNARY_RMWcc(LOCK_PREFIX "incq", v->counter, "%0", e);
 }
+#define arch_atomic64_inc_and_test arch_atomic64_inc_and_test
 
 /**
  * arch_atomic64_add_negative - add and test if negative
@@ -142,11 +142,11 @@ static inline bool arch_atomic64_inc_and_test(atomic64_t *v)
  * if the result is negative, or false when
  * result is greater than or equal to zero.
  */
-#define arch_atomic64_add_negative arch_atomic64_add_negative
 static inline bool arch_atomic64_add_negative(long i, atomic64_t *v)
 {
        GEN_BINARY_RMWcc(LOCK_PREFIX "addq", v->counter, "er", i, "%0", s);
 }
+#define arch_atomic64_add_negative arch_atomic64_add_negative
 
 /**
  * arch_atomic64_add_return - add and return
index 395c963..75f1e35 100644 (file)
@@ -22,10 +22,20 @@ enum die_val {
        DIE_NMIUNKNOWN,
 };
 
+enum show_regs_mode {
+       SHOW_REGS_SHORT,
+       /*
+        * For when userspace crashed, but we don't think it's our fault, and
+        * therefore don't print kernel registers.
+        */
+       SHOW_REGS_USER,
+       SHOW_REGS_ALL
+};
+
 extern void die(const char *, struct pt_regs *,long);
 extern int __must_check __die(const char *, struct pt_regs *, long);
 extern void show_stack_regs(struct pt_regs *regs);
-extern void __show_regs(struct pt_regs *regs, int all);
+extern void __show_regs(struct pt_regs *regs, enum show_regs_mode);
 extern void show_iret_regs(struct pt_regs *regs);
 extern unsigned long oops_begin(void);
 extern void oops_end(unsigned long, struct pt_regs *, int signr);
index 24c6cf5..60d0f90 100644 (file)
@@ -19,9 +19,6 @@ static inline void native_set_pte(pte_t *ptep , pte_t pte)
 
 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
 {
-#ifdef CONFIG_PAGE_TABLE_ISOLATION
-       pmd.pud.p4d.pgd = pti_set_user_pgtbl(&pmdp->pud.p4d.pgd, pmd.pud.p4d.pgd);
-#endif
        *pmdp = pmd;
 }
 
@@ -61,9 +58,6 @@ static inline pte_t native_ptep_get_and_clear(pte_t *xp)
 #ifdef CONFIG_SMP
 static inline pmd_t native_pmdp_get_and_clear(pmd_t *xp)
 {
-#ifdef CONFIG_PAGE_TABLE_ISOLATION
-       pti_set_user_pgtbl(&xp->pud.p4d.pgd, __pgd(0));
-#endif
        return __pmd(xchg((pmdval_t *)xp, 0));
 }
 #else
@@ -73,9 +67,6 @@ static inline pmd_t native_pmdp_get_and_clear(pmd_t *xp)
 #ifdef CONFIG_SMP
 static inline pud_t native_pudp_get_and_clear(pud_t *xp)
 {
-#ifdef CONFIG_PAGE_TABLE_ISOLATION
-       pti_set_user_pgtbl(&xp->p4d.pgd, __pgd(0));
-#endif
        return __pud(xchg((pudval_t *)xp, 0));
 }
 #else
index e4ffa56..690c030 100644 (file)
@@ -1195,7 +1195,7 @@ static inline pmd_t pmdp_establish(struct vm_area_struct *vma,
                return xchg(pmdp, pmd);
        } else {
                pmd_t old = *pmdp;
-               *pmdp = pmd;
+               WRITE_ONCE(*pmdp, pmd);
                return old;
        }
 }
index f773d5e..ce2b590 100644 (file)
@@ -55,15 +55,15 @@ struct mm_struct;
 void set_pte_vaddr_p4d(p4d_t *p4d_page, unsigned long vaddr, pte_t new_pte);
 void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte);
 
-static inline void native_pte_clear(struct mm_struct *mm, unsigned long addr,
-                                   pte_t *ptep)
+static inline void native_set_pte(pte_t *ptep, pte_t pte)
 {
-       *ptep = native_make_pte(0);
+       WRITE_ONCE(*ptep, pte);
 }
 
-static inline void native_set_pte(pte_t *ptep, pte_t pte)
+static inline void native_pte_clear(struct mm_struct *mm, unsigned long addr,
+                                   pte_t *ptep)
 {
-       *ptep = pte;
+       native_set_pte(ptep, native_make_pte(0));
 }
 
 static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
@@ -73,7 +73,7 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
 
 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
 {
-       *pmdp = pmd;
+       WRITE_ONCE(*pmdp, pmd);
 }
 
 static inline void native_pmd_clear(pmd_t *pmd)
@@ -109,7 +109,7 @@ static inline pmd_t native_pmdp_get_and_clear(pmd_t *xp)
 
 static inline void native_set_pud(pud_t *pudp, pud_t pud)
 {
-       *pudp = pud;
+       WRITE_ONCE(*pudp, pud);
 }
 
 static inline void native_pud_clear(pud_t *pud)
@@ -137,13 +137,13 @@ static inline void native_set_p4d(p4d_t *p4dp, p4d_t p4d)
        pgd_t pgd;
 
        if (pgtable_l5_enabled() || !IS_ENABLED(CONFIG_PAGE_TABLE_ISOLATION)) {
-               *p4dp = p4d;
+               WRITE_ONCE(*p4dp, p4d);
                return;
        }
 
        pgd = native_make_pgd(native_p4d_val(p4d));
        pgd = pti_set_user_pgtbl((pgd_t *)p4dp, pgd);
-       *p4dp = native_make_p4d(native_pgd_val(pgd));
+       WRITE_ONCE(*p4dp, native_make_p4d(native_pgd_val(pgd)));
 }
 
 static inline void native_p4d_clear(p4d_t *p4d)
@@ -153,7 +153,7 @@ static inline void native_p4d_clear(p4d_t *p4d)
 
 static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
 {
-       *pgdp = pti_set_user_pgtbl(pgdp, pgd);
+       WRITE_ONCE(*pgdp, pti_set_user_pgtbl(pgdp, pgd));
 }
 
 static inline void native_pgd_clear(pgd_t *pgd)
index 9f148e3..7654feb 100644 (file)
@@ -413,7 +413,7 @@ static int activate_managed(struct irq_data *irqd)
        if (WARN_ON_ONCE(cpumask_empty(vector_searchmask))) {
                /* Something in the core code broke! Survive gracefully */
                pr_err("Managed startup for irq %u, but no CPU\n", irqd->irq);
-               return EINVAL;
+               return -EINVAL;
        }
 
        ret = assign_managed_vector(irqd, vector_searchmask);
index ec00d1f..f7151cd 100644 (file)
@@ -1640,6 +1640,7 @@ static int do_open(struct inode *inode, struct file *filp)
        return 0;
 }
 
+#ifdef CONFIG_PROC_FS
 static int proc_apm_show(struct seq_file *m, void *v)
 {
        unsigned short  bx;
@@ -1719,6 +1720,7 @@ static int proc_apm_show(struct seq_file *m, void *v)
                   units);
        return 0;
 }
+#endif
 
 static int apm(void *unused)
 {
index 0624957..07b5fc0 100644 (file)
@@ -504,6 +504,7 @@ static enum ucode_state apply_microcode_amd(int cpu)
        struct microcode_amd *mc_amd;
        struct ucode_cpu_info *uci;
        struct ucode_patch *p;
+       enum ucode_state ret;
        u32 rev, dummy;
 
        BUG_ON(raw_smp_processor_id() != cpu);
@@ -521,9 +522,8 @@ static enum ucode_state apply_microcode_amd(int cpu)
 
        /* need to apply patch? */
        if (rev >= mc_amd->hdr.patch_id) {
-               c->microcode = rev;
-               uci->cpu_sig.rev = rev;
-               return UCODE_OK;
+               ret = UCODE_OK;
+               goto out;
        }
 
        if (__apply_microcode_amd(mc_amd)) {
@@ -531,13 +531,21 @@ static enum ucode_state apply_microcode_amd(int cpu)
                        cpu, mc_amd->hdr.patch_id);
                return UCODE_ERROR;
        }
-       pr_info("CPU%d: new patch_level=0x%08x\n", cpu,
-               mc_amd->hdr.patch_id);
 
-       uci->cpu_sig.rev = mc_amd->hdr.patch_id;
-       c->microcode = mc_amd->hdr.patch_id;
+       rev = mc_amd->hdr.patch_id;
+       ret = UCODE_UPDATED;
+
+       pr_info("CPU%d: new patch_level=0x%08x\n", cpu, rev);
 
-       return UCODE_UPDATED;
+out:
+       uci->cpu_sig.rev = rev;
+       c->microcode     = rev;
+
+       /* Update boot_cpu_data's revision too, if we're on the BSP: */
+       if (c->cpu_index == boot_cpu_data.cpu_index)
+               boot_cpu_data.microcode = rev;
+
+       return ret;
 }
 
 static int install_equiv_cpu_table(const u8 *buf)
index 97ccf4c..16936a2 100644 (file)
@@ -795,6 +795,7 @@ static enum ucode_state apply_microcode_intel(int cpu)
        struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
        struct cpuinfo_x86 *c = &cpu_data(cpu);
        struct microcode_intel *mc;
+       enum ucode_state ret;
        static int prev_rev;
        u32 rev;
 
@@ -817,9 +818,8 @@ static enum ucode_state apply_microcode_intel(int cpu)
         */
        rev = intel_get_microcode_revision();
        if (rev >= mc->hdr.rev) {
-               uci->cpu_sig.rev = rev;
-               c->microcode = rev;
-               return UCODE_OK;
+               ret = UCODE_OK;
+               goto out;
        }
 
        /*
@@ -848,10 +848,17 @@ static enum ucode_state apply_microcode_intel(int cpu)
                prev_rev = rev;
        }
 
+       ret = UCODE_UPDATED;
+
+out:
        uci->cpu_sig.rev = rev;
-       c->microcode = rev;
+       c->microcode     = rev;
+
+       /* Update boot_cpu_data's revision too, if we're on the BSP: */
+       if (c->cpu_index == boot_cpu_data.cpu_index)
+               boot_cpu_data.microcode = rev;
 
-       return UCODE_UPDATED;
+       return ret;
 }
 
 static enum ucode_state generic_load_microcode(int cpu, void *data, size_t size,
index f568951..2b58864 100644 (file)
@@ -146,7 +146,7 @@ static void show_regs_if_on_stack(struct stack_info *info, struct pt_regs *regs,
         * they can be printed in the right context.
         */
        if (!partial && on_stack(info, regs, sizeof(*regs))) {
-               __show_regs(regs, 0);
+               __show_regs(regs, SHOW_REGS_SHORT);
 
        } else if (partial && on_stack(info, (void *)regs + IRET_FRAME_OFFSET,
                                       IRET_FRAME_SIZE)) {
@@ -344,7 +344,7 @@ void oops_end(unsigned long flags, struct pt_regs *regs, int signr)
        oops_exit();
 
        /* Executive summary in case the oops scrolled away */
-       __show_regs(&exec_summary_regs, true);
+       __show_regs(&exec_summary_regs, SHOW_REGS_ALL);
 
        if (!signr)
                return;
@@ -407,14 +407,9 @@ void die(const char *str, struct pt_regs *regs, long err)
 
 void show_regs(struct pt_regs *regs)
 {
-       bool all = true;
-
        show_regs_print_info(KERN_DEFAULT);
 
-       if (IS_ENABLED(CONFIG_X86_32))
-               all = !user_mode(regs);
-
-       __show_regs(regs, all);
+       __show_regs(regs, user_mode(regs) ? SHOW_REGS_USER : SHOW_REGS_ALL);
 
        /*
         * When in-kernel, we also print out the stack at the time of the fault..
index f260e45..e8c8c5d 100644 (file)
@@ -7,11 +7,17 @@
 #include <linux/eisa.h>
 #include <linux/io.h>
 
+#include <xen/xen.h>
+
 static __init int eisa_bus_probe(void)
 {
-       void __iomem *p = ioremap(0x0FFFD9, 4);
+       void __iomem *p;
+
+       if (xen_pv_domain() && !xen_initial_domain())
+               return 0;
 
-       if (readl(p) == 'E' + ('I'<<8) + ('S'<<16) + ('A'<<24))
+       p = ioremap(0x0FFFD9, 4);
+       if (p && readl(p) == 'E' + ('I' << 8) + ('S' << 16) + ('A' << 24))
                EISA_bus = 1;
        iounmap(p);
        return 0;
index 2924fd4..5046a3c 100644 (file)
@@ -59,7 +59,7 @@
 #include <asm/intel_rdt_sched.h>
 #include <asm/proto.h>
 
-void __show_regs(struct pt_regs *regs, int all)
+void __show_regs(struct pt_regs *regs, enum show_regs_mode mode)
 {
        unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L;
        unsigned long d0, d1, d2, d3, d6, d7;
@@ -85,7 +85,7 @@ void __show_regs(struct pt_regs *regs, int all)
        printk(KERN_DEFAULT "DS: %04x ES: %04x FS: %04x GS: %04x SS: %04x EFLAGS: %08lx\n",
               (u16)regs->ds, (u16)regs->es, (u16)regs->fs, gs, ss, regs->flags);
 
-       if (!all)
+       if (mode != SHOW_REGS_ALL)
                return;
 
        cr0 = read_cr0();
index a451bc3..ea5ea85 100644 (file)
@@ -62,7 +62,7 @@
 __visible DEFINE_PER_CPU(unsigned long, rsp_scratch);
 
 /* Prints also some state that isn't saved in the pt_regs */
-void __show_regs(struct pt_regs *regs, int all)
+void __show_regs(struct pt_regs *regs, enum show_regs_mode mode)
 {
        unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L, fs, gs, shadowgs;
        unsigned long d0, d1, d2, d3, d6, d7;
@@ -87,9 +87,17 @@ void __show_regs(struct pt_regs *regs, int all)
        printk(KERN_DEFAULT "R13: %016lx R14: %016lx R15: %016lx\n",
               regs->r13, regs->r14, regs->r15);
 
-       if (!all)
+       if (mode == SHOW_REGS_SHORT)
                return;
 
+       if (mode == SHOW_REGS_USER) {
+               rdmsrl(MSR_FS_BASE, fs);
+               rdmsrl(MSR_KERNEL_GS_BASE, shadowgs);
+               printk(KERN_DEFAULT "FS:  %016lx GS:  %016lx\n",
+                      fs, shadowgs);
+               return;
+       }
+
        asm("movl %%ds,%0" : "=r" (ds));
        asm("movl %%cs,%0" : "=r" (cs));
        asm("movl %%es,%0" : "=r" (es));
index 12cbe2b..738bf42 100644 (file)
@@ -111,8 +111,10 @@ int arch_register_cpu(int num)
        /*
         * Currently CPU0 is only hotpluggable on Intel platforms. Other
         * vendors can add hotplug support later.
+        * Xen PV guests don't support CPU0 hotplug at all.
         */
-       if (c->x86_vendor != X86_VENDOR_INTEL)
+       if (c->x86_vendor != X86_VENDOR_INTEL ||
+           boot_cpu_has(X86_FEATURE_XENPV))
                cpu0_hotpluggable = 0;
 
        /*
index 1463468..6490f61 100644 (file)
@@ -1415,7 +1415,7 @@ static bool __init determine_cpu_tsc_frequencies(bool early)
 
 static unsigned long __init get_loops_per_jiffy(void)
 {
-       unsigned long lpj = tsc_khz * KHZ;
+       u64 lpj = (u64)tsc_khz * KHZ;
 
        do_div(lpj, HZ);
        return lpj;
index e848a48..ae39455 100644 (file)
@@ -269,7 +269,7 @@ static void mop_up_one_pmd(struct mm_struct *mm, pgd_t *pgdp)
        if (pgd_val(pgd) != 0) {
                pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);
 
-               *pgdp = native_make_pgd(0);
+               pgd_clear(pgdp);
 
                paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
                pmd_free(mm, pmd);
@@ -494,7 +494,7 @@ int ptep_set_access_flags(struct vm_area_struct *vma,
        int changed = !pte_same(*ptep, entry);
 
        if (changed && dirty)
-               *ptep = entry;
+               set_pte(ptep, entry);
 
        return changed;
 }
@@ -509,7 +509,7 @@ int pmdp_set_access_flags(struct vm_area_struct *vma,
        VM_BUG_ON(address & ~HPAGE_PMD_MASK);
 
        if (changed && dirty) {
-               *pmdp = entry;
+               set_pmd(pmdp, entry);
                /*
                 * We had a write-protection fault here and changed the pmd
                 * to to more permissive. No need to flush the TLB for that,
@@ -529,7 +529,7 @@ int pudp_set_access_flags(struct vm_area_struct *vma, unsigned long address,
        VM_BUG_ON(address & ~HPAGE_PUD_MASK);
 
        if (changed && dirty) {
-               *pudp = entry;
+               set_pud(pudp, entry);
                /*
                 * We had a write-protection fault here and changed the pud
                 * to to more permissive. No need to flush the TLB for that,
index 05ca142..9959657 100644 (file)
@@ -85,10 +85,9 @@ pgd_t * __init efi_call_phys_prolog(void)
 
 void __init efi_call_phys_epilog(pgd_t *save_pgd)
 {
+       load_fixmap_gdt(0);
        load_cr3(save_pgd);
        __flush_tlb_all();
-
-       load_fixmap_gdt(0);
 }
 
 void __init efi_runtime_update_mappings(void)
index 04d038f..b9ad83a 100644 (file)
@@ -4,6 +4,7 @@ config ZONE_DMA
 
 config XTENSA
        def_bool y
+       select ARCH_HAS_SG_CHAIN
        select ARCH_HAS_SYNC_DMA_FOR_CPU
        select ARCH_HAS_SYNC_DMA_FOR_DEVICE
        select ARCH_NO_COHERENT_DMA_MMAP if !MMU
index 295c120..d67e30f 100644 (file)
@@ -64,11 +64,7 @@ endif
 vardirs := $(patsubst %,arch/xtensa/variants/%/,$(variant-y))
 plfdirs := $(patsubst %,arch/xtensa/platforms/%/,$(platform-y))
 
-ifeq ($(KBUILD_SRC),)
-KBUILD_CPPFLAGS += $(patsubst %,-I%include,$(vardirs) $(plfdirs))
-else
 KBUILD_CPPFLAGS += $(patsubst %,-I$(srctree)/%include,$(vardirs) $(plfdirs))
-endif
 
 KBUILD_DEFCONFIG := iss_defconfig
 
index f4bbb28..58709e8 100644 (file)
@@ -78,23 +78,28 @@ static struct notifier_block iss_panic_block = {
 
 void __init platform_setup(char **p_cmdline)
 {
+       static void *argv[COMMAND_LINE_SIZE / sizeof(void *)] __initdata;
+       static char cmdline[COMMAND_LINE_SIZE] __initdata;
        int argc = simc_argc();
        int argv_size = simc_argv_size();
 
        if (argc > 1) {
-               void **argv = alloc_bootmem(argv_size);
-               char *cmdline = alloc_bootmem(argv_size);
-               int i;
+               if (argv_size > sizeof(argv)) {
+                       pr_err("%s: command line too long: argv_size = %d\n",
+                              __func__, argv_size);
+               } else {
+                       int i;
 
-               cmdline[0] = 0;
-               simc_argv((void *)argv);
+                       cmdline[0] = 0;
+                       simc_argv((void *)argv);
 
-               for (i = 1; i < argc; ++i) {
-                       if (i > 1)
-                               strcat(cmdline, " ");
-                       strcat(cmdline, argv[i]);
+                       for (i = 1; i < argc; ++i) {
+                               if (i > 1)
+                                       strcat(cmdline, " ");
+                               strcat(cmdline, argv[i]);
+                       }
+                       *p_cmdline = cmdline;
                }
-               *p_cmdline = cmdline;
        }
 
        atomic_notifier_chain_register(&panic_notifier_list, &iss_panic_block);
index 58c6efa..9fe5952 100644 (file)
@@ -275,9 +275,9 @@ static void bfqg_and_blkg_get(struct bfq_group *bfqg)
 
 void bfqg_and_blkg_put(struct bfq_group *bfqg)
 {
-       bfqg_put(bfqg);
-
        blkg_put(bfqg_to_blkg(bfqg));
+
+       bfqg_put(bfqg);
 }
 
 /* @stats = 0 */
index b12966e..8c680a7 100644 (file)
@@ -2015,7 +2015,8 @@ int bio_associate_blkg(struct bio *bio, struct blkcg_gq *blkg)
 {
        if (unlikely(bio->bi_blkg))
                return -EBUSY;
-       blkg_get(blkg);
+       if (!blkg_try_get(blkg))
+               return -ENODEV;
        bio->bi_blkg = blkg;
        return 0;
 }
index 694595b..c630e02 100644 (file)
@@ -310,28 +310,11 @@ struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
        }
 }
 
-static void blkg_pd_offline(struct blkcg_gq *blkg)
-{
-       int i;
-
-       lockdep_assert_held(blkg->q->queue_lock);
-       lockdep_assert_held(&blkg->blkcg->lock);
-
-       for (i = 0; i < BLKCG_MAX_POLS; i++) {
-               struct blkcg_policy *pol = blkcg_policy[i];
-
-               if (blkg->pd[i] && !blkg->pd[i]->offline &&
-                   pol->pd_offline_fn) {
-                       pol->pd_offline_fn(blkg->pd[i]);
-                       blkg->pd[i]->offline = true;
-               }
-       }
-}
-
 static void blkg_destroy(struct blkcg_gq *blkg)
 {
        struct blkcg *blkcg = blkg->blkcg;
        struct blkcg_gq *parent = blkg->parent;
+       int i;
 
        lockdep_assert_held(blkg->q->queue_lock);
        lockdep_assert_held(&blkcg->lock);
@@ -340,6 +323,13 @@ static void blkg_destroy(struct blkcg_gq *blkg)
        WARN_ON_ONCE(list_empty(&blkg->q_node));
        WARN_ON_ONCE(hlist_unhashed(&blkg->blkcg_node));
 
+       for (i = 0; i < BLKCG_MAX_POLS; i++) {
+               struct blkcg_policy *pol = blkcg_policy[i];
+
+               if (blkg->pd[i] && pol->pd_offline_fn)
+                       pol->pd_offline_fn(blkg->pd[i]);
+       }
+
        if (parent) {
                blkg_rwstat_add_aux(&parent->stat_bytes, &blkg->stat_bytes);
                blkg_rwstat_add_aux(&parent->stat_ios, &blkg->stat_ios);
@@ -382,7 +372,6 @@ static void blkg_destroy_all(struct request_queue *q)
                struct blkcg *blkcg = blkg->blkcg;
 
                spin_lock(&blkcg->lock);
-               blkg_pd_offline(blkg);
                blkg_destroy(blkg);
                spin_unlock(&blkcg->lock);
        }
@@ -1053,59 +1042,64 @@ static struct cftype blkcg_legacy_files[] = {
        { }     /* terminate */
 };
 
+/*
+ * blkcg destruction is a three-stage process.
+ *
+ * 1. Destruction starts.  The blkcg_css_offline() callback is invoked
+ *    which offlines writeback.  Here we tie the next stage of blkg destruction
+ *    to the completion of writeback associated with the blkcg.  This lets us
+ *    avoid punting potentially large amounts of outstanding writeback to root
+ *    while maintaining any ongoing policies.  The next stage is triggered when
+ *    the nr_cgwbs count goes to zero.
+ *
+ * 2. When the nr_cgwbs count goes to zero, blkcg_destroy_blkgs() is called
+ *    and handles the destruction of blkgs.  Here the css reference held by
+ *    the blkg is put back eventually allowing blkcg_css_free() to be called.
+ *    This work may occur in cgwb_release_workfn() on the cgwb_release
+ *    workqueue.  Any submitted ios that fail to get the blkg ref will be
+ *    punted to the root_blkg.
+ *
+ * 3. Once the blkcg ref count goes to zero, blkcg_css_free() is called.
+ *    This finally frees the blkcg.
+ */
+
 /**
  * blkcg_css_offline - cgroup css_offline callback
  * @css: css of interest
  *
- * This function is called when @css is about to go away and responsible
- * for offlining all blkgs pd and killing all wbs associated with @css.
- * blkgs pd offline should be done while holding both q and blkcg locks.
- * As blkcg lock is nested inside q lock, this function performs reverse
- * double lock dancing.
- *
- * This is the blkcg counterpart of ioc_release_fn().
+ * This function is called when @css is about to go away.  Here the cgwbs are
+ * offlined first and only once writeback associated with the blkcg has
+ * finished do we start step 2 (see above).
  */
 static void blkcg_css_offline(struct cgroup_subsys_state *css)
 {
        struct blkcg *blkcg = css_to_blkcg(css);
-       struct blkcg_gq *blkg;
-
-       spin_lock_irq(&blkcg->lock);
-
-       hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) {
-               struct request_queue *q = blkg->q;
-
-               if (spin_trylock(q->queue_lock)) {
-                       blkg_pd_offline(blkg);
-                       spin_unlock(q->queue_lock);
-               } else {
-                       spin_unlock_irq(&blkcg->lock);
-                       cpu_relax();
-                       spin_lock_irq(&blkcg->lock);
-               }
-       }
-
-       spin_unlock_irq(&blkcg->lock);
 
+       /* this prevents anyone from attaching or migrating to this blkcg */
        wb_blkcg_offline(blkcg);
+
+       /* put the base cgwb reference allowing step 2 to be triggered */
+       blkcg_cgwb_put(blkcg);
 }
 
 /**
- * blkcg_destroy_all_blkgs - destroy all blkgs associated with a blkcg
+ * blkcg_destroy_blkgs - responsible for shooting down blkgs
  * @blkcg: blkcg of interest
  *
- * This function is called when blkcg css is about to free and responsible for
- * destroying all blkgs associated with @blkcg.
- * blkgs should be removed while holding both q and blkcg locks. As blkcg lock
+ * blkgs should be removed while holding both q and blkcg locks.  As blkcg lock
  * is nested inside q lock, this function performs reverse double lock dancing.
+ * Destroying the blkgs releases the reference held on the blkcg's css allowing
+ * blkcg_css_free to eventually be called.
+ *
+ * This is the blkcg counterpart of ioc_release_fn().
  */
-static void blkcg_destroy_all_blkgs(struct blkcg *blkcg)
+void blkcg_destroy_blkgs(struct blkcg *blkcg)
 {
        spin_lock_irq(&blkcg->lock);
+
        while (!hlist_empty(&blkcg->blkg_list)) {
                struct blkcg_gq *blkg = hlist_entry(blkcg->blkg_list.first,
-                                                   struct blkcg_gq,
-                                                   blkcg_node);
+                                               struct blkcg_gq, blkcg_node);
                struct request_queue *q = blkg->q;
 
                if (spin_trylock(q->queue_lock)) {
@@ -1117,6 +1111,7 @@ static void blkcg_destroy_all_blkgs(struct blkcg *blkcg)
                        spin_lock_irq(&blkcg->lock);
                }
        }
+
        spin_unlock_irq(&blkcg->lock);
 }
 
@@ -1125,8 +1120,6 @@ static void blkcg_css_free(struct cgroup_subsys_state *css)
        struct blkcg *blkcg = css_to_blkcg(css);
        int i;
 
-       blkcg_destroy_all_blkgs(blkcg);
-
        mutex_lock(&blkcg_pol_mutex);
 
        list_del(&blkcg->all_blkcgs_node);
@@ -1189,6 +1182,7 @@ blkcg_css_alloc(struct cgroup_subsys_state *parent_css)
        INIT_HLIST_HEAD(&blkcg->blkg_list);
 #ifdef CONFIG_CGROUP_WRITEBACK
        INIT_LIST_HEAD(&blkcg->cgwb_list);
+       refcount_set(&blkcg->cgwb_refcnt, 1);
 #endif
        list_add_tail(&blkcg->all_blkcgs_node, &all_blkcgs);
 
@@ -1480,11 +1474,8 @@ void blkcg_deactivate_policy(struct request_queue *q,
 
        list_for_each_entry(blkg, &q->blkg_list, q_node) {
                if (blkg->pd[pol->plid]) {
-                       if (!blkg->pd[pol->plid]->offline &&
-                           pol->pd_offline_fn) {
+                       if (pol->pd_offline_fn)
                                pol->pd_offline_fn(blkg->pd[pol->plid]);
-                               blkg->pd[pol->plid]->offline = true;
-                       }
                        pol->pd_free_fn(blkg->pd[pol->plid]);
                        blkg->pd[pol->plid] = NULL;
                }
@@ -1519,8 +1510,10 @@ int blkcg_policy_register(struct blkcg_policy *pol)
        for (i = 0; i < BLKCG_MAX_POLS; i++)
                if (!blkcg_policy[i])
                        break;
-       if (i >= BLKCG_MAX_POLS)
+       if (i >= BLKCG_MAX_POLS) {
+               pr_warn("blkcg_policy_register: BLKCG_MAX_POLS too small\n");
                goto err_unlock;
+       }
 
        /* Make sure cpd/pd_alloc_fn and cpd/pd_free_fn in pairs */
        if ((!pol->cpd_alloc_fn ^ !pol->cpd_free_fn) ||
index dee56c2..4dbc93f 100644 (file)
@@ -2163,9 +2163,12 @@ static inline bool bio_check_ro(struct bio *bio, struct hd_struct *part)
 {
        const int op = bio_op(bio);
 
-       if (part->policy && (op_is_write(op) && !op_is_flush(op))) {
+       if (part->policy && op_is_write(op)) {
                char b[BDEVNAME_SIZE];
 
+               if (op_is_flush(bio->bi_opf) && !bio_sectors(bio))
+                       return false;
+
                WARN_ONCE(1,
                       "generic_make_request: Trying to write "
                        "to read-only block-device %s (partno %d)\n",
index a3eede0..01d0620 100644 (file)
@@ -2129,8 +2129,9 @@ static inline void throtl_update_latency_buckets(struct throtl_data *td)
 static void blk_throtl_assoc_bio(struct throtl_grp *tg, struct bio *bio)
 {
 #ifdef CONFIG_BLK_DEV_THROTTLING_LOW
-       if (bio->bi_css)
-               bio_associate_blkg(bio, tg_to_blkg(tg));
+       /* fallback to root_blkg if we fail to get a blkg ref */
+       if (bio->bi_css && (bio_associate_blkg(bio, tg_to_blkg(tg)) == -ENODEV))
+               bio_associate_blkg(bio, bio->bi_disk->queue->root_blkg);
        bio_issue_init(&bio->bi_issue, bio_sectors(bio));
 #endif
 }
index 9706613..bf64cfa 100644 (file)
@@ -879,7 +879,7 @@ static void acpi_lpss_dismiss(struct device *dev)
 #define LPSS_GPIODEF0_DMA_LLP          BIT(13)
 
 static DEFINE_MUTEX(lpss_iosf_mutex);
-static bool lpss_iosf_d3_entered;
+static bool lpss_iosf_d3_entered = true;
 
 static void lpss_iosf_enter_d3_state(void)
 {
index 292088f..d2e29a1 100644 (file)
 #include <linux/delay.h>
 #ifdef CONFIG_X86
 #include <asm/mpspec.h>
+#include <linux/dmi.h>
 #endif
 #include <linux/acpi_iort.h>
 #include <linux/pci.h>
 #include <acpi/apei.h>
-#include <linux/dmi.h>
 #include <linux/suspend.h>
 
 #include "internal.h"
@@ -82,10 +82,6 @@ static const struct dmi_system_id dsdt_dmi_table[] __initconst = {
        },
        {}
 };
-#else
-static const struct dmi_system_id dsdt_dmi_table[] __initconst = {
-       {}
-};
 #endif
 
 /* --------------------------------------------------------------------------
@@ -1033,11 +1029,16 @@ void __init acpi_early_init(void)
 
        acpi_permanent_mmap = true;
 
+#ifdef CONFIG_X86
        /*
         * If the machine falls into the DMI check table,
-        * DSDT will be copied to memory
+        * DSDT will be copied to memory.
+        * Note that calling dmi_check_system() here on other architectures
+        * would not be OK because only x86 initializes dmi early enough.
+        * Thankfully only x86 systems need such quirks for now.
         */
        dmi_check_system(dsdt_dmi_table);
+#endif
 
        status = acpi_reallocate_root_table();
        if (ACPI_FAILURE(status)) {
index 3f3b7b2..64fd96e 100644 (file)
@@ -332,6 +332,35 @@ err_no_vma:
        return vma ? -ENOMEM : -ESRCH;
 }
 
+
+static inline void binder_alloc_set_vma(struct binder_alloc *alloc,
+               struct vm_area_struct *vma)
+{
+       if (vma)
+               alloc->vma_vm_mm = vma->vm_mm;
+       /*
+        * If we see alloc->vma is not NULL, buffer data structures set up
+        * completely. Look at smp_rmb side binder_alloc_get_vma.
+        * We also want to guarantee new alloc->vma_vm_mm is always visible
+        * if alloc->vma is set.
+        */
+       smp_wmb();
+       alloc->vma = vma;
+}
+
+static inline struct vm_area_struct *binder_alloc_get_vma(
+               struct binder_alloc *alloc)
+{
+       struct vm_area_struct *vma = NULL;
+
+       if (alloc->vma) {
+               /* Look at description in binder_alloc_set_vma */
+               smp_rmb();
+               vma = alloc->vma;
+       }
+       return vma;
+}
+
 static struct binder_buffer *binder_alloc_new_buf_locked(
                                struct binder_alloc *alloc,
                                size_t data_size,
@@ -348,7 +377,7 @@ static struct binder_buffer *binder_alloc_new_buf_locked(
        size_t size, data_offsets_size;
        int ret;
 
-       if (alloc->vma == NULL) {
+       if (!binder_alloc_get_vma(alloc)) {
                binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
                                   "%d: binder_alloc_buf, no vma\n",
                                   alloc->pid);
@@ -723,9 +752,7 @@ int binder_alloc_mmap_handler(struct binder_alloc *alloc,
        buffer->free = 1;
        binder_insert_free_buffer(alloc, buffer);
        alloc->free_async_space = alloc->buffer_size / 2;
-       barrier();
-       alloc->vma = vma;
-       alloc->vma_vm_mm = vma->vm_mm;
+       binder_alloc_set_vma(alloc, vma);
        mmgrab(alloc->vma_vm_mm);
 
        return 0;
@@ -754,10 +781,10 @@ void binder_alloc_deferred_release(struct binder_alloc *alloc)
        int buffers, page_count;
        struct binder_buffer *buffer;
 
-       BUG_ON(alloc->vma);
-
        buffers = 0;
        mutex_lock(&alloc->mutex);
+       BUG_ON(alloc->vma);
+
        while ((n = rb_first(&alloc->allocated_buffers))) {
                buffer = rb_entry(n, struct binder_buffer, rb_node);
 
@@ -900,7 +927,7 @@ int binder_alloc_get_allocated_count(struct binder_alloc *alloc)
  */
 void binder_alloc_vma_close(struct binder_alloc *alloc)
 {
-       WRITE_ONCE(alloc->vma, NULL);
+       binder_alloc_set_vma(alloc, NULL);
 }
 
 /**
@@ -935,7 +962,7 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
 
        index = page - alloc->pages;
        page_addr = (uintptr_t)alloc->buffer + index * PAGE_SIZE;
-       vma = alloc->vma;
+       vma = binder_alloc_get_vma(alloc);
        if (vma) {
                if (!mmget_not_zero(alloc->vma_vm_mm))
                        goto err_mmget;
index 172e328..599e01b 100644 (file)
@@ -7394,4 +7394,4 @@ EXPORT_SYMBOL_GPL(ata_cable_unknown);
 EXPORT_SYMBOL_GPL(ata_cable_ignore);
 EXPORT_SYMBOL_GPL(ata_cable_sata);
 EXPORT_SYMBOL_GPL(ata_host_get);
-EXPORT_SYMBOL_GPL(ata_host_put);
\ No newline at end of file
+EXPORT_SYMBOL_GPL(ata_host_put);
index 0943e70..b3c0498 100644 (file)
@@ -209,21 +209,24 @@ static struct fw_priv *__lookup_fw_priv(const char *fw_name)
 static int alloc_lookup_fw_priv(const char *fw_name,
                                struct firmware_cache *fwc,
                                struct fw_priv **fw_priv, void *dbuf,
-                               size_t size)
+                               size_t size, enum fw_opt opt_flags)
 {
        struct fw_priv *tmp;
 
        spin_lock(&fwc->lock);
-       tmp = __lookup_fw_priv(fw_name);
-       if (tmp) {
-               kref_get(&tmp->ref);
-               spin_unlock(&fwc->lock);
-               *fw_priv = tmp;
-               pr_debug("batched request - sharing the same struct fw_priv and lookup for multiple requests\n");
-               return 1;
+       if (!(opt_flags & FW_OPT_NOCACHE)) {
+               tmp = __lookup_fw_priv(fw_name);
+               if (tmp) {
+                       kref_get(&tmp->ref);
+                       spin_unlock(&fwc->lock);
+                       *fw_priv = tmp;
+                       pr_debug("batched request - sharing the same struct fw_priv and lookup for multiple requests\n");
+                       return 1;
+               }
        }
+
        tmp = __allocate_fw_priv(fw_name, fwc, dbuf, size);
-       if (tmp)
+       if (tmp && !(opt_flags & FW_OPT_NOCACHE))
                list_add(&tmp->list, &fwc->head);
        spin_unlock(&fwc->lock);
 
@@ -493,7 +496,8 @@ int assign_fw(struct firmware *fw, struct device *device,
  */
 static int
 _request_firmware_prepare(struct firmware **firmware_p, const char *name,
-                         struct device *device, void *dbuf, size_t size)
+                         struct device *device, void *dbuf, size_t size,
+                         enum fw_opt opt_flags)
 {
        struct firmware *firmware;
        struct fw_priv *fw_priv;
@@ -511,7 +515,8 @@ _request_firmware_prepare(struct firmware **firmware_p, const char *name,
                return 0; /* assigned */
        }
 
-       ret = alloc_lookup_fw_priv(name, &fw_cache, &fw_priv, dbuf, size);
+       ret = alloc_lookup_fw_priv(name, &fw_cache, &fw_priv, dbuf, size,
+                                 opt_flags);
 
        /*
         * bind with 'priv' now to avoid warning in failure path
@@ -571,7 +576,8 @@ _request_firmware(const struct firmware **firmware_p, const char *name,
                goto out;
        }
 
-       ret = _request_firmware_prepare(&fw, name, device, buf, size);
+       ret = _request_firmware_prepare(&fw, name, device, buf, size,
+                                       opt_flags);
        if (ret <= 0) /* error or already assigned */
                goto out;
 
index c8a1cb0..817320c 100644 (file)
@@ -416,26 +416,24 @@ static ssize_t show_valid_zones(struct device *dev,
        struct zone *default_zone;
        int nid;
 
-       /*
-        * The block contains more than one zone can not be offlined.
-        * This can happen e.g. for ZONE_DMA and ZONE_DMA32
-        */
-       if (!test_pages_in_a_zone(start_pfn, start_pfn + nr_pages, &valid_start_pfn, &valid_end_pfn))
-               return sprintf(buf, "none\n");
-
-       start_pfn = valid_start_pfn;
-       nr_pages = valid_end_pfn - start_pfn;
-
        /*
         * Check the existing zone. Make sure that we do that only on the
         * online nodes otherwise the page_zone is not reliable
         */
        if (mem->state == MEM_ONLINE) {
+               /*
+                * The block contains more than one zone can not be offlined.
+                * This can happen e.g. for ZONE_DMA and ZONE_DMA32
+                */
+               if (!test_pages_in_a_zone(start_pfn, start_pfn + nr_pages,
+                                         &valid_start_pfn, &valid_end_pfn))
+                       return sprintf(buf, "none\n");
+               start_pfn = valid_start_pfn;
                strcat(buf, page_zone(pfn_to_page(start_pfn))->name);
                goto out;
        }
 
-       nid = pfn_to_nid(start_pfn);
+       nid = mem->nid;
        default_zone = zone_for_pfn_range(MMOP_ONLINE_KEEP, nid, start_pfn, nr_pages);
        strcat(buf, default_zone->name);
 
index 3863c00..14a5125 100644 (file)
@@ -1239,6 +1239,9 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
        case NBD_SET_SOCK:
                return nbd_add_socket(nbd, arg, false);
        case NBD_SET_BLKSIZE:
+               if (!arg || !is_power_of_2(arg) || arg < 512 ||
+                   arg > PAGE_SIZE)
+                       return -EINVAL;
                nbd_size_set(nbd, arg,
                             div_s64(config->bytesize, arg));
                return 0;
index d81781f..34e0030 100644 (file)
@@ -87,10 +87,10 @@ struct nullb {
 #ifdef CONFIG_BLK_DEV_ZONED
 int null_zone_init(struct nullb_device *dev);
 void null_zone_exit(struct nullb_device *dev);
-blk_status_t null_zone_report(struct nullb *nullb,
-                                           struct nullb_cmd *cmd);
-void null_zone_write(struct nullb_cmd *cmd);
-void null_zone_reset(struct nullb_cmd *cmd);
+blk_status_t null_zone_report(struct nullb *nullb, struct bio *bio);
+void null_zone_write(struct nullb_cmd *cmd, sector_t sector,
+                       unsigned int nr_sectors);
+void null_zone_reset(struct nullb_cmd *cmd, sector_t sector);
 #else
 static inline int null_zone_init(struct nullb_device *dev)
 {
@@ -98,11 +98,14 @@ static inline int null_zone_init(struct nullb_device *dev)
 }
 static inline void null_zone_exit(struct nullb_device *dev) {}
 static inline blk_status_t null_zone_report(struct nullb *nullb,
-                                           struct nullb_cmd *cmd)
+                                           struct bio *bio)
 {
        return BLK_STS_NOTSUPP;
 }
-static inline void null_zone_write(struct nullb_cmd *cmd) {}
-static inline void null_zone_reset(struct nullb_cmd *cmd) {}
+static inline void null_zone_write(struct nullb_cmd *cmd, sector_t sector,
+                                  unsigned int nr_sectors)
+{
+}
+static inline void null_zone_reset(struct nullb_cmd *cmd, sector_t sector) {}
 #endif /* CONFIG_BLK_DEV_ZONED */
 #endif /* __NULL_BLK_H */
index 6127e3f..093b614 100644 (file)
@@ -1157,16 +1157,33 @@ static void null_restart_queue_async(struct nullb *nullb)
        }
 }
 
+static bool cmd_report_zone(struct nullb *nullb, struct nullb_cmd *cmd)
+{
+       struct nullb_device *dev = cmd->nq->dev;
+
+       if (dev->queue_mode == NULL_Q_BIO) {
+               if (bio_op(cmd->bio) == REQ_OP_ZONE_REPORT) {
+                       cmd->error = null_zone_report(nullb, cmd->bio);
+                       return true;
+               }
+       } else {
+               if (req_op(cmd->rq) == REQ_OP_ZONE_REPORT) {
+                       cmd->error = null_zone_report(nullb, cmd->rq->bio);
+                       return true;
+               }
+       }
+
+       return false;
+}
+
 static blk_status_t null_handle_cmd(struct nullb_cmd *cmd)
 {
        struct nullb_device *dev = cmd->nq->dev;
        struct nullb *nullb = dev->nullb;
        int err = 0;
 
-       if (req_op(cmd->rq) == REQ_OP_ZONE_REPORT) {
-               cmd->error = null_zone_report(nullb, cmd);
+       if (cmd_report_zone(nullb, cmd))
                goto out;
-       }
 
        if (test_bit(NULLB_DEV_FL_THROTTLED, &dev->flags)) {
                struct request *rq = cmd->rq;
@@ -1234,10 +1251,24 @@ static blk_status_t null_handle_cmd(struct nullb_cmd *cmd)
        cmd->error = errno_to_blk_status(err);
 
        if (!cmd->error && dev->zoned) {
-               if (req_op(cmd->rq) == REQ_OP_WRITE)
-                       null_zone_write(cmd);
-               else if (req_op(cmd->rq) == REQ_OP_ZONE_RESET)
-                       null_zone_reset(cmd);
+               sector_t sector;
+               unsigned int nr_sectors;
+               int op;
+
+               if (dev->queue_mode == NULL_Q_BIO) {
+                       op = bio_op(cmd->bio);
+                       sector = cmd->bio->bi_iter.bi_sector;
+                       nr_sectors = cmd->bio->bi_iter.bi_size >> 9;
+               } else {
+                       op = req_op(cmd->rq);
+                       sector = blk_rq_pos(cmd->rq);
+                       nr_sectors = blk_rq_sectors(cmd->rq);
+               }
+
+               if (op == REQ_OP_WRITE)
+                       null_zone_write(cmd, sector, nr_sectors);
+               else if (op == REQ_OP_ZONE_RESET)
+                       null_zone_reset(cmd, sector);
        }
 out:
        /* Complete IO by inline, softirq or timer */
index a979ca0..7c6b86d 100644 (file)
@@ -48,8 +48,8 @@ void null_zone_exit(struct nullb_device *dev)
        kvfree(dev->zones);
 }
 
-static void null_zone_fill_rq(struct nullb_device *dev, struct request *rq,
-                             unsigned int zno, unsigned int nr_zones)
+static void null_zone_fill_bio(struct nullb_device *dev, struct bio *bio,
+                              unsigned int zno, unsigned int nr_zones)
 {
        struct blk_zone_report_hdr *hdr = NULL;
        struct bio_vec bvec;
@@ -57,7 +57,7 @@ static void null_zone_fill_rq(struct nullb_device *dev, struct request *rq,
        void *addr;
        unsigned int zones_to_cpy;
 
-       bio_for_each_segment(bvec, rq->bio, iter) {
+       bio_for_each_segment(bvec, bio, iter) {
                addr = kmap_atomic(bvec.bv_page);
 
                zones_to_cpy = bvec.bv_len / sizeof(struct blk_zone);
@@ -84,29 +84,24 @@ static void null_zone_fill_rq(struct nullb_device *dev, struct request *rq,
        }
 }
 
-blk_status_t null_zone_report(struct nullb *nullb,
-                                    struct nullb_cmd *cmd)
+blk_status_t null_zone_report(struct nullb *nullb, struct bio *bio)
 {
        struct nullb_device *dev = nullb->dev;
-       struct request *rq = cmd->rq;
-       unsigned int zno = null_zone_no(dev, blk_rq_pos(rq));
+       unsigned int zno = null_zone_no(dev, bio->bi_iter.bi_sector);
        unsigned int nr_zones = dev->nr_zones - zno;
-       unsigned int max_zones = (blk_rq_bytes(rq) /
-                                       sizeof(struct blk_zone)) - 1;
+       unsigned int max_zones;
 
+       max_zones = (bio->bi_iter.bi_size / sizeof(struct blk_zone)) - 1;
        nr_zones = min_t(unsigned int, nr_zones, max_zones);
-
-       null_zone_fill_rq(nullb->dev, rq, zno, nr_zones);
+       null_zone_fill_bio(nullb->dev, bio, zno, nr_zones);
 
        return BLK_STS_OK;
 }
 
-void null_zone_write(struct nullb_cmd *cmd)
+void null_zone_write(struct nullb_cmd *cmd, sector_t sector,
+                    unsigned int nr_sectors)
 {
        struct nullb_device *dev = cmd->nq->dev;
-       struct request *rq = cmd->rq;
-       sector_t sector = blk_rq_pos(rq);
-       unsigned int rq_sectors = blk_rq_sectors(rq);
        unsigned int zno = null_zone_no(dev, sector);
        struct blk_zone *zone = &dev->zones[zno];
 
@@ -118,7 +113,7 @@ void null_zone_write(struct nullb_cmd *cmd)
        case BLK_ZONE_COND_EMPTY:
        case BLK_ZONE_COND_IMP_OPEN:
                /* Writes must be at the write pointer position */
-               if (blk_rq_pos(rq) != zone->wp) {
+               if (sector != zone->wp) {
                        cmd->error = BLK_STS_IOERR;
                        break;
                }
@@ -126,7 +121,7 @@ void null_zone_write(struct nullb_cmd *cmd)
                if (zone->cond == BLK_ZONE_COND_EMPTY)
                        zone->cond = BLK_ZONE_COND_IMP_OPEN;
 
-               zone->wp += rq_sectors;
+               zone->wp += nr_sectors;
                if (zone->wp == zone->start + zone->len)
                        zone->cond = BLK_ZONE_COND_FULL;
                break;
@@ -137,11 +132,10 @@ void null_zone_write(struct nullb_cmd *cmd)
        }
 }
 
-void null_zone_reset(struct nullb_cmd *cmd)
+void null_zone_reset(struct nullb_cmd *cmd, sector_t sector)
 {
        struct nullb_device *dev = cmd->nq->dev;
-       struct request *rq = cmd->rq;
-       unsigned int zno = null_zone_no(dev, blk_rq_pos(rq));
+       unsigned int zno = null_zone_no(dev, sector);
        struct blk_zone *zone = &dev->zones[zno];
 
        zone->cond = BLK_ZONE_COND_EMPTY;
index 7915f3b..73ed5f3 100644 (file)
@@ -4207,11 +4207,13 @@ static ssize_t rbd_parent_show(struct device *dev,
 
                count += sprintf(&buf[count], "%s"
                            "pool_id %llu\npool_name %s\n"
+                           "pool_ns %s\n"
                            "image_id %s\nimage_name %s\n"
                            "snap_id %llu\nsnap_name %s\n"
                            "overlap %llu\n",
                            !count ? "" : "\n", /* first? */
                            spec->pool_id, spec->pool_name,
+                           spec->pool_ns ?: "",
                            spec->image_id, spec->image_name ?: "(unknown)",
                            spec->snap_id, spec->snap_name,
                            rbd_dev->parent_overlap);
@@ -4584,47 +4586,177 @@ static int rbd_dev_v2_features(struct rbd_device *rbd_dev)
                                                &rbd_dev->header.features);
 }
 
+struct parent_image_info {
+       u64             pool_id;
+       const char      *pool_ns;
+       const char      *image_id;
+       u64             snap_id;
+
+       bool            has_overlap;
+       u64             overlap;
+};
+
+/*
+ * The caller is responsible for @pii.
+ */
+static int decode_parent_image_spec(void **p, void *end,
+                                   struct parent_image_info *pii)
+{
+       u8 struct_v;
+       u32 struct_len;
+       int ret;
+
+       ret = ceph_start_decoding(p, end, 1, "ParentImageSpec",
+                                 &struct_v, &struct_len);
+       if (ret)
+               return ret;
+
+       ceph_decode_64_safe(p, end, pii->pool_id, e_inval);
+       pii->pool_ns = ceph_extract_encoded_string(p, end, NULL, GFP_KERNEL);
+       if (IS_ERR(pii->pool_ns)) {
+               ret = PTR_ERR(pii->pool_ns);
+               pii->pool_ns = NULL;
+               return ret;
+       }
+       pii->image_id = ceph_extract_encoded_string(p, end, NULL, GFP_KERNEL);
+       if (IS_ERR(pii->image_id)) {
+               ret = PTR_ERR(pii->image_id);
+               pii->image_id = NULL;
+               return ret;
+       }
+       ceph_decode_64_safe(p, end, pii->snap_id, e_inval);
+       return 0;
+
+e_inval:
+       return -EINVAL;
+}
+
+static int __get_parent_info(struct rbd_device *rbd_dev,
+                            struct page *req_page,
+                            struct page *reply_page,
+                            struct parent_image_info *pii)
+{
+       struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
+       size_t reply_len = PAGE_SIZE;
+       void *p, *end;
+       int ret;
+
+       ret = ceph_osdc_call(osdc, &rbd_dev->header_oid, &rbd_dev->header_oloc,
+                            "rbd", "parent_get", CEPH_OSD_FLAG_READ,
+                            req_page, sizeof(u64), reply_page, &reply_len);
+       if (ret)
+               return ret == -EOPNOTSUPP ? 1 : ret;
+
+       p = page_address(reply_page);
+       end = p + reply_len;
+       ret = decode_parent_image_spec(&p, end, pii);
+       if (ret)
+               return ret;
+
+       ret = ceph_osdc_call(osdc, &rbd_dev->header_oid, &rbd_dev->header_oloc,
+                            "rbd", "parent_overlap_get", CEPH_OSD_FLAG_READ,
+                            req_page, sizeof(u64), reply_page, &reply_len);
+       if (ret)
+               return ret;
+
+       p = page_address(reply_page);
+       end = p + reply_len;
+       ceph_decode_8_safe(&p, end, pii->has_overlap, e_inval);
+       if (pii->has_overlap)
+               ceph_decode_64_safe(&p, end, pii->overlap, e_inval);
+
+       return 0;
+
+e_inval:
+       return -EINVAL;
+}
+
+/*
+ * The caller is responsible for @pii.
+ */
+static int __get_parent_info_legacy(struct rbd_device *rbd_dev,
+                                   struct page *req_page,
+                                   struct page *reply_page,
+                                   struct parent_image_info *pii)
+{
+       struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
+       size_t reply_len = PAGE_SIZE;
+       void *p, *end;
+       int ret;
+
+       ret = ceph_osdc_call(osdc, &rbd_dev->header_oid, &rbd_dev->header_oloc,
+                            "rbd", "get_parent", CEPH_OSD_FLAG_READ,
+                            req_page, sizeof(u64), reply_page, &reply_len);
+       if (ret)
+               return ret;
+
+       p = page_address(reply_page);
+       end = p + reply_len;
+       ceph_decode_64_safe(&p, end, pii->pool_id, e_inval);
+       pii->image_id = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL);
+       if (IS_ERR(pii->image_id)) {
+               ret = PTR_ERR(pii->image_id);
+               pii->image_id = NULL;
+               return ret;
+       }
+       ceph_decode_64_safe(&p, end, pii->snap_id, e_inval);
+       pii->has_overlap = true;
+       ceph_decode_64_safe(&p, end, pii->overlap, e_inval);
+
+       return 0;
+
+e_inval:
+       return -EINVAL;
+}
+
+static int get_parent_info(struct rbd_device *rbd_dev,
+                          struct parent_image_info *pii)
+{
+       struct page *req_page, *reply_page;
+       void *p;
+       int ret;
+
+       req_page = alloc_page(GFP_KERNEL);
+       if (!req_page)
+               return -ENOMEM;
+
+       reply_page = alloc_page(GFP_KERNEL);
+       if (!reply_page) {
+               __free_page(req_page);
+               return -ENOMEM;
+       }
+
+       p = page_address(req_page);
+       ceph_encode_64(&p, rbd_dev->spec->snap_id);
+       ret = __get_parent_info(rbd_dev, req_page, reply_page, pii);
+       if (ret > 0)
+               ret = __get_parent_info_legacy(rbd_dev, req_page, reply_page,
+                                              pii);
+
+       __free_page(req_page);
+       __free_page(reply_page);
+       return ret;
+}
+
 static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev)
 {
        struct rbd_spec *parent_spec;
-       size_t size;
-       void *reply_buf = NULL;
-       __le64 snapid;
-       void *p;
-       void *end;
-       u64 pool_id;
-       char *image_id;
-       u64 snap_id;
-       u64 overlap;
+       struct parent_image_info pii = { 0 };
        int ret;
 
        parent_spec = rbd_spec_alloc();
        if (!parent_spec)
                return -ENOMEM;
 
-       size = sizeof (__le64) +                                /* pool_id */
-               sizeof (__le32) + RBD_IMAGE_ID_LEN_MAX +        /* image_id */
-               sizeof (__le64) +                               /* snap_id */
-               sizeof (__le64);                                /* overlap */
-       reply_buf = kmalloc(size, GFP_KERNEL);
-       if (!reply_buf) {
-               ret = -ENOMEM;
+       ret = get_parent_info(rbd_dev, &pii);
+       if (ret)
                goto out_err;
-       }
 
-       snapid = cpu_to_le64(rbd_dev->spec->snap_id);
-       ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
-                                 &rbd_dev->header_oloc, "get_parent",
-                                 &snapid, sizeof(snapid), reply_buf, size);
-       dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
-       if (ret < 0)
-               goto out_err;
+       dout("%s pool_id %llu pool_ns %s image_id %s snap_id %llu has_overlap %d overlap %llu\n",
+            __func__, pii.pool_id, pii.pool_ns, pii.image_id, pii.snap_id,
+            pii.has_overlap, pii.overlap);
 
-       p = reply_buf;
-       end = reply_buf + ret;
-       ret = -ERANGE;
-       ceph_decode_64_safe(&p, end, pool_id, out_err);
-       if (pool_id == CEPH_NOPOOL) {
+       if (pii.pool_id == CEPH_NOPOOL || !pii.has_overlap) {
                /*
                 * Either the parent never existed, or we have
                 * record of it but the image got flattened so it no
@@ -4633,6 +4765,10 @@ static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev)
                 * overlap to 0.  The effect of this is that all new
                 * requests will be treated as if the image had no
                 * parent.
+                *
+                * If !pii.has_overlap, the parent image spec is not
+                * applicable.  It's there to avoid duplication in each
+                * snapshot record.
                 */
                if (rbd_dev->parent_overlap) {
                        rbd_dev->parent_overlap = 0;
@@ -4647,51 +4783,36 @@ static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev)
        /* The ceph file layout needs to fit pool id in 32 bits */
 
        ret = -EIO;
-       if (pool_id > (u64)U32_MAX) {
+       if (pii.pool_id > (u64)U32_MAX) {
                rbd_warn(NULL, "parent pool id too large (%llu > %u)",
-                       (unsigned long long)pool_id, U32_MAX);
+                       (unsigned long long)pii.pool_id, U32_MAX);
                goto out_err;
        }
 
-       image_id = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL);
-       if (IS_ERR(image_id)) {
-               ret = PTR_ERR(image_id);
-               goto out_err;
-       }
-       ceph_decode_64_safe(&p, end, snap_id, out_err);
-       ceph_decode_64_safe(&p, end, overlap, out_err);
-
        /*
         * The parent won't change (except when the clone is
         * flattened, already handled that).  So we only need to
         * record the parent spec we have not already done so.
         */
        if (!rbd_dev->parent_spec) {
-               parent_spec->pool_id = pool_id;
-               parent_spec->image_id = image_id;
-               parent_spec->snap_id = snap_id;
-
-               /* TODO: support cloning across namespaces */
-               if (rbd_dev->spec->pool_ns) {
-                       parent_spec->pool_ns = kstrdup(rbd_dev->spec->pool_ns,
-                                                      GFP_KERNEL);
-                       if (!parent_spec->pool_ns) {
-                               ret = -ENOMEM;
-                               goto out_err;
-                       }
+               parent_spec->pool_id = pii.pool_id;
+               if (pii.pool_ns && *pii.pool_ns) {
+                       parent_spec->pool_ns = pii.pool_ns;
+                       pii.pool_ns = NULL;
                }
+               parent_spec->image_id = pii.image_id;
+               pii.image_id = NULL;
+               parent_spec->snap_id = pii.snap_id;
 
                rbd_dev->parent_spec = parent_spec;
                parent_spec = NULL;     /* rbd_dev now owns this */
-       } else {
-               kfree(image_id);
        }
 
        /*
         * We always update the parent overlap.  If it's zero we issue
         * a warning, as we will proceed as if there was no parent.
         */
-       if (!overlap) {
+       if (!pii.overlap) {
                if (parent_spec) {
                        /* refresh, careful to warn just once */
                        if (rbd_dev->parent_overlap)
@@ -4702,14 +4823,14 @@ static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev)
                        rbd_warn(rbd_dev, "clone is standalone (overlap 0)");
                }
        }
-       rbd_dev->parent_overlap = overlap;
+       rbd_dev->parent_overlap = pii.overlap;
 
 out:
        ret = 0;
 out_err:
-       kfree(reply_buf);
+       kfree(pii.pool_ns);
+       kfree(pii.image_id);
        rbd_spec_put(parent_spec);
-
        return ret;
 }
 
index ce277ee..4072849 100644 (file)
@@ -566,5 +566,5 @@ config RANDOM_TRUST_CPU
        that CPU manufacturer (perhaps with the insistence or mandate
        of a Nation State's intelligence or law enforcement agencies)
        has not installed a hidden back door to compromise the CPU's
-       random number generation facilities.
-
+       random number generation facilities. This can also be configured
+       at boot with "random.trust_cpu=on/off".
index a339766..97d6856 100644 (file)
@@ -59,8 +59,6 @@ enum bt_states {
        BT_STATE_RESET3,
        BT_STATE_RESTART,
        BT_STATE_PRINTME,
-       BT_STATE_CAPABILITIES_BEGIN,
-       BT_STATE_CAPABILITIES_END,
        BT_STATE_LONG_BUSY      /* BT doesn't get hosed :-) */
 };
 
@@ -86,7 +84,6 @@ struct si_sm_data {
        int             error_retries;  /* end of "common" fields */
        int             nonzero_status; /* hung BMCs stay all 0 */
        enum bt_states  complete;       /* to divert the state machine */
-       int             BT_CAP_outreqs;
        long            BT_CAP_req2rsp;
        int             BT_CAP_retries; /* Recommended retries */
 };
@@ -137,8 +134,6 @@ static char *state2txt(unsigned char state)
        case BT_STATE_RESET3:           return("RESET3");
        case BT_STATE_RESTART:          return("RESTART");
        case BT_STATE_LONG_BUSY:        return("LONG_BUSY");
-       case BT_STATE_CAPABILITIES_BEGIN: return("CAP_BEGIN");
-       case BT_STATE_CAPABILITIES_END: return("CAP_END");
        }
        return("BAD STATE");
 }
@@ -185,7 +180,6 @@ static unsigned int bt_init_data(struct si_sm_data *bt, struct si_sm_io *io)
        bt->complete = BT_STATE_IDLE;   /* end here */
        bt->BT_CAP_req2rsp = BT_NORMAL_TIMEOUT * USEC_PER_SEC;
        bt->BT_CAP_retries = BT_NORMAL_RETRY_LIMIT;
-       /* BT_CAP_outreqs == zero is a flag to read BT Capabilities */
        return 3; /* We claim 3 bytes of space; ought to check SPMI table */
 }
 
@@ -451,7 +445,7 @@ static enum si_sm_result error_recovery(struct si_sm_data *bt,
 
 static enum si_sm_result bt_event(struct si_sm_data *bt, long time)
 {
-       unsigned char status, BT_CAP[8];
+       unsigned char status;
        static enum bt_states last_printed = BT_STATE_PRINTME;
        int i;
 
@@ -504,12 +498,6 @@ static enum si_sm_result bt_event(struct si_sm_data *bt, long time)
                if (status & BT_H_BUSY)         /* clear a leftover H_BUSY */
                        BT_CONTROL(BT_H_BUSY);
 
-               bt->timeout = bt->BT_CAP_req2rsp;
-
-               /* Read BT capabilities if it hasn't been done yet */
-               if (!bt->BT_CAP_outreqs)
-                       BT_STATE_CHANGE(BT_STATE_CAPABILITIES_BEGIN,
-                                       SI_SM_CALL_WITHOUT_DELAY);
                BT_SI_SM_RETURN(SI_SM_IDLE);
 
        case BT_STATE_XACTION_START:
@@ -614,37 +602,6 @@ static enum si_sm_result bt_event(struct si_sm_data *bt, long time)
                BT_STATE_CHANGE(BT_STATE_XACTION_START,
                                SI_SM_CALL_WITH_DELAY);
 
-       /*
-        * Get BT Capabilities, using timing of upper level state machine.
-        * Set outreqs to prevent infinite loop on timeout.
-        */
-       case BT_STATE_CAPABILITIES_BEGIN:
-               bt->BT_CAP_outreqs = 1;
-               {
-                       unsigned char GetBT_CAP[] = { 0x18, 0x36 };
-                       bt->state = BT_STATE_IDLE;
-                       bt_start_transaction(bt, GetBT_CAP, sizeof(GetBT_CAP));
-               }
-               bt->complete = BT_STATE_CAPABILITIES_END;
-               BT_STATE_CHANGE(BT_STATE_XACTION_START,
-                               SI_SM_CALL_WITH_DELAY);
-
-       case BT_STATE_CAPABILITIES_END:
-               i = bt_get_result(bt, BT_CAP, sizeof(BT_CAP));
-               bt_init_data(bt, bt->io);
-               if ((i == 8) && !BT_CAP[2]) {
-                       bt->BT_CAP_outreqs = BT_CAP[3];
-                       bt->BT_CAP_req2rsp = BT_CAP[6] * USEC_PER_SEC;
-                       bt->BT_CAP_retries = BT_CAP[7];
-               } else
-                       printk(KERN_WARNING "IPMI BT: using default values\n");
-               if (!bt->BT_CAP_outreqs)
-                       bt->BT_CAP_outreqs = 1;
-               printk(KERN_WARNING "IPMI BT: req2rsp=%ld secs retries=%d\n",
-                       bt->BT_CAP_req2rsp / USEC_PER_SEC, bt->BT_CAP_retries);
-               bt->timeout = bt->BT_CAP_req2rsp;
-               return SI_SM_CALL_WITHOUT_DELAY;
-
        default:        /* should never occur */
                return error_recovery(bt,
                                      status,
@@ -655,6 +612,11 @@ static enum si_sm_result bt_event(struct si_sm_data *bt, long time)
 
 static int bt_detect(struct si_sm_data *bt)
 {
+       unsigned char GetBT_CAP[] = { 0x18, 0x36 };
+       unsigned char BT_CAP[8];
+       enum si_sm_result smi_result;
+       int rv;
+
        /*
         * It's impossible for the BT status and interrupt registers to be
         * all 1's, (assuming a properly functioning, self-initialized BMC)
@@ -665,6 +627,48 @@ static int bt_detect(struct si_sm_data *bt)
        if ((BT_STATUS == 0xFF) && (BT_INTMASK_R == 0xFF))
                return 1;
        reset_flags(bt);
+
+       /*
+        * Try getting the BT capabilities here.
+        */
+       rv = bt_start_transaction(bt, GetBT_CAP, sizeof(GetBT_CAP));
+       if (rv) {
+               dev_warn(bt->io->dev,
+                        "Can't start capabilities transaction: %d\n", rv);
+               goto out_no_bt_cap;
+       }
+
+       smi_result = SI_SM_CALL_WITHOUT_DELAY;
+       for (;;) {
+               if (smi_result == SI_SM_CALL_WITH_DELAY ||
+                   smi_result == SI_SM_CALL_WITH_TICK_DELAY) {
+                       schedule_timeout_uninterruptible(1);
+                       smi_result = bt_event(bt, jiffies_to_usecs(1));
+               } else if (smi_result == SI_SM_CALL_WITHOUT_DELAY) {
+                       smi_result = bt_event(bt, 0);
+               } else
+                       break;
+       }
+
+       rv = bt_get_result(bt, BT_CAP, sizeof(BT_CAP));
+       bt_init_data(bt, bt->io);
+       if (rv < 8) {
+               dev_warn(bt->io->dev, "bt cap response too short: %d\n", rv);
+               goto out_no_bt_cap;
+       }
+
+       if (BT_CAP[2]) {
+               dev_warn(bt->io->dev, "Error fetching bt cap: %x\n", BT_CAP[2]);
+out_no_bt_cap:
+               dev_warn(bt->io->dev, "using default values\n");
+       } else {
+               bt->BT_CAP_req2rsp = BT_CAP[6] * USEC_PER_SEC;
+               bt->BT_CAP_retries = BT_CAP[7];
+       }
+
+       dev_info(bt->io->dev, "req2rsp=%ld secs retries=%d\n",
+                bt->BT_CAP_req2rsp / USEC_PER_SEC, bt->BT_CAP_retries);
+
        return 0;
 }
 
index 51832b8..7fc9612 100644 (file)
@@ -3381,39 +3381,45 @@ int ipmi_register_smi(const struct ipmi_smi_handlers *handlers,
 
        rv = handlers->start_processing(send_info, intf);
        if (rv)
-               goto out;
+               goto out_err;
 
        rv = __bmc_get_device_id(intf, NULL, &id, NULL, NULL, i);
        if (rv) {
                dev_err(si_dev, "Unable to get the device id: %d\n", rv);
-               goto out;
+               goto out_err_started;
        }
 
        mutex_lock(&intf->bmc_reg_mutex);
        rv = __scan_channels(intf, &id);
        mutex_unlock(&intf->bmc_reg_mutex);
+       if (rv)
+               goto out_err_bmc_reg;
 
- out:
-       if (rv) {
-               ipmi_bmc_unregister(intf);
-               list_del_rcu(&intf->link);
-               mutex_unlock(&ipmi_interfaces_mutex);
-               synchronize_srcu(&ipmi_interfaces_srcu);
-               cleanup_srcu_struct(&intf->users_srcu);
-               kref_put(&intf->refcount, intf_free);
-       } else {
-               /*
-                * Keep memory order straight for RCU readers.  Make
-                * sure everything else is committed to memory before
-                * setting intf_num to mark the interface valid.
-                */
-               smp_wmb();
-               intf->intf_num = i;
-               mutex_unlock(&ipmi_interfaces_mutex);
+       /*
+        * Keep memory order straight for RCU readers.  Make
+        * sure everything else is committed to memory before
+        * setting intf_num to mark the interface valid.
+        */
+       smp_wmb();
+       intf->intf_num = i;
+       mutex_unlock(&ipmi_interfaces_mutex);
 
-               /* After this point the interface is legal to use. */
-               call_smi_watchers(i, intf->si_dev);
-       }
+       /* After this point the interface is legal to use. */
+       call_smi_watchers(i, intf->si_dev);
+
+       return 0;
+
+ out_err_bmc_reg:
+       ipmi_bmc_unregister(intf);
+ out_err_started:
+       if (intf->handlers->shutdown)
+               intf->handlers->shutdown(intf->send_info);
+ out_err:
+       list_del_rcu(&intf->link);
+       mutex_unlock(&ipmi_interfaces_mutex);
+       synchronize_srcu(&ipmi_interfaces_srcu);
+       cleanup_srcu_struct(&intf->users_srcu);
+       kref_put(&intf->refcount, intf_free);
 
        return rv;
 }
@@ -3504,7 +3510,8 @@ void ipmi_unregister_smi(struct ipmi_smi *intf)
        }
        srcu_read_unlock(&intf->users_srcu, index);
 
-       intf->handlers->shutdown(intf->send_info);
+       if (intf->handlers->shutdown)
+               intf->handlers->shutdown(intf->send_info);
 
        cleanup_smi_msgs(intf);
 
index 90ec010..5faa917 100644 (file)
@@ -2083,18 +2083,9 @@ static int try_smi_init(struct smi_info *new_smi)
                 si_to_str[new_smi->io.si_type]);
 
        WARN_ON(new_smi->io.dev->init_name != NULL);
-       kfree(init_name);
-
-       return 0;
-
-out_err:
-       if (new_smi->intf) {
-               ipmi_unregister_smi(new_smi->intf);
-               new_smi->intf = NULL;
-       }
 
+ out_err:
        kfree(init_name);
-
        return rv;
 }
 
@@ -2227,6 +2218,8 @@ static void shutdown_smi(void *send_info)
 
        kfree(smi_info->si_sm);
        smi_info->si_sm = NULL;
+
+       smi_info->intf = NULL;
 }
 
 /*
@@ -2240,10 +2233,8 @@ static void cleanup_one_si(struct smi_info *smi_info)
 
        list_del(&smi_info->link);
 
-       if (smi_info->intf) {
+       if (smi_info->intf)
                ipmi_unregister_smi(smi_info->intf);
-               smi_info->intf = NULL;
-       }
 
        if (smi_info->pdev) {
                if (smi_info->pdev_registered)
index 18e4650..29e67a8 100644 (file)
@@ -181,6 +181,8 @@ struct ssif_addr_info {
        struct device *dev;
        struct i2c_client *client;
 
+       struct i2c_client *added_client;
+
        struct mutex clients_mutex;
        struct list_head clients;
 
@@ -1214,18 +1216,11 @@ static void shutdown_ssif(void *send_info)
                complete(&ssif_info->wake_thread);
                kthread_stop(ssif_info->thread);
        }
-
-       /*
-        * No message can be outstanding now, we have removed the
-        * upper layer and it permitted us to do so.
-        */
-       kfree(ssif_info);
 }
 
 static int ssif_remove(struct i2c_client *client)
 {
        struct ssif_info *ssif_info = i2c_get_clientdata(client);
-       struct ipmi_smi *intf;
        struct ssif_addr_info *addr_info;
 
        if (!ssif_info)
@@ -1235,9 +1230,7 @@ static int ssif_remove(struct i2c_client *client)
         * After this point, we won't deliver anything asynchronously
         * to the message handler.  We can unregister ourselves.
         */
-       intf = ssif_info->intf;
-       ssif_info->intf = NULL;
-       ipmi_unregister_smi(intf);
+       ipmi_unregister_smi(ssif_info->intf);
 
        list_for_each_entry(addr_info, &ssif_infos, link) {
                if (addr_info->client == client) {
@@ -1246,6 +1239,8 @@ static int ssif_remove(struct i2c_client *client)
                }
        }
 
+       kfree(ssif_info);
+
        return 0;
 }
 
@@ -1648,15 +1643,9 @@ static int ssif_probe(struct i2c_client *client, const struct i2c_device_id *id)
 
  out:
        if (rv) {
-               /*
-                * Note that if addr_info->client is assigned, we
-                * leave it.  The i2c client hangs around even if we
-                * return a failure here, and the failure here is not
-                * propagated back to the i2c code.  This seems to be
-                * design intent, strange as it may be.  But if we
-                * don't leave it, ssif_platform_remove will not remove
-                * the client like it should.
-                */
+               if (addr_info)
+                       addr_info->client = NULL;
+
                dev_err(&client->dev, "Unable to start IPMI SSIF: %d\n", rv);
                kfree(ssif_info);
        }
@@ -1676,7 +1665,8 @@ static int ssif_adapter_handler(struct device *adev, void *opaque)
        if (adev->type != &i2c_adapter_type)
                return 0;
 
-       i2c_new_device(to_i2c_adapter(adev), &addr_info->binfo);
+       addr_info->added_client = i2c_new_device(to_i2c_adapter(adev),
+                                                &addr_info->binfo);
 
        if (!addr_info->adapter_name)
                return 1; /* Only try the first I2C adapter by default. */
@@ -1849,7 +1839,7 @@ static int ssif_platform_remove(struct platform_device *dev)
                return 0;
 
        mutex_lock(&ssif_infos_mutex);
-       i2c_unregister_device(addr_info->client);
+       i2c_unregister_device(addr_info->added_client);
 
        list_del(&addr_info->link);
        kfree(addr_info);
index bb882ab..e6124bd 100644 (file)
@@ -16,6 +16,8 @@
 
 #include "kcs_bmc.h"
 
+#define DEVICE_NAME "ipmi-kcs"
+
 #define KCS_MSG_BUFSIZ    1000
 
 #define KCS_ZERO_DATA     0
@@ -429,8 +431,6 @@ struct kcs_bmc *kcs_bmc_alloc(struct device *dev, int sizeof_priv, u32 channel)
        if (!kcs_bmc)
                return NULL;
 
-       dev_set_name(dev, "ipmi-kcs%u", channel);
-
        spin_lock_init(&kcs_bmc->lock);
        kcs_bmc->channel = channel;
 
@@ -444,7 +444,8 @@ struct kcs_bmc *kcs_bmc_alloc(struct device *dev, int sizeof_priv, u32 channel)
                return NULL;
 
        kcs_bmc->miscdev.minor = MISC_DYNAMIC_MINOR;
-       kcs_bmc->miscdev.name = dev_name(dev);
+       kcs_bmc->miscdev.name = devm_kasprintf(dev, GFP_KERNEL, "%s%u",
+                                              DEVICE_NAME, channel);
        kcs_bmc->miscdev.fops = &kcs_bmc_fops;
 
        return kcs_bmc;
index bf5f99f..c75b6cd 100644 (file)
@@ -779,6 +779,13 @@ static struct crng_state **crng_node_pool __read_mostly;
 
 static void invalidate_batched_entropy(void);
 
+static bool trust_cpu __ro_after_init = IS_ENABLED(CONFIG_RANDOM_TRUST_CPU);
+static int __init parse_trust_cpu(char *arg)
+{
+       return kstrtobool(arg, &trust_cpu);
+}
+early_param("random.trust_cpu", parse_trust_cpu);
+
 static void crng_initialize(struct crng_state *crng)
 {
        int             i;
@@ -799,12 +806,10 @@ static void crng_initialize(struct crng_state *crng)
                }
                crng->state[i] ^= rv;
        }
-#ifdef CONFIG_RANDOM_TRUST_CPU
-       if (arch_init) {
+       if (trust_cpu && arch_init) {
                crng_init = 2;
                pr_notice("random: crng done (trusting CPU's manufacturer)\n");
        }
-#endif
        crng->init_time = jiffies - CRNG_RESEED_INTERVAL - 1;
 }
 
index 6fd4608..bbe4d72 100644 (file)
@@ -392,7 +392,8 @@ static vm_fault_t dev_dax_huge_fault(struct vm_fault *vmf,
 {
        struct file *filp = vmf->vma->vm_file;
        unsigned long fault_size;
-       int rc, id;
+       vm_fault_t rc = VM_FAULT_SIGBUS;
+       int id;
        pfn_t pfn;
        struct dev_dax *dev_dax = filp->private_data;
 
index b76cb17..adfd316 100644 (file)
@@ -639,7 +639,7 @@ static struct mic_dma_device *mic_dma_dev_reg(struct mbus_device *mbdev,
        int ret;
        struct device *dev = &mbdev->dev;
 
-       mic_dma_dev = kzalloc(sizeof(*mic_dma_dev), GFP_KERNEL);
+       mic_dma_dev = devm_kzalloc(dev, sizeof(*mic_dma_dev), GFP_KERNEL);
        if (!mic_dma_dev) {
                ret = -ENOMEM;
                goto alloc_error;
@@ -664,7 +664,6 @@ static struct mic_dma_device *mic_dma_dev_reg(struct mbus_device *mbdev,
 reg_error:
        mic_dma_uninit(mic_dma_dev);
 init_error:
-       kfree(mic_dma_dev);
        mic_dma_dev = NULL;
 alloc_error:
        dev_err(dev, "Error at %s %d ret=%d\n", __func__, __LINE__, ret);
@@ -674,7 +673,6 @@ alloc_error:
 static void mic_dma_dev_unreg(struct mic_dma_device *mic_dma_dev)
 {
        mic_dma_uninit(mic_dma_dev);
-       kfree(mic_dma_dev);
 }
 
 /* DEBUGFS CODE */
index 721e6c5..6434294 100644 (file)
@@ -166,7 +166,13 @@ scmi_perf_domain_attributes_get(const struct scmi_handle *handle, u32 domain,
                                        le32_to_cpu(attr->sustained_freq_khz);
                dom_info->sustained_perf_level =
                                        le32_to_cpu(attr->sustained_perf_level);
-               dom_info->mult_factor = (dom_info->sustained_freq_khz * 1000) /
+               if (!dom_info->sustained_freq_khz ||
+                   !dom_info->sustained_perf_level)
+                       /* CPUFreq converts to kHz, hence default 1000 */
+                       dom_info->mult_factor = 1000;
+               else
+                       dom_info->mult_factor =
+                                       (dom_info->sustained_freq_khz * 1000) /
                                        dom_info->sustained_perf_level;
                memcpy(dom_info->name, attr->name, SCMI_MAX_STR_SIZE);
        }
index fc9fd2d..0b84053 100644 (file)
@@ -420,7 +420,7 @@ static int pr_mgmt_init(struct platform_device *pdev,
                /* Create region for each port */
                fme_region = dfl_fme_create_region(pdata, mgr,
                                                   fme_br->br, i);
-               if (!fme_region) {
+               if (IS_ERR(fme_region)) {
                        ret = PTR_ERR(fme_region);
                        goto destroy_region;
                }
index 3530ccd..da9781a 100644 (file)
@@ -41,6 +41,8 @@ struct adp5588_gpio {
        uint8_t int_en[3];
        uint8_t irq_mask[3];
        uint8_t irq_stat[3];
+       uint8_t int_input_en[3];
+       uint8_t int_lvl_cached[3];
 };
 
 static int adp5588_gpio_read(struct i2c_client *client, u8 reg)
@@ -173,12 +175,28 @@ static void adp5588_irq_bus_sync_unlock(struct irq_data *d)
        struct adp5588_gpio *dev = irq_data_get_irq_chip_data(d);
        int i;
 
-       for (i = 0; i <= ADP5588_BANK(ADP5588_MAXGPIO); i++)
+       for (i = 0; i <= ADP5588_BANK(ADP5588_MAXGPIO); i++) {
+               if (dev->int_input_en[i]) {
+                       mutex_lock(&dev->lock);
+                       dev->dir[i] &= ~dev->int_input_en[i];
+                       dev->int_input_en[i] = 0;
+                       adp5588_gpio_write(dev->client, GPIO_DIR1 + i,
+                                          dev->dir[i]);
+                       mutex_unlock(&dev->lock);
+               }
+
+               if (dev->int_lvl_cached[i] != dev->int_lvl[i]) {
+                       dev->int_lvl_cached[i] = dev->int_lvl[i];
+                       adp5588_gpio_write(dev->client, GPIO_INT_LVL1 + i,
+                                          dev->int_lvl[i]);
+               }
+
                if (dev->int_en[i] ^ dev->irq_mask[i]) {
                        dev->int_en[i] = dev->irq_mask[i];
                        adp5588_gpio_write(dev->client, GPIO_INT_EN1 + i,
                                           dev->int_en[i]);
                }
+       }
 
        mutex_unlock(&dev->irq_lock);
 }
@@ -221,9 +239,7 @@ static int adp5588_irq_set_type(struct irq_data *d, unsigned int type)
        else
                return -EINVAL;
 
-       adp5588_gpio_direction_input(&dev->gpio_chip, gpio);
-       adp5588_gpio_write(dev->client, GPIO_INT_LVL1 + bank,
-                          dev->int_lvl[bank]);
+       dev->int_input_en[bank] |= bit;
 
        return 0;
 }
index 28da700..044888f 100644 (file)
@@ -728,6 +728,7 @@ static int dwapb_gpio_probe(struct platform_device *pdev)
 out_unregister:
        dwapb_gpio_unregister(gpio);
        dwapb_irq_teardown(gpio);
+       clk_disable_unprepare(gpio->clk);
 
        return err;
 }
index c48ed9d..8b9d7e4 100644 (file)
@@ -25,7 +25,6 @@
 
 struct acpi_gpio_event {
        struct list_head node;
-       struct list_head initial_sync_list;
        acpi_handle handle;
        unsigned int pin;
        unsigned int irq;
@@ -49,10 +48,19 @@ struct acpi_gpio_chip {
        struct mutex conn_lock;
        struct gpio_chip *chip;
        struct list_head events;
+       struct list_head deferred_req_irqs_list_entry;
 };
 
-static LIST_HEAD(acpi_gpio_initial_sync_list);
-static DEFINE_MUTEX(acpi_gpio_initial_sync_list_lock);
+/*
+ * For gpiochips which call acpi_gpiochip_request_interrupts() before late_init
+ * (so builtin drivers) we register the ACPI GpioInt event handlers from a
+ * late_initcall_sync handler, so that other builtin drivers can register their
+ * OpRegions before the event handlers can run.  This list contains gpiochips
+ * for which the acpi_gpiochip_request_interrupts() has been deferred.
+ */
+static DEFINE_MUTEX(acpi_gpio_deferred_req_irqs_lock);
+static LIST_HEAD(acpi_gpio_deferred_req_irqs_list);
+static bool acpi_gpio_deferred_req_irqs_done;
 
 static int acpi_gpiochip_find(struct gpio_chip *gc, void *data)
 {
@@ -89,21 +97,6 @@ static struct gpio_desc *acpi_get_gpiod(char *path, int pin)
        return gpiochip_get_desc(chip, pin);
 }
 
-static void acpi_gpio_add_to_initial_sync_list(struct acpi_gpio_event *event)
-{
-       mutex_lock(&acpi_gpio_initial_sync_list_lock);
-       list_add(&event->initial_sync_list, &acpi_gpio_initial_sync_list);
-       mutex_unlock(&acpi_gpio_initial_sync_list_lock);
-}
-
-static void acpi_gpio_del_from_initial_sync_list(struct acpi_gpio_event *event)
-{
-       mutex_lock(&acpi_gpio_initial_sync_list_lock);
-       if (!list_empty(&event->initial_sync_list))
-               list_del_init(&event->initial_sync_list);
-       mutex_unlock(&acpi_gpio_initial_sync_list_lock);
-}
-
 static irqreturn_t acpi_gpio_irq_handler(int irq, void *data)
 {
        struct acpi_gpio_event *event = data;
@@ -186,7 +179,7 @@ static acpi_status acpi_gpiochip_request_interrupt(struct acpi_resource *ares,
 
        gpiod_direction_input(desc);
 
-       value = gpiod_get_value(desc);
+       value = gpiod_get_value_cansleep(desc);
 
        ret = gpiochip_lock_as_irq(chip, pin);
        if (ret) {
@@ -229,7 +222,6 @@ static acpi_status acpi_gpiochip_request_interrupt(struct acpi_resource *ares,
        event->irq = irq;
        event->pin = pin;
        event->desc = desc;
-       INIT_LIST_HEAD(&event->initial_sync_list);
 
        ret = request_threaded_irq(event->irq, NULL, handler, irqflags,
                                   "ACPI:Event", event);
@@ -251,10 +243,9 @@ static acpi_status acpi_gpiochip_request_interrupt(struct acpi_resource *ares,
         * may refer to OperationRegions from other (builtin) drivers which
         * may be probed after us.
         */
-       if (handler == acpi_gpio_irq_handler &&
-           (((irqflags & IRQF_TRIGGER_RISING) && value == 1) ||
-            ((irqflags & IRQF_TRIGGER_FALLING) && value == 0)))
-               acpi_gpio_add_to_initial_sync_list(event);
+       if (((irqflags & IRQF_TRIGGER_RISING) && value == 1) ||
+           ((irqflags & IRQF_TRIGGER_FALLING) && value == 0))
+               handler(event->irq, event);
 
        return AE_OK;
 
@@ -283,6 +274,7 @@ void acpi_gpiochip_request_interrupts(struct gpio_chip *chip)
        struct acpi_gpio_chip *acpi_gpio;
        acpi_handle handle;
        acpi_status status;
+       bool defer;
 
        if (!chip->parent || !chip->to_irq)
                return;
@@ -295,6 +287,16 @@ void acpi_gpiochip_request_interrupts(struct gpio_chip *chip)
        if (ACPI_FAILURE(status))
                return;
 
+       mutex_lock(&acpi_gpio_deferred_req_irqs_lock);
+       defer = !acpi_gpio_deferred_req_irqs_done;
+       if (defer)
+               list_add(&acpi_gpio->deferred_req_irqs_list_entry,
+                        &acpi_gpio_deferred_req_irqs_list);
+       mutex_unlock(&acpi_gpio_deferred_req_irqs_lock);
+
+       if (defer)
+               return;
+
        acpi_walk_resources(handle, "_AEI",
                            acpi_gpiochip_request_interrupt, acpi_gpio);
 }
@@ -325,11 +327,14 @@ void acpi_gpiochip_free_interrupts(struct gpio_chip *chip)
        if (ACPI_FAILURE(status))
                return;
 
+       mutex_lock(&acpi_gpio_deferred_req_irqs_lock);
+       if (!list_empty(&acpi_gpio->deferred_req_irqs_list_entry))
+               list_del_init(&acpi_gpio->deferred_req_irqs_list_entry);
+       mutex_unlock(&acpi_gpio_deferred_req_irqs_lock);
+
        list_for_each_entry_safe_reverse(event, ep, &acpi_gpio->events, node) {
                struct gpio_desc *desc;
 
-               acpi_gpio_del_from_initial_sync_list(event);
-
                if (irqd_is_wakeup_set(irq_get_irq_data(event->irq)))
                        disable_irq_wake(event->irq);
 
@@ -1052,6 +1057,7 @@ void acpi_gpiochip_add(struct gpio_chip *chip)
 
        acpi_gpio->chip = chip;
        INIT_LIST_HEAD(&acpi_gpio->events);
+       INIT_LIST_HEAD(&acpi_gpio->deferred_req_irqs_list_entry);
 
        status = acpi_attach_data(handle, acpi_gpio_chip_dh, acpi_gpio);
        if (ACPI_FAILURE(status)) {
@@ -1198,20 +1204,28 @@ bool acpi_can_fallback_to_crs(struct acpi_device *adev, const char *con_id)
        return con_id == NULL;
 }
 
-/* Sync the initial state of handlers after all builtin drivers have probed */
-static int acpi_gpio_initial_sync(void)
+/* Run deferred acpi_gpiochip_request_interrupts() */
+static int acpi_gpio_handle_deferred_request_interrupts(void)
 {
-       struct acpi_gpio_event *event, *ep;
+       struct acpi_gpio_chip *acpi_gpio, *tmp;
+
+       mutex_lock(&acpi_gpio_deferred_req_irqs_lock);
+       list_for_each_entry_safe(acpi_gpio, tmp,
+                                &acpi_gpio_deferred_req_irqs_list,
+                                deferred_req_irqs_list_entry) {
+               acpi_handle handle;
 
-       mutex_lock(&acpi_gpio_initial_sync_list_lock);
-       list_for_each_entry_safe(event, ep, &acpi_gpio_initial_sync_list,
-                                initial_sync_list) {
-               acpi_evaluate_object(event->handle, NULL, NULL, NULL);
-               list_del_init(&event->initial_sync_list);
+               handle = ACPI_HANDLE(acpi_gpio->chip->parent);
+               acpi_walk_resources(handle, "_AEI",
+                                   acpi_gpiochip_request_interrupt, acpi_gpio);
+
+               list_del_init(&acpi_gpio->deferred_req_irqs_list_entry);
        }
-       mutex_unlock(&acpi_gpio_initial_sync_list_lock);
+
+       acpi_gpio_deferred_req_irqs_done = true;
+       mutex_unlock(&acpi_gpio_deferred_req_irqs_lock);
 
        return 0;
 }
 /* We must use _sync so that this runs after the first deferred_probe run */
-late_initcall_sync(acpi_gpio_initial_sync);
+late_initcall_sync(acpi_gpio_handle_deferred_request_interrupts);
index a4f1157..d4e7a09 100644 (file)
@@ -31,6 +31,7 @@ static int of_gpiochip_match_node_and_xlate(struct gpio_chip *chip, void *data)
        struct of_phandle_args *gpiospec = data;
 
        return chip->gpiodev->dev.of_node == gpiospec->np &&
+                               chip->of_xlate &&
                                chip->of_xlate(chip, gpiospec, NULL) >= 0;
 }
 
index b6e9df1..b31d121 100644 (file)
@@ -39,6 +39,7 @@ static int amdgpu_cs_user_fence_chunk(struct amdgpu_cs_parser *p,
 {
        struct drm_gem_object *gobj;
        unsigned long size;
+       int r;
 
        gobj = drm_gem_object_lookup(p->filp, data->handle);
        if (gobj == NULL)
@@ -50,20 +51,26 @@ static int amdgpu_cs_user_fence_chunk(struct amdgpu_cs_parser *p,
        p->uf_entry.tv.shared = true;
        p->uf_entry.user_pages = NULL;
 
-       size = amdgpu_bo_size(p->uf_entry.robj);
-       if (size != PAGE_SIZE || (data->offset + 8) > size)
-               return -EINVAL;
-
-       *offset = data->offset;
-
        drm_gem_object_put_unlocked(gobj);
 
+       size = amdgpu_bo_size(p->uf_entry.robj);
+       if (size != PAGE_SIZE || (data->offset + 8) > size) {
+               r = -EINVAL;
+               goto error_unref;
+       }
+
        if (amdgpu_ttm_tt_get_usermm(p->uf_entry.robj->tbo.ttm)) {
-               amdgpu_bo_unref(&p->uf_entry.robj);
-               return -EINVAL;
+               r = -EINVAL;
+               goto error_unref;
        }
 
+       *offset = data->offset;
+
        return 0;
+
+error_unref:
+       amdgpu_bo_unref(&p->uf_entry.robj);
+       return r;
 }
 
 static int amdgpu_cs_bo_handles_chunk(struct amdgpu_cs_parser *p,
@@ -1262,10 +1269,10 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
 error_abort:
        dma_fence_put(&job->base.s_fence->finished);
        job->base.s_fence = NULL;
+       amdgpu_mn_unlock(p->mn);
 
 error_unlock:
        amdgpu_job_free(job);
-       amdgpu_mn_unlock(p->mn);
        return r;
 }
 
index 8ab5ccb..39bf2ce 100644 (file)
@@ -2063,6 +2063,7 @@ static int amdgpu_device_ip_reinit_early_sriov(struct amdgpu_device *adev)
        static enum amd_ip_block_type ip_order[] = {
                AMD_IP_BLOCK_TYPE_GMC,
                AMD_IP_BLOCK_TYPE_COMMON,
+               AMD_IP_BLOCK_TYPE_PSP,
                AMD_IP_BLOCK_TYPE_IH,
        };
 
@@ -2093,7 +2094,6 @@ static int amdgpu_device_ip_reinit_late_sriov(struct amdgpu_device *adev)
 
        static enum amd_ip_block_type ip_order[] = {
                AMD_IP_BLOCK_TYPE_SMC,
-               AMD_IP_BLOCK_TYPE_PSP,
                AMD_IP_BLOCK_TYPE_DCE,
                AMD_IP_BLOCK_TYPE_GFX,
                AMD_IP_BLOCK_TYPE_SDMA,
index e7ca462..7c3b634 100644 (file)
@@ -70,6 +70,7 @@ static const struct soc15_reg_golden golden_settings_sdma_4[] = {
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC1_IB_CNTL, 0x800f0100, 0x00000100),
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC1_RB_WPTR_POLL_CNTL, 0x0000fff0, 0x00403000),
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_PAGE, 0x000003ff, 0x000003c0),
+       SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_WATERMK, 0xfc000000, 0x00000000),
        SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_CHICKEN_BITS, 0xfe931f07, 0x02831f07),
        SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_CLK_CTRL, 0xffffffff, 0x3f000100),
        SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GFX_IB_CNTL, 0x800f0100, 0x00000100),
@@ -81,7 +82,8 @@ static const struct soc15_reg_golden golden_settings_sdma_4[] = {
        SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC0_RB_WPTR_POLL_CNTL, 0x0000fff0, 0x00403000),
        SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC1_IB_CNTL, 0x800f0100, 0x00000100),
        SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC1_RB_WPTR_POLL_CNTL, 0x0000fff0, 0x00403000),
-       SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_UTCL1_PAGE, 0x000003ff, 0x000003c0)
+       SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_UTCL1_PAGE, 0x000003ff, 0x000003c0),
+       SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_UTCL1_WATERMK, 0xfc000000, 0x00000000)
 };
 
 static const struct soc15_reg_golden golden_settings_sdma_vg10[] = {
@@ -109,7 +111,8 @@ static const struct soc15_reg_golden golden_settings_sdma_4_1[] =
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC0_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC1_IB_CNTL, 0x800f0111, 0x00000100),
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC1_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
-       SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_PAGE, 0x000003ff, 0x000003c0)
+       SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_PAGE, 0x000003ff, 0x000003c0),
+       SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_WATERMK, 0xfc000000, 0x00000000)
 };
 
 static const struct soc15_reg_golden golden_settings_sdma_4_2[] =
index 6e3f566..51ed99a 100644 (file)
@@ -170,20 +170,22 @@ static struct drm_i915_gem_object *vgpu_create_gem(struct drm_device *dev,
                unsigned int tiling_mode = 0;
                unsigned int stride = 0;
 
-               switch (info->drm_format_mod << 10) {
-               case PLANE_CTL_TILED_LINEAR:
+               switch (info->drm_format_mod) {
+               case DRM_FORMAT_MOD_LINEAR:
                        tiling_mode = I915_TILING_NONE;
                        break;
-               case PLANE_CTL_TILED_X:
+               case I915_FORMAT_MOD_X_TILED:
                        tiling_mode = I915_TILING_X;
                        stride = info->stride;
                        break;
-               case PLANE_CTL_TILED_Y:
+               case I915_FORMAT_MOD_Y_TILED:
+               case I915_FORMAT_MOD_Yf_TILED:
                        tiling_mode = I915_TILING_Y;
                        stride = info->stride;
                        break;
                default:
-                       gvt_dbg_core("not supported tiling mode\n");
+                       gvt_dbg_core("invalid drm_format_mod %llx for tiling\n",
+                                    info->drm_format_mod);
                }
                obj->tiling_and_stride = tiling_mode | stride;
        } else {
@@ -222,9 +224,26 @@ static int vgpu_get_plane_info(struct drm_device *dev,
                info->height = p.height;
                info->stride = p.stride;
                info->drm_format = p.drm_format;
-               info->drm_format_mod = p.tiled;
+
+               switch (p.tiled) {
+               case PLANE_CTL_TILED_LINEAR:
+                       info->drm_format_mod = DRM_FORMAT_MOD_LINEAR;
+                       break;
+               case PLANE_CTL_TILED_X:
+                       info->drm_format_mod = I915_FORMAT_MOD_X_TILED;
+                       break;
+               case PLANE_CTL_TILED_Y:
+                       info->drm_format_mod = I915_FORMAT_MOD_Y_TILED;
+                       break;
+               case PLANE_CTL_TILED_YF:
+                       info->drm_format_mod = I915_FORMAT_MOD_Yf_TILED;
+                       break;
+               default:
+                       gvt_vgpu_err("invalid tiling mode: %x\n", p.tiled);
+               }
+
                info->size = (((p.stride * p.height * p.bpp) / 8) +
-                               (PAGE_SIZE - 1)) >> PAGE_SHIFT;
+                             (PAGE_SIZE - 1)) >> PAGE_SHIFT;
        } else if (plane_id == DRM_PLANE_TYPE_CURSOR) {
                ret = intel_vgpu_decode_cursor_plane(vgpu, &c);
                if (ret)
index face664..481896f 100644 (file)
@@ -220,8 +220,7 @@ int intel_vgpu_decode_primary_plane(struct intel_vgpu *vgpu,
        if (IS_SKYLAKE(dev_priv)
                || IS_KABYLAKE(dev_priv)
                || IS_BROXTON(dev_priv)) {
-               plane->tiled = (val & PLANE_CTL_TILED_MASK) >>
-               _PLANE_CTL_TILED_SHIFT;
+               plane->tiled = val & PLANE_CTL_TILED_MASK;
                fmt = skl_format_to_drm(
                        val & PLANE_CTL_FORMAT_MASK,
                        val & PLANE_CTL_ORDER_RGBX,
@@ -260,7 +259,7 @@ int intel_vgpu_decode_primary_plane(struct intel_vgpu *vgpu,
                return  -EINVAL;
        }
 
-       plane->stride = intel_vgpu_get_stride(vgpu, pipe, (plane->tiled << 10),
+       plane->stride = intel_vgpu_get_stride(vgpu, pipe, plane->tiled,
                (IS_SKYLAKE(dev_priv)
                || IS_KABYLAKE(dev_priv)
                || IS_BROXTON(dev_priv)) ?
index cb055f3..60c1550 100644 (file)
@@ -101,7 +101,7 @@ struct intel_gvt;
 /* color space conversion and gamma correction are not included */
 struct intel_vgpu_primary_plane_format {
        u8      enabled;        /* plane is enabled */
-       u8      tiled;          /* X-tiled */
+       u32     tiled;          /* tiling mode: linear, X-tiled, Y tiled, etc */
        u8      bpp;            /* bits per pixel */
        u32     hw_format;      /* format field in the PRI_CTL register */
        u32     drm_format;     /* format in DRM definition */
index 7a58ca5..72afa51 100644 (file)
@@ -1296,6 +1296,19 @@ static int power_well_ctl_mmio_write(struct intel_vgpu *vgpu,
        return 0;
 }
 
+static int gen9_dbuf_ctl_mmio_write(struct intel_vgpu *vgpu,
+               unsigned int offset, void *p_data, unsigned int bytes)
+{
+       write_vreg(vgpu, offset, p_data, bytes);
+
+       if (vgpu_vreg(vgpu, offset) & DBUF_POWER_REQUEST)
+               vgpu_vreg(vgpu, offset) |= DBUF_POWER_STATE;
+       else
+               vgpu_vreg(vgpu, offset) &= ~DBUF_POWER_STATE;
+
+       return 0;
+}
+
 static int fpga_dbg_mmio_write(struct intel_vgpu *vgpu,
        unsigned int offset, void *p_data, unsigned int bytes)
 {
@@ -1525,9 +1538,15 @@ static int bxt_phy_ctl_family_write(struct intel_vgpu *vgpu,
        u32 v = *(u32 *)p_data;
        u32 data = v & COMMON_RESET_DIS ? BXT_PHY_LANE_ENABLED : 0;
 
-       vgpu_vreg(vgpu, _BXT_PHY_CTL_DDI_A) = data;
-       vgpu_vreg(vgpu, _BXT_PHY_CTL_DDI_B) = data;
-       vgpu_vreg(vgpu, _BXT_PHY_CTL_DDI_C) = data;
+       switch (offset) {
+       case _PHY_CTL_FAMILY_EDP:
+               vgpu_vreg(vgpu, _BXT_PHY_CTL_DDI_A) = data;
+               break;
+       case _PHY_CTL_FAMILY_DDI:
+               vgpu_vreg(vgpu, _BXT_PHY_CTL_DDI_B) = data;
+               vgpu_vreg(vgpu, _BXT_PHY_CTL_DDI_C) = data;
+               break;
+       }
 
        vgpu_vreg(vgpu, offset) = v;
 
@@ -2812,6 +2831,8 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
        MMIO_DH(HSW_PWR_WELL_CTL_DRIVER(SKL_DISP_PW_MISC_IO), D_SKL_PLUS, NULL,
                skl_power_well_ctl_write);
 
+       MMIO_DH(DBUF_CTL, D_SKL_PLUS, NULL, gen9_dbuf_ctl_mmio_write);
+
        MMIO_D(_MMIO(0xa210), D_SKL_PLUS);
        MMIO_D(GEN9_MEDIA_PG_IDLE_HYSTERESIS, D_SKL_PLUS);
        MMIO_D(GEN9_RENDER_PG_IDLE_HYSTERESIS, D_SKL_PLUS);
@@ -2987,8 +3008,6 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
                NULL, gen9_trtte_write);
        MMIO_DH(_MMIO(0x4dfc), D_SKL_PLUS, NULL, gen9_trtt_chicken_write);
 
-       MMIO_D(_MMIO(0x45008), D_SKL_PLUS);
-
        MMIO_D(_MMIO(0x46430), D_SKL_PLUS);
 
        MMIO_D(_MMIO(0x46520), D_SKL_PLUS);
@@ -3025,7 +3044,9 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
        MMIO_D(_MMIO(0x44500), D_SKL_PLUS);
        MMIO_DFH(GEN9_CSFE_CHICKEN1_RCS, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
        MMIO_DFH(GEN8_HDC_CHICKEN1, D_SKL_PLUS, F_MODE_MASK | F_CMD_ACCESS,
-               NULL, NULL);
+                NULL, NULL);
+       MMIO_DFH(GEN9_WM_CHICKEN3, D_SKL_PLUS, F_MODE_MASK | F_CMD_ACCESS,
+                NULL, NULL);
 
        MMIO_D(_MMIO(0x4ab8), D_KBL);
        MMIO_D(_MMIO(0x2248), D_KBL | D_SKL);
index a45f46d..c7afee3 100644 (file)
@@ -32,6 +32,7 @@
 #include <linux/device.h>
 #include <linux/mm.h>
 #include <linux/mmu_context.h>
+#include <linux/sched/mm.h>
 #include <linux/types.h>
 #include <linux/list.h>
 #include <linux/rbtree.h>
@@ -1792,16 +1793,21 @@ static int kvmgt_rw_gpa(unsigned long handle, unsigned long gpa,
        info = (struct kvmgt_guest_info *)handle;
        kvm = info->kvm;
 
-       if (kthread)
+       if (kthread) {
+               if (!mmget_not_zero(kvm->mm))
+                       return -EFAULT;
                use_mm(kvm->mm);
+       }
 
        idx = srcu_read_lock(&kvm->srcu);
        ret = write ? kvm_write_guest(kvm, gpa, buf, len) :
                      kvm_read_guest(kvm, gpa, buf, len);
        srcu_read_unlock(&kvm->srcu, idx);
 
-       if (kthread)
+       if (kthread) {
                unuse_mm(kvm->mm);
+               mmput(kvm->mm);
+       }
 
        return ret;
 }
index 42e1e6b..e872f48 100644 (file)
@@ -562,11 +562,9 @@ void intel_gvt_switch_mmio(struct intel_vgpu *pre,
         * performace for batch mmio read/write, so we need
         * handle forcewake mannually.
         */
-       intel_runtime_pm_get(dev_priv);
        intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
        switch_mmio(pre, next, ring_id);
        intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
-       intel_runtime_pm_put(dev_priv);
 }
 
 /**
index fa75a2e..b0d3a43 100644 (file)
@@ -42,8 +42,6 @@
 #define DEVICE_TYPE_EFP3   0x20
 #define DEVICE_TYPE_EFP4   0x10
 
-#define DEV_SIZE       38
-
 struct opregion_header {
        u8 signature[16];
        u32 size;
@@ -63,6 +61,10 @@ struct bdb_data_header {
        u16 size; /* data size */
 } __packed;
 
+/* For supporting windows guest with opregion, here hardcode the emulated
+ * bdb header version as '186', and the corresponding child_device_config
+ * length should be '33' but not '38'.
+ */
 struct efp_child_device_config {
        u16 handle;
        u16 device_type;
@@ -109,12 +111,6 @@ struct efp_child_device_config {
        u8 mipi_bridge_type; /* 171 */
        u16 device_class_ext;
        u8 dvo_function;
-       u8 dp_usb_type_c:1; /* 195 */
-       u8 skip6:7;
-       u8 dp_usb_type_c_2x_gpio_index; /* 195 */
-       u16 dp_usb_type_c_2x_gpio_pin; /* 195 */
-       u8 iboost_dp:4; /* 196 */
-       u8 iboost_hdmi:4; /* 196 */
 } __packed;
 
 struct vbt {
@@ -155,7 +151,7 @@ static void virt_vbt_generation(struct vbt *v)
        v->header.bdb_offset = offsetof(struct vbt, bdb_header);
 
        strcpy(&v->bdb_header.signature[0], "BIOS_DATA_BLOCK");
-       v->bdb_header.version = 186; /* child_dev_size = 38 */
+       v->bdb_header.version = 186; /* child_dev_size = 33 */
        v->bdb_header.header_size = sizeof(v->bdb_header);
 
        v->bdb_header.bdb_size = sizeof(struct vbt) - sizeof(struct vbt_header)
@@ -169,11 +165,13 @@ static void virt_vbt_generation(struct vbt *v)
 
        /* child device */
        num_child = 4; /* each port has one child */
+       v->general_definitions.child_dev_size =
+               sizeof(struct efp_child_device_config);
        v->general_definitions_header.id = BDB_GENERAL_DEFINITIONS;
        /* size will include child devices */
        v->general_definitions_header.size =
-               sizeof(struct bdb_general_definitions) + num_child * DEV_SIZE;
-       v->general_definitions.child_dev_size = DEV_SIZE;
+               sizeof(struct bdb_general_definitions) +
+                       num_child * v->general_definitions.child_dev_size;
 
        /* portA */
        v->child0.handle = DEVICE_TYPE_EFP1;
index 09d7bb7..c32e7d5 100644 (file)
@@ -47,11 +47,15 @@ static bool vgpu_has_pending_workload(struct intel_vgpu *vgpu)
        return false;
 }
 
+/* We give 2 seconds higher prio for vGPU during start */
+#define GVT_SCHED_VGPU_PRI_TIME  2
+
 struct vgpu_sched_data {
        struct list_head lru_list;
        struct intel_vgpu *vgpu;
        bool active;
-
+       bool pri_sched;
+       ktime_t pri_time;
        ktime_t sched_in_time;
        ktime_t sched_time;
        ktime_t left_ts;
@@ -183,6 +187,14 @@ static struct intel_vgpu *find_busy_vgpu(struct gvt_sched_data *sched_data)
                if (!vgpu_has_pending_workload(vgpu_data->vgpu))
                        continue;
 
+               if (vgpu_data->pri_sched) {
+                       if (ktime_before(ktime_get(), vgpu_data->pri_time)) {
+                               vgpu = vgpu_data->vgpu;
+                               break;
+                       } else
+                               vgpu_data->pri_sched = false;
+               }
+
                /* Return the vGPU only if it has time slice left */
                if (vgpu_data->left_ts > 0) {
                        vgpu = vgpu_data->vgpu;
@@ -202,6 +214,7 @@ static void tbs_sched_func(struct gvt_sched_data *sched_data)
        struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
        struct vgpu_sched_data *vgpu_data;
        struct intel_vgpu *vgpu = NULL;
+
        /* no active vgpu or has already had a target */
        if (list_empty(&sched_data->lru_runq_head) || scheduler->next_vgpu)
                goto out;
@@ -209,12 +222,13 @@ static void tbs_sched_func(struct gvt_sched_data *sched_data)
        vgpu = find_busy_vgpu(sched_data);
        if (vgpu) {
                scheduler->next_vgpu = vgpu;
-
-               /* Move the last used vGPU to the tail of lru_list */
                vgpu_data = vgpu->sched_data;
-               list_del_init(&vgpu_data->lru_list);
-               list_add_tail(&vgpu_data->lru_list,
-                               &sched_data->lru_runq_head);
+               if (!vgpu_data->pri_sched) {
+                       /* Move the last used vGPU to the tail of lru_list */
+                       list_del_init(&vgpu_data->lru_list);
+                       list_add_tail(&vgpu_data->lru_list,
+                                     &sched_data->lru_runq_head);
+               }
        } else {
                scheduler->next_vgpu = gvt->idle_vgpu;
        }
@@ -328,11 +342,17 @@ static void tbs_sched_start_schedule(struct intel_vgpu *vgpu)
 {
        struct gvt_sched_data *sched_data = vgpu->gvt->scheduler.sched_data;
        struct vgpu_sched_data *vgpu_data = vgpu->sched_data;
+       ktime_t now;
 
        if (!list_empty(&vgpu_data->lru_list))
                return;
 
-       list_add_tail(&vgpu_data->lru_list, &sched_data->lru_runq_head);
+       now = ktime_get();
+       vgpu_data->pri_time = ktime_add(now,
+                                       ktime_set(GVT_SCHED_VGPU_PRI_TIME, 0));
+       vgpu_data->pri_sched = true;
+
+       list_add(&vgpu_data->lru_list, &sched_data->lru_runq_head);
 
        if (!hrtimer_active(&sched_data->timer))
                hrtimer_start(&sched_data->timer, ktime_add_ns(ktime_get(),
@@ -426,6 +446,7 @@ void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu)
                &vgpu->gvt->scheduler;
        int ring_id;
        struct vgpu_sched_data *vgpu_data = vgpu->sched_data;
+       struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
 
        if (!vgpu_data->active)
                return;
@@ -444,6 +465,7 @@ void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu)
                scheduler->current_vgpu = NULL;
        }
 
+       intel_runtime_pm_get(dev_priv);
        spin_lock_bh(&scheduler->mmio_context_lock);
        for (ring_id = 0; ring_id < I915_NUM_ENGINES; ring_id++) {
                if (scheduler->engine_owner[ring_id] == vgpu) {
@@ -452,5 +474,6 @@ void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu)
                }
        }
        spin_unlock_bh(&scheduler->mmio_context_lock);
+       intel_runtime_pm_put(dev_priv);
        mutex_unlock(&vgpu->gvt->sched_lock);
 }
index 08ec744..9e63cd4 100644 (file)
@@ -10422,7 +10422,7 @@ enum skl_power_gate {
                                                           _ICL_DSC0_PICTURE_PARAMETER_SET_4_PB, \
                                                           _ICL_DSC0_PICTURE_PARAMETER_SET_4_PC)
 #define ICL_DSC1_PICTURE_PARAMETER_SET_4(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
-                                                          _ICL_DSC0_PICTURE_PARAMETER_SET_4_PB, \
+                                                          _ICL_DSC1_PICTURE_PARAMETER_SET_4_PB, \
                                                           _ICL_DSC1_PICTURE_PARAMETER_SET_4_PC)
 #define  DSC_INITIAL_DEC_DELAY(dec_delay)       ((dec_delay) << 16)
 #define  DSC_INITIAL_XMIT_DELAY(xmit_delay)     ((xmit_delay) << 0)
@@ -10437,7 +10437,7 @@ enum skl_power_gate {
                                                           _ICL_DSC0_PICTURE_PARAMETER_SET_5_PB, \
                                                           _ICL_DSC0_PICTURE_PARAMETER_SET_5_PC)
 #define ICL_DSC1_PICTURE_PARAMETER_SET_5(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
-                                                          _ICL_DSC1_PICTURE_PARAMETER_SET_5_PC, \
+                                                          _ICL_DSC1_PICTURE_PARAMETER_SET_5_PB, \
                                                           _ICL_DSC1_PICTURE_PARAMETER_SET_5_PC)
 #define  DSC_SCALE_DEC_INTINT(scale_dec)       ((scale_dec) << 16)
 #define  DSC_SCALE_INC_INT(scale_inc)          ((scale_inc) << 0)
index 8761513..c9af348 100644 (file)
@@ -2708,7 +2708,8 @@ static void intel_ddi_pre_enable_dp(struct intel_encoder *encoder,
        if (port != PORT_A || INTEL_GEN(dev_priv) >= 9)
                intel_dp_stop_link_train(intel_dp);
 
-       intel_ddi_enable_pipe_clock(crtc_state);
+       if (!is_mst)
+               intel_ddi_enable_pipe_clock(crtc_state);
 }
 
 static void intel_ddi_pre_enable_hdmi(struct intel_encoder *encoder,
@@ -2810,14 +2811,14 @@ static void intel_ddi_post_disable_dp(struct intel_encoder *encoder,
        bool is_mst = intel_crtc_has_type(old_crtc_state,
                                          INTEL_OUTPUT_DP_MST);
 
-       intel_ddi_disable_pipe_clock(old_crtc_state);
-
-       /*
-        * Power down sink before disabling the port, otherwise we end
-        * up getting interrupts from the sink on detecting link loss.
-        */
-       if (!is_mst)
+       if (!is_mst) {
+               intel_ddi_disable_pipe_clock(old_crtc_state);
+               /*
+                * Power down sink before disabling the port, otherwise we end
+                * up getting interrupts from the sink on detecting link loss.
+                */
                intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
+       }
 
        intel_disable_ddi_buf(encoder);
 
index 4a3c8ee..d295109 100644 (file)
@@ -5079,10 +5079,14 @@ void hsw_disable_ips(const struct intel_crtc_state *crtc_state)
                mutex_lock(&dev_priv->pcu_lock);
                WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0));
                mutex_unlock(&dev_priv->pcu_lock);
-               /* wait for pcode to finish disabling IPS, which may take up to 42ms */
+               /*
+                * Wait for PCODE to finish disabling IPS. The BSpec specified
+                * 42ms timeout value leads to occasional timeouts so use 100ms
+                * instead.
+                */
                if (intel_wait_for_register(dev_priv,
                                            IPS_CTL, IPS_ENABLE, 0,
-                                           42))
+                                           100))
                        DRM_ERROR("Timed out waiting for IPS disable\n");
        } else {
                I915_WRITE(IPS_CTL, 0);
index cd0f649..1193202 100644 (file)
@@ -4160,18 +4160,6 @@ intel_dp_needs_link_retrain(struct intel_dp *intel_dp)
        return !drm_dp_channel_eq_ok(link_status, intel_dp->lane_count);
 }
 
-/*
- * If display is now connected check links status,
- * there has been known issues of link loss triggering
- * long pulse.
- *
- * Some sinks (eg. ASUS PB287Q) seem to perform some
- * weird HPD ping pong during modesets. So we can apparently
- * end up with HPD going low during a modeset, and then
- * going back up soon after. And once that happens we must
- * retrain the link to get a picture. That's in case no
- * userspace component reacted to intermittent HPD dip.
- */
 int intel_dp_retrain_link(struct intel_encoder *encoder,
                          struct drm_modeset_acquire_ctx *ctx)
 {
@@ -4661,7 +4649,8 @@ intel_dp_unset_edid(struct intel_dp *intel_dp)
 }
 
 static int
-intel_dp_long_pulse(struct intel_connector *connector)
+intel_dp_long_pulse(struct intel_connector *connector,
+                   struct drm_modeset_acquire_ctx *ctx)
 {
        struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
        struct intel_dp *intel_dp = intel_attached_dp(&connector->base);
@@ -4720,6 +4709,22 @@ intel_dp_long_pulse(struct intel_connector *connector)
                 */
                status = connector_status_disconnected;
                goto out;
+       } else {
+               /*
+                * If display is now connected check links status,
+                * there has been known issues of link loss triggering
+                * long pulse.
+                *
+                * Some sinks (eg. ASUS PB287Q) seem to perform some
+                * weird HPD ping pong during modesets. So we can apparently
+                * end up with HPD going low during a modeset, and then
+                * going back up soon after. And once that happens we must
+                * retrain the link to get a picture. That's in case no
+                * userspace component reacted to intermittent HPD dip.
+                */
+               struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
+
+               intel_dp_retrain_link(encoder, ctx);
        }
 
        /*
@@ -4781,7 +4786,7 @@ intel_dp_detect(struct drm_connector *connector,
                                return ret;
                }
 
-               status = intel_dp_long_pulse(intel_dp->attached_connector);
+               status = intel_dp_long_pulse(intel_dp->attached_connector, ctx);
        }
 
        intel_dp->detect_done = false;
index 7e3e016..4ecd653 100644 (file)
@@ -166,6 +166,8 @@ static void intel_mst_post_disable_dp(struct intel_encoder *encoder,
        struct intel_connector *connector =
                to_intel_connector(old_conn_state->connector);
 
+       intel_ddi_disable_pipe_clock(old_crtc_state);
+
        /* this can fail */
        drm_dp_check_act_status(&intel_dp->mst_mgr);
        /* and this can also fail */
@@ -252,6 +254,8 @@ static void intel_mst_pre_enable_dp(struct intel_encoder *encoder,
        I915_WRITE(DP_TP_STATUS(port), temp);
 
        ret = drm_dp_update_payload_part1(&intel_dp->mst_mgr);
+
+       intel_ddi_enable_pipe_clock(pipe_config);
 }
 
 static void intel_mst_enable_dp(struct intel_encoder *encoder,
index c2f10d8..443dfae 100644 (file)
@@ -181,8 +181,9 @@ struct intel_overlay {
        u32 brightness, contrast, saturation;
        u32 old_xscale, old_yscale;
        /* register access */
-       u32 flip_addr;
        struct drm_i915_gem_object *reg_bo;
+       struct overlay_registers __iomem *regs;
+       u32 flip_addr;
        /* flip handling */
        struct i915_gem_active last_flip;
 };
@@ -210,29 +211,6 @@ static void i830_overlay_clock_gating(struct drm_i915_private *dev_priv,
                                  PCI_DEVFN(0, 0), I830_CLOCK_GATE, val);
 }
 
-static struct overlay_registers __iomem *
-intel_overlay_map_regs(struct intel_overlay *overlay)
-{
-       struct drm_i915_private *dev_priv = overlay->i915;
-       struct overlay_registers __iomem *regs;
-
-       if (OVERLAY_NEEDS_PHYSICAL(dev_priv))
-               regs = (struct overlay_registers __iomem *)overlay->reg_bo->phys_handle->vaddr;
-       else
-               regs = io_mapping_map_wc(&dev_priv->ggtt.iomap,
-                                        overlay->flip_addr,
-                                        PAGE_SIZE);
-
-       return regs;
-}
-
-static void intel_overlay_unmap_regs(struct intel_overlay *overlay,
-                                    struct overlay_registers __iomem *regs)
-{
-       if (!OVERLAY_NEEDS_PHYSICAL(overlay->i915))
-               io_mapping_unmap(regs);
-}
-
 static void intel_overlay_submit_request(struct intel_overlay *overlay,
                                         struct i915_request *rq,
                                         i915_gem_retire_fn retire)
@@ -784,13 +762,13 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
                                      struct drm_i915_gem_object *new_bo,
                                      struct put_image_params *params)
 {
-       int ret, tmp_width;
-       struct overlay_registers __iomem *regs;
-       bool scale_changed = false;
+       struct overlay_registers __iomem *regs = overlay->regs;
        struct drm_i915_private *dev_priv = overlay->i915;
        u32 swidth, swidthsw, sheight, ostride;
        enum pipe pipe = overlay->crtc->pipe;
+       bool scale_changed = false;
        struct i915_vma *vma;
+       int ret, tmp_width;
 
        lockdep_assert_held(&dev_priv->drm.struct_mutex);
        WARN_ON(!drm_modeset_is_locked(&dev_priv->drm.mode_config.connection_mutex));
@@ -815,30 +793,19 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
 
        if (!overlay->active) {
                u32 oconfig;
-               regs = intel_overlay_map_regs(overlay);
-               if (!regs) {
-                       ret = -ENOMEM;
-                       goto out_unpin;
-               }
+
                oconfig = OCONF_CC_OUT_8BIT;
                if (IS_GEN4(dev_priv))
                        oconfig |= OCONF_CSC_MODE_BT709;
                oconfig |= pipe == 0 ?
                        OCONF_PIPE_A : OCONF_PIPE_B;
                iowrite32(oconfig, &regs->OCONFIG);
-               intel_overlay_unmap_regs(overlay, regs);
 
                ret = intel_overlay_on(overlay);
                if (ret != 0)
                        goto out_unpin;
        }
 
-       regs = intel_overlay_map_regs(overlay);
-       if (!regs) {
-               ret = -ENOMEM;
-               goto out_unpin;
-       }
-
        iowrite32((params->dst_y << 16) | params->dst_x, &regs->DWINPOS);
        iowrite32((params->dst_h << 16) | params->dst_w, &regs->DWINSZ);
 
@@ -882,8 +849,6 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
 
        iowrite32(overlay_cmd_reg(params), &regs->OCMD);
 
-       intel_overlay_unmap_regs(overlay, regs);
-
        ret = intel_overlay_continue(overlay, vma, scale_changed);
        if (ret)
                goto out_unpin;
@@ -901,7 +866,6 @@ out_pin_section:
 int intel_overlay_switch_off(struct intel_overlay *overlay)
 {
        struct drm_i915_private *dev_priv = overlay->i915;
-       struct overlay_registers __iomem *regs;
        int ret;
 
        lockdep_assert_held(&dev_priv->drm.struct_mutex);
@@ -918,9 +882,7 @@ int intel_overlay_switch_off(struct intel_overlay *overlay)
        if (ret != 0)
                return ret;
 
-       regs = intel_overlay_map_regs(overlay);
-       iowrite32(0, &regs->OCMD);
-       intel_overlay_unmap_regs(overlay, regs);
+       iowrite32(0, &overlay->regs->OCMD);
 
        return intel_overlay_off(overlay);
 }
@@ -1305,7 +1267,6 @@ int intel_overlay_attrs_ioctl(struct drm_device *dev, void *data,
        struct drm_intel_overlay_attrs *attrs = data;
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_overlay *overlay;
-       struct overlay_registers __iomem *regs;
        int ret;
 
        overlay = dev_priv->overlay;
@@ -1345,15 +1306,7 @@ int intel_overlay_attrs_ioctl(struct drm_device *dev, void *data,
                overlay->contrast   = attrs->contrast;
                overlay->saturation = attrs->saturation;
 
-               regs = intel_overlay_map_regs(overlay);
-               if (!regs) {
-                       ret = -ENOMEM;
-                       goto out_unlock;
-               }
-
-               update_reg_attrs(overlay, regs);
-
-               intel_overlay_unmap_regs(overlay, regs);
+               update_reg_attrs(overlay, overlay->regs);
 
                if (attrs->flags & I915_OVERLAY_UPDATE_GAMMA) {
                        if (IS_GEN2(dev_priv))
@@ -1386,12 +1339,47 @@ out_unlock:
        return ret;
 }
 
+static int get_registers(struct intel_overlay *overlay, bool use_phys)
+{
+       struct drm_i915_gem_object *obj;
+       struct i915_vma *vma;
+       int err;
+
+       obj = i915_gem_object_create_stolen(overlay->i915, PAGE_SIZE);
+       if (obj == NULL)
+               obj = i915_gem_object_create_internal(overlay->i915, PAGE_SIZE);
+       if (IS_ERR(obj))
+               return PTR_ERR(obj);
+
+       vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, PIN_MAPPABLE);
+       if (IS_ERR(vma)) {
+               err = PTR_ERR(vma);
+               goto err_put_bo;
+       }
+
+       if (use_phys)
+               overlay->flip_addr = sg_dma_address(obj->mm.pages->sgl);
+       else
+               overlay->flip_addr = i915_ggtt_offset(vma);
+       overlay->regs = i915_vma_pin_iomap(vma);
+       i915_vma_unpin(vma);
+
+       if (IS_ERR(overlay->regs)) {
+               err = PTR_ERR(overlay->regs);
+               goto err_put_bo;
+       }
+
+       overlay->reg_bo = obj;
+       return 0;
+
+err_put_bo:
+       i915_gem_object_put(obj);
+       return err;
+}
+
 void intel_setup_overlay(struct drm_i915_private *dev_priv)
 {
        struct intel_overlay *overlay;
-       struct drm_i915_gem_object *reg_bo;
-       struct overlay_registers __iomem *regs;
-       struct i915_vma *vma = NULL;
        int ret;
 
        if (!HAS_OVERLAY(dev_priv))
@@ -1401,46 +1389,8 @@ void intel_setup_overlay(struct drm_i915_private *dev_priv)
        if (!overlay)
                return;
 
-       mutex_lock(&dev_priv->drm.struct_mutex);
-       if (WARN_ON(dev_priv->overlay))
-               goto out_free;
-
        overlay->i915 = dev_priv;
 
-       reg_bo = NULL;
-       if (!OVERLAY_NEEDS_PHYSICAL(dev_priv))
-               reg_bo = i915_gem_object_create_stolen(dev_priv, PAGE_SIZE);
-       if (reg_bo == NULL)
-               reg_bo = i915_gem_object_create(dev_priv, PAGE_SIZE);
-       if (IS_ERR(reg_bo))
-               goto out_free;
-       overlay->reg_bo = reg_bo;
-
-       if (OVERLAY_NEEDS_PHYSICAL(dev_priv)) {
-               ret = i915_gem_object_attach_phys(reg_bo, PAGE_SIZE);
-               if (ret) {
-                       DRM_ERROR("failed to attach phys overlay regs\n");
-                       goto out_free_bo;
-               }
-               overlay->flip_addr = reg_bo->phys_handle->busaddr;
-       } else {
-               vma = i915_gem_object_ggtt_pin(reg_bo, NULL,
-                                              0, PAGE_SIZE, PIN_MAPPABLE);
-               if (IS_ERR(vma)) {
-                       DRM_ERROR("failed to pin overlay register bo\n");
-                       ret = PTR_ERR(vma);
-                       goto out_free_bo;
-               }
-               overlay->flip_addr = i915_ggtt_offset(vma);
-
-               ret = i915_gem_object_set_to_gtt_domain(reg_bo, true);
-               if (ret) {
-                       DRM_ERROR("failed to move overlay register bo into the GTT\n");
-                       goto out_unpin_bo;
-               }
-       }
-
-       /* init all values */
        overlay->color_key = 0x0101fe;
        overlay->color_key_enabled = true;
        overlay->brightness = -19;
@@ -1449,44 +1399,51 @@ void intel_setup_overlay(struct drm_i915_private *dev_priv)
 
        init_request_active(&overlay->last_flip, NULL);
 
-       regs = intel_overlay_map_regs(overlay);
-       if (!regs)
-               goto out_unpin_bo;
+       mutex_lock(&dev_priv->drm.struct_mutex);
+
+       ret = get_registers(overlay, OVERLAY_NEEDS_PHYSICAL(dev_priv));
+       if (ret)
+               goto out_free;
+
+       ret = i915_gem_object_set_to_gtt_domain(overlay->reg_bo, true);
+       if (ret)
+               goto out_reg_bo;
 
-       memset_io(regs, 0, sizeof(struct overlay_registers));
-       update_polyphase_filter(regs);
-       update_reg_attrs(overlay, regs);
+       mutex_unlock(&dev_priv->drm.struct_mutex);
 
-       intel_overlay_unmap_regs(overlay, regs);
+       memset_io(overlay->regs, 0, sizeof(struct overlay_registers));
+       update_polyphase_filter(overlay->regs);
+       update_reg_attrs(overlay, overlay->regs);
 
        dev_priv->overlay = overlay;
-       mutex_unlock(&dev_priv->drm.struct_mutex);
-       DRM_INFO("initialized overlay support\n");
+       DRM_INFO("Initialized overlay support.\n");
        return;
 
-out_unpin_bo:
-       if (vma)
-               i915_vma_unpin(vma);
-out_free_bo:
-       i915_gem_object_put(reg_bo);
+out_reg_bo:
+       i915_gem_object_put(overlay->reg_bo);
 out_free:
        mutex_unlock(&dev_priv->drm.struct_mutex);
        kfree(overlay);
-       return;
 }
 
 void intel_cleanup_overlay(struct drm_i915_private *dev_priv)
 {
-       if (!dev_priv->overlay)
+       struct intel_overlay *overlay;
+
+       overlay = fetch_and_zero(&dev_priv->overlay);
+       if (!overlay)
                return;
 
-       /* The bo's should be free'd by the generic code already.
+       /*
+        * The bo's should be free'd by the generic code already.
         * Furthermore modesetting teardown happens beforehand so the
-        * hardware should be off already */
-       WARN_ON(dev_priv->overlay->active);
+        * hardware should be off already.
+        */
+       WARN_ON(overlay->active);
+
+       i915_gem_object_put(overlay->reg_bo);
 
-       i915_gem_object_put(dev_priv->overlay->reg_bo);
-       kfree(dev_priv->overlay);
+       kfree(overlay);
 }
 
 #if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
@@ -1498,37 +1455,11 @@ struct intel_overlay_error_state {
        u32 isr;
 };
 
-static struct overlay_registers __iomem *
-intel_overlay_map_regs_atomic(struct intel_overlay *overlay)
-{
-       struct drm_i915_private *dev_priv = overlay->i915;
-       struct overlay_registers __iomem *regs;
-
-       if (OVERLAY_NEEDS_PHYSICAL(dev_priv))
-               /* Cast to make sparse happy, but it's wc memory anyway, so
-                * equivalent to the wc io mapping on X86. */
-               regs = (struct overlay_registers __iomem *)
-                       overlay->reg_bo->phys_handle->vaddr;
-       else
-               regs = io_mapping_map_atomic_wc(&dev_priv->ggtt.iomap,
-                                               overlay->flip_addr);
-
-       return regs;
-}
-
-static void intel_overlay_unmap_regs_atomic(struct intel_overlay *overlay,
-                                       struct overlay_registers __iomem *regs)
-{
-       if (!OVERLAY_NEEDS_PHYSICAL(overlay->i915))
-               io_mapping_unmap_atomic(regs);
-}
-
 struct intel_overlay_error_state *
 intel_overlay_capture_error_state(struct drm_i915_private *dev_priv)
 {
        struct intel_overlay *overlay = dev_priv->overlay;
        struct intel_overlay_error_state *error;
-       struct overlay_registers __iomem *regs;
 
        if (!overlay || !overlay->active)
                return NULL;
@@ -1541,18 +1472,9 @@ intel_overlay_capture_error_state(struct drm_i915_private *dev_priv)
        error->isr = I915_READ(ISR);
        error->base = overlay->flip_addr;
 
-       regs = intel_overlay_map_regs_atomic(overlay);
-       if (!regs)
-               goto err;
-
-       memcpy_fromio(&error->regs, regs, sizeof(struct overlay_registers));
-       intel_overlay_unmap_regs_atomic(overlay, regs);
+       memcpy_fromio(&error->regs, overlay->regs, sizeof(error->regs));
 
        return error;
-
-err:
-       kfree(error);
-       return NULL;
 }
 
 void
index 8412119..5691dfa 100644 (file)
@@ -1123,17 +1123,21 @@ nv50_mstm_enable(struct nv50_mstm *mstm, u8 dpcd, int state)
        int ret;
 
        if (dpcd >= 0x12) {
-               ret = drm_dp_dpcd_readb(mstm->mgr.aux, DP_MSTM_CTRL, &dpcd);
+               /* Even if we're enabling MST, start with disabling the
+                * branching unit to clear any sink-side MST topology state
+                * that wasn't set by us
+                */
+               ret = drm_dp_dpcd_writeb(mstm->mgr.aux, DP_MSTM_CTRL, 0);
                if (ret < 0)
                        return ret;
 
-               dpcd &= ~DP_MST_EN;
-               if (state)
-                       dpcd |= DP_MST_EN;
-
-               ret = drm_dp_dpcd_writeb(mstm->mgr.aux, DP_MSTM_CTRL, dpcd);
-               if (ret < 0)
-                       return ret;
+               if (state) {
+                       /* Now, start initializing */
+                       ret = drm_dp_dpcd_writeb(mstm->mgr.aux, DP_MSTM_CTRL,
+                                                DP_MST_EN);
+                       if (ret < 0)
+                               return ret;
+               }
        }
 
        return nvif_mthd(disp, 0, &args, sizeof(args));
@@ -1142,31 +1146,58 @@ nv50_mstm_enable(struct nv50_mstm *mstm, u8 dpcd, int state)
 int
 nv50_mstm_detect(struct nv50_mstm *mstm, u8 dpcd[8], int allow)
 {
-       int ret, state = 0;
+       struct drm_dp_aux *aux;
+       int ret;
+       bool old_state, new_state;
+       u8 mstm_ctrl;
 
        if (!mstm)
                return 0;
 
-       if (dpcd[0] >= 0x12) {
-               ret = drm_dp_dpcd_readb(mstm->mgr.aux, DP_MSTM_CAP, &dpcd[1]);
+       mutex_lock(&mstm->mgr.lock);
+
+       old_state = mstm->mgr.mst_state;
+       new_state = old_state;
+       aux = mstm->mgr.aux;
+
+       if (old_state) {
+               /* Just check that the MST hub is still as we expect it */
+               ret = drm_dp_dpcd_readb(aux, DP_MSTM_CTRL, &mstm_ctrl);
+               if (ret < 0 || !(mstm_ctrl & DP_MST_EN)) {
+                       DRM_DEBUG_KMS("Hub gone, disabling MST topology\n");
+                       new_state = false;
+               }
+       } else if (dpcd[0] >= 0x12) {
+               ret = drm_dp_dpcd_readb(aux, DP_MSTM_CAP, &dpcd[1]);
                if (ret < 0)
-                       return ret;
+                       goto probe_error;
 
                if (!(dpcd[1] & DP_MST_CAP))
                        dpcd[0] = 0x11;
                else
-                       state = allow;
+                       new_state = allow;
+       }
+
+       if (new_state == old_state) {
+               mutex_unlock(&mstm->mgr.lock);
+               return new_state;
        }
 
-       ret = nv50_mstm_enable(mstm, dpcd[0], state);
+       ret = nv50_mstm_enable(mstm, dpcd[0], new_state);
        if (ret)
-               return ret;
+               goto probe_error;
+
+       mutex_unlock(&mstm->mgr.lock);
 
-       ret = drm_dp_mst_topology_mgr_set_mst(&mstm->mgr, state);
+       ret = drm_dp_mst_topology_mgr_set_mst(&mstm->mgr, new_state);
        if (ret)
                return nv50_mstm_enable(mstm, dpcd[0], 0);
 
-       return mstm->mgr.mst_state;
+       return new_state;
+
+probe_error:
+       mutex_unlock(&mstm->mgr.lock);
+       return ret;
 }
 
 static void
@@ -2074,7 +2105,7 @@ nv50_disp_atomic_state_alloc(struct drm_device *dev)
 static const struct drm_mode_config_funcs
 nv50_disp_func = {
        .fb_create = nouveau_user_framebuffer_create,
-       .output_poll_changed = drm_fb_helper_output_poll_changed,
+       .output_poll_changed = nouveau_fbcon_output_poll_changed,
        .atomic_check = nv50_disp_atomic_check,
        .atomic_commit = nv50_disp_atomic_commit,
        .atomic_state_alloc = nv50_disp_atomic_state_alloc,
index 51932c7..247f72c 100644 (file)
@@ -409,59 +409,45 @@ static struct nouveau_encoder *
 nouveau_connector_ddc_detect(struct drm_connector *connector)
 {
        struct drm_device *dev = connector->dev;
-       struct nouveau_connector *nv_connector = nouveau_connector(connector);
-       struct nouveau_drm *drm = nouveau_drm(dev);
-       struct nvkm_gpio *gpio = nvxx_gpio(&drm->client.device);
-       struct nouveau_encoder *nv_encoder = NULL;
+       struct nouveau_encoder *nv_encoder = NULL, *found = NULL;
        struct drm_encoder *encoder;
-       int i, panel = -ENODEV;
-
-       /* eDP panels need powering on by us (if the VBIOS doesn't default it
-        * to on) before doing any AUX channel transactions.  LVDS panel power
-        * is handled by the SOR itself, and not required for LVDS DDC.
-        */
-       if (nv_connector->type == DCB_CONNECTOR_eDP) {
-               panel = nvkm_gpio_get(gpio, 0, DCB_GPIO_PANEL_POWER, 0xff);
-               if (panel == 0) {
-                       nvkm_gpio_set(gpio, 0, DCB_GPIO_PANEL_POWER, 0xff, 1);
-                       msleep(300);
-               }
-       }
+       int i, ret;
+       bool switcheroo_ddc = false;
 
        drm_connector_for_each_possible_encoder(connector, encoder, i) {
                nv_encoder = nouveau_encoder(encoder);
 
-               if (nv_encoder->dcb->type == DCB_OUTPUT_DP) {
-                       int ret = nouveau_dp_detect(nv_encoder);
+               switch (nv_encoder->dcb->type) {
+               case DCB_OUTPUT_DP:
+                       ret = nouveau_dp_detect(nv_encoder);
                        if (ret == NOUVEAU_DP_MST)
                                return NULL;
-                       if (ret == NOUVEAU_DP_SST)
-                               break;
-               } else
-               if ((vga_switcheroo_handler_flags() &
-                    VGA_SWITCHEROO_CAN_SWITCH_DDC) &&
-                   nv_encoder->dcb->type == DCB_OUTPUT_LVDS &&
-                   nv_encoder->i2c) {
-                       int ret;
-                       vga_switcheroo_lock_ddc(dev->pdev);
-                       ret = nvkm_probe_i2c(nv_encoder->i2c, 0x50);
-                       vga_switcheroo_unlock_ddc(dev->pdev);
-                       if (ret)
+                       else if (ret == NOUVEAU_DP_SST)
+                               found = nv_encoder;
+
+                       break;
+               case DCB_OUTPUT_LVDS:
+                       switcheroo_ddc = !!(vga_switcheroo_handler_flags() &
+                                           VGA_SWITCHEROO_CAN_SWITCH_DDC);
+               /* fall-through */
+               default:
+                       if (!nv_encoder->i2c)
                                break;
-               } else
-               if (nv_encoder->i2c) {
+
+                       if (switcheroo_ddc)
+                               vga_switcheroo_lock_ddc(dev->pdev);
                        if (nvkm_probe_i2c(nv_encoder->i2c, 0x50))
-                               break;
+                               found = nv_encoder;
+                       if (switcheroo_ddc)
+                               vga_switcheroo_unlock_ddc(dev->pdev);
+
+                       break;
                }
+               if (found)
+                       break;
        }
 
-       /* eDP panel not detected, restore panel power GPIO to previous
-        * state to avoid confusing the SOR for other output types.
-        */
-       if (!nv_encoder && panel == 0)
-               nvkm_gpio_set(gpio, 0, DCB_GPIO_PANEL_POWER, 0xff, panel);
-
-       return nv_encoder;
+       return found;
 }
 
 static struct nouveau_encoder *
@@ -555,12 +541,16 @@ nouveau_connector_detect(struct drm_connector *connector, bool force)
                nv_connector->edid = NULL;
        }
 
-       /* Outputs are only polled while runtime active, so acquiring a
-        * runtime PM ref here is unnecessary (and would deadlock upon
-        * runtime suspend because it waits for polling to finish).
+       /* Outputs are only polled while runtime active, so resuming the
+        * device here is unnecessary (and would deadlock upon runtime suspend
+        * because it waits for polling to finish). We do however, want to
+        * prevent the autosuspend timer from elapsing during this operation
+        * if possible.
         */
-       if (!drm_kms_helper_is_poll_worker()) {
-               ret = pm_runtime_get_sync(connector->dev->dev);
+       if (drm_kms_helper_is_poll_worker()) {
+               pm_runtime_get_noresume(dev->dev);
+       } else {
+               ret = pm_runtime_get_sync(dev->dev);
                if (ret < 0 && ret != -EACCES)
                        return conn_status;
        }
@@ -638,10 +628,8 @@ detect_analog:
 
  out:
 
-       if (!drm_kms_helper_is_poll_worker()) {
-               pm_runtime_mark_last_busy(connector->dev->dev);
-               pm_runtime_put_autosuspend(connector->dev->dev);
-       }
+       pm_runtime_mark_last_busy(dev->dev);
+       pm_runtime_put_autosuspend(dev->dev);
 
        return conn_status;
 }
@@ -1105,6 +1093,26 @@ nouveau_connector_hotplug(struct nvif_notify *notify)
        const struct nvif_notify_conn_rep_v0 *rep = notify->data;
        const char *name = connector->name;
        struct nouveau_encoder *nv_encoder;
+       int ret;
+
+       ret = pm_runtime_get(drm->dev->dev);
+       if (ret == 0) {
+               /* We can't block here if there's a pending PM request
+                * running, as we'll deadlock nouveau_display_fini() when it
+                * calls nvif_put() on our nvif_notify struct. So, simply
+                * defer the hotplug event until the device finishes resuming
+                */
+               NV_DEBUG(drm, "Deferring HPD on %s until runtime resume\n",
+                        name);
+               schedule_work(&drm->hpd_work);
+
+               pm_runtime_put_noidle(drm->dev->dev);
+               return NVIF_NOTIFY_KEEP;
+       } else if (ret != 1 && ret != -EACCES) {
+               NV_WARN(drm, "HPD on %s dropped due to RPM failure: %d\n",
+                       name, ret);
+               return NVIF_NOTIFY_DROP;
+       }
 
        if (rep->mask & NVIF_NOTIFY_CONN_V0_IRQ) {
                NV_DEBUG(drm, "service %s\n", name);
@@ -1122,6 +1130,8 @@ nouveau_connector_hotplug(struct nvif_notify *notify)
                drm_helper_hpd_irq_event(connector->dev);
        }
 
+       pm_runtime_mark_last_busy(drm->dev->dev);
+       pm_runtime_put_autosuspend(drm->dev->dev);
        return NVIF_NOTIFY_KEEP;
 }
 
index 139368b..540c0cb 100644 (file)
@@ -293,7 +293,7 @@ nouveau_user_framebuffer_create(struct drm_device *dev,
 
 static const struct drm_mode_config_funcs nouveau_mode_config_funcs = {
        .fb_create = nouveau_user_framebuffer_create,
-       .output_poll_changed = drm_fb_helper_output_poll_changed,
+       .output_poll_changed = nouveau_fbcon_output_poll_changed,
 };
 
 
@@ -355,8 +355,6 @@ nouveau_display_hpd_work(struct work_struct *work)
        pm_runtime_get_sync(drm->dev->dev);
 
        drm_helper_hpd_irq_event(drm->dev);
-       /* enable polling for external displays */
-       drm_kms_helper_poll_enable(drm->dev);
 
        pm_runtime_mark_last_busy(drm->dev->dev);
        pm_runtime_put_sync(drm->dev->dev);
@@ -379,15 +377,29 @@ nouveau_display_acpi_ntfy(struct notifier_block *nb, unsigned long val,
 {
        struct nouveau_drm *drm = container_of(nb, typeof(*drm), acpi_nb);
        struct acpi_bus_event *info = data;
+       int ret;
 
        if (!strcmp(info->device_class, ACPI_VIDEO_CLASS)) {
                if (info->type == ACPI_VIDEO_NOTIFY_PROBE) {
-                       /*
-                        * This may be the only indication we receive of a
-                        * connector hotplug on a runtime suspended GPU,
-                        * schedule hpd_work to check.
-                        */
-                       schedule_work(&drm->hpd_work);
+                       ret = pm_runtime_get(drm->dev->dev);
+                       if (ret == 1 || ret == -EACCES) {
+                               /* If the GPU is already awake, or in a state
+                                * where we can't wake it up, it can handle
+                                * its own hotplug events.
+                                */
+                               pm_runtime_put_autosuspend(drm->dev->dev);
+                       } else if (ret == 0) {
+                               /* This may be the only indication we receive
+                                * of a connector hotplug on a runtime
+                                * suspended GPU, schedule hpd_work to check.
+                                */
+                               NV_DEBUG(drm, "ACPI requested connector reprobe\n");
+                               schedule_work(&drm->hpd_work);
+                               pm_runtime_put_noidle(drm->dev->dev);
+                       } else {
+                               NV_WARN(drm, "Dropped ACPI reprobe event due to RPM error: %d\n",
+                                       ret);
+                       }
 
                        /* acpi-video should not generate keypresses for this */
                        return NOTIFY_BAD;
@@ -411,6 +423,11 @@ nouveau_display_init(struct drm_device *dev)
        if (ret)
                return ret;
 
+       /* enable connector detection and polling for connectors without HPD
+        * support
+        */
+       drm_kms_helper_poll_enable(dev);
+
        /* enable hotplug interrupts */
        drm_connector_list_iter_begin(dev, &conn_iter);
        nouveau_for_each_non_mst_connector_iter(connector, &conn_iter) {
@@ -425,7 +442,7 @@ nouveau_display_init(struct drm_device *dev)
 }
 
 void
-nouveau_display_fini(struct drm_device *dev, bool suspend)
+nouveau_display_fini(struct drm_device *dev, bool suspend, bool runtime)
 {
        struct nouveau_display *disp = nouveau_display(dev);
        struct nouveau_drm *drm = nouveau_drm(dev);
@@ -450,6 +467,9 @@ nouveau_display_fini(struct drm_device *dev, bool suspend)
        }
        drm_connector_list_iter_end(&conn_iter);
 
+       if (!runtime)
+               cancel_work_sync(&drm->hpd_work);
+
        drm_kms_helper_poll_disable(dev);
        disp->fini(dev);
 }
@@ -618,11 +638,11 @@ nouveau_display_suspend(struct drm_device *dev, bool runtime)
                        }
                }
 
-               nouveau_display_fini(dev, true);
+               nouveau_display_fini(dev, true, runtime);
                return 0;
        }
 
-       nouveau_display_fini(dev, true);
+       nouveau_display_fini(dev, true, runtime);
 
        list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
                struct nouveau_framebuffer *nouveau_fb;
index 54aa7c3..ff92b54 100644 (file)
@@ -62,7 +62,7 @@ nouveau_display(struct drm_device *dev)
 int  nouveau_display_create(struct drm_device *dev);
 void nouveau_display_destroy(struct drm_device *dev);
 int  nouveau_display_init(struct drm_device *dev);
-void nouveau_display_fini(struct drm_device *dev, bool suspend);
+void nouveau_display_fini(struct drm_device *dev, bool suspend, bool runtime);
 int  nouveau_display_suspend(struct drm_device *dev, bool runtime);
 void nouveau_display_resume(struct drm_device *dev, bool runtime);
 int  nouveau_display_vblank_enable(struct drm_device *, unsigned int);
index c7ec86d..74d2283 100644 (file)
@@ -230,7 +230,7 @@ nouveau_cli_init(struct nouveau_drm *drm, const char *sname,
                mutex_unlock(&drm->master.lock);
        }
        if (ret) {
-               NV_ERROR(drm, "Client allocation failed: %d\n", ret);
+               NV_PRINTK(err, cli, "Client allocation failed: %d\n", ret);
                goto done;
        }
 
@@ -240,37 +240,37 @@ nouveau_cli_init(struct nouveau_drm *drm, const char *sname,
                               }, sizeof(struct nv_device_v0),
                               &cli->device);
        if (ret) {
-               NV_ERROR(drm, "Device allocation failed: %d\n", ret);
+               NV_PRINTK(err, cli, "Device allocation failed: %d\n", ret);
                goto done;
        }
 
        ret = nvif_mclass(&cli->device.object, mmus);
        if (ret < 0) {
-               NV_ERROR(drm, "No supported MMU class\n");
+               NV_PRINTK(err, cli, "No supported MMU class\n");
                goto done;
        }
 
        ret = nvif_mmu_init(&cli->device.object, mmus[ret].oclass, &cli->mmu);
        if (ret) {
-               NV_ERROR(drm, "MMU allocation failed: %d\n", ret);
+               NV_PRINTK(err, cli, "MMU allocation failed: %d\n", ret);
                goto done;
        }
 
        ret = nvif_mclass(&cli->mmu.object, vmms);
        if (ret < 0) {
-               NV_ERROR(drm, "No supported VMM class\n");
+               NV_PRINTK(err, cli, "No supported VMM class\n");
                goto done;
        }
 
        ret = nouveau_vmm_init(cli, vmms[ret].oclass, &cli->vmm);
        if (ret) {
-               NV_ERROR(drm, "VMM allocation failed: %d\n", ret);
+               NV_PRINTK(err, cli, "VMM allocation failed: %d\n", ret);
                goto done;
        }
 
        ret = nvif_mclass(&cli->mmu.object, mems);
        if (ret < 0) {
-               NV_ERROR(drm, "No supported MEM class\n");
+               NV_PRINTK(err, cli, "No supported MEM class\n");
                goto done;
        }
 
@@ -592,10 +592,8 @@ nouveau_drm_load(struct drm_device *dev, unsigned long flags)
                pm_runtime_allow(dev->dev);
                pm_runtime_mark_last_busy(dev->dev);
                pm_runtime_put(dev->dev);
-       } else {
-               /* enable polling for external displays */
-               drm_kms_helper_poll_enable(dev);
        }
+
        return 0;
 
 fail_dispinit:
@@ -629,7 +627,7 @@ nouveau_drm_unload(struct drm_device *dev)
        nouveau_debugfs_fini(drm);
 
        if (dev->mode_config.num_crtc)
-               nouveau_display_fini(dev, false);
+               nouveau_display_fini(dev, false, false);
        nouveau_display_destroy(dev);
 
        nouveau_bios_takedown(dev);
@@ -835,7 +833,6 @@ nouveau_pmops_runtime_suspend(struct device *dev)
                return -EBUSY;
        }
 
-       drm_kms_helper_poll_disable(drm_dev);
        nouveau_switcheroo_optimus_dsm();
        ret = nouveau_do_suspend(drm_dev, true);
        pci_save_state(pdev);
index 844498c..0f64c0a 100644 (file)
@@ -466,6 +466,7 @@ nouveau_fbcon_set_suspend_work(struct work_struct *work)
        console_unlock();
 
        if (state == FBINFO_STATE_RUNNING) {
+               nouveau_fbcon_hotplug_resume(drm->fbcon);
                pm_runtime_mark_last_busy(drm->dev->dev);
                pm_runtime_put_sync(drm->dev->dev);
        }
@@ -487,6 +488,61 @@ nouveau_fbcon_set_suspend(struct drm_device *dev, int state)
        schedule_work(&drm->fbcon_work);
 }
 
+void
+nouveau_fbcon_output_poll_changed(struct drm_device *dev)
+{
+       struct nouveau_drm *drm = nouveau_drm(dev);
+       struct nouveau_fbdev *fbcon = drm->fbcon;
+       int ret;
+
+       if (!fbcon)
+               return;
+
+       mutex_lock(&fbcon->hotplug_lock);
+
+       ret = pm_runtime_get(dev->dev);
+       if (ret == 1 || ret == -EACCES) {
+               drm_fb_helper_hotplug_event(&fbcon->helper);
+
+               pm_runtime_mark_last_busy(dev->dev);
+               pm_runtime_put_autosuspend(dev->dev);
+       } else if (ret == 0) {
+               /* If the GPU was already in the process of suspending before
+                * this event happened, then we can't block here as we'll
+                * deadlock the runtime pmops since they wait for us to
+                * finish. So, just defer this event for when we runtime
+                * resume again. It will be handled by fbcon_work.
+                */
+               NV_DEBUG(drm, "fbcon HPD event deferred until runtime resume\n");
+               fbcon->hotplug_waiting = true;
+               pm_runtime_put_noidle(drm->dev->dev);
+       } else {
+               DRM_WARN("fbcon HPD event lost due to RPM failure: %d\n",
+                        ret);
+       }
+
+       mutex_unlock(&fbcon->hotplug_lock);
+}
+
+void
+nouveau_fbcon_hotplug_resume(struct nouveau_fbdev *fbcon)
+{
+       struct nouveau_drm *drm;
+
+       if (!fbcon)
+               return;
+       drm = nouveau_drm(fbcon->helper.dev);
+
+       mutex_lock(&fbcon->hotplug_lock);
+       if (fbcon->hotplug_waiting) {
+               fbcon->hotplug_waiting = false;
+
+               NV_DEBUG(drm, "Handling deferred fbcon HPD events\n");
+               drm_fb_helper_hotplug_event(&fbcon->helper);
+       }
+       mutex_unlock(&fbcon->hotplug_lock);
+}
+
 int
 nouveau_fbcon_init(struct drm_device *dev)
 {
@@ -505,6 +561,7 @@ nouveau_fbcon_init(struct drm_device *dev)
 
        drm->fbcon = fbcon;
        INIT_WORK(&drm->fbcon_work, nouveau_fbcon_set_suspend_work);
+       mutex_init(&fbcon->hotplug_lock);
 
        drm_fb_helper_prepare(dev, &fbcon->helper, &nouveau_fbcon_helper_funcs);
 
index a6f192e..db9d520 100644 (file)
@@ -41,6 +41,9 @@ struct nouveau_fbdev {
        struct nvif_object gdi;
        struct nvif_object blit;
        struct nvif_object twod;
+
+       struct mutex hotplug_lock;
+       bool hotplug_waiting;
 };
 
 void nouveau_fbcon_restore(void);
@@ -68,6 +71,8 @@ void nouveau_fbcon_set_suspend(struct drm_device *dev, int state);
 void nouveau_fbcon_accel_save_disable(struct drm_device *dev);
 void nouveau_fbcon_accel_restore(struct drm_device *dev);
 
+void nouveau_fbcon_output_poll_changed(struct drm_device *dev);
+void nouveau_fbcon_hotplug_resume(struct nouveau_fbdev *fbcon);
 extern int nouveau_nofbaccel;
 
 #endif /* __NV50_FBCON_H__ */
index 3da5a43..8f1ce48 100644 (file)
@@ -46,12 +46,10 @@ nouveau_switcheroo_set_state(struct pci_dev *pdev,
                pr_err("VGA switcheroo: switched nouveau on\n");
                dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
                nouveau_pmops_resume(&pdev->dev);
-               drm_kms_helper_poll_enable(dev);
                dev->switch_power_state = DRM_SWITCH_POWER_ON;
        } else {
                pr_err("VGA switcheroo: switched nouveau off\n");
                dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
-               drm_kms_helper_poll_disable(dev);
                nouveau_switcheroo_optimus_dsm();
                nouveau_pmops_suspend(&pdev->dev);
                dev->switch_power_state = DRM_SWITCH_POWER_OFF;
index 32fa94a..cbd33e8 100644 (file)
@@ -275,6 +275,7 @@ nvkm_disp_oneinit(struct nvkm_engine *engine)
        struct nvkm_outp *outp, *outt, *pair;
        struct nvkm_conn *conn;
        struct nvkm_head *head;
+       struct nvkm_ior *ior;
        struct nvbios_connE connE;
        struct dcb_output dcbE;
        u8  hpd = 0, ver, hdr;
@@ -399,6 +400,19 @@ nvkm_disp_oneinit(struct nvkm_engine *engine)
                        return ret;
        }
 
+       /* Enforce identity-mapped SOR assignment for panels, which have
+        * certain bits (ie. backlight controls) wired to a specific SOR.
+        */
+       list_for_each_entry(outp, &disp->outp, head) {
+               if (outp->conn->info.type == DCB_CONNECTOR_LVDS ||
+                   outp->conn->info.type == DCB_CONNECTOR_eDP) {
+                       ior = nvkm_ior_find(disp, SOR, ffs(outp->info.or) - 1);
+                       if (!WARN_ON(!ior))
+                               ior->identity = true;
+                       outp->identity = true;
+               }
+       }
+
        i = 0;
        list_for_each_entry(head, &disp->head, head)
                i = max(i, head->id + 1);
index 7c5bed2..5f301e6 100644 (file)
@@ -28,6 +28,7 @@
 
 #include <subdev/bios.h>
 #include <subdev/bios/init.h>
+#include <subdev/gpio.h>
 #include <subdev/i2c.h>
 
 #include <nvif/event.h>
@@ -412,14 +413,10 @@ nvkm_dp_train(struct nvkm_dp *dp, u32 dataKBps)
 }
 
 static void
-nvkm_dp_release(struct nvkm_outp *outp, struct nvkm_ior *ior)
+nvkm_dp_disable(struct nvkm_outp *outp, struct nvkm_ior *ior)
 {
        struct nvkm_dp *dp = nvkm_dp(outp);
 
-       /* Prevent link from being retrained if sink sends an IRQ. */
-       atomic_set(&dp->lt.done, 0);
-       ior->dp.nr = 0;
-
        /* Execute DisableLT script from DP Info Table. */
        nvbios_init(&ior->disp->engine.subdev, dp->info.script[4],
                init.outp = &dp->outp.info;
@@ -428,6 +425,16 @@ nvkm_dp_release(struct nvkm_outp *outp, struct nvkm_ior *ior)
        );
 }
 
+static void
+nvkm_dp_release(struct nvkm_outp *outp)
+{
+       struct nvkm_dp *dp = nvkm_dp(outp);
+
+       /* Prevent link from being retrained if sink sends an IRQ. */
+       atomic_set(&dp->lt.done, 0);
+       dp->outp.ior->dp.nr = 0;
+}
+
 static int
 nvkm_dp_acquire(struct nvkm_outp *outp)
 {
@@ -491,7 +498,7 @@ done:
        return ret;
 }
 
-static void
+static bool
 nvkm_dp_enable(struct nvkm_dp *dp, bool enable)
 {
        struct nvkm_i2c_aux *aux = dp->aux;
@@ -505,7 +512,7 @@ nvkm_dp_enable(struct nvkm_dp *dp, bool enable)
 
                if (!nvkm_rdaux(aux, DPCD_RC00_DPCD_REV, dp->dpcd,
                                sizeof(dp->dpcd)))
-                       return;
+                       return true;
        }
 
        if (dp->present) {
@@ -515,6 +522,7 @@ nvkm_dp_enable(struct nvkm_dp *dp, bool enable)
        }
 
        atomic_set(&dp->lt.done, 0);
+       return false;
 }
 
 static int
@@ -555,9 +563,38 @@ nvkm_dp_fini(struct nvkm_outp *outp)
 static void
 nvkm_dp_init(struct nvkm_outp *outp)
 {
+       struct nvkm_gpio *gpio = outp->disp->engine.subdev.device->gpio;
        struct nvkm_dp *dp = nvkm_dp(outp);
+
        nvkm_notify_put(&dp->outp.conn->hpd);
-       nvkm_dp_enable(dp, true);
+
+       /* eDP panels need powering on by us (if the VBIOS doesn't default it
+        * to on) before doing any AUX channel transactions.  LVDS panel power
+        * is handled by the SOR itself, and not required for LVDS DDC.
+        */
+       if (dp->outp.conn->info.type == DCB_CONNECTOR_eDP) {
+               int power = nvkm_gpio_get(gpio, 0, DCB_GPIO_PANEL_POWER, 0xff);
+               if (power == 0)
+                       nvkm_gpio_set(gpio, 0, DCB_GPIO_PANEL_POWER, 0xff, 1);
+
+               /* We delay here unconditionally, even if already powered,
+                * because some laptop panels have a significant resume
+                * delay before the panel begins responding.
+                *
+                * This is likely a bit of a hack, but no better idea for
+                * handling this at the moment.
+                */
+               msleep(300);
+
+               /* If the eDP panel can't be detected, we need to restore
+                * the panel power GPIO to avoid breaking another output.
+                */
+               if (!nvkm_dp_enable(dp, true) && power == 0)
+                       nvkm_gpio_set(gpio, 0, DCB_GPIO_PANEL_POWER, 0xff, 0);
+       } else {
+               nvkm_dp_enable(dp, true);
+       }
+
        nvkm_notify_get(&dp->hpd);
 }
 
@@ -576,6 +613,7 @@ nvkm_dp_func = {
        .fini = nvkm_dp_fini,
        .acquire = nvkm_dp_acquire,
        .release = nvkm_dp_release,
+       .disable = nvkm_dp_disable,
 };
 
 static int
index e0b4e0c..1991121 100644 (file)
@@ -16,6 +16,7 @@ struct nvkm_ior {
        char name[8];
 
        struct list_head head;
+       bool identity;
 
        struct nvkm_ior_state {
                struct nvkm_outp *outp;
index f89c7b9..def005d 100644 (file)
@@ -501,11 +501,11 @@ nv50_disp_super_2_0(struct nv50_disp *disp, struct nvkm_head *head)
        nv50_disp_super_ied_off(head, ior, 2);
 
        /* If we're shutting down the OR's only active head, execute
-        * the output path's release function.
+        * the output path's disable function.
         */
        if (ior->arm.head == (1 << head->id)) {
-               if ((outp = ior->arm.outp) && outp->func->release)
-                       outp->func->release(outp, ior);
+               if ((outp = ior->arm.outp) && outp->func->disable)
+                       outp->func->disable(outp, ior);
        }
 }
 
index be9e7f8..c62030c 100644 (file)
@@ -93,6 +93,8 @@ nvkm_outp_release(struct nvkm_outp *outp, u8 user)
        if (ior) {
                outp->acquired &= ~user;
                if (!outp->acquired) {
+                       if (outp->func->release && outp->ior)
+                               outp->func->release(outp);
                        outp->ior->asy.outp = NULL;
                        outp->ior = NULL;
                }
@@ -127,17 +129,26 @@ nvkm_outp_acquire(struct nvkm_outp *outp, u8 user)
        if (proto == UNKNOWN)
                return -ENOSYS;
 
+       /* Deal with panels requiring identity-mapped SOR assignment. */
+       if (outp->identity) {
+               ior = nvkm_ior_find(outp->disp, SOR, ffs(outp->info.or) - 1);
+               if (WARN_ON(!ior))
+                       return -ENOSPC;
+               return nvkm_outp_acquire_ior(outp, user, ior);
+       }
+
        /* First preference is to reuse the OR that is currently armed
         * on HW, if any, in order to prevent unnecessary switching.
         */
        list_for_each_entry(ior, &outp->disp->ior, head) {
-               if (!ior->asy.outp && ior->arm.outp == outp)
+               if (!ior->identity && !ior->asy.outp && ior->arm.outp == outp)
                        return nvkm_outp_acquire_ior(outp, user, ior);
        }
 
        /* Failing that, a completely unused OR is the next best thing. */
        list_for_each_entry(ior, &outp->disp->ior, head) {
-               if (!ior->asy.outp && ior->type == type && !ior->arm.outp &&
+               if (!ior->identity &&
+                   !ior->asy.outp && ior->type == type && !ior->arm.outp &&
                    (ior->func->route.set || ior->id == __ffs(outp->info.or)))
                        return nvkm_outp_acquire_ior(outp, user, ior);
        }
@@ -146,7 +157,7 @@ nvkm_outp_acquire(struct nvkm_outp *outp, u8 user)
         * but will be released during the next modeset.
         */
        list_for_each_entry(ior, &outp->disp->ior, head) {
-               if (!ior->asy.outp && ior->type == type &&
+               if (!ior->identity && !ior->asy.outp && ior->type == type &&
                    (ior->func->route.set || ior->id == __ffs(outp->info.or)))
                        return nvkm_outp_acquire_ior(outp, user, ior);
        }
@@ -245,7 +256,6 @@ nvkm_outp_ctor(const struct nvkm_outp_func *func, struct nvkm_disp *disp,
        outp->index = index;
        outp->info = *dcbE;
        outp->i2c = nvkm_i2c_bus_find(i2c, dcbE->i2c_index);
-       outp->or = ffs(outp->info.or) - 1;
 
        OUTP_DBG(outp, "type %02x loc %d or %d link %d con %x "
                       "edid %x bus %d head %x",
index ea84d7d..6c8aa5c 100644 (file)
@@ -13,10 +13,10 @@ struct nvkm_outp {
        struct dcb_output info;
 
        struct nvkm_i2c_bus *i2c;
-       int or;
 
        struct list_head head;
        struct nvkm_conn *conn;
+       bool identity;
 
        /* Assembly state. */
 #define NVKM_OUTP_PRIV 1
@@ -41,7 +41,8 @@ struct nvkm_outp_func {
        void (*init)(struct nvkm_outp *);
        void (*fini)(struct nvkm_outp *);
        int (*acquire)(struct nvkm_outp *);
-       void (*release)(struct nvkm_outp *, struct nvkm_ior *);
+       void (*release)(struct nvkm_outp *);
+       void (*disable)(struct nvkm_outp *, struct nvkm_ior *);
 };
 
 #define OUTP_MSG(o,l,f,a...) do {                                              \
index b80618e..17235e9 100644 (file)
@@ -86,10 +86,8 @@ pmu_load(struct nv50_devinit *init, u8 type, bool post,
        struct nvkm_bios *bios = subdev->device->bios;
        struct nvbios_pmuR pmu;
 
-       if (!nvbios_pmuRm(bios, type, &pmu)) {
-               nvkm_error(subdev, "VBIOS PMU fuc %02x not found\n", type);
+       if (!nvbios_pmuRm(bios, type, &pmu))
                return -EINVAL;
-       }
 
        if (!post)
                return 0;
@@ -124,29 +122,30 @@ gm200_devinit_post(struct nvkm_devinit *base, bool post)
                return -EINVAL;
        }
 
+       /* Upload DEVINIT application from VBIOS onto PMU. */
        ret = pmu_load(init, 0x04, post, &exec, &args);
-       if (ret)
+       if (ret) {
+               nvkm_error(subdev, "VBIOS PMU/DEVINIT not found\n");
                return ret;
+       }
 
-       /* upload first chunk of init data */
+       /* Upload tables required by opcodes in boot scripts. */
        if (post) {
-               // devinit tables
                u32 pmu = pmu_args(init, args + 0x08, 0x08);
                u32 img = nvbios_rd16(bios, bit_I.offset + 0x14);
                u32 len = nvbios_rd16(bios, bit_I.offset + 0x16);
                pmu_data(init, pmu, img, len);
        }
 
-       /* upload second chunk of init data */
+       /* Upload boot scripts. */
        if (post) {
-               // devinit boot scripts
                u32 pmu = pmu_args(init, args + 0x08, 0x10);
                u32 img = nvbios_rd16(bios, bit_I.offset + 0x18);
                u32 len = nvbios_rd16(bios, bit_I.offset + 0x1a);
                pmu_data(init, pmu, img, len);
        }
 
-       /* execute init tables */
+       /* Execute DEVINIT. */
        if (post) {
                nvkm_wr32(device, 0x10a040, 0x00005000);
                pmu_exec(init, exec);
@@ -157,8 +156,11 @@ gm200_devinit_post(struct nvkm_devinit *base, bool post)
                        return -ETIMEDOUT;
        }
 
-       /* load and execute some other ucode image (bios therm?) */
-       return pmu_load(init, 0x01, post, NULL, NULL);
+       /* Optional: Execute PRE_OS application on PMU, which should at
+        * least take care of fans until a full PMU has been loaded.
+        */
+       pmu_load(init, 0x01, post, NULL, NULL);
+       return 0;
 }
 
 static const struct nvkm_devinit_func
index de269eb..7459def 100644 (file)
@@ -1423,7 +1423,7 @@ nvkm_vmm_get(struct nvkm_vmm *vmm, u8 page, u64 size, struct nvkm_vma **pvma)
 void
 nvkm_vmm_part(struct nvkm_vmm *vmm, struct nvkm_memory *inst)
 {
-       if (vmm->func->part && inst) {
+       if (inst && vmm->func->part) {
                mutex_lock(&vmm->mutex);
                vmm->func->part(vmm, inst);
                mutex_unlock(&vmm->mutex);
index 25b7bd5..1cb4199 100644 (file)
@@ -335,7 +335,8 @@ static int apple_input_mapping(struct hid_device *hdev, struct hid_input *hi,
                struct hid_field *field, struct hid_usage *usage,
                unsigned long **bit, int *max)
 {
-       if (usage->hid == (HID_UP_CUSTOM | 0x0003)) {
+       if (usage->hid == (HID_UP_CUSTOM | 0x0003) ||
+                       usage->hid == (HID_UP_MSVENDOR | 0x0003)) {
                /* The fn key on Apple USB keyboards */
                set_bit(EV_REP, hi->input->evbit);
                hid_map_usage_clear(hi, usage, bit, max, EV_KEY, KEY_FN);
@@ -472,6 +473,12 @@ static const struct hid_device_id apple_devices[] = {
                .driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN },
        { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_ANSI),
                .driver_data = APPLE_HAS_FN },
+       { HID_BLUETOOTH_DEVICE(BT_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_ANSI),
+               .driver_data = APPLE_HAS_FN },
+       { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_NUMPAD_ANSI),
+               .driver_data = APPLE_HAS_FN },
+       { HID_BLUETOOTH_DEVICE(BT_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_NUMPAD_ANSI),
+               .driver_data = APPLE_HAS_FN },
        { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING_ANSI),
                .driver_data = APPLE_HAS_FN },
        { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING_ISO),
index 3da354a..44564f6 100644 (file)
@@ -1000,7 +1000,7 @@ int hid_open_report(struct hid_device *device)
        parser = vzalloc(sizeof(struct hid_parser));
        if (!parser) {
                ret = -ENOMEM;
-               goto err;
+               goto alloc_err;
        }
 
        parser->device = device;
@@ -1039,6 +1039,7 @@ int hid_open_report(struct hid_device *device)
                                hid_err(device, "unbalanced delimiter at end of report description\n");
                                goto err;
                        }
+                       kfree(parser->collection_stack);
                        vfree(parser);
                        device->status |= HID_STAT_PARSED;
                        return 0;
@@ -1047,6 +1048,8 @@ int hid_open_report(struct hid_device *device)
 
        hid_err(device, "item fetching failed at offset %d\n", (int)(end - start));
 err:
+       kfree(parser->collection_stack);
+alloc_err:
        vfree(parser);
        hid_close_report(device);
        return ret;
index 79bdf0c..5146ee0 100644 (file)
@@ -88,6 +88,7 @@
 #define USB_DEVICE_ID_ANTON_TOUCH_PAD  0x3101
 
 #define USB_VENDOR_ID_APPLE            0x05ac
+#define BT_VENDOR_ID_APPLE             0x004c
 #define USB_DEVICE_ID_APPLE_MIGHTYMOUSE        0x0304
 #define USB_DEVICE_ID_APPLE_MAGICMOUSE 0x030d
 #define USB_DEVICE_ID_APPLE_MAGICTRACKPAD      0x030e
 #define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ISO   0x0256
 #define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_JIS   0x0257
 #define USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_ANSI   0x0267
+#define USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_NUMPAD_ANSI   0x026c
 #define USB_DEVICE_ID_APPLE_WELLSPRING8_ANSI   0x0290
 #define USB_DEVICE_ID_APPLE_WELLSPRING8_ISO    0x0291
 #define USB_DEVICE_ID_APPLE_WELLSPRING8_JIS    0x0292
 #define I2C_VENDOR_ID_HANTICK          0x0911
 #define I2C_PRODUCT_ID_HANTICK_5288    0x5288
 
-#define I2C_VENDOR_ID_RAYD             0x2386
-#define I2C_PRODUCT_ID_RAYD_3118       0x3118
-
 #define USB_VENDOR_ID_HANWANG          0x0b57
 #define USB_DEVICE_ID_HANWANG_TABLET_FIRST     0x5000
 #define USB_DEVICE_ID_HANWANG_TABLET_LAST      0x8fff
 #define USB_DEVICE_ID_SAITEK_RUMBLEPAD 0xff17
 #define USB_DEVICE_ID_SAITEK_PS1000    0x0621
 #define USB_DEVICE_ID_SAITEK_RAT7_OLD  0x0ccb
+#define USB_DEVICE_ID_SAITEK_RAT7_CONTAGION    0x0ccd
 #define USB_DEVICE_ID_SAITEK_RAT7      0x0cd7
 #define USB_DEVICE_ID_SAITEK_RAT9      0x0cfa
 #define USB_DEVICE_ID_SAITEK_MMO7      0x0cd0
index 4e94ea3..a481eaf 100644 (file)
@@ -1582,6 +1582,7 @@ static struct hid_input *hidinput_allocate(struct hid_device *hid,
        input_dev->dev.parent = &hid->dev;
 
        hidinput->input = input_dev;
+       hidinput->application = application;
        list_add_tail(&hidinput->list, &hid->inputs);
 
        INIT_LIST_HEAD(&hidinput->reports);
@@ -1677,8 +1678,7 @@ static struct hid_input *hidinput_match_application(struct hid_report *report)
        struct hid_input *hidinput;
 
        list_for_each_entry(hidinput, &hid->inputs, list) {
-               if (hidinput->report &&
-                   hidinput->report->application == report->application)
+               if (hidinput->application == report->application)
                        return hidinput;
        }
 
@@ -1815,6 +1815,7 @@ void hidinput_disconnect(struct hid_device *hid)
                        input_unregister_device(hidinput->input);
                else
                        input_free_device(hidinput->input);
+               kfree(hidinput->name);
                kfree(hidinput);
        }
 
index 40fbb7c..da954f3 100644 (file)
@@ -1375,7 +1375,8 @@ static bool mt_need_to_apply_feature(struct hid_device *hdev,
                                     struct hid_usage *usage,
                                     enum latency_mode latency,
                                     bool surface_switch,
-                                    bool button_switch)
+                                    bool button_switch,
+                                    bool *inputmode_found)
 {
        struct mt_device *td = hid_get_drvdata(hdev);
        struct mt_class *cls = &td->mtclass;
@@ -1387,6 +1388,14 @@ static bool mt_need_to_apply_feature(struct hid_device *hdev,
 
        switch (usage->hid) {
        case HID_DG_INPUTMODE:
+               /*
+                * Some elan panels wrongly declare 2 input mode features,
+                * and silently ignore when we set the value in the second
+                * field. Skip the second feature and hope for the best.
+                */
+               if (*inputmode_found)
+                       return false;
+
                if (cls->quirks & MT_QUIRK_FORCE_GET_FEATURE) {
                        report_len = hid_report_len(report);
                        buf = hid_alloc_report_buf(report, GFP_KERNEL);
@@ -1402,6 +1411,7 @@ static bool mt_need_to_apply_feature(struct hid_device *hdev,
                }
 
                field->value[index] = td->inputmode_value;
+               *inputmode_found = true;
                return true;
 
        case HID_DG_CONTACTMAX:
@@ -1439,6 +1449,7 @@ static void mt_set_modes(struct hid_device *hdev, enum latency_mode latency,
        struct hid_usage *usage;
        int i, j;
        bool update_report;
+       bool inputmode_found = false;
 
        rep_enum = &hdev->report_enum[HID_FEATURE_REPORT];
        list_for_each_entry(rep, &rep_enum->report_list, list) {
@@ -1457,7 +1468,8 @@ static void mt_set_modes(struct hid_device *hdev, enum latency_mode latency,
                                                             usage,
                                                             latency,
                                                             surface_switch,
-                                                            button_switch))
+                                                            button_switch,
+                                                            &inputmode_found))
                                        update_report = true;
                        }
                }
@@ -1685,6 +1697,9 @@ static int mt_probe(struct hid_device *hdev, const struct hid_device_id *id)
         */
        hdev->quirks |= HID_QUIRK_INPUT_PER_APP;
 
+       if (id->group != HID_GROUP_MULTITOUCH_WIN_8)
+               hdev->quirks |= HID_QUIRK_MULTI_INPUT;
+
        timer_setup(&td->release_timer, mt_expired_timeout, 0);
 
        ret = hid_parse(hdev);
index 39e6426..683861f 100644 (file)
@@ -183,6 +183,8 @@ static const struct hid_device_id saitek_devices[] = {
                .driver_data = SAITEK_RELEASE_MODE_RAT7 },
        { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_RAT7),
                .driver_data = SAITEK_RELEASE_MODE_RAT7 },
+       { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_RAT7_CONTAGION),
+               .driver_data = SAITEK_RELEASE_MODE_RAT7 },
        { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_RAT9),
                .driver_data = SAITEK_RELEASE_MODE_RAT7 },
        { HID_USB_DEVICE(USB_VENDOR_ID_MADCATZ, USB_DEVICE_ID_MADCATZ_RAT9),
index 50af72b..2b63487 100644 (file)
@@ -579,6 +579,28 @@ void sensor_hub_device_close(struct hid_sensor_hub_device *hsdev)
 }
 EXPORT_SYMBOL_GPL(sensor_hub_device_close);
 
+static __u8 *sensor_hub_report_fixup(struct hid_device *hdev, __u8 *rdesc,
+               unsigned int *rsize)
+{
+       /*
+        * Checks if the report descriptor of Thinkpad Helix 2 has a logical
+        * minimum for magnetic flux axis greater than the maximum.
+        */
+       if (hdev->product == USB_DEVICE_ID_TEXAS_INSTRUMENTS_LENOVO_YOGA &&
+               *rsize == 2558 && rdesc[913] == 0x17 && rdesc[914] == 0x40 &&
+               rdesc[915] == 0x81 && rdesc[916] == 0x08 &&
+               rdesc[917] == 0x00 && rdesc[918] == 0x27 &&
+               rdesc[921] == 0x07 && rdesc[922] == 0x00) {
+               /* Sets negative logical minimum for mag x, y and z */
+               rdesc[914] = rdesc[935] = rdesc[956] = 0xc0;
+               rdesc[915] = rdesc[936] = rdesc[957] = 0x7e;
+               rdesc[916] = rdesc[937] = rdesc[958] = 0xf7;
+               rdesc[917] = rdesc[938] = rdesc[959] = 0xff;
+       }
+
+       return rdesc;
+}
+
 static int sensor_hub_probe(struct hid_device *hdev,
                                const struct hid_device_id *id)
 {
@@ -743,6 +765,7 @@ static struct hid_driver sensor_hub_driver = {
        .probe = sensor_hub_probe,
        .remove = sensor_hub_remove,
        .raw_event = sensor_hub_raw_event,
+       .report_fixup = sensor_hub_report_fixup,
 #ifdef CONFIG_PM
        .suspend = sensor_hub_suspend,
        .resume = sensor_hub_resume,
index 2ce194a..f307665 100644 (file)
@@ -170,8 +170,6 @@ static const struct i2c_hid_quirks {
                I2C_HID_QUIRK_SET_PWR_WAKEUP_DEV },
        { I2C_VENDOR_ID_HANTICK, I2C_PRODUCT_ID_HANTICK_5288,
                I2C_HID_QUIRK_NO_IRQ_AFTER_RESET },
-       { I2C_VENDOR_ID_RAYD, I2C_PRODUCT_ID_RAYD_3118,
-               I2C_HID_QUIRK_RESEND_REPORT_DESCR },
        { USB_VENDOR_ID_SIS_TOUCH, USB_DEVICE_ID_SIS10FB_TOUCH,
                I2C_HID_QUIRK_RESEND_REPORT_DESCR },
        { 0, 0 }
@@ -1235,11 +1233,16 @@ static int i2c_hid_resume(struct device *dev)
        pm_runtime_enable(dev);
 
        enable_irq(client->irq);
-       ret = i2c_hid_hwreset(client);
+
+       /* Instead of resetting device, simply powers the device on. This
+        * solves "incomplete reports" on Raydium devices 2386:3118 and
+        * 2386:4B33
+        */
+       ret = i2c_hid_set_power(client, I2C_HID_PWR_ON);
        if (ret)
                return ret;
 
-       /* RAYDIUM device (2386:3118) need to re-send report descr cmd
+       /* Some devices need to re-send report descr cmd
         * after resume, after this it will be back normal.
         * otherwise it issues too many incomplete reports.
         */
index 97869b7..da13371 100644 (file)
@@ -29,6 +29,7 @@
 #define CNL_Ax_DEVICE_ID       0x9DFC
 #define GLK_Ax_DEVICE_ID       0x31A2
 #define CNL_H_DEVICE_ID                0xA37C
+#define SPT_H_DEVICE_ID                0xA135
 
 #define        REVISION_ID_CHT_A0      0x6
 #define        REVISION_ID_CHT_Ax_SI   0x0
index 050f987..a1125a5 100644 (file)
@@ -38,6 +38,7 @@ static const struct pci_device_id ish_pci_tbl[] = {
        {PCI_DEVICE(PCI_VENDOR_ID_INTEL, CNL_Ax_DEVICE_ID)},
        {PCI_DEVICE(PCI_VENDOR_ID_INTEL, GLK_Ax_DEVICE_ID)},
        {PCI_DEVICE(PCI_VENDOR_ID_INTEL, CNL_H_DEVICE_ID)},
+       {PCI_DEVICE(PCI_VENDOR_ID_INTEL, SPT_H_DEVICE_ID)},
        {0, }
 };
 MODULE_DEVICE_TABLE(pci, ish_pci_tbl);
index b1b548a..c71cc85 100644 (file)
@@ -1291,6 +1291,9 @@ static ssize_t vmbus_chan_attr_show(struct kobject *kobj,
        if (!attribute->show)
                return -EIO;
 
+       if (chan->state != CHANNEL_OPENED_STATE)
+               return -EINVAL;
+
        return attribute->show(chan, buf);
 }
 
index fb4e4a6..be5ba46 100644 (file)
@@ -164,3 +164,4 @@ module_platform_driver(rpi_hwmon_driver);
 MODULE_AUTHOR("Stefan Wahren <stefan.wahren@i2se.com>");
 MODULE_DESCRIPTION("Raspberry Pi voltage sensor driver");
 MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:raspberrypi-hwmon");
index 04b60a3..c91e145 100644 (file)
 
 #define SBREG_BAR              0x10
 #define SBREG_SMBCTRL          0xc6000c
+#define SBREG_SMBCTRL_DNV      0xcf000c
 
 /* Host status bits for SMBPCISTS */
 #define SMBPCISTS_INTS         BIT(3)
@@ -1399,7 +1400,11 @@ static void i801_add_tco(struct i801_priv *priv)
        spin_unlock(&p2sb_spinlock);
 
        res = &tco_res[ICH_RES_MEM_OFF];
-       res->start = (resource_size_t)base64_addr + SBREG_SMBCTRL;
+       if (pci_dev->device == PCI_DEVICE_ID_INTEL_DNV_SMBUS)
+               res->start = (resource_size_t)base64_addr + SBREG_SMBCTRL_DNV;
+       else
+               res->start = (resource_size_t)base64_addr + SBREG_SMBCTRL;
+
        res->end = res->start + 3;
        res->flags = IORESOURCE_MEM;
 
index 6d975f5..06c4c76 100644 (file)
@@ -538,7 +538,6 @@ static const struct i2c_algorithm lpi2c_imx_algo = {
 
 static const struct of_device_id lpi2c_imx_of_match[] = {
        { .compatible = "fsl,imx7ulp-lpi2c" },
-       { .compatible = "fsl,imx8dv-lpi2c" },
        { },
 };
 MODULE_DEVICE_TABLE(of, lpi2c_imx_of_match);
index 9918bdd..a403e85 100644 (file)
@@ -401,11 +401,8 @@ static int uniphier_fi2c_master_xfer(struct i2c_adapter *adap,
                return ret;
 
        for (msg = msgs; msg < emsg; msg++) {
-               /* If next message is read, skip the stop condition */
-               bool stop = !(msg + 1 < emsg && msg[1].flags & I2C_M_RD);
-               /* but, force it if I2C_M_STOP is set */
-               if (msg->flags & I2C_M_STOP)
-                       stop = true;
+               /* Emit STOP if it is the last message or I2C_M_STOP is set. */
+               bool stop = (msg + 1 == emsg) || (msg->flags & I2C_M_STOP);
 
                ret = uniphier_fi2c_master_xfer_one(adap, msg, stop);
                if (ret)
index bb181b0..454f914 100644 (file)
@@ -248,11 +248,8 @@ static int uniphier_i2c_master_xfer(struct i2c_adapter *adap,
                return ret;
 
        for (msg = msgs; msg < emsg; msg++) {
-               /* If next message is read, skip the stop condition */
-               bool stop = !(msg + 1 < emsg && msg[1].flags & I2C_M_RD);
-               /* but, force it if I2C_M_STOP is set */
-               if (msg->flags & I2C_M_STOP)
-                       stop = true;
+               /* Emit STOP if it is the last message or I2C_M_STOP is set. */
+               bool stop = (msg + 1 == emsg) || (msg->flags & I2C_M_STOP);
 
                ret = uniphier_i2c_master_xfer_one(adap, msg, stop);
                if (ret)
index 9a71e50..0c51c0f 100644 (file)
@@ -532,6 +532,7 @@ static void xiic_start_recv(struct xiic_i2c *i2c)
 {
        u8 rx_watermark;
        struct i2c_msg *msg = i2c->rx_msg = i2c->tx_msg;
+       unsigned long flags;
 
        /* Clear and enable Rx full interrupt. */
        xiic_irq_clr_en(i2c, XIIC_INTR_RX_FULL_MASK | XIIC_INTR_TX_ERROR_MASK);
@@ -547,6 +548,7 @@ static void xiic_start_recv(struct xiic_i2c *i2c)
                rx_watermark = IIC_RX_FIFO_DEPTH;
        xiic_setreg8(i2c, XIIC_RFD_REG_OFFSET, rx_watermark - 1);
 
+       local_irq_save(flags);
        if (!(msg->flags & I2C_M_NOSTART))
                /* write the address */
                xiic_setreg16(i2c, XIIC_DTR_REG_OFFSET,
@@ -556,6 +558,8 @@ static void xiic_start_recv(struct xiic_i2c *i2c)
 
        xiic_setreg16(i2c, XIIC_DTR_REG_OFFSET,
                msg->len | ((i2c->nmsgs == 1) ? XIIC_TX_DYN_STOP_MASK : 0));
+       local_irq_restore(flags);
+
        if (i2c->nmsgs == 1)
                /* very last, enable bus not busy as well */
                xiic_irq_clr_en(i2c, XIIC_INTR_BNB_MASK);
index 7589f2a..631360b 100644 (file)
@@ -187,12 +187,15 @@ static int st_lsm6dsx_set_fifo_odr(struct st_lsm6dsx_sensor *sensor,
 
 int st_lsm6dsx_update_watermark(struct st_lsm6dsx_sensor *sensor, u16 watermark)
 {
-       u16 fifo_watermark = ~0, cur_watermark, sip = 0, fifo_th_mask;
+       u16 fifo_watermark = ~0, cur_watermark, fifo_th_mask;
        struct st_lsm6dsx_hw *hw = sensor->hw;
        struct st_lsm6dsx_sensor *cur_sensor;
        int i, err, data;
        __le16 wdata;
 
+       if (!hw->sip)
+               return 0;
+
        for (i = 0; i < ST_LSM6DSX_ID_MAX; i++) {
                cur_sensor = iio_priv(hw->iio_devs[i]);
 
@@ -203,14 +206,10 @@ int st_lsm6dsx_update_watermark(struct st_lsm6dsx_sensor *sensor, u16 watermark)
                                                       : cur_sensor->watermark;
 
                fifo_watermark = min_t(u16, fifo_watermark, cur_watermark);
-               sip += cur_sensor->sip;
        }
 
-       if (!sip)
-               return 0;
-
-       fifo_watermark = max_t(u16, fifo_watermark, sip);
-       fifo_watermark = (fifo_watermark / sip) * sip;
+       fifo_watermark = max_t(u16, fifo_watermark, hw->sip);
+       fifo_watermark = (fifo_watermark / hw->sip) * hw->sip;
        fifo_watermark = fifo_watermark * hw->settings->fifo_ops.th_wl;
 
        err = regmap_read(hw->regmap, hw->settings->fifo_ops.fifo_th.addr + 1,
index 54e3832..c31b963 100644 (file)
@@ -258,7 +258,6 @@ static int maxim_thermocouple_remove(struct spi_device *spi)
 static const struct spi_device_id maxim_thermocouple_id[] = {
        {"max6675", MAX6675},
        {"max31855", MAX31855},
-       {"max31856", MAX31855},
        {},
 };
 MODULE_DEVICE_TABLE(spi, maxim_thermocouple_id);
index f726772..a36c949 100644 (file)
@@ -724,6 +724,7 @@ static int cma_resolve_ib_dev(struct rdma_id_private *id_priv)
        dgid = (union ib_gid *) &addr->sib_addr;
        pkey = ntohs(addr->sib_pkey);
 
+       mutex_lock(&lock);
        list_for_each_entry(cur_dev, &dev_list, list) {
                for (p = 1; p <= cur_dev->device->phys_port_cnt; ++p) {
                        if (!rdma_cap_af_ib(cur_dev->device, p))
@@ -750,18 +751,19 @@ static int cma_resolve_ib_dev(struct rdma_id_private *id_priv)
                                        cma_dev = cur_dev;
                                        sgid = gid;
                                        id_priv->id.port_num = p;
+                                       goto found;
                                }
                        }
                }
        }
-
-       if (!cma_dev)
-               return -ENODEV;
+       mutex_unlock(&lock);
+       return -ENODEV;
 
 found:
        cma_attach_to_dev(id_priv, cma_dev);
-       addr = (struct sockaddr_ib *) cma_src_addr(id_priv);
-       memcpy(&addr->sib_addr, &sgid, sizeof sgid);
+       mutex_unlock(&lock);
+       addr = (struct sockaddr_ib *)cma_src_addr(id_priv);
+       memcpy(&addr->sib_addr, &sgid, sizeof(sgid));
        cma_translate_ib(addr, &id_priv->id.route.addr.dev_addr);
        return 0;
 }
index 6eb64c6..c4118bc 100644 (file)
@@ -882,6 +882,8 @@ static int __uverbs_cleanup_ufile(struct ib_uverbs_file *ufile,
                WARN_ON(uverbs_try_lock_object(obj, UVERBS_LOOKUP_WRITE));
                if (!uverbs_destroy_uobject(obj, reason))
                        ret = 0;
+               else
+                       atomic_set(&obj->usecnt, 0);
        }
        return ret;
 }
index ec8fb28..5f437d1 100644 (file)
@@ -124,6 +124,8 @@ static DEFINE_MUTEX(mut);
 static DEFINE_IDR(ctx_idr);
 static DEFINE_IDR(multicast_idr);
 
+static const struct file_operations ucma_fops;
+
 static inline struct ucma_context *_ucma_find_context(int id,
                                                      struct ucma_file *file)
 {
@@ -1581,6 +1583,10 @@ static ssize_t ucma_migrate_id(struct ucma_file *new_file,
        f = fdget(cmd.fd);
        if (!f.file)
                return -ENOENT;
+       if (f.file->f_op != &ucma_fops) {
+               ret = -EINVAL;
+               goto file_put;
+       }
 
        /* Validate current fd and prevent destruction of id. */
        ctx = ucma_get_ctx(f.file->private_data, cmd.id);
index 823beca..6d974e2 100644 (file)
@@ -1050,7 +1050,7 @@ static void ib_uverbs_add_one(struct ib_device *device)
        uverbs_dev->num_comp_vectors = device->num_comp_vectors;
 
        if (ib_uverbs_create_uapi(device, uverbs_dev))
-               goto err;
+               goto err_uapi;
 
        cdev_init(&uverbs_dev->cdev, NULL);
        uverbs_dev->cdev.owner = THIS_MODULE;
@@ -1077,11 +1077,10 @@ static void ib_uverbs_add_one(struct ib_device *device)
 
 err_class:
        device_destroy(uverbs_class, uverbs_dev->cdev.dev);
-
 err_cdev:
        cdev_del(&uverbs_dev->cdev);
+err_uapi:
        clear_bit(devnum, dev_map);
-
 err:
        if (atomic_dec_and_test(&uverbs_dev->refcount))
                ib_uverbs_comp_dev(uverbs_dev);
index bbfb86e..bc2b9e0 100644 (file)
@@ -833,6 +833,8 @@ int bnxt_re_destroy_qp(struct ib_qp *ib_qp)
                                "Failed to destroy Shadow QP");
                        return rc;
                }
+               bnxt_qplib_free_qp_res(&rdev->qplib_res,
+                                      &rdev->qp1_sqp->qplib_qp);
                mutex_lock(&rdev->qp_lock);
                list_del(&rdev->qp1_sqp->list);
                atomic_dec(&rdev->qp_count);
index e426b99..6ad0d46 100644 (file)
@@ -196,7 +196,7 @@ static int bnxt_qplib_alloc_qp_hdr_buf(struct bnxt_qplib_res *res,
                                       struct bnxt_qplib_qp *qp)
 {
        struct bnxt_qplib_q *rq = &qp->rq;
-       struct bnxt_qplib_q *sq = &qp->rq;
+       struct bnxt_qplib_q *sq = &qp->sq;
        int rc = 0;
 
        if (qp->sq_hdr_buf_size && sq->hwq.max_elements) {
index b3203af..347fe18 100644 (file)
@@ -1685,6 +1685,12 @@ static void flush_qp(struct c4iw_qp *qhp)
        schp = to_c4iw_cq(qhp->ibqp.send_cq);
 
        if (qhp->ibqp.uobject) {
+
+               /* for user qps, qhp->wq.flushed is protected by qhp->mutex */
+               if (qhp->wq.flushed)
+                       return;
+
+               qhp->wq.flushed = 1;
                t4_set_wq_in_error(&qhp->wq, 0);
                t4_set_cq_in_error(&rchp->cq);
                spin_lock_irqsave(&rchp->comp_handler_lock, flag);
index eec8375..6c967dd 100644 (file)
@@ -893,14 +893,11 @@ static int trigger_sbr(struct hfi1_devdata *dd)
                }
 
        /*
-        * A secondary bus reset (SBR) issues a hot reset to our device.
-        * The following routine does a 1s wait after the reset is dropped
-        * per PCI Trhfa (recovery time).  PCIe 3.0 section 6.6.1 -
-        * Conventional Reset, paragraph 3, line 35 also says that a 1s
-        * delay after a reset is required.  Per spec requirements,
-        * the link is either working or not after that point.
+        * This is an end around to do an SBR during probe time. A new API needs
+        * to be implemented to have cleaner interface but this fixes the
+        * current brokenness
         */
-       return pci_reset_bus(dev);
+       return pci_bridge_secondary_bus_reset(dev->bus->self);
 }
 
 /*
index ca0f1ee..0bbeaaa 100644 (file)
@@ -517,9 +517,11 @@ static int mlx4_ib_query_device(struct ib_device *ibdev,
        props->page_size_cap       = dev->dev->caps.page_size_cap;
        props->max_qp              = dev->dev->quotas.qp;
        props->max_qp_wr           = dev->dev->caps.max_wqes - MLX4_IB_SQ_MAX_SPARE;
-       props->max_send_sge        = dev->dev->caps.max_sq_sg;
-       props->max_recv_sge        = dev->dev->caps.max_rq_sg;
-       props->max_sge_rd          = MLX4_MAX_SGE_RD;
+       props->max_send_sge =
+               min(dev->dev->caps.max_sq_sg, dev->dev->caps.max_rq_sg);
+       props->max_recv_sge =
+               min(dev->dev->caps.max_sq_sg, dev->dev->caps.max_rq_sg);
+       props->max_sge_rd = MLX4_MAX_SGE_RD;
        props->max_cq              = dev->dev->quotas.cq;
        props->max_cqe             = dev->dev->caps.max_cqes;
        props->max_mr              = dev->dev->quotas.mpt;
index ea01b8d..3d5424f 100644 (file)
@@ -1027,12 +1027,14 @@ static int ipoib_cm_rep_handler(struct ib_cm_id *cm_id,
 
        skb_queue_head_init(&skqueue);
 
+       netif_tx_lock_bh(p->dev);
        spin_lock_irq(&priv->lock);
        set_bit(IPOIB_FLAG_OPER_UP, &p->flags);
        if (p->neigh)
                while ((skb = __skb_dequeue(&p->neigh->queue)))
                        __skb_queue_tail(&skqueue, skb);
        spin_unlock_irq(&priv->lock);
+       netif_tx_unlock_bh(p->dev);
 
        while ((skb = __skb_dequeue(&skqueue))) {
                skb->dev = p->dev;
index 316a575..c2df341 100644 (file)
@@ -1439,6 +1439,7 @@ static struct irq_chip its_irq_chip = {
  * The consequence of the above is that allocation is cost is low, but
  * freeing is expensive. We assumes that freeing rarely occurs.
  */
+#define ITS_MAX_LPI_NRBITS     16 /* 64K LPIs */
 
 static DEFINE_MUTEX(lpi_range_lock);
 static LIST_HEAD(lpi_range_list);
@@ -1625,7 +1626,8 @@ static int __init its_alloc_lpi_tables(void)
 {
        phys_addr_t paddr;
 
-       lpi_id_bits = GICD_TYPER_ID_BITS(gic_rdists->gicd_typer);
+       lpi_id_bits = min_t(u32, GICD_TYPER_ID_BITS(gic_rdists->gicd_typer),
+                               ITS_MAX_LPI_NRBITS);
        gic_rdists->prop_page = its_allocate_prop_table(GFP_NOWAIT);
        if (!gic_rdists->prop_page) {
                pr_err("Failed to allocate PROPBASE\n");
index f266c81..0481223 100644 (file)
@@ -332,7 +332,7 @@ static int crypt_iv_essiv_init(struct crypt_config *cc)
        int err;
 
        desc->tfm = essiv->hash_tfm;
-       desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
+       desc->flags = 0;
 
        err = crypto_shash_digest(desc, cc->key, cc->key_size, essiv->salt);
        shash_desc_zero(desc);
@@ -606,7 +606,7 @@ static int crypt_iv_lmk_one(struct crypt_config *cc, u8 *iv,
        int i, r;
 
        desc->tfm = lmk->hash_tfm;
-       desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
+       desc->flags = 0;
 
        r = crypto_shash_init(desc);
        if (r)
@@ -768,7 +768,7 @@ static int crypt_iv_tcw_whitening(struct crypt_config *cc,
 
        /* calculate crc32 for every 32bit part and xor it */
        desc->tfm = tcw->crc32_tfm;
-       desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
+       desc->flags = 0;
        for (i = 0; i < 4; i++) {
                r = crypto_shash_init(desc);
                if (r)
@@ -1251,7 +1251,7 @@ static void crypt_alloc_req_skcipher(struct crypt_config *cc,
         * requests if driver request queue is full.
         */
        skcipher_request_set_callback(ctx->r.req,
-           CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
+           CRYPTO_TFM_REQ_MAY_BACKLOG,
            kcryptd_async_done, dmreq_of_req(cc, ctx->r.req));
 }
 
@@ -1268,7 +1268,7 @@ static void crypt_alloc_req_aead(struct crypt_config *cc,
         * requests if driver request queue is full.
         */
        aead_request_set_callback(ctx->r.req_aead,
-           CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
+           CRYPTO_TFM_REQ_MAY_BACKLOG,
            kcryptd_async_done, dmreq_of_req(cc, ctx->r.req_aead));
 }
 
index 3788785..89ccb64 100644 (file)
@@ -532,7 +532,7 @@ static void section_mac(struct dm_integrity_c *ic, unsigned section, __u8 result
        unsigned j, size;
 
        desc->tfm = ic->journal_mac;
-       desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
+       desc->flags = 0;
 
        r = crypto_shash_init(desc);
        if (unlikely(r)) {
@@ -676,7 +676,7 @@ static void complete_journal_encrypt(struct crypto_async_request *req, int err)
 static bool do_crypt(bool encrypt, struct skcipher_request *req, struct journal_completion *comp)
 {
        int r;
-       skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
+       skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
                                      complete_journal_encrypt, comp);
        if (likely(encrypt))
                r = crypto_skcipher_encrypt(req);
index cae689d..5ba067f 100644 (file)
@@ -1,6 +1,6 @@
 /*
  * Copyright (C) 2010-2011 Neil Brown
- * Copyright (C) 2010-2017 Red Hat, Inc. All rights reserved.
+ * Copyright (C) 2010-2018 Red Hat, Inc. All rights reserved.
  *
  * This file is released under the GPL.
  */
@@ -29,9 +29,6 @@
  */
 #define        MIN_RAID456_JOURNAL_SPACE (4*2048)
 
-/* Global list of all raid sets */
-static LIST_HEAD(raid_sets);
-
 static bool devices_handle_discard_safely = false;
 
 /*
@@ -227,7 +224,6 @@ struct rs_layout {
 
 struct raid_set {
        struct dm_target *ti;
-       struct list_head list;
 
        uint32_t stripe_cache_entries;
        unsigned long ctr_flags;
@@ -273,19 +269,6 @@ static void rs_config_restore(struct raid_set *rs, struct rs_layout *l)
        mddev->new_chunk_sectors = l->new_chunk_sectors;
 }
 
-/* Find any raid_set in active slot for @rs on global list */
-static struct raid_set *rs_find_active(struct raid_set *rs)
-{
-       struct raid_set *r;
-       struct mapped_device *md = dm_table_get_md(rs->ti->table);
-
-       list_for_each_entry(r, &raid_sets, list)
-               if (r != rs && dm_table_get_md(r->ti->table) == md)
-                       return r;
-
-       return NULL;
-}
-
 /* raid10 algorithms (i.e. formats) */
 #define        ALGORITHM_RAID10_DEFAULT        0
 #define        ALGORITHM_RAID10_NEAR           1
@@ -764,7 +747,6 @@ static struct raid_set *raid_set_alloc(struct dm_target *ti, struct raid_type *r
 
        mddev_init(&rs->md);
 
-       INIT_LIST_HEAD(&rs->list);
        rs->raid_disks = raid_devs;
        rs->delta_disks = 0;
 
@@ -782,9 +764,6 @@ static struct raid_set *raid_set_alloc(struct dm_target *ti, struct raid_type *r
        for (i = 0; i < raid_devs; i++)
                md_rdev_init(&rs->dev[i].rdev);
 
-       /* Add @rs to global list. */
-       list_add(&rs->list, &raid_sets);
-
        /*
         * Remaining items to be initialized by further RAID params:
         *  rs->md.persistent
@@ -797,7 +776,7 @@ static struct raid_set *raid_set_alloc(struct dm_target *ti, struct raid_type *r
        return rs;
 }
 
-/* Free all @rs allocations and remove it from global list. */
+/* Free all @rs allocations */
 static void raid_set_free(struct raid_set *rs)
 {
        int i;
@@ -815,8 +794,6 @@ static void raid_set_free(struct raid_set *rs)
                        dm_put_device(rs->ti, rs->dev[i].data_dev);
        }
 
-       list_del(&rs->list);
-
        kfree(rs);
 }
 
@@ -2649,7 +2626,7 @@ static int rs_adjust_data_offsets(struct raid_set *rs)
                return 0;
        }
 
-       /* HM FIXME: get InSync raid_dev? */
+       /* HM FIXME: get In_Sync raid_dev? */
        rdev = &rs->dev[0].rdev;
 
        if (rs->delta_disks < 0) {
@@ -3149,6 +3126,11 @@ static int raid_ctr(struct dm_target *ti, unsigned int argc, char **argv)
                set_bit(RT_FLAG_UPDATE_SBS, &rs->runtime_flags);
                rs_set_new(rs);
        } else if (rs_is_recovering(rs)) {
+               /* Rebuild particular devices */
+               if (test_bit(__CTR_FLAG_REBUILD, &rs->ctr_flags)) {
+                       set_bit(RT_FLAG_UPDATE_SBS, &rs->runtime_flags);
+                       rs_setup_recovery(rs, MaxSector);
+               }
                /* A recovering raid set may be resized */
                ; /* skip setup rs */
        } else if (rs_is_reshaping(rs)) {
@@ -3242,6 +3224,8 @@ static int raid_ctr(struct dm_target *ti, unsigned int argc, char **argv)
        /* Start raid set read-only and assumed clean to change in raid_resume() */
        rs->md.ro = 1;
        rs->md.in_sync = 1;
+
+       /* Keep array frozen */
        set_bit(MD_RECOVERY_FROZEN, &rs->md.recovery);
 
        /* Has to be held on running the array */
@@ -3265,7 +3249,7 @@ static int raid_ctr(struct dm_target *ti, unsigned int argc, char **argv)
        rs->callbacks.congested_fn = raid_is_congested;
        dm_table_add_target_callbacks(ti->table, &rs->callbacks);
 
-       /* If raid4/5/6 journal mode explictely requested (only possible with journal dev) -> set it */
+       /* If raid4/5/6 journal mode explicitly requested (only possible with journal dev) -> set it */
        if (test_bit(__CTR_FLAG_JOURNAL_MODE, &rs->ctr_flags)) {
                r = r5c_journal_mode_set(&rs->md, rs->journal_dev.mode);
                if (r) {
@@ -3350,32 +3334,53 @@ static int raid_map(struct dm_target *ti, struct bio *bio)
        return DM_MAPIO_SUBMITTED;
 }
 
-/* Return string describing the current sync action of @mddev */
-static const char *decipher_sync_action(struct mddev *mddev, unsigned long recovery)
+/* Return sync state string for @state */
+enum sync_state { st_frozen, st_reshape, st_resync, st_check, st_repair, st_recover, st_idle };
+static const char *sync_str(enum sync_state state)
+{
+       /* Has to be in above sync_state order! */
+       static const char *sync_strs[] = {
+               "frozen",
+               "reshape",
+               "resync",
+               "check",
+               "repair",
+               "recover",
+               "idle"
+       };
+
+       return __within_range(state, 0, ARRAY_SIZE(sync_strs) - 1) ? sync_strs[state] : "undef";
+};
+
+/* Return enum sync_state for @mddev derived from @recovery flags */
+static const enum sync_state decipher_sync_action(struct mddev *mddev, unsigned long recovery)
 {
        if (test_bit(MD_RECOVERY_FROZEN, &recovery))
-               return "frozen";
+               return st_frozen;
 
-       /* The MD sync thread can be done with io but still be running */
+       /* The MD sync thread can be done with io or be interrupted but still be running */
        if (!test_bit(MD_RECOVERY_DONE, &recovery) &&
            (test_bit(MD_RECOVERY_RUNNING, &recovery) ||
             (!mddev->ro && test_bit(MD_RECOVERY_NEEDED, &recovery)))) {
                if (test_bit(MD_RECOVERY_RESHAPE, &recovery))
-                       return "reshape";
+                       return st_reshape;
 
                if (test_bit(MD_RECOVERY_SYNC, &recovery)) {
                        if (!test_bit(MD_RECOVERY_REQUESTED, &recovery))
-                               return "resync";
-                       else if (test_bit(MD_RECOVERY_CHECK, &recovery))
-                               return "check";
-                       return "repair";
+                               return st_resync;
+                       if (test_bit(MD_RECOVERY_CHECK, &recovery))
+                               return st_check;
+                       return st_repair;
                }
 
                if (test_bit(MD_RECOVERY_RECOVER, &recovery))
-                       return "recover";
+                       return st_recover;
+
+               if (mddev->reshape_position != MaxSector)
+                       return st_reshape;
        }
 
-       return "idle";
+       return st_idle;
 }
 
 /*
@@ -3409,6 +3414,7 @@ static sector_t rs_get_progress(struct raid_set *rs, unsigned long recovery,
                                sector_t resync_max_sectors)
 {
        sector_t r;
+       enum sync_state state;
        struct mddev *mddev = &rs->md;
 
        clear_bit(RT_FLAG_RS_IN_SYNC, &rs->runtime_flags);
@@ -3419,20 +3425,14 @@ static sector_t rs_get_progress(struct raid_set *rs, unsigned long recovery,
                set_bit(RT_FLAG_RS_IN_SYNC, &rs->runtime_flags);
 
        } else {
-               if (!test_bit(__CTR_FLAG_NOSYNC, &rs->ctr_flags) &&
-                   !test_bit(MD_RECOVERY_INTR, &recovery) &&
-                   (test_bit(MD_RECOVERY_NEEDED, &recovery) ||
-                    test_bit(MD_RECOVERY_RESHAPE, &recovery) ||
-                    test_bit(MD_RECOVERY_RUNNING, &recovery)))
-                       r = mddev->curr_resync_completed;
-               else
+               state = decipher_sync_action(mddev, recovery);
+
+               if (state == st_idle && !test_bit(MD_RECOVERY_INTR, &recovery))
                        r = mddev->recovery_cp;
+               else
+                       r = mddev->curr_resync_completed;
 
-               if (r >= resync_max_sectors &&
-                   (!test_bit(MD_RECOVERY_REQUESTED, &recovery) ||
-                    (!test_bit(MD_RECOVERY_FROZEN, &recovery) &&
-                     !test_bit(MD_RECOVERY_NEEDED, &recovery) &&
-                     !test_bit(MD_RECOVERY_RUNNING, &recovery)))) {
+               if (state == st_idle && r >= resync_max_sectors) {
                        /*
                         * Sync complete.
                         */
@@ -3440,24 +3440,20 @@ static sector_t rs_get_progress(struct raid_set *rs, unsigned long recovery,
                        if (test_bit(MD_RECOVERY_RECOVER, &recovery))
                                set_bit(RT_FLAG_RS_IN_SYNC, &rs->runtime_flags);
 
-               } else if (test_bit(MD_RECOVERY_RECOVER, &recovery)) {
+               } else if (state == st_recover)
                        /*
                         * In case we are recovering, the array is not in sync
                         * and health chars should show the recovering legs.
                         */
                        ;
-
-               } else if (test_bit(MD_RECOVERY_SYNC, &recovery) &&
-                          !test_bit(MD_RECOVERY_REQUESTED, &recovery)) {
+               else if (state == st_resync)
                        /*
                         * If "resync" is occurring, the raid set
                         * is or may be out of sync hence the health
                         * characters shall be 'a'.
                         */
                        set_bit(RT_FLAG_RS_RESYNCING, &rs->runtime_flags);
-
-               } else if (test_bit(MD_RECOVERY_RESHAPE, &recovery) &&
-                          !test_bit(MD_RECOVERY_REQUESTED, &recovery)) {
+               else if (state == st_reshape)
                        /*
                         * If "reshape" is occurring, the raid set
                         * is or may be out of sync hence the health
@@ -3465,7 +3461,7 @@ static sector_t rs_get_progress(struct raid_set *rs, unsigned long recovery,
                         */
                        set_bit(RT_FLAG_RS_RESYNCING, &rs->runtime_flags);
 
-               } else if (test_bit(MD_RECOVERY_REQUESTED, &recovery)) {
+               else if (state == st_check || state == st_repair)
                        /*
                         * If "check" or "repair" is occurring, the raid set has
                         * undergone an initial sync and the health characters
@@ -3473,12 +3469,12 @@ static sector_t rs_get_progress(struct raid_set *rs, unsigned long recovery,
                         */
                        set_bit(RT_FLAG_RS_IN_SYNC, &rs->runtime_flags);
 
-               else {
+               else {
                        struct md_rdev *rdev;
 
                        /*
                         * We are idle and recovery is needed, prevent 'A' chars race
-                        * caused by components still set to in-sync by constrcuctor.
+                        * caused by components still set to in-sync by constructor.
                         */
                        if (test_bit(MD_RECOVERY_NEEDED, &recovery))
                                set_bit(RT_FLAG_RS_RESYNCING, &rs->runtime_flags);
@@ -3542,7 +3538,7 @@ static void raid_status(struct dm_target *ti, status_type_t type,
                progress = rs_get_progress(rs, recovery, resync_max_sectors);
                resync_mismatches = (mddev->last_sync_action && !strcasecmp(mddev->last_sync_action, "check")) ?
                                    atomic64_read(&mddev->resync_mismatches) : 0;
-               sync_action = decipher_sync_action(&rs->md, recovery);
+               sync_action = sync_str(decipher_sync_action(&rs->md, recovery));
 
                /* HM FIXME: do we want another state char for raid0? It shows 'D'/'A'/'-' now */
                for (i = 0; i < rs->raid_disks; i++)
@@ -3892,14 +3888,13 @@ static int rs_start_reshape(struct raid_set *rs)
        struct mddev *mddev = &rs->md;
        struct md_personality *pers = mddev->pers;
 
+       /* Don't allow the sync thread to work until the table gets reloaded. */
+       set_bit(MD_RECOVERY_WAIT, &mddev->recovery);
+
        r = rs_setup_reshape(rs);
        if (r)
                return r;
 
-       /* Need to be resumed to be able to start reshape, recovery is frozen until raid_resume() though */
-       if (test_and_clear_bit(RT_FLAG_RS_SUSPENDED, &rs->runtime_flags))
-               mddev_resume(mddev);
-
        /*
         * Check any reshape constraints enforced by the personalility
         *
@@ -3923,10 +3918,6 @@ static int rs_start_reshape(struct raid_set *rs)
                }
        }
 
-       /* Suspend because a resume will happen in raid_resume() */
-       set_bit(RT_FLAG_RS_SUSPENDED, &rs->runtime_flags);
-       mddev_suspend(mddev);
-
        /*
         * Now reshape got set up, update superblocks to
         * reflect the fact so that a table reload will
@@ -3947,29 +3938,6 @@ static int raid_preresume(struct dm_target *ti)
        if (test_and_set_bit(RT_FLAG_RS_PRERESUMED, &rs->runtime_flags))
                return 0;
 
-       if (!test_bit(__CTR_FLAG_REBUILD, &rs->ctr_flags)) {
-               struct raid_set *rs_active = rs_find_active(rs);
-
-               if (rs_active) {
-                       /*
-                        * In case no rebuilds have been requested
-                        * and an active table slot exists, copy
-                        * current resynchonization completed and
-                        * reshape position pointers across from
-                        * suspended raid set in the active slot.
-                        *
-                        * This resumes the new mapping at current
-                        * offsets to continue recover/reshape without
-                        * necessarily redoing a raid set partially or
-                        * causing data corruption in case of a reshape.
-                        */
-                       if (rs_active->md.curr_resync_completed != MaxSector)
-                               mddev->curr_resync_completed = rs_active->md.curr_resync_completed;
-                       if (rs_active->md.reshape_position != MaxSector)
-                               mddev->reshape_position = rs_active->md.reshape_position;
-               }
-       }
-
        /*
         * The superblocks need to be updated on disk if the
         * array is new or new devices got added (thus zeroed
@@ -4046,7 +4014,7 @@ static void raid_resume(struct dm_target *ti)
 
 static struct target_type raid_target = {
        .name = "raid",
-       .version = {1, 13, 2},
+       .version = {1, 14, 0},
        .module = THIS_MODULE,
        .ctr = raid_ctr,
        .dtr = raid_dtr,
index 7214202..74f6770 100644 (file)
@@ -188,6 +188,12 @@ struct dm_pool_metadata {
        unsigned long flags;
        sector_t data_block_size;
 
+       /*
+        * We reserve a section of the metadata for commit overhead.
+        * All reported space does *not* include this.
+        */
+       dm_block_t metadata_reserve;
+
        /*
         * Set if a transaction has to be aborted but the attempt to roll back
         * to the previous (good) transaction failed.  The only pool metadata
@@ -816,6 +822,22 @@ static int __commit_transaction(struct dm_pool_metadata *pmd)
        return dm_tm_commit(pmd->tm, sblock);
 }
 
+static void __set_metadata_reserve(struct dm_pool_metadata *pmd)
+{
+       int r;
+       dm_block_t total;
+       dm_block_t max_blocks = 4096; /* 16M */
+
+       r = dm_sm_get_nr_blocks(pmd->metadata_sm, &total);
+       if (r) {
+               DMERR("could not get size of metadata device");
+               pmd->metadata_reserve = max_blocks;
+       } else {
+               sector_div(total, 10);
+               pmd->metadata_reserve = min(max_blocks, total);
+       }
+}
+
 struct dm_pool_metadata *dm_pool_metadata_open(struct block_device *bdev,
                                               sector_t data_block_size,
                                               bool format_device)
@@ -849,6 +871,8 @@ struct dm_pool_metadata *dm_pool_metadata_open(struct block_device *bdev,
                return ERR_PTR(r);
        }
 
+       __set_metadata_reserve(pmd);
+
        return pmd;
 }
 
@@ -1820,6 +1844,13 @@ int dm_pool_get_free_metadata_block_count(struct dm_pool_metadata *pmd,
        down_read(&pmd->root_lock);
        if (!pmd->fail_io)
                r = dm_sm_get_nr_free(pmd->metadata_sm, result);
+
+       if (!r) {
+               if (*result < pmd->metadata_reserve)
+                       *result = 0;
+               else
+                       *result -= pmd->metadata_reserve;
+       }
        up_read(&pmd->root_lock);
 
        return r;
@@ -1932,8 +1963,11 @@ int dm_pool_resize_metadata_dev(struct dm_pool_metadata *pmd, dm_block_t new_cou
        int r = -EINVAL;
 
        down_write(&pmd->root_lock);
-       if (!pmd->fail_io)
+       if (!pmd->fail_io) {
                r = __resize_space_map(pmd->metadata_sm, new_count);
+               if (!r)
+                       __set_metadata_reserve(pmd);
+       }
        up_write(&pmd->root_lock);
 
        return r;
index 7bd60a1..aaf1ad4 100644 (file)
@@ -200,7 +200,13 @@ struct dm_thin_new_mapping;
 enum pool_mode {
        PM_WRITE,               /* metadata may be changed */
        PM_OUT_OF_DATA_SPACE,   /* metadata may be changed, though data may not be allocated */
+
+       /*
+        * Like READ_ONLY, except may switch back to WRITE on metadata resize. Reported as READ_ONLY.
+        */
+       PM_OUT_OF_METADATA_SPACE,
        PM_READ_ONLY,           /* metadata may not be changed */
+
        PM_FAIL,                /* all I/O fails */
 };
 
@@ -1371,7 +1377,35 @@ static void set_pool_mode(struct pool *pool, enum pool_mode new_mode);
 
 static void requeue_bios(struct pool *pool);
 
-static void check_for_space(struct pool *pool)
+static bool is_read_only_pool_mode(enum pool_mode mode)
+{
+       return (mode == PM_OUT_OF_METADATA_SPACE || mode == PM_READ_ONLY);
+}
+
+static bool is_read_only(struct pool *pool)
+{
+       return is_read_only_pool_mode(get_pool_mode(pool));
+}
+
+static void check_for_metadata_space(struct pool *pool)
+{
+       int r;
+       const char *ooms_reason = NULL;
+       dm_block_t nr_free;
+
+       r = dm_pool_get_free_metadata_block_count(pool->pmd, &nr_free);
+       if (r)
+               ooms_reason = "Could not get free metadata blocks";
+       else if (!nr_free)
+               ooms_reason = "No free metadata blocks";
+
+       if (ooms_reason && !is_read_only(pool)) {
+               DMERR("%s", ooms_reason);
+               set_pool_mode(pool, PM_OUT_OF_METADATA_SPACE);
+       }
+}
+
+static void check_for_data_space(struct pool *pool)
 {
        int r;
        dm_block_t nr_free;
@@ -1397,14 +1431,16 @@ static int commit(struct pool *pool)
 {
        int r;
 
-       if (get_pool_mode(pool) >= PM_READ_ONLY)
+       if (get_pool_mode(pool) >= PM_OUT_OF_METADATA_SPACE)
                return -EINVAL;
 
        r = dm_pool_commit_metadata(pool->pmd);
        if (r)
                metadata_operation_failed(pool, "dm_pool_commit_metadata", r);
-       else
-               check_for_space(pool);
+       else {
+               check_for_metadata_space(pool);
+               check_for_data_space(pool);
+       }
 
        return r;
 }
@@ -1470,6 +1506,19 @@ static int alloc_data_block(struct thin_c *tc, dm_block_t *result)
                return r;
        }
 
+       r = dm_pool_get_free_metadata_block_count(pool->pmd, &free_blocks);
+       if (r) {
+               metadata_operation_failed(pool, "dm_pool_get_free_metadata_block_count", r);
+               return r;
+       }
+
+       if (!free_blocks) {
+               /* Let's commit before we use up the metadata reserve. */
+               r = commit(pool);
+               if (r)
+                       return r;
+       }
+
        return 0;
 }
 
@@ -1501,6 +1550,7 @@ static blk_status_t should_error_unserviceable_bio(struct pool *pool)
        case PM_OUT_OF_DATA_SPACE:
                return pool->pf.error_if_no_space ? BLK_STS_NOSPC : 0;
 
+       case PM_OUT_OF_METADATA_SPACE:
        case PM_READ_ONLY:
        case PM_FAIL:
                return BLK_STS_IOERR;
@@ -2464,8 +2514,9 @@ static void set_pool_mode(struct pool *pool, enum pool_mode new_mode)
                error_retry_list(pool);
                break;
 
+       case PM_OUT_OF_METADATA_SPACE:
        case PM_READ_ONLY:
-               if (old_mode != new_mode)
+               if (!is_read_only_pool_mode(old_mode))
                        notify_of_pool_mode_change(pool, "read-only");
                dm_pool_metadata_read_only(pool->pmd);
                pool->process_bio = process_bio_read_only;
@@ -3403,6 +3454,10 @@ static int maybe_resize_metadata_dev(struct dm_target *ti, bool *need_commit)
                DMINFO("%s: growing the metadata device from %llu to %llu blocks",
                       dm_device_name(pool->pool_md),
                       sb_metadata_dev_size, metadata_dev_size);
+
+               if (get_pool_mode(pool) == PM_OUT_OF_METADATA_SPACE)
+                       set_pool_mode(pool, PM_WRITE);
+
                r = dm_pool_resize_metadata_dev(pool->pmd, metadata_dev_size);
                if (r) {
                        metadata_operation_failed(pool, "dm_pool_resize_metadata_dev", r);
@@ -3707,7 +3762,7 @@ static int pool_message(struct dm_target *ti, unsigned argc, char **argv,
        struct pool_c *pt = ti->private;
        struct pool *pool = pt->pool;
 
-       if (get_pool_mode(pool) >= PM_READ_ONLY) {
+       if (get_pool_mode(pool) >= PM_OUT_OF_METADATA_SPACE) {
                DMERR("%s: unable to service pool target messages in READ_ONLY or FAIL mode",
                      dm_device_name(pool->pool_md));
                return -EOPNOTSUPP;
@@ -3781,6 +3836,7 @@ static void pool_status(struct dm_target *ti, status_type_t type,
        dm_block_t nr_blocks_data;
        dm_block_t nr_blocks_metadata;
        dm_block_t held_root;
+       enum pool_mode mode;
        char buf[BDEVNAME_SIZE];
        char buf2[BDEVNAME_SIZE];
        struct pool_c *pt = ti->private;
@@ -3851,9 +3907,10 @@ static void pool_status(struct dm_target *ti, status_type_t type,
                else
                        DMEMIT("- ");
 
-               if (pool->pf.mode == PM_OUT_OF_DATA_SPACE)
+               mode = get_pool_mode(pool);
+               if (mode == PM_OUT_OF_DATA_SPACE)
                        DMEMIT("out_of_data_space ");
-               else if (pool->pf.mode == PM_READ_ONLY)
+               else if (is_read_only_pool_mode(mode))
                        DMEMIT("ro ");
                else
                        DMEMIT("rw ");
index 12decdb..fc65f0d 100644 (file)
@@ -99,10 +99,26 @@ static int verity_hash_update(struct dm_verity *v, struct ahash_request *req,
 {
        struct scatterlist sg;
 
-       sg_init_one(&sg, data, len);
-       ahash_request_set_crypt(req, &sg, NULL, len);
-
-       return crypto_wait_req(crypto_ahash_update(req), wait);
+       if (likely(!is_vmalloc_addr(data))) {
+               sg_init_one(&sg, data, len);
+               ahash_request_set_crypt(req, &sg, NULL, len);
+               return crypto_wait_req(crypto_ahash_update(req), wait);
+       } else {
+               do {
+                       int r;
+                       size_t this_step = min_t(size_t, len, PAGE_SIZE - offset_in_page(data));
+                       flush_kernel_vmap_range((void *)data, this_step);
+                       sg_init_table(&sg, 1);
+                       sg_set_page(&sg, vmalloc_to_page(data), this_step, offset_in_page(data));
+                       ahash_request_set_crypt(req, &sg, NULL, this_step);
+                       r = crypto_wait_req(crypto_ahash_update(req), wait);
+                       if (unlikely(r))
+                               return r;
+                       data += this_step;
+                       len -= this_step;
+               } while (len);
+               return 0;
+       }
 }
 
 /*
index 94329e0..0b2af6e 100644 (file)
@@ -1276,18 +1276,18 @@ static int resync_info_update(struct mddev *mddev, sector_t lo, sector_t hi)
 static int resync_finish(struct mddev *mddev)
 {
        struct md_cluster_info *cinfo = mddev->cluster_info;
+       int ret = 0;
 
        clear_bit(MD_RESYNCING_REMOTE, &mddev->recovery);
-       dlm_unlock_sync(cinfo->resync_lockres);
 
        /*
         * If resync thread is interrupted so we can't say resync is finished,
         * another node will launch resync thread to continue.
         */
-       if (test_bit(MD_CLOSING, &mddev->flags))
-               return 0;
-       else
-               return resync_info_update(mddev, 0, 0);
+       if (!test_bit(MD_CLOSING, &mddev->flags))
+               ret = resync_info_update(mddev, 0, 0);
+       dlm_unlock_sync(cinfo->resync_lockres);
+       return ret;
 }
 
 static int area_resyncing(struct mddev *mddev, int direction,
index 9818980..d6f7978 100644 (file)
@@ -4529,11 +4529,12 @@ static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr,
                allow_barrier(conf);
        }
 
+       raise_barrier(conf, 0);
 read_more:
        /* Now schedule reads for blocks from sector_nr to last */
        r10_bio = raid10_alloc_init_r10buf(conf);
        r10_bio->state = 0;
-       raise_barrier(conf, sectors_done != 0);
+       raise_barrier(conf, 1);
        atomic_set(&r10_bio->remaining, 0);
        r10_bio->mddev = mddev;
        r10_bio->sector = sector_nr;
@@ -4629,6 +4630,8 @@ read_more:
        if (sector_nr <= last)
                goto read_more;
 
+       lower_barrier(conf);
+
        /* Now that we have done the whole section we can
         * update reshape_progress
         */
index a001808..bfb8114 100644 (file)
@@ -46,6 +46,11 @@ extern int ppl_modify_log(struct r5conf *conf, struct md_rdev *rdev, bool add);
 extern void ppl_quiesce(struct r5conf *conf, int quiesce);
 extern int ppl_handle_flush_request(struct r5l_log *log, struct bio *bio);
 
+static inline bool raid5_has_log(struct r5conf *conf)
+{
+       return test_bit(MD_HAS_JOURNAL, &conf->mddev->flags);
+}
+
 static inline bool raid5_has_ppl(struct r5conf *conf)
 {
        return test_bit(MD_HAS_PPL, &conf->mddev->flags);
index 4ce0d75..e4e98f4 100644 (file)
@@ -733,7 +733,7 @@ static bool stripe_can_batch(struct stripe_head *sh)
 {
        struct r5conf *conf = sh->raid_conf;
 
-       if (conf->log || raid5_has_ppl(conf))
+       if (raid5_has_log(conf) || raid5_has_ppl(conf))
                return false;
        return test_bit(STRIPE_BATCH_READY, &sh->state) &&
                !test_bit(STRIPE_BITMAP_PENDING, &sh->state) &&
@@ -7737,7 +7737,7 @@ static int raid5_resize(struct mddev *mddev, sector_t sectors)
        sector_t newsize;
        struct r5conf *conf = mddev->private;
 
-       if (conf->log || raid5_has_ppl(conf))
+       if (raid5_has_log(conf) || raid5_has_ppl(conf))
                return -EINVAL;
        sectors &= ~((sector_t)conf->chunk_sectors - 1);
        newsize = raid5_size(mddev, sectors, mddev->raid_disks);
@@ -7788,7 +7788,7 @@ static int check_reshape(struct mddev *mddev)
 {
        struct r5conf *conf = mddev->private;
 
-       if (conf->log || raid5_has_ppl(conf))
+       if (raid5_has_log(conf) || raid5_has_ppl(conf))
                return -EINVAL;
        if (mddev->delta_disks == 0 &&
            mddev->new_layout == mddev->layout &&
index 31112f6..475e5b3 100644 (file)
@@ -411,7 +411,7 @@ static int aemif_probe(struct platform_device *pdev)
                        if (ret < 0)
                                goto error;
                }
-       } else {
+       } else if (pdata) {
                for (i = 0; i < pdata->num_sub_devices; i++) {
                        pdata->sub_devices[i].dev.parent = dev;
                        ret = platform_device_register(&pdata->sub_devices[i]);
index eeb7eef..38f90e1 100644 (file)
@@ -27,6 +27,7 @@
 #include <linux/err.h>
 #include <linux/delay.h>
 #include <linux/sysfs.h>
+#include <linux/nospec.h>
 
 static DEFINE_MUTEX(compass_mutex);
 
@@ -50,6 +51,7 @@ static int compass_store(struct device *dev, const char *buf, size_t count,
                return ret;
        if (val >= strlen(map))
                return -EINVAL;
+       val = array_index_nospec(val, strlen(map));
        mutex_lock(&compass_mutex);
        ret = compass_command(c, map[val]);
        mutex_unlock(&compass_mutex);
index 8f82bb9..b8aaa68 100644 (file)
@@ -2131,7 +2131,7 @@ static int ibmvmc_init_crq_queue(struct crq_server_adapter *adapter)
        retrc = plpar_hcall_norets(H_REG_CRQ,
                                   vdev->unit_address,
                                   queue->msg_token, PAGE_SIZE);
-       retrc = rc;
+       rc = retrc;
 
        if (rc == H_RESOURCE)
                rc = ibmvmc_reset_crq_queue(adapter);
index 7bba62a..fc3872f 100644 (file)
@@ -521,17 +521,15 @@ int mei_cldev_enable(struct mei_cl_device *cldev)
 
        cl = cldev->cl;
 
+       mutex_lock(&bus->device_lock);
        if (cl->state == MEI_FILE_UNINITIALIZED) {
-               mutex_lock(&bus->device_lock);
                ret = mei_cl_link(cl);
-               mutex_unlock(&bus->device_lock);
                if (ret)
-                       return ret;
+                       goto out;
                /* update pointers */
                cl->cldev = cldev;
        }
 
-       mutex_lock(&bus->device_lock);
        if (mei_cl_is_connected(cl)) {
                ret = 0;
                goto out;
@@ -616,9 +614,8 @@ int mei_cldev_disable(struct mei_cl_device *cldev)
        if (err < 0)
                dev_err(bus->dev, "Could not disconnect from the ME client\n");
 
-out:
        mei_cl_bus_module_put(cldev);
-
+out:
        /* Flush queues and remove any pending read */
        mei_cl_flush_queues(cl, NULL);
        mei_cl_unlink(cl);
@@ -876,12 +873,13 @@ static void mei_cl_bus_dev_release(struct device *dev)
 
        mei_me_cl_put(cldev->me_cl);
        mei_dev_bus_put(cldev->bus);
+       mei_cl_unlink(cldev->cl);
        kfree(cldev->cl);
        kfree(cldev);
 }
 
 static const struct device_type mei_cl_device_type = {
-       .release        = mei_cl_bus_dev_release,
+       .release = mei_cl_bus_dev_release,
 };
 
 /**
index 4ab6251..ebdcf0b 100644 (file)
@@ -1767,7 +1767,7 @@ out:
                }
        }
 
-       rets = buf->size;
+       rets = len;
 err:
        cl_dbg(dev, cl, "rpm: autosuspend\n");
        pm_runtime_mark_last_busy(dev->dev);
index 09e233d..e56f3e7 100644 (file)
@@ -1161,15 +1161,18 @@ int mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr)
 
                props_res = (struct hbm_props_response *)mei_msg;
 
-               if (props_res->status) {
+               if (props_res->status == MEI_HBMS_CLIENT_NOT_FOUND) {
+                       dev_dbg(dev->dev, "hbm: properties response: %d CLIENT_NOT_FOUND\n",
+                               props_res->me_addr);
+               } else if (props_res->status) {
                        dev_err(dev->dev, "hbm: properties response: wrong status = %d %s\n",
                                props_res->status,
                                mei_hbm_status_str(props_res->status));
                        return -EPROTO;
+               } else {
+                       mei_hbm_me_cl_add(dev, props_res);
                }
 
-               mei_hbm_me_cl_add(dev, props_res);
-
                /* request property for the next client */
                if (mei_hbm_prop_req(dev, props_res->me_addr + 1))
                        return -EIO;
index 09cb896..2cfec33 100644 (file)
@@ -517,19 +517,23 @@ static struct mmc_host_ops meson_mx_mmc_ops = {
 static struct platform_device *meson_mx_mmc_slot_pdev(struct device *parent)
 {
        struct device_node *slot_node;
+       struct platform_device *pdev;
 
        /*
         * TODO: the MMC core framework currently does not support
         * controllers with multiple slots properly. So we only register
         * the first slot for now
         */
-       slot_node = of_find_compatible_node(parent->of_node, NULL, "mmc-slot");
+       slot_node = of_get_compatible_child(parent->of_node, "mmc-slot");
        if (!slot_node) {
                dev_warn(parent, "no 'mmc-slot' sub-node found\n");
                return ERR_PTR(-ENOENT);
        }
 
-       return of_platform_device_create(slot_node, NULL, parent);
+       pdev = of_platform_device_create(slot_node, NULL, parent);
+       of_node_put(slot_node);
+
+       return pdev;
 }
 
 static int meson_mx_mmc_add_host(struct meson_mx_mmc_host *host)
index 071693e..68760d4 100644 (file)
@@ -2177,6 +2177,7 @@ static int omap_hsmmc_remove(struct platform_device *pdev)
        dma_release_channel(host->tx_chan);
        dma_release_channel(host->rx_chan);
 
+       dev_pm_clear_wake_irq(host->dev);
        pm_runtime_dont_use_autosuspend(host->dev);
        pm_runtime_put_sync(host->dev);
        pm_runtime_disable(host->dev);
index 17f12c1..7635c38 100644 (file)
@@ -459,12 +459,12 @@ static void ena_com_handle_admin_completion(struct ena_com_admin_queue *admin_qu
        cqe = &admin_queue->cq.entries[head_masked];
 
        /* Go over all the completions */
-       while ((cqe->acq_common_descriptor.flags &
+       while ((READ_ONCE(cqe->acq_common_descriptor.flags) &
                        ENA_ADMIN_ACQ_COMMON_DESC_PHASE_MASK) == phase) {
                /* Do not read the rest of the completion entry before the
                 * phase bit was validated
                 */
-               rmb();
+               dma_rmb();
                ena_com_handle_single_admin_completion(admin_queue, cqe);
 
                head_masked++;
@@ -627,17 +627,10 @@ static u32 ena_com_reg_bar_read32(struct ena_com_dev *ena_dev, u16 offset)
        mmio_read_reg |= mmio_read->seq_num &
                        ENA_REGS_MMIO_REG_READ_REQ_ID_MASK;
 
-       /* make sure read_resp->req_id get updated before the hw can write
-        * there
-        */
-       wmb();
-
-       writel_relaxed(mmio_read_reg,
-                      ena_dev->reg_bar + ENA_REGS_MMIO_REG_READ_OFF);
+       writel(mmio_read_reg, ena_dev->reg_bar + ENA_REGS_MMIO_REG_READ_OFF);
 
-       mmiowb();
        for (i = 0; i < timeout; i++) {
-               if (read_resp->req_id == mmio_read->seq_num)
+               if (READ_ONCE(read_resp->req_id) == mmio_read->seq_num)
                        break;
 
                udelay(1);
@@ -1796,8 +1789,13 @@ void ena_com_aenq_intr_handler(struct ena_com_dev *dev, void *data)
        aenq_common = &aenq_e->aenq_common_desc;
 
        /* Go over all the events */
-       while ((aenq_common->flags & ENA_ADMIN_AENQ_COMMON_DESC_PHASE_MASK) ==
-              phase) {
+       while ((READ_ONCE(aenq_common->flags) &
+               ENA_ADMIN_AENQ_COMMON_DESC_PHASE_MASK) == phase) {
+               /* Make sure the phase bit (ownership) is as expected before
+                * reading the rest of the descriptor.
+                */
+               dma_rmb();
+
                pr_debug("AENQ! Group[%x] Syndrom[%x] timestamp: [%llus]\n",
                         aenq_common->group, aenq_common->syndrom,
                         (u64)aenq_common->timestamp_low +
index ea149c1..1c682b7 100644 (file)
@@ -51,6 +51,11 @@ static inline struct ena_eth_io_rx_cdesc_base *ena_com_get_next_rx_cdesc(
        if (desc_phase != expected_phase)
                return NULL;
 
+       /* Make sure we read the rest of the descriptor after the phase bit
+        * has been read
+        */
+       dma_rmb();
+
        return cdesc;
 }
 
@@ -493,6 +498,7 @@ int ena_com_tx_comp_req_id_get(struct ena_com_io_cq *io_cq, u16 *req_id)
        if (cdesc_phase != expected_phase)
                return -EAGAIN;
 
+       dma_rmb();
        if (unlikely(cdesc->req_id >= io_cq->q_depth)) {
                pr_err("Invalid req id %d\n", cdesc->req_id);
                return -EINVAL;
index 6fdc753..2f76572 100644 (file)
@@ -107,8 +107,7 @@ static inline int ena_com_sq_empty_space(struct ena_com_io_sq *io_sq)
        return io_sq->q_depth - 1 - cnt;
 }
 
-static inline int ena_com_write_sq_doorbell(struct ena_com_io_sq *io_sq,
-                                           bool relaxed)
+static inline int ena_com_write_sq_doorbell(struct ena_com_io_sq *io_sq)
 {
        u16 tail;
 
@@ -117,10 +116,7 @@ static inline int ena_com_write_sq_doorbell(struct ena_com_io_sq *io_sq,
        pr_debug("write submission queue doorbell for queue: %d tail: %d\n",
                 io_sq->qid, tail);
 
-       if (relaxed)
-               writel_relaxed(tail, io_sq->db_addr);
-       else
-               writel(tail, io_sq->db_addr);
+       writel(tail, io_sq->db_addr);
 
        return 0;
 }
index c673ac2..29b5774 100644 (file)
@@ -76,7 +76,7 @@ MODULE_DEVICE_TABLE(pci, ena_pci_tbl);
 
 static int ena_rss_init_default(struct ena_adapter *adapter);
 static void check_for_admin_com_state(struct ena_adapter *adapter);
-static void ena_destroy_device(struct ena_adapter *adapter);
+static void ena_destroy_device(struct ena_adapter *adapter, bool graceful);
 static int ena_restore_device(struct ena_adapter *adapter);
 
 static void ena_tx_timeout(struct net_device *dev)
@@ -461,7 +461,7 @@ static inline int ena_alloc_rx_page(struct ena_ring *rx_ring,
                return -ENOMEM;
        }
 
-       dma = dma_map_page(rx_ring->dev, page, 0, PAGE_SIZE,
+       dma = dma_map_page(rx_ring->dev, page, 0, ENA_PAGE_SIZE,
                           DMA_FROM_DEVICE);
        if (unlikely(dma_mapping_error(rx_ring->dev, dma))) {
                u64_stats_update_begin(&rx_ring->syncp);
@@ -478,7 +478,7 @@ static inline int ena_alloc_rx_page(struct ena_ring *rx_ring,
        rx_info->page_offset = 0;
        ena_buf = &rx_info->ena_buf;
        ena_buf->paddr = dma;
-       ena_buf->len = PAGE_SIZE;
+       ena_buf->len = ENA_PAGE_SIZE;
 
        return 0;
 }
@@ -495,7 +495,7 @@ static void ena_free_rx_page(struct ena_ring *rx_ring,
                return;
        }
 
-       dma_unmap_page(rx_ring->dev, ena_buf->paddr, PAGE_SIZE,
+       dma_unmap_page(rx_ring->dev, ena_buf->paddr, ENA_PAGE_SIZE,
                       DMA_FROM_DEVICE);
 
        __free_page(page);
@@ -551,14 +551,9 @@ static int ena_refill_rx_bufs(struct ena_ring *rx_ring, u32 num)
                            rx_ring->qid, i, num);
        }
 
-       if (likely(i)) {
-               /* Add memory barrier to make sure the desc were written before
-                * issue a doorbell
-                */
-               wmb();
-               ena_com_write_sq_doorbell(rx_ring->ena_com_io_sq, true);
-               mmiowb();
-       }
+       /* ena_com_write_sq_doorbell issues a wmb() */
+       if (likely(i))
+               ena_com_write_sq_doorbell(rx_ring->ena_com_io_sq);
 
        rx_ring->next_to_use = next_to_use;
 
@@ -916,10 +911,10 @@ static struct sk_buff *ena_rx_skb(struct ena_ring *rx_ring,
        do {
                dma_unmap_page(rx_ring->dev,
                               dma_unmap_addr(&rx_info->ena_buf, paddr),
-                              PAGE_SIZE, DMA_FROM_DEVICE);
+                              ENA_PAGE_SIZE, DMA_FROM_DEVICE);
 
                skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_info->page,
-                               rx_info->page_offset, len, PAGE_SIZE);
+                               rx_info->page_offset, len, ENA_PAGE_SIZE);
 
                netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
                          "rx skb updated. len %d. data_len %d\n",
@@ -1900,7 +1895,7 @@ static int ena_close(struct net_device *netdev)
                          "Destroy failure, restarting device\n");
                ena_dump_stats_to_dmesg(adapter);
                /* rtnl lock already obtained in dev_ioctl() layer */
-               ena_destroy_device(adapter);
+               ena_destroy_device(adapter, false);
                ena_restore_device(adapter);
        }
 
@@ -2112,12 +2107,6 @@ static netdev_tx_t ena_start_xmit(struct sk_buff *skb, struct net_device *dev)
        tx_ring->next_to_use = ENA_TX_RING_IDX_NEXT(next_to_use,
                tx_ring->ring_size);
 
-       /* This WMB is aimed to:
-        * 1 - perform smp barrier before reading next_to_completion
-        * 2 - make sure the desc were written before trigger DB
-        */
-       wmb();
-
        /* stop the queue when no more space available, the packet can have up
         * to sgl_size + 2. one for the meta descriptor and one for header
         * (if the header is larger than tx_max_header_size).
@@ -2136,10 +2125,11 @@ static netdev_tx_t ena_start_xmit(struct sk_buff *skb, struct net_device *dev)
                 * stop the queue but meanwhile clean_tx_irq updates
                 * next_to_completion and terminates.
                 * The queue will remain stopped forever.
-                * To solve this issue this function perform rmb, check
-                * the wakeup condition and wake up the queue if needed.
+                * To solve this issue add a mb() to make sure that
+                * netif_tx_stop_queue() write is vissible before checking if
+                * there is additional space in the queue.
                 */
-               smp_rmb();
+               smp_mb();
 
                if (ena_com_sq_empty_space(tx_ring->ena_com_io_sq)
                                > ENA_TX_WAKEUP_THRESH) {
@@ -2151,8 +2141,10 @@ static netdev_tx_t ena_start_xmit(struct sk_buff *skb, struct net_device *dev)
        }
 
        if (netif_xmit_stopped(txq) || !skb->xmit_more) {
-               /* trigger the dma engine */
-               ena_com_write_sq_doorbell(tx_ring->ena_com_io_sq, false);
+               /* trigger the dma engine. ena_com_write_sq_doorbell()
+                * has a mb
+                */
+               ena_com_write_sq_doorbell(tx_ring->ena_com_io_sq);
                u64_stats_update_begin(&tx_ring->syncp);
                tx_ring->tx_stats.doorbells++;
                u64_stats_update_end(&tx_ring->syncp);
@@ -2550,12 +2542,15 @@ err_disable_msix:
        return rc;
 }
 
-static void ena_destroy_device(struct ena_adapter *adapter)
+static void ena_destroy_device(struct ena_adapter *adapter, bool graceful)
 {
        struct net_device *netdev = adapter->netdev;
        struct ena_com_dev *ena_dev = adapter->ena_dev;
        bool dev_up;
 
+       if (!test_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags))
+               return;
+
        netif_carrier_off(netdev);
 
        del_timer_sync(&adapter->timer_service);
@@ -2563,7 +2558,8 @@ static void ena_destroy_device(struct ena_adapter *adapter)
        dev_up = test_bit(ENA_FLAG_DEV_UP, &adapter->flags);
        adapter->dev_up_before_reset = dev_up;
 
-       ena_com_set_admin_running_state(ena_dev, false);
+       if (!graceful)
+               ena_com_set_admin_running_state(ena_dev, false);
 
        if (test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
                ena_down(adapter);
@@ -2591,6 +2587,7 @@ static void ena_destroy_device(struct ena_adapter *adapter)
        adapter->reset_reason = ENA_REGS_RESET_NORMAL;
 
        clear_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
+       clear_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags);
 }
 
 static int ena_restore_device(struct ena_adapter *adapter)
@@ -2635,6 +2632,7 @@ static int ena_restore_device(struct ena_adapter *adapter)
                }
        }
 
+       set_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags);
        mod_timer(&adapter->timer_service, round_jiffies(jiffies + HZ));
        dev_err(&pdev->dev, "Device reset completed successfully\n");
 
@@ -2665,7 +2663,7 @@ static void ena_fw_reset_device(struct work_struct *work)
                return;
        }
        rtnl_lock();
-       ena_destroy_device(adapter);
+       ena_destroy_device(adapter, false);
        ena_restore_device(adapter);
        rtnl_unlock();
 }
@@ -3409,30 +3407,24 @@ static void ena_remove(struct pci_dev *pdev)
                netdev->rx_cpu_rmap = NULL;
        }
 #endif /* CONFIG_RFS_ACCEL */
-
-       unregister_netdev(netdev);
        del_timer_sync(&adapter->timer_service);
 
        cancel_work_sync(&adapter->reset_task);
 
-       /* Reset the device only if the device is running. */
-       if (test_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags))
-               ena_com_dev_reset(ena_dev, adapter->reset_reason);
+       unregister_netdev(netdev);
 
-       ena_free_mgmnt_irq(adapter);
+       /* If the device is running then we want to make sure the device will be
+        * reset to make sure no more events will be issued by the device.
+        */
+       if (test_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags))
+               set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
 
-       ena_disable_msix(adapter);
+       rtnl_lock();
+       ena_destroy_device(adapter, true);
+       rtnl_unlock();
 
        free_netdev(netdev);
 
-       ena_com_mmio_reg_read_request_destroy(ena_dev);
-
-       ena_com_abort_admin_commands(ena_dev);
-
-       ena_com_wait_for_abort_completion(ena_dev);
-
-       ena_com_admin_destroy(ena_dev);
-
        ena_com_rss_destroy(ena_dev);
 
        ena_com_delete_debug_area(ena_dev);
@@ -3467,7 +3459,7 @@ static int ena_suspend(struct pci_dev *pdev,  pm_message_t state)
                        "ignoring device reset request as the device is being suspended\n");
                clear_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
        }
-       ena_destroy_device(adapter);
+       ena_destroy_device(adapter, true);
        rtnl_unlock();
        return 0;
 }
index f1972b5..7c7ae56 100644 (file)
@@ -355,4 +355,15 @@ void ena_dump_stats_to_buf(struct ena_adapter *adapter, u8 *buf);
 
 int ena_get_sset_count(struct net_device *netdev, int sset);
 
+/* The ENA buffer length field is 16 bit long. So when PAGE_SIZE == 64kB the
+ * driver passes 0.
+ * Since the max packet size the ENA handles is ~9kB, limit the buffer length
+ * to 16kB.
+ */
+#if PAGE_SIZE > SZ_16K
+#define ENA_PAGE_SIZE SZ_16K
+#else
+#define ENA_PAGE_SIZE PAGE_SIZE
+#endif
+
 #endif /* !(ENA_H) */
index 8bb1e38..cecbb1d 100644 (file)
@@ -5913,12 +5913,12 @@ unsigned int bnxt_get_max_func_cp_rings(struct bnxt *bp)
        return bp->hw_resc.max_cp_rings;
 }
 
-void bnxt_set_max_func_cp_rings(struct bnxt *bp, unsigned int max)
+unsigned int bnxt_get_max_func_cp_rings_for_en(struct bnxt *bp)
 {
-       bp->hw_resc.max_cp_rings = max;
+       return bp->hw_resc.max_cp_rings - bnxt_get_ulp_msix_num(bp);
 }
 
-unsigned int bnxt_get_max_func_irqs(struct bnxt *bp)
+static unsigned int bnxt_get_max_func_irqs(struct bnxt *bp)
 {
        struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
 
@@ -6684,6 +6684,8 @@ static int bnxt_hwrm_if_change(struct bnxt *bp, bool up)
                hw_resc->resv_rx_rings = 0;
                hw_resc->resv_hw_ring_grps = 0;
                hw_resc->resv_vnics = 0;
+               bp->tx_nr_rings = 0;
+               bp->rx_nr_rings = 0;
        }
        return rc;
 }
@@ -8629,7 +8631,8 @@ static void _bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx,
 
        *max_tx = hw_resc->max_tx_rings;
        *max_rx = hw_resc->max_rx_rings;
-       *max_cp = min_t(int, hw_resc->max_irqs, hw_resc->max_cp_rings);
+       *max_cp = min_t(int, bnxt_get_max_func_cp_rings_for_en(bp),
+                       hw_resc->max_irqs);
        *max_cp = min_t(int, *max_cp, hw_resc->max_stat_ctxs);
        max_ring_grps = hw_resc->max_hw_ring_grps;
        if (BNXT_CHIP_TYPE_NITRO_A0(bp) && BNXT_PF(bp)) {
@@ -8769,20 +8772,25 @@ static int bnxt_init_dflt_ring_mode(struct bnxt *bp)
        if (bp->tx_nr_rings)
                return 0;
 
+       bnxt_ulp_irq_stop(bp);
+       bnxt_clear_int_mode(bp);
        rc = bnxt_set_dflt_rings(bp, true);
        if (rc) {
                netdev_err(bp->dev, "Not enough rings available.\n");
-               return rc;
+               goto init_dflt_ring_err;
        }
        rc = bnxt_init_int_mode(bp);
        if (rc)
-               return rc;
+               goto init_dflt_ring_err;
+
        bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
        if (bnxt_rfs_supported(bp) && bnxt_rfs_capable(bp)) {
                bp->flags |= BNXT_FLAG_RFS;
                bp->dev->features |= NETIF_F_NTUPLE;
        }
-       return 0;
+init_dflt_ring_err:
+       bnxt_ulp_irq_restart(bp, rc);
+       return rc;
 }
 
 int bnxt_restore_pf_fw_resources(struct bnxt *bp)
index fefa011..bde3846 100644 (file)
@@ -1481,8 +1481,7 @@ int bnxt_hwrm_set_coal(struct bnxt *);
 unsigned int bnxt_get_max_func_stat_ctxs(struct bnxt *bp);
 void bnxt_set_max_func_stat_ctxs(struct bnxt *bp, unsigned int max);
 unsigned int bnxt_get_max_func_cp_rings(struct bnxt *bp);
-void bnxt_set_max_func_cp_rings(struct bnxt *bp, unsigned int max);
-unsigned int bnxt_get_max_func_irqs(struct bnxt *bp);
+unsigned int bnxt_get_max_func_cp_rings_for_en(struct bnxt *bp);
 int bnxt_get_avail_msix(struct bnxt *bp, int num);
 int bnxt_reserve_rings(struct bnxt *bp);
 void bnxt_tx_disable(struct bnxt *bp);
index 6d583bc..fcd085a 100644 (file)
@@ -451,7 +451,7 @@ static int bnxt_hwrm_func_vf_resc_cfg(struct bnxt *bp, int num_vfs)
 
        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_RESOURCE_CFG, -1, -1);
 
-       vf_cp_rings = hw_resc->max_cp_rings - bp->cp_nr_rings;
+       vf_cp_rings = bnxt_get_max_func_cp_rings_for_en(bp) - bp->cp_nr_rings;
        vf_stat_ctx = hw_resc->max_stat_ctxs - bp->num_stat_ctxs;
        if (bp->flags & BNXT_FLAG_AGG_RINGS)
                vf_rx_rings = hw_resc->max_rx_rings - bp->rx_nr_rings * 2;
@@ -549,7 +549,8 @@ static int bnxt_hwrm_func_cfg(struct bnxt *bp, int num_vfs)
        max_stat_ctxs = hw_resc->max_stat_ctxs;
 
        /* Remaining rings are distributed equally among VFs for now */
-       vf_cp_rings = (hw_resc->max_cp_rings - bp->cp_nr_rings) / num_vfs;
+       vf_cp_rings = (bnxt_get_max_func_cp_rings_for_en(bp) -
+                      bp->cp_nr_rings) / num_vfs;
        vf_stat_ctx = (max_stat_ctxs - bp->num_stat_ctxs) / num_vfs;
        if (bp->flags & BNXT_FLAG_AGG_RINGS)
                vf_rx_rings = (hw_resc->max_rx_rings - bp->rx_nr_rings * 2) /
@@ -643,7 +644,7 @@ static int bnxt_sriov_enable(struct bnxt *bp, int *num_vfs)
         */
        vfs_supported = *num_vfs;
 
-       avail_cp = hw_resc->max_cp_rings - bp->cp_nr_rings;
+       avail_cp = bnxt_get_max_func_cp_rings_for_en(bp) - bp->cp_nr_rings;
        avail_stat = hw_resc->max_stat_ctxs - bp->num_stat_ctxs;
        avail_cp = min_t(int, avail_cp, avail_stat);
 
index c37b284..beee612 100644 (file)
@@ -169,7 +169,6 @@ static int bnxt_req_msix_vecs(struct bnxt_en_dev *edev, int ulp_id,
                edev->ulp_tbl[ulp_id].msix_requested = avail_msix;
        }
        bnxt_fill_msix_vecs(bp, ent);
-       bnxt_set_max_func_cp_rings(bp, max_cp_rings - avail_msix);
        edev->flags |= BNXT_EN_FLAG_MSIX_REQUESTED;
        return avail_msix;
 }
@@ -178,7 +177,6 @@ static int bnxt_free_msix_vecs(struct bnxt_en_dev *edev, int ulp_id)
 {
        struct net_device *dev = edev->net;
        struct bnxt *bp = netdev_priv(dev);
-       int max_cp_rings, msix_requested;
 
        ASSERT_RTNL();
        if (ulp_id != BNXT_ROCE_ULP)
@@ -187,9 +185,6 @@ static int bnxt_free_msix_vecs(struct bnxt_en_dev *edev, int ulp_id)
        if (!(edev->flags & BNXT_EN_FLAG_MSIX_REQUESTED))
                return 0;
 
-       max_cp_rings = bnxt_get_max_func_cp_rings(bp);
-       msix_requested = edev->ulp_tbl[ulp_id].msix_requested;
-       bnxt_set_max_func_cp_rings(bp, max_cp_rings + msix_requested);
        edev->ulp_tbl[ulp_id].msix_requested = 0;
        edev->flags &= ~BNXT_EN_FLAG_MSIX_REQUESTED;
        if (netif_running(dev)) {
@@ -220,21 +215,6 @@ int bnxt_get_ulp_msix_base(struct bnxt *bp)
        return 0;
 }
 
-void bnxt_subtract_ulp_resources(struct bnxt *bp, int ulp_id)
-{
-       ASSERT_RTNL();
-       if (bnxt_ulp_registered(bp->edev, ulp_id)) {
-               struct bnxt_en_dev *edev = bp->edev;
-               unsigned int msix_req, max;
-
-               msix_req = edev->ulp_tbl[ulp_id].msix_requested;
-               max = bnxt_get_max_func_cp_rings(bp);
-               bnxt_set_max_func_cp_rings(bp, max - msix_req);
-               max = bnxt_get_max_func_stat_ctxs(bp);
-               bnxt_set_max_func_stat_ctxs(bp, max - 1);
-       }
-}
-
 static int bnxt_send_msg(struct bnxt_en_dev *edev, int ulp_id,
                         struct bnxt_fw_msg *fw_msg)
 {
index df48ac7..d9bea37 100644 (file)
@@ -90,7 +90,6 @@ static inline bool bnxt_ulp_registered(struct bnxt_en_dev *edev, int ulp_id)
 
 int bnxt_get_ulp_msix_num(struct bnxt *bp);
 int bnxt_get_ulp_msix_base(struct bnxt *bp);
-void bnxt_subtract_ulp_resources(struct bnxt *bp, int ulp_id);
 void bnxt_ulp_stop(struct bnxt *bp);
 void bnxt_ulp_start(struct bnxt *bp);
 void bnxt_ulp_sriov_cfg(struct bnxt *bp, int num_vfs);
index b773bc0..14b4961 100644 (file)
@@ -186,6 +186,9 @@ struct bcmgenet_mib_counters {
 #define UMAC_MAC1                      0x010
 #define UMAC_MAX_FRAME_LEN             0x014
 
+#define UMAC_MODE                      0x44
+#define  MODE_LINK_STATUS              (1 << 5)
+
 #define UMAC_EEE_CTRL                  0x064
 #define  EN_LPI_RX_PAUSE               (1 << 0)
 #define  EN_LPI_TX_PFC                 (1 << 1)
index 5333274..4241ae9 100644 (file)
@@ -115,8 +115,14 @@ void bcmgenet_mii_setup(struct net_device *dev)
 static int bcmgenet_fixed_phy_link_update(struct net_device *dev,
                                          struct fixed_phy_status *status)
 {
-       if (dev && dev->phydev && status)
-               status->link = dev->phydev->link;
+       struct bcmgenet_priv *priv;
+       u32 reg;
+
+       if (dev && dev->phydev && status) {
+               priv = netdev_priv(dev);
+               reg = bcmgenet_umac_readl(priv, UMAC_MODE);
+               status->link = !!(reg & MODE_LINK_STATUS);
+       }
 
        return 0;
 }
index c6707ea..16e4ef7 100644 (file)
@@ -649,7 +649,7 @@ static int macb_halt_tx(struct macb *bp)
                if (!(status & MACB_BIT(TGO)))
                        return 0;
 
-               usleep_range(10, 250);
+               udelay(250);
        } while (time_before(halt_time, timeout));
 
        return -ETIMEDOUT;
index ff92ab1..1e9d882 100644 (file)
@@ -4500,7 +4500,7 @@ int be_cmd_get_profile_config(struct be_adapter *adapter,
                                port_res->max_vfs += le16_to_cpu(pcie->num_vfs);
                        }
                }
-               return status;
+               goto err;
        }
 
        pcie = be_get_pcie_desc(resp->func_param, desc_count,
index cad52bd..08a750f 100644 (file)
@@ -486,6 +486,8 @@ struct hnae_ae_ops {
                        u8 *auto_neg, u16 *speed, u8 *duplex);
        void (*toggle_ring_irq)(struct hnae_ring *ring, u32 val);
        void (*adjust_link)(struct hnae_handle *handle, int speed, int duplex);
+       bool (*need_adjust_link)(struct hnae_handle *handle,
+                                int speed, int duplex);
        int (*set_loopback)(struct hnae_handle *handle,
                            enum hnae_loop loop_mode, int en);
        void (*get_ring_bdnum_limit)(struct hnae_queue *queue,
index e6aad30..b52029e 100644 (file)
@@ -155,6 +155,41 @@ static void hns_ae_put_handle(struct hnae_handle *handle)
                hns_ae_get_ring_pair(handle->qs[i])->used_by_vf = 0;
 }
 
+static int hns_ae_wait_flow_down(struct hnae_handle *handle)
+{
+       struct dsaf_device *dsaf_dev;
+       struct hns_ppe_cb *ppe_cb;
+       struct hnae_vf_cb *vf_cb;
+       int ret;
+       int i;
+
+       for (i = 0; i < handle->q_num; i++) {
+               ret = hns_rcb_wait_tx_ring_clean(handle->qs[i]);
+               if (ret)
+                       return ret;
+       }
+
+       ppe_cb = hns_get_ppe_cb(handle);
+       ret = hns_ppe_wait_tx_fifo_clean(ppe_cb);
+       if (ret)
+               return ret;
+
+       dsaf_dev = hns_ae_get_dsaf_dev(handle->dev);
+       if (!dsaf_dev)
+               return -EINVAL;
+       ret = hns_dsaf_wait_pkt_clean(dsaf_dev, handle->dport_id);
+       if (ret)
+               return ret;
+
+       vf_cb = hns_ae_get_vf_cb(handle);
+       ret = hns_mac_wait_fifo_clean(vf_cb->mac_cb);
+       if (ret)
+               return ret;
+
+       mdelay(10);
+       return 0;
+}
+
 static void hns_ae_ring_enable_all(struct hnae_handle *handle, int val)
 {
        int q_num = handle->q_num;
@@ -399,12 +434,41 @@ static int hns_ae_get_mac_info(struct hnae_handle *handle,
        return hns_mac_get_port_info(mac_cb, auto_neg, speed, duplex);
 }
 
+static bool hns_ae_need_adjust_link(struct hnae_handle *handle, int speed,
+                                   int duplex)
+{
+       struct hns_mac_cb *mac_cb = hns_get_mac_cb(handle);
+
+       return hns_mac_need_adjust_link(mac_cb, speed, duplex);
+}
+
 static void hns_ae_adjust_link(struct hnae_handle *handle, int speed,
                               int duplex)
 {
        struct hns_mac_cb *mac_cb = hns_get_mac_cb(handle);
 
-       hns_mac_adjust_link(mac_cb, speed, duplex);
+       switch (mac_cb->dsaf_dev->dsaf_ver) {
+       case AE_VERSION_1:
+               hns_mac_adjust_link(mac_cb, speed, duplex);
+               break;
+
+       case AE_VERSION_2:
+               /* chip need to clear all pkt inside */
+               hns_mac_disable(mac_cb, MAC_COMM_MODE_RX);
+               if (hns_ae_wait_flow_down(handle)) {
+                       hns_mac_enable(mac_cb, MAC_COMM_MODE_RX);
+                       break;
+               }
+
+               hns_mac_adjust_link(mac_cb, speed, duplex);
+               hns_mac_enable(mac_cb, MAC_COMM_MODE_RX);
+               break;
+
+       default:
+               break;
+       }
+
+       return;
 }
 
 static void hns_ae_get_ring_bdnum_limit(struct hnae_queue *queue,
@@ -902,6 +966,7 @@ static struct hnae_ae_ops hns_dsaf_ops = {
        .get_status = hns_ae_get_link_status,
        .get_info = hns_ae_get_mac_info,
        .adjust_link = hns_ae_adjust_link,
+       .need_adjust_link = hns_ae_need_adjust_link,
        .set_loopback = hns_ae_config_loopback,
        .get_ring_bdnum_limit = hns_ae_get_ring_bdnum_limit,
        .get_pauseparam = hns_ae_get_pauseparam,
index 5488c6e..09e4061 100644 (file)
@@ -257,6 +257,16 @@ static void hns_gmac_get_pausefrm_cfg(void *mac_drv, u32 *rx_pause_en,
        *tx_pause_en = dsaf_get_bit(pause_en, GMAC_PAUSE_EN_TX_FDFC_B);
 }
 
+static bool hns_gmac_need_adjust_link(void *mac_drv, enum mac_speed speed,
+                                     int duplex)
+{
+       struct mac_driver *drv = (struct mac_driver *)mac_drv;
+       struct hns_mac_cb *mac_cb = drv->mac_cb;
+
+       return (mac_cb->speed != speed) ||
+               (mac_cb->half_duplex == duplex);
+}
+
 static int hns_gmac_adjust_link(void *mac_drv, enum mac_speed speed,
                                u32 full_duplex)
 {
@@ -309,6 +319,30 @@ static void hns_gmac_set_promisc(void *mac_drv, u8 en)
                hns_gmac_set_uc_match(mac_drv, en);
 }
 
+int hns_gmac_wait_fifo_clean(void *mac_drv)
+{
+       struct mac_driver *drv = (struct mac_driver *)mac_drv;
+       int wait_cnt;
+       u32 val;
+
+       wait_cnt = 0;
+       while (wait_cnt++ < HNS_MAX_WAIT_CNT) {
+               val = dsaf_read_dev(drv, GMAC_FIFO_STATE_REG);
+               /* bit5~bit0 is not send complete pkts */
+               if ((val & 0x3f) == 0)
+                       break;
+               usleep_range(100, 200);
+       }
+
+       if (wait_cnt >= HNS_MAX_WAIT_CNT) {
+               dev_err(drv->dev,
+                       "hns ge %d fifo was not idle.\n", drv->mac_id);
+               return -EBUSY;
+       }
+
+       return 0;
+}
+
 static void hns_gmac_init(void *mac_drv)
 {
        u32 port;
@@ -690,6 +724,7 @@ void *hns_gmac_config(struct hns_mac_cb *mac_cb, struct mac_params *mac_param)
        mac_drv->mac_disable = hns_gmac_disable;
        mac_drv->mac_free = hns_gmac_free;
        mac_drv->adjust_link = hns_gmac_adjust_link;
+       mac_drv->need_adjust_link = hns_gmac_need_adjust_link;
        mac_drv->set_tx_auto_pause_frames = hns_gmac_set_tx_auto_pause_frames;
        mac_drv->config_max_frame_length = hns_gmac_config_max_frame_length;
        mac_drv->mac_pausefrm_cfg = hns_gmac_pause_frm_cfg;
@@ -717,6 +752,7 @@ void *hns_gmac_config(struct hns_mac_cb *mac_cb, struct mac_params *mac_param)
        mac_drv->get_strings = hns_gmac_get_strings;
        mac_drv->update_stats = hns_gmac_update_stats;
        mac_drv->set_promiscuous = hns_gmac_set_promisc;
+       mac_drv->wait_fifo_clean = hns_gmac_wait_fifo_clean;
 
        return (void *)mac_drv;
 }
index 1c2326b..6ed6f14 100644 (file)
@@ -114,6 +114,26 @@ int hns_mac_get_port_info(struct hns_mac_cb *mac_cb,
        return 0;
 }
 
+/**
+ * hns_mac_need_adjust_link - check whether mac speed and duplex registers
+ * need to be updated
+ * @mac_cb: mac device
+ * @speed: phy device speed
+ * @duplex: phy device duplex
+ */
+bool hns_mac_need_adjust_link(struct hns_mac_cb *mac_cb, int speed, int duplex)
+{
+       struct mac_driver *mac_ctrl_drv;
+
+       mac_ctrl_drv = (struct mac_driver *)(mac_cb->priv.mac);
+
+       if (mac_ctrl_drv->need_adjust_link)
+               return mac_ctrl_drv->need_adjust_link(mac_ctrl_drv,
+                       (enum mac_speed)speed, duplex);
+       else
+               return true;
+}
+
 void hns_mac_adjust_link(struct hns_mac_cb *mac_cb, int speed, int duplex)
 {
        int ret;
@@ -430,6 +450,16 @@ int hns_mac_vm_config_bc_en(struct hns_mac_cb *mac_cb, u32 vmid, bool enable)
        return 0;
 }
 
+int hns_mac_wait_fifo_clean(struct hns_mac_cb *mac_cb)
+{
+       struct mac_driver *drv = hns_mac_get_drv(mac_cb);
+
+       if (drv->wait_fifo_clean)
+               return drv->wait_fifo_clean(drv);
+
+       return 0;
+}
+
 void hns_mac_reset(struct hns_mac_cb *mac_cb)
 {
        struct mac_driver *drv = hns_mac_get_drv(mac_cb);
@@ -998,6 +1028,20 @@ static int hns_mac_get_max_port_num(struct dsaf_device *dsaf_dev)
                return  DSAF_MAX_PORT_NUM;
 }
 
+void hns_mac_enable(struct hns_mac_cb *mac_cb, enum mac_commom_mode mode)
+{
+       struct mac_driver *mac_ctrl_drv = hns_mac_get_drv(mac_cb);
+
+       mac_ctrl_drv->mac_enable(mac_cb->priv.mac, mode);
+}
+
+void hns_mac_disable(struct hns_mac_cb *mac_cb, enum mac_commom_mode mode)
+{
+       struct mac_driver *mac_ctrl_drv = hns_mac_get_drv(mac_cb);
+
+       mac_ctrl_drv->mac_disable(mac_cb->priv.mac, mode);
+}
+
 /**
  * hns_mac_init - init mac
  * @dsaf_dev: dsa fabric device struct pointer
index bbc0a98..fbc7534 100644 (file)
@@ -356,6 +356,9 @@ struct mac_driver {
        /*adjust mac mode of port,include speed and duplex*/
        int (*adjust_link)(void *mac_drv, enum mac_speed speed,
                           u32 full_duplex);
+       /* need adjust link */
+       bool (*need_adjust_link)(void *mac_drv, enum mac_speed speed,
+                                int duplex);
        /* config autoegotaite mode of port*/
        void (*set_an_mode)(void *mac_drv, u8 enable);
        /* config loopbank mode */
@@ -394,6 +397,7 @@ struct mac_driver {
        void (*get_info)(void *mac_drv, struct mac_info *mac_info);
 
        void (*update_stats)(void *mac_drv);
+       int (*wait_fifo_clean)(void *mac_drv);
 
        enum mac_mode mac_mode;
        u8 mac_id;
@@ -427,6 +431,7 @@ void *hns_xgmac_config(struct hns_mac_cb *mac_cb,
 
 int hns_mac_init(struct dsaf_device *dsaf_dev);
 void mac_adjust_link(struct net_device *net_dev);
+bool hns_mac_need_adjust_link(struct hns_mac_cb *mac_cb, int speed, int duplex);
 void hns_mac_get_link_status(struct hns_mac_cb *mac_cb,        u32 *link_status);
 int hns_mac_change_vf_addr(struct hns_mac_cb *mac_cb, u32 vmid, char *addr);
 int hns_mac_set_multi(struct hns_mac_cb *mac_cb,
@@ -463,5 +468,8 @@ int hns_mac_add_uc_addr(struct hns_mac_cb *mac_cb, u8 vf_id,
 int hns_mac_rm_uc_addr(struct hns_mac_cb *mac_cb, u8 vf_id,
                       const unsigned char *addr);
 int hns_mac_clr_multicast(struct hns_mac_cb *mac_cb, int vfn);
+void hns_mac_enable(struct hns_mac_cb *mac_cb, enum mac_commom_mode mode);
+void hns_mac_disable(struct hns_mac_cb *mac_cb, enum mac_commom_mode mode);
+int hns_mac_wait_fifo_clean(struct hns_mac_cb *mac_cb);
 
 #endif /* _HNS_DSAF_MAC_H */
index ca50c25..e557a4e 100644 (file)
@@ -2727,6 +2727,35 @@ void hns_dsaf_set_promisc_tcam(struct dsaf_device *dsaf_dev,
        soft_mac_entry->index = enable ? entry_index : DSAF_INVALID_ENTRY_IDX;
 }
 
+int hns_dsaf_wait_pkt_clean(struct dsaf_device *dsaf_dev, int port)
+{
+       u32 val, val_tmp;
+       int wait_cnt;
+
+       if (port >= DSAF_SERVICE_NW_NUM)
+               return 0;
+
+       wait_cnt = 0;
+       while (wait_cnt++ < HNS_MAX_WAIT_CNT) {
+               val = dsaf_read_dev(dsaf_dev, DSAF_VOQ_IN_PKT_NUM_0_REG +
+                       (port + DSAF_XGE_NUM) * 0x40);
+               val_tmp = dsaf_read_dev(dsaf_dev, DSAF_VOQ_OUT_PKT_NUM_0_REG +
+                       (port + DSAF_XGE_NUM) * 0x40);
+               if (val == val_tmp)
+                       break;
+
+               usleep_range(100, 200);
+       }
+
+       if (wait_cnt >= HNS_MAX_WAIT_CNT) {
+               dev_err(dsaf_dev->dev, "hns dsaf clean wait timeout(%u - %u).\n",
+                       val, val_tmp);
+               return -EBUSY;
+       }
+
+       return 0;
+}
+
 /**
  * dsaf_probe - probo dsaf dev
  * @pdev: dasf platform device
index 4507e82..0e1cd99 100644 (file)
@@ -44,6 +44,8 @@ struct hns_mac_cb;
 #define DSAF_ROCE_CREDIT_CHN   8
 #define DSAF_ROCE_CHAN_MODE    3
 
+#define HNS_MAX_WAIT_CNT 10000
+
 enum dsaf_roce_port_mode {
        DSAF_ROCE_6PORT_MODE,
        DSAF_ROCE_4PORT_MODE,
@@ -463,5 +465,6 @@ int hns_dsaf_rm_mac_addr(
 
 int hns_dsaf_clr_mac_mc_port(struct dsaf_device *dsaf_dev,
                             u8 mac_id, u8 port_num);
+int hns_dsaf_wait_pkt_clean(struct dsaf_device *dsaf_dev, int port);
 
 #endif /* __HNS_DSAF_MAIN_H__ */
index d160d8c..0942e49 100644 (file)
@@ -275,6 +275,29 @@ static void hns_ppe_exc_irq_en(struct hns_ppe_cb *ppe_cb, int en)
        dsaf_write_dev(ppe_cb, PPE_INTEN_REG, msk_vlue & vld_msk);
 }
 
+int hns_ppe_wait_tx_fifo_clean(struct hns_ppe_cb *ppe_cb)
+{
+       int wait_cnt;
+       u32 val;
+
+       wait_cnt = 0;
+       while (wait_cnt++ < HNS_MAX_WAIT_CNT) {
+               val = dsaf_read_dev(ppe_cb, PPE_CURR_TX_FIFO0_REG) & 0x3ffU;
+               if (!val)
+                       break;
+
+               usleep_range(100, 200);
+       }
+
+       if (wait_cnt >= HNS_MAX_WAIT_CNT) {
+               dev_err(ppe_cb->dev, "hns ppe tx fifo clean wait timeout, still has %u pkt.\n",
+                       val);
+               return -EBUSY;
+       }
+
+       return 0;
+}
+
 /**
  * ppe_init_hw - init ppe
  * @ppe_cb: ppe device
index 9d8e643..f670e63 100644 (file)
@@ -100,6 +100,7 @@ struct ppe_common_cb {
 
 };
 
+int hns_ppe_wait_tx_fifo_clean(struct hns_ppe_cb *ppe_cb);
 int hns_ppe_init(struct dsaf_device *dsaf_dev);
 
 void hns_ppe_uninit(struct dsaf_device *dsaf_dev);
index 9d76e2e..5d64519 100644 (file)
@@ -66,6 +66,29 @@ void hns_rcb_wait_fbd_clean(struct hnae_queue **qs, int q_num, u32 flag)
                        "queue(%d) wait fbd(%d) clean fail!!\n", i, fbd_num);
 }
 
+int hns_rcb_wait_tx_ring_clean(struct hnae_queue *qs)
+{
+       u32 head, tail;
+       int wait_cnt;
+
+       tail = dsaf_read_dev(&qs->tx_ring, RCB_REG_TAIL);
+       wait_cnt = 0;
+       while (wait_cnt++ < HNS_MAX_WAIT_CNT) {
+               head = dsaf_read_dev(&qs->tx_ring, RCB_REG_HEAD);
+               if (tail == head)
+                       break;
+
+               usleep_range(100, 200);
+       }
+
+       if (wait_cnt >= HNS_MAX_WAIT_CNT) {
+               dev_err(qs->dev->dev, "rcb wait timeout, head not equal to tail.\n");
+               return -EBUSY;
+       }
+
+       return 0;
+}
+
 /**
  *hns_rcb_reset_ring_hw - ring reset
  *@q: ring struct pointer
index 6028164..2319b77 100644 (file)
@@ -136,6 +136,7 @@ void hns_rcbv2_int_clr_hw(struct hnae_queue *q, u32 flag);
 void hns_rcb_init_hw(struct ring_pair_cb *ring);
 void hns_rcb_reset_ring_hw(struct hnae_queue *q);
 void hns_rcb_wait_fbd_clean(struct hnae_queue **qs, int q_num, u32 flag);
+int hns_rcb_wait_tx_ring_clean(struct hnae_queue *qs);
 u32 hns_rcb_get_rx_coalesced_frames(
        struct rcb_common_cb *rcb_common, u32 port_idx);
 u32 hns_rcb_get_tx_coalesced_frames(
index 886cbbf..74d935d 100644 (file)
 #define RCB_RING_INTMSK_TX_OVERTIME_REG                0x000C4
 #define RCB_RING_INTSTS_TX_OVERTIME_REG                0x000C8
 
+#define GMAC_FIFO_STATE_REG                    0x0000UL
 #define GMAC_DUPLEX_TYPE_REG                   0x0008UL
 #define GMAC_FD_FC_TYPE_REG                    0x000CUL
 #define GMAC_TX_WATER_LINE_REG                 0x0010UL
index 02a0ba2..f56855e 100644 (file)
@@ -1112,11 +1112,26 @@ static void hns_nic_adjust_link(struct net_device *ndev)
        struct hnae_handle *h = priv->ae_handle;
        int state = 1;
 
+       /* If there is no phy, do not need adjust link */
        if (ndev->phydev) {
-               h->dev->ops->adjust_link(h, ndev->phydev->speed,
-                                        ndev->phydev->duplex);
-               state = ndev->phydev->link;
+               /* When phy link down, do nothing */
+               if (ndev->phydev->link == 0)
+                       return;
+
+               if (h->dev->ops->need_adjust_link(h, ndev->phydev->speed,
+                                                 ndev->phydev->duplex)) {
+                       /* The Hi161X chip doesn't support changing gmac
+                        * speed and duplex while traffic flows. Delay 200ms
+                        * to make sure there is no more data in the chip FIFO.
+                        */
+                       netif_carrier_off(ndev);
+                       msleep(200);
+                       h->dev->ops->adjust_link(h, ndev->phydev->speed,
+                                                ndev->phydev->duplex);
+                       netif_carrier_on(ndev);
+               }
        }
+
        state = state && h->dev->ops->get_status(h);
 
        if (state != priv->link) {
index 08f3c47..774beda 100644 (file)
@@ -243,7 +243,9 @@ static int hns_nic_set_link_ksettings(struct net_device *net_dev,
        }
 
        if (h->dev->ops->adjust_link) {
+               netif_carrier_off(net_dev);
                h->dev->ops->adjust_link(h, (int)speed, cmd->base.duplex);
+               netif_carrier_on(net_dev);
                return 0;
        }
 
index 354c098..3726646 100644 (file)
@@ -494,9 +494,6 @@ static u32 __emac_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_s
        case 16384:
                ret |= EMAC_MR1_RFS_16K;
                break;
-       case 8192:
-               ret |= EMAC4_MR1_RFS_8K;
-               break;
        case 4096:
                ret |= EMAC_MR1_RFS_4K;
                break;
@@ -537,6 +534,9 @@ static u32 __emac4_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_
        case 16384:
                ret |= EMAC4_MR1_RFS_16K;
                break;
+       case 8192:
+               ret |= EMAC4_MR1_RFS_8K;
+               break;
        case 4096:
                ret |= EMAC4_MR1_RFS_4K;
                break;
index dafdd4a..4f0daf6 100644 (file)
@@ -1823,11 +1823,17 @@ static int do_reset(struct ibmvnic_adapter *adapter,
                        adapter->map_id = 1;
                        release_rx_pools(adapter);
                        release_tx_pools(adapter);
-                       init_rx_pools(netdev);
-                       init_tx_pools(netdev);
+                       rc = init_rx_pools(netdev);
+                       if (rc)
+                               return rc;
+                       rc = init_tx_pools(netdev);
+                       if (rc)
+                               return rc;
 
                        release_napi(adapter);
-                       init_napi(adapter);
+                       rc = init_napi(adapter);
+                       if (rc)
+                               return rc;
                } else {
                        rc = reset_tx_pools(adapter);
                        if (rc)
index 7a637b5..e08301d 100644 (file)
@@ -274,6 +274,7 @@ ltq_etop_hw_init(struct net_device *dev)
                struct ltq_etop_chan *ch = &priv->ch[i];
 
                ch->idx = ch->dma.nr = i;
+               ch->dma.dev = &priv->pdev->dev;
 
                if (IS_TX(i)) {
                        ltq_dma_alloc_tx(&ch->dma);
index 32d785b..2850041 100644 (file)
@@ -4803,6 +4803,7 @@ static int mvpp2_port_probe(struct platform_device *pdev,
        dev->min_mtu = ETH_MIN_MTU;
        /* 9704 == 9728 - 20 and rounding to 8 */
        dev->max_mtu = MVPP2_BM_JUMBO_PKT_SIZE;
+       dev->dev.of_node = port_node;
 
        /* Phylink isn't used w/ ACPI as of now */
        if (port_node) {
index b994b80..37ba7c7 100644 (file)
@@ -132,11 +132,11 @@ void mlx5_add_device(struct mlx5_interface *intf, struct mlx5_priv *priv)
        delayed_event_start(priv);
 
        dev_ctx->context = intf->add(dev);
-       set_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state);
-       if (intf->attach)
-               set_bit(MLX5_INTERFACE_ATTACHED, &dev_ctx->state);
-
        if (dev_ctx->context) {
+               set_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state);
+               if (intf->attach)
+                       set_bit(MLX5_INTERFACE_ATTACHED, &dev_ctx->state);
+
                spin_lock_irq(&priv->ctx_lock);
                list_add_tail(&dev_ctx->list, &priv->ctx_list);
 
@@ -211,12 +211,17 @@ static void mlx5_attach_interface(struct mlx5_interface *intf, struct mlx5_priv
        if (intf->attach) {
                if (test_bit(MLX5_INTERFACE_ATTACHED, &dev_ctx->state))
                        goto out;
-               intf->attach(dev, dev_ctx->context);
+               if (intf->attach(dev, dev_ctx->context))
+                       goto out;
+
                set_bit(MLX5_INTERFACE_ATTACHED, &dev_ctx->state);
        } else {
                if (test_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state))
                        goto out;
                dev_ctx->context = intf->add(dev);
+               if (!dev_ctx->context)
+                       goto out;
+
                set_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state);
        }
 
@@ -391,16 +396,17 @@ void mlx5_remove_dev_by_protocol(struct mlx5_core_dev *dev, int protocol)
                }
 }
 
-static u16 mlx5_gen_pci_id(struct mlx5_core_dev *dev)
+static u32 mlx5_gen_pci_id(struct mlx5_core_dev *dev)
 {
-       return (u16)((dev->pdev->bus->number << 8) |
+       return (u32)((pci_domain_nr(dev->pdev->bus) << 16) |
+                    (dev->pdev->bus->number << 8) |
                     PCI_SLOT(dev->pdev->devfn));
 }
 
 /* Must be called with intf_mutex held */
 struct mlx5_core_dev *mlx5_get_next_phys_dev(struct mlx5_core_dev *dev)
 {
-       u16 pci_id = mlx5_gen_pci_id(dev);
+       u32 pci_id = mlx5_gen_pci_id(dev);
        struct mlx5_core_dev *res = NULL;
        struct mlx5_core_dev *tmp_dev;
        struct mlx5_priv *priv;
index 75bb981..41cde92 100644 (file)
@@ -191,7 +191,7 @@ set_udp(void *headers_c, void *headers_v, __be16 psrc_m, __be16 psrc_v,
 {
        if (psrc_m) {
                MLX5E_FTE_SET(headers_c, udp_sport, 0xffff);
-               MLX5E_FTE_SET(headers_c, udp_sport, ntohs(psrc_v));
+               MLX5E_FTE_SET(headers_v, udp_sport, ntohs(psrc_v));
        }
 
        if (pdst_m) {
index f72b5c9..3028e8d 100644 (file)
@@ -663,6 +663,7 @@ static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw, int nvports)
        if (err)
                goto miss_rule_err;
 
+       kvfree(flow_group_in);
        return 0;
 
 miss_rule_err:
index f418541..37d114c 100644 (file)
@@ -1578,6 +1578,33 @@ static u64 matched_fgs_get_version(struct list_head *match_head)
        return version;
 }
 
+static struct fs_fte *
+lookup_fte_locked(struct mlx5_flow_group *g,
+                 u32 *match_value,
+                 bool take_write)
+{
+       struct fs_fte *fte_tmp;
+
+       if (take_write)
+               nested_down_write_ref_node(&g->node, FS_LOCK_PARENT);
+       else
+               nested_down_read_ref_node(&g->node, FS_LOCK_PARENT);
+       fte_tmp = rhashtable_lookup_fast(&g->ftes_hash, match_value,
+                                        rhash_fte);
+       if (!fte_tmp || !tree_get_node(&fte_tmp->node)) {
+               fte_tmp = NULL;
+               goto out;
+       }
+
+       nested_down_write_ref_node(&fte_tmp->node, FS_LOCK_CHILD);
+out:
+       if (take_write)
+               up_write_ref_node(&g->node);
+       else
+               up_read_ref_node(&g->node);
+       return fte_tmp;
+}
+
 static struct mlx5_flow_handle *
 try_add_to_existing_fg(struct mlx5_flow_table *ft,
                       struct list_head *match_head,
@@ -1600,10 +1627,6 @@ try_add_to_existing_fg(struct mlx5_flow_table *ft,
        if (IS_ERR(fte))
                return  ERR_PTR(-ENOMEM);
 
-       list_for_each_entry(iter, match_head, list) {
-               nested_down_read_ref_node(&iter->g->node, FS_LOCK_PARENT);
-       }
-
 search_again_locked:
        version = matched_fgs_get_version(match_head);
        /* Try to find a fg that already contains a matching fte */
@@ -1611,20 +1634,9 @@ search_again_locked:
                struct fs_fte *fte_tmp;
 
                g = iter->g;
-               fte_tmp = rhashtable_lookup_fast(&g->ftes_hash, spec->match_value,
-                                                rhash_fte);
-               if (!fte_tmp || !tree_get_node(&fte_tmp->node))
+               fte_tmp = lookup_fte_locked(g, spec->match_value, take_write);
+               if (!fte_tmp)
                        continue;
-
-               nested_down_write_ref_node(&fte_tmp->node, FS_LOCK_CHILD);
-               if (!take_write) {
-                       list_for_each_entry(iter, match_head, list)
-                               up_read_ref_node(&iter->g->node);
-               } else {
-                       list_for_each_entry(iter, match_head, list)
-                               up_write_ref_node(&iter->g->node);
-               }
-
                rule = add_rule_fg(g, spec->match_value,
                                   flow_act, dest, dest_num, fte_tmp);
                up_write_ref_node(&fte_tmp->node);
@@ -1633,19 +1645,6 @@ search_again_locked:
                return rule;
        }
 
-       /* No group with matching fte found. Try to add a new fte to any
-        * matching fg.
-        */
-
-       if (!take_write) {
-               list_for_each_entry(iter, match_head, list)
-                       up_read_ref_node(&iter->g->node);
-               list_for_each_entry(iter, match_head, list)
-                       nested_down_write_ref_node(&iter->g->node,
-                                                  FS_LOCK_PARENT);
-               take_write = true;
-       }
-
        /* Check the ft version, for case that new flow group
         * was added while the fgs weren't locked
         */
@@ -1657,27 +1656,30 @@ search_again_locked:
        /* Check the fgs version, for case the new FTE with the
         * same values was added while the fgs weren't locked
         */
-       if (version != matched_fgs_get_version(match_head))
+       if (version != matched_fgs_get_version(match_head)) {
+               take_write = true;
                goto search_again_locked;
+       }
 
        list_for_each_entry(iter, match_head, list) {
                g = iter->g;
 
                if (!g->node.active)
                        continue;
+
+               nested_down_write_ref_node(&g->node, FS_LOCK_PARENT);
+
                err = insert_fte(g, fte);
                if (err) {
+                       up_write_ref_node(&g->node);
                        if (err == -ENOSPC)
                                continue;
-                       list_for_each_entry(iter, match_head, list)
-                               up_write_ref_node(&iter->g->node);
                        kmem_cache_free(steering->ftes_cache, fte);
                        return ERR_PTR(err);
                }
 
                nested_down_write_ref_node(&fte->node, FS_LOCK_CHILD);
-               list_for_each_entry(iter, match_head, list)
-                       up_write_ref_node(&iter->g->node);
+               up_write_ref_node(&g->node);
                rule = add_rule_fg(g, spec->match_value,
                                   flow_act, dest, dest_num, fte);
                up_write_ref_node(&fte->node);
@@ -1686,8 +1688,6 @@ search_again_locked:
        }
        rule = ERR_PTR(-ENOENT);
 out:
-       list_for_each_entry(iter, match_head, list)
-               up_write_ref_node(&iter->g->node);
        kmem_cache_free(steering->ftes_cache, fte);
        return rule;
 }
@@ -1726,6 +1726,8 @@ search_again_locked:
        if (err) {
                if (take_write)
                        up_write_ref_node(&ft->node);
+               else
+                       up_read_ref_node(&ft->node);
                return ERR_PTR(err);
        }
 
index d39b0b7..9f39aec 100644 (file)
@@ -331,9 +331,17 @@ void mlx5_start_health_poll(struct mlx5_core_dev *dev)
        add_timer(&health->timer);
 }
 
-void mlx5_stop_health_poll(struct mlx5_core_dev *dev)
+void mlx5_stop_health_poll(struct mlx5_core_dev *dev, bool disable_health)
 {
        struct mlx5_core_health *health = &dev->priv.health;
+       unsigned long flags;
+
+       if (disable_health) {
+               spin_lock_irqsave(&health->wq_lock, flags);
+               set_bit(MLX5_DROP_NEW_HEALTH_WORK, &health->flags);
+               set_bit(MLX5_DROP_NEW_RECOVERY_WORK, &health->flags);
+               spin_unlock_irqrestore(&health->wq_lock, flags);
+       }
 
        del_timer_sync(&health->timer);
 }
index cf3e4a6..b5e9f66 100644 (file)
@@ -878,8 +878,10 @@ static int mlx5_pci_init(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
        priv->numa_node = dev_to_node(&dev->pdev->dev);
 
        priv->dbg_root = debugfs_create_dir(dev_name(&pdev->dev), mlx5_debugfs_root);
-       if (!priv->dbg_root)
+       if (!priv->dbg_root) {
+               dev_err(&pdev->dev, "Cannot create debugfs dir, aborting\n");
                return -ENOMEM;
+       }
 
        err = mlx5_pci_enable_device(dev);
        if (err) {
@@ -928,7 +930,7 @@ static void mlx5_pci_close(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
        pci_clear_master(dev->pdev);
        release_bar(dev->pdev);
        mlx5_pci_disable_device(dev);
-       debugfs_remove(priv->dbg_root);
+       debugfs_remove_recursive(priv->dbg_root);
 }
 
 static int mlx5_init_once(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
@@ -1286,7 +1288,7 @@ err_cleanup_once:
                mlx5_cleanup_once(dev);
 
 err_stop_poll:
-       mlx5_stop_health_poll(dev);
+       mlx5_stop_health_poll(dev, boot);
        if (mlx5_cmd_teardown_hca(dev)) {
                dev_err(&dev->pdev->dev, "tear_down_hca failed, skip cleanup\n");
                goto out_err;
@@ -1346,7 +1348,7 @@ static int mlx5_unload_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
        mlx5_free_irq_vectors(dev);
        if (cleanup)
                mlx5_cleanup_once(dev);
-       mlx5_stop_health_poll(dev);
+       mlx5_stop_health_poll(dev, cleanup);
        err = mlx5_cmd_teardown_hca(dev);
        if (err) {
                dev_err(&dev->pdev->dev, "tear_down_hca failed, skip cleanup\n");
@@ -1608,7 +1610,7 @@ static int mlx5_try_fast_unload(struct mlx5_core_dev *dev)
         * with the HCA, so the health polll is no longer needed.
         */
        mlx5_drain_health_wq(dev);
-       mlx5_stop_health_poll(dev);
+       mlx5_stop_health_poll(dev, false);
 
        ret = mlx5_cmd_force_teardown_hca(dev);
        if (ret) {
index 86478a6..68e7f8d 100644 (file)
@@ -39,9 +39,9 @@ u32 mlx5_wq_cyc_get_size(struct mlx5_wq_cyc *wq)
        return (u32)wq->fbc.sz_m1 + 1;
 }
 
-u32 mlx5_wq_cyc_get_frag_size(struct mlx5_wq_cyc *wq)
+u16 mlx5_wq_cyc_get_frag_size(struct mlx5_wq_cyc *wq)
 {
-       return (u32)wq->fbc.frag_sz_m1 + 1;
+       return wq->fbc.frag_sz_m1 + 1;
 }
 
 u32 mlx5_cqwq_get_size(struct mlx5_cqwq *wq)
@@ -138,15 +138,16 @@ int mlx5_wq_qp_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
                      void *qpc, struct mlx5_wq_qp *wq,
                      struct mlx5_wq_ctrl *wq_ctrl)
 {
-       u32 sq_strides_offset;
+       u16 sq_strides_offset;
+       u32 rq_pg_remainder;
        int err;
 
        mlx5_fill_fbc(MLX5_GET(qpc, qpc, log_rq_stride) + 4,
                      MLX5_GET(qpc, qpc, log_rq_size),
                      &wq->rq.fbc);
 
-       sq_strides_offset =
-               ((wq->rq.fbc.frag_sz_m1 + 1) % PAGE_SIZE) / MLX5_SEND_WQE_BB;
+       rq_pg_remainder   = mlx5_wq_cyc_get_byte_size(&wq->rq) % PAGE_SIZE;
+       sq_strides_offset = rq_pg_remainder / MLX5_SEND_WQE_BB;
 
        mlx5_fill_fbc_offset(ilog2(MLX5_SEND_WQE_BB),
                             MLX5_GET(qpc, qpc, log_sq_size),
index 2bd4c31..3a1a170 100644 (file)
@@ -80,7 +80,7 @@ int mlx5_wq_cyc_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
                       void *wqc, struct mlx5_wq_cyc *wq,
                       struct mlx5_wq_ctrl *wq_ctrl);
 u32 mlx5_wq_cyc_get_size(struct mlx5_wq_cyc *wq);
-u32 mlx5_wq_cyc_get_frag_size(struct mlx5_wq_cyc *wq);
+u16 mlx5_wq_cyc_get_frag_size(struct mlx5_wq_cyc *wq);
 
 int mlx5_wq_qp_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
                      void *qpc, struct mlx5_wq_qp *wq,
index 4327487..3589432 100644 (file)
@@ -337,14 +337,14 @@ static const struct mlxsw_sp_sb_cm mlxsw_sp_sb_cms_egress[] = {
        MLXSW_SP_SB_CM(1500, 9, 0),
        MLXSW_SP_SB_CM(1500, 9, 0),
        MLXSW_SP_SB_CM(1500, 9, 0),
-       MLXSW_SP_SB_CM(0, 0, 0),
-       MLXSW_SP_SB_CM(0, 0, 0),
-       MLXSW_SP_SB_CM(0, 0, 0),
-       MLXSW_SP_SB_CM(0, 0, 0),
-       MLXSW_SP_SB_CM(0, 0, 0),
-       MLXSW_SP_SB_CM(0, 0, 0),
-       MLXSW_SP_SB_CM(0, 0, 0),
-       MLXSW_SP_SB_CM(0, 0, 0),
+       MLXSW_SP_SB_CM(0, 140000, 15),
+       MLXSW_SP_SB_CM(0, 140000, 15),
+       MLXSW_SP_SB_CM(0, 140000, 15),
+       MLXSW_SP_SB_CM(0, 140000, 15),
+       MLXSW_SP_SB_CM(0, 140000, 15),
+       MLXSW_SP_SB_CM(0, 140000, 15),
+       MLXSW_SP_SB_CM(0, 140000, 15),
+       MLXSW_SP_SB_CM(0, 140000, 15),
        MLXSW_SP_SB_CM(1, 0xff, 0),
 };
 
index 9044496..46ba0cf 100644 (file)
@@ -52,6 +52,7 @@
 #define NFP_FL_TUNNEL_CSUM                     cpu_to_be16(0x01)
 #define NFP_FL_TUNNEL_KEY                      cpu_to_be16(0x04)
 #define NFP_FL_TUNNEL_GENEVE_OPT               cpu_to_be16(0x0800)
+#define NFP_FL_SUPPORTED_TUNNEL_INFO_FLAGS     IP_TUNNEL_INFO_TX
 #define NFP_FL_SUPPORTED_IPV4_UDP_TUN_FLAGS    (NFP_FL_TUNNEL_CSUM | \
                                                 NFP_FL_TUNNEL_KEY | \
                                                 NFP_FL_TUNNEL_GENEVE_OPT)
@@ -741,11 +742,16 @@ nfp_flower_loop_action(struct nfp_app *app, const struct tc_action *a,
                nfp_fl_push_vlan(psh_v, a);
                *a_len += sizeof(struct nfp_fl_push_vlan);
        } else if (is_tcf_tunnel_set(a)) {
+               struct ip_tunnel_info *ip_tun = tcf_tunnel_info(a);
                struct nfp_repr *repr = netdev_priv(netdev);
+
                *tun_type = nfp_fl_get_tun_from_act_l4_port(repr->app, a);
                if (*tun_type == NFP_FL_TUNNEL_NONE)
                        return -EOPNOTSUPP;
 
+               if (ip_tun->mode & ~NFP_FL_SUPPORTED_TUNNEL_INFO_FLAGS)
+                       return -EOPNOTSUPP;
+
                /* Pre-tunnel action is required for tunnel encap.
                 * This checks for next hop entries on NFP.
                 * If none, the packet falls back before applying other actions.
index 85f8209..81d941a 100644 (file)
@@ -70,6 +70,7 @@ struct nfp_app;
 #define NFP_FL_FEATS_GENEVE            BIT(0)
 #define NFP_FL_NBI_MTU_SETTING         BIT(1)
 #define NFP_FL_FEATS_GENEVE_OPT                BIT(2)
+#define NFP_FL_FEATS_VLAN_PCP          BIT(3)
 #define NFP_FL_FEATS_LAG               BIT(31)
 
 struct nfp_fl_mask_id {
index a0c72f2..17acb8c 100644 (file)
@@ -56,7 +56,7 @@ nfp_flower_compile_meta_tci(struct nfp_flower_meta_tci *frame,
                                                      FLOW_DISSECTOR_KEY_VLAN,
                                                      target);
                /* Populate the tci field. */
-               if (flow_vlan->vlan_id) {
+               if (flow_vlan->vlan_id || flow_vlan->vlan_priority) {
                        tmp_tci = FIELD_PREP(NFP_FLOWER_MASK_VLAN_PRIO,
                                             flow_vlan->vlan_priority) |
                                  FIELD_PREP(NFP_FLOWER_MASK_VLAN_VID,
index 2edab01..bd19624 100644 (file)
@@ -192,6 +192,17 @@ nfp_flower_calculate_key_layers(struct nfp_app *app,
                key_size += sizeof(struct nfp_flower_mac_mpls);
        }
 
+       if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_VLAN)) {
+               struct flow_dissector_key_vlan *flow_vlan;
+
+               flow_vlan = skb_flow_dissector_target(flow->dissector,
+                                                     FLOW_DISSECTOR_KEY_VLAN,
+                                                     flow->mask);
+               if (!(priv->flower_ext_feats & NFP_FL_FEATS_VLAN_PCP) &&
+                   flow_vlan->vlan_priority)
+                       return -EOPNOTSUPP;
+       }
+
        if (dissector_uses_key(flow->dissector,
                               FLOW_DISSECTOR_KEY_ENC_CONTROL)) {
                struct flow_dissector_key_ipv4_addrs *mask_ipv4 = NULL;
index a8b9fba..253bdae 100644 (file)
@@ -229,29 +229,16 @@ done:
        spin_unlock_bh(&nn->reconfig_lock);
 }
 
-/**
- * nfp_net_reconfig() - Reconfigure the firmware
- * @nn:      NFP Net device to reconfigure
- * @update:  The value for the update field in the BAR config
- *
- * Write the update word to the BAR and ping the reconfig queue.  The
- * poll until the firmware has acknowledged the update by zeroing the
- * update word.
- *
- * Return: Negative errno on error, 0 on success
- */
-int nfp_net_reconfig(struct nfp_net *nn, u32 update)
+static void nfp_net_reconfig_sync_enter(struct nfp_net *nn)
 {
        bool cancelled_timer = false;
        u32 pre_posted_requests;
-       int ret;
 
        spin_lock_bh(&nn->reconfig_lock);
 
        nn->reconfig_sync_present = true;
 
        if (nn->reconfig_timer_active) {
-               del_timer(&nn->reconfig_timer);
                nn->reconfig_timer_active = false;
                cancelled_timer = true;
        }
@@ -260,14 +247,43 @@ int nfp_net_reconfig(struct nfp_net *nn, u32 update)
 
        spin_unlock_bh(&nn->reconfig_lock);
 
-       if (cancelled_timer)
+       if (cancelled_timer) {
+               del_timer_sync(&nn->reconfig_timer);
                nfp_net_reconfig_wait(nn, nn->reconfig_timer.expires);
+       }
 
        /* Run the posted reconfigs which were issued before we started */
        if (pre_posted_requests) {
                nfp_net_reconfig_start(nn, pre_posted_requests);
                nfp_net_reconfig_wait(nn, jiffies + HZ * NFP_NET_POLL_TIMEOUT);
        }
+}
+
+static void nfp_net_reconfig_wait_posted(struct nfp_net *nn)
+{
+       nfp_net_reconfig_sync_enter(nn);
+
+       spin_lock_bh(&nn->reconfig_lock);
+       nn->reconfig_sync_present = false;
+       spin_unlock_bh(&nn->reconfig_lock);
+}
+
+/**
+ * nfp_net_reconfig() - Reconfigure the firmware
+ * @nn:      NFP Net device to reconfigure
+ * @update:  The value for the update field in the BAR config
+ *
+ * Write the update word to the BAR and ping the reconfig queue.  The
+ * poll until the firmware has acknowledged the update by zeroing the
+ * update word.
+ *
+ * Return: Negative errno on error, 0 on success
+ */
+int nfp_net_reconfig(struct nfp_net *nn, u32 update)
+{
+       int ret;
+
+       nfp_net_reconfig_sync_enter(nn);
 
        nfp_net_reconfig_start(nn, update);
        ret = nfp_net_reconfig_wait(nn, jiffies + HZ * NFP_NET_POLL_TIMEOUT);
@@ -3633,6 +3649,7 @@ struct nfp_net *nfp_net_alloc(struct pci_dev *pdev, bool needs_netdev,
  */
 void nfp_net_free(struct nfp_net *nn)
 {
+       WARN_ON(timer_pending(&nn->reconfig_timer) || nn->reconfig_posted);
        if (nn->dp.netdev)
                free_netdev(nn->dp.netdev);
        else
@@ -3920,4 +3937,5 @@ void nfp_net_clean(struct nfp_net *nn)
                return;
 
        unregister_netdev(nn->dp.netdev);
+       nfp_net_reconfig_wait_posted(nn);
 }
index ffe7a16..6c8543f 100644 (file)
@@ -45,34 +45,33 @@ qcaspi_read_register(struct qcaspi *qca, u16 reg, u16 *result)
 {
        __be16 rx_data;
        __be16 tx_data;
-       struct spi_transfer *transfer;
-       struct spi_message *msg;
+       struct spi_transfer transfer[2];
+       struct spi_message msg;
        int ret;
 
+       memset(transfer, 0, sizeof(transfer));
+
+       spi_message_init(&msg);
+
        tx_data = cpu_to_be16(QCA7K_SPI_READ | QCA7K_SPI_INTERNAL | reg);
+       *result = 0;
+
+       transfer[0].tx_buf = &tx_data;
+       transfer[0].len = QCASPI_CMD_LEN;
+       transfer[1].rx_buf = &rx_data;
+       transfer[1].len = QCASPI_CMD_LEN;
+
+       spi_message_add_tail(&transfer[0], &msg);
 
        if (qca->legacy_mode) {
-               msg = &qca->spi_msg1;
-               transfer = &qca->spi_xfer1;
-               transfer->tx_buf = &tx_data;
-               transfer->rx_buf = NULL;
-               transfer->len = QCASPI_CMD_LEN;
-               spi_sync(qca->spi_dev, msg);
-       } else {
-               msg = &qca->spi_msg2;
-               transfer = &qca->spi_xfer2[0];
-               transfer->tx_buf = &tx_data;
-               transfer->rx_buf = NULL;
-               transfer->len = QCASPI_CMD_LEN;
-               transfer = &qca->spi_xfer2[1];
+               spi_sync(qca->spi_dev, &msg);
+               spi_message_init(&msg);
        }
-       transfer->tx_buf = NULL;
-       transfer->rx_buf = &rx_data;
-       transfer->len = QCASPI_CMD_LEN;
-       ret = spi_sync(qca->spi_dev, msg);
+       spi_message_add_tail(&transfer[1], &msg);
+       ret = spi_sync(qca->spi_dev, &msg);
 
        if (!ret)
-               ret = msg->status;
+               ret = msg.status;
 
        if (ret)
                qcaspi_spi_error(qca);
@@ -86,35 +85,32 @@ int
 qcaspi_write_register(struct qcaspi *qca, u16 reg, u16 value)
 {
        __be16 tx_data[2];
-       struct spi_transfer *transfer;
-       struct spi_message *msg;
+       struct spi_transfer transfer[2];
+       struct spi_message msg;
        int ret;
 
+       memset(&transfer, 0, sizeof(transfer));
+
+       spi_message_init(&msg);
+
        tx_data[0] = cpu_to_be16(QCA7K_SPI_WRITE | QCA7K_SPI_INTERNAL | reg);
        tx_data[1] = cpu_to_be16(value);
 
+       transfer[0].tx_buf = &tx_data[0];
+       transfer[0].len = QCASPI_CMD_LEN;
+       transfer[1].tx_buf = &tx_data[1];
+       transfer[1].len = QCASPI_CMD_LEN;
+
+       spi_message_add_tail(&transfer[0], &msg);
        if (qca->legacy_mode) {
-               msg = &qca->spi_msg1;
-               transfer = &qca->spi_xfer1;
-               transfer->tx_buf = &tx_data[0];
-               transfer->rx_buf = NULL;
-               transfer->len = QCASPI_CMD_LEN;
-               spi_sync(qca->spi_dev, msg);
-       } else {
-               msg = &qca->spi_msg2;
-               transfer = &qca->spi_xfer2[0];
-               transfer->tx_buf = &tx_data[0];
-               transfer->rx_buf = NULL;
-               transfer->len = QCASPI_CMD_LEN;
-               transfer = &qca->spi_xfer2[1];
+               spi_sync(qca->spi_dev, &msg);
+               spi_message_init(&msg);
        }
-       transfer->tx_buf = &tx_data[1];
-       transfer->rx_buf = NULL;
-       transfer->len = QCASPI_CMD_LEN;
-       ret = spi_sync(qca->spi_dev, msg);
+       spi_message_add_tail(&transfer[1], &msg);
+       ret = spi_sync(qca->spi_dev, &msg);
 
        if (!ret)
-               ret = msg->status;
+               ret = msg.status;
 
        if (ret)
                qcaspi_spi_error(qca);
index 206f026..66b775d 100644 (file)
@@ -99,22 +99,24 @@ static u32
 qcaspi_write_burst(struct qcaspi *qca, u8 *src, u32 len)
 {
        __be16 cmd;
-       struct spi_message *msg = &qca->spi_msg2;
-       struct spi_transfer *transfer = &qca->spi_xfer2[0];
+       struct spi_message msg;
+       struct spi_transfer transfer[2];
        int ret;
 
+       memset(&transfer, 0, sizeof(transfer));
+       spi_message_init(&msg);
+
        cmd = cpu_to_be16(QCA7K_SPI_WRITE | QCA7K_SPI_EXTERNAL);
-       transfer->tx_buf = &cmd;
-       transfer->rx_buf = NULL;
-       transfer->len = QCASPI_CMD_LEN;
-       transfer = &qca->spi_xfer2[1];
-       transfer->tx_buf = src;
-       transfer->rx_buf = NULL;
-       transfer->len = len;
+       transfer[0].tx_buf = &cmd;
+       transfer[0].len = QCASPI_CMD_LEN;
+       transfer[1].tx_buf = src;
+       transfer[1].len = len;
 
-       ret = spi_sync(qca->spi_dev, msg);
+       spi_message_add_tail(&transfer[0], &msg);
+       spi_message_add_tail(&transfer[1], &msg);
+       ret = spi_sync(qca->spi_dev, &msg);
 
-       if (ret || (msg->actual_length != QCASPI_CMD_LEN + len)) {
+       if (ret || (msg.actual_length != QCASPI_CMD_LEN + len)) {
                qcaspi_spi_error(qca);
                return 0;
        }
@@ -125,17 +127,20 @@ qcaspi_write_burst(struct qcaspi *qca, u8 *src, u32 len)
 static u32
 qcaspi_write_legacy(struct qcaspi *qca, u8 *src, u32 len)
 {
-       struct spi_message *msg = &qca->spi_msg1;
-       struct spi_transfer *transfer = &qca->spi_xfer1;
+       struct spi_message msg;
+       struct spi_transfer transfer;
        int ret;
 
-       transfer->tx_buf = src;
-       transfer->rx_buf = NULL;
-       transfer->len = len;
+       memset(&transfer, 0, sizeof(transfer));
+       spi_message_init(&msg);
+
+       transfer.tx_buf = src;
+       transfer.len = len;
 
-       ret = spi_sync(qca->spi_dev, msg);
+       spi_message_add_tail(&transfer, &msg);
+       ret = spi_sync(qca->spi_dev, &msg);
 
-       if (ret || (msg->actual_length != len)) {
+       if (ret || (msg.actual_length != len)) {
                qcaspi_spi_error(qca);
                return 0;
        }
@@ -146,23 +151,25 @@ qcaspi_write_legacy(struct qcaspi *qca, u8 *src, u32 len)
 static u32
 qcaspi_read_burst(struct qcaspi *qca, u8 *dst, u32 len)
 {
-       struct spi_message *msg = &qca->spi_msg2;
+       struct spi_message msg;
        __be16 cmd;
-       struct spi_transfer *transfer = &qca->spi_xfer2[0];
+       struct spi_transfer transfer[2];
        int ret;
 
+       memset(&transfer, 0, sizeof(transfer));
+       spi_message_init(&msg);
+
        cmd = cpu_to_be16(QCA7K_SPI_READ | QCA7K_SPI_EXTERNAL);
-       transfer->tx_buf = &cmd;
-       transfer->rx_buf = NULL;
-       transfer->len = QCASPI_CMD_LEN;
-       transfer = &qca->spi_xfer2[1];
-       transfer->tx_buf = NULL;
-       transfer->rx_buf = dst;
-       transfer->len = len;
+       transfer[0].tx_buf = &cmd;
+       transfer[0].len = QCASPI_CMD_LEN;
+       transfer[1].rx_buf = dst;
+       transfer[1].len = len;
 
-       ret = spi_sync(qca->spi_dev, msg);
+       spi_message_add_tail(&transfer[0], &msg);
+       spi_message_add_tail(&transfer[1], &msg);
+       ret = spi_sync(qca->spi_dev, &msg);
 
-       if (ret || (msg->actual_length != QCASPI_CMD_LEN + len)) {
+       if (ret || (msg.actual_length != QCASPI_CMD_LEN + len)) {
                qcaspi_spi_error(qca);
                return 0;
        }
@@ -173,17 +180,20 @@ qcaspi_read_burst(struct qcaspi *qca, u8 *dst, u32 len)
 static u32
 qcaspi_read_legacy(struct qcaspi *qca, u8 *dst, u32 len)
 {
-       struct spi_message *msg = &qca->spi_msg1;
-       struct spi_transfer *transfer = &qca->spi_xfer1;
+       struct spi_message msg;
+       struct spi_transfer transfer;
        int ret;
 
-       transfer->tx_buf = NULL;
-       transfer->rx_buf = dst;
-       transfer->len = len;
+       memset(&transfer, 0, sizeof(transfer));
+       spi_message_init(&msg);
 
-       ret = spi_sync(qca->spi_dev, msg);
+       transfer.rx_buf = dst;
+       transfer.len = len;
 
-       if (ret || (msg->actual_length != len)) {
+       spi_message_add_tail(&transfer, &msg);
+       ret = spi_sync(qca->spi_dev, &msg);
+
+       if (ret || (msg.actual_length != len)) {
                qcaspi_spi_error(qca);
                return 0;
        }
@@ -195,19 +205,23 @@ static int
 qcaspi_tx_cmd(struct qcaspi *qca, u16 cmd)
 {
        __be16 tx_data;
-       struct spi_message *msg = &qca->spi_msg1;
-       struct spi_transfer *transfer = &qca->spi_xfer1;
+       struct spi_message msg;
+       struct spi_transfer transfer;
        int ret;
 
+       memset(&transfer, 0, sizeof(transfer));
+
+       spi_message_init(&msg);
+
        tx_data = cpu_to_be16(cmd);
-       transfer->len = sizeof(tx_data);
-       transfer->tx_buf = &tx_data;
-       transfer->rx_buf = NULL;
+       transfer.len = sizeof(cmd);
+       transfer.tx_buf = &tx_data;
+       spi_message_add_tail(&transfer, &msg);
 
-       ret = spi_sync(qca->spi_dev, msg);
+       ret = spi_sync(qca->spi_dev, &msg);
 
        if (!ret)
-               ret = msg->status;
+               ret = msg.status;
 
        if (ret)
                qcaspi_spi_error(qca);
@@ -835,16 +849,6 @@ qcaspi_netdev_setup(struct net_device *dev)
        qca = netdev_priv(dev);
        memset(qca, 0, sizeof(struct qcaspi));
 
-       memset(&qca->spi_xfer1, 0, sizeof(struct spi_transfer));
-       memset(&qca->spi_xfer2, 0, sizeof(struct spi_transfer) * 2);
-
-       spi_message_init(&qca->spi_msg1);
-       spi_message_add_tail(&qca->spi_xfer1, &qca->spi_msg1);
-
-       spi_message_init(&qca->spi_msg2);
-       spi_message_add_tail(&qca->spi_xfer2[0], &qca->spi_msg2);
-       spi_message_add_tail(&qca->spi_xfer2[1], &qca->spi_msg2);
-
        memset(&qca->txr, 0, sizeof(qca->txr));
        qca->txr.count = TX_RING_MAX_LEN;
 }
index fc4beb1..fc0e987 100644 (file)
@@ -83,11 +83,6 @@ struct qcaspi {
        struct tx_ring txr;
        struct qcaspi_stats stats;
 
-       struct spi_message spi_msg1;
-       struct spi_message spi_msg2;
-       struct spi_transfer spi_xfer1;
-       struct spi_transfer spi_xfer2[2];
-
        u8 *rx_buffer;
        u32 buffer_size;
        u8 sync;
index 0efa977..1d86313 100644 (file)
@@ -218,6 +218,7 @@ static const struct pci_device_id rtl8169_pci_tbl[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_REALTEK,     0x8161), 0, 0, RTL_CFG_1 },
        { PCI_DEVICE(PCI_VENDOR_ID_REALTEK,     0x8167), 0, 0, RTL_CFG_0 },
        { PCI_DEVICE(PCI_VENDOR_ID_REALTEK,     0x8168), 0, 0, RTL_CFG_1 },
+       { PCI_DEVICE(PCI_VENDOR_ID_NCUBE,       0x8168), 0, 0, RTL_CFG_1 },
        { PCI_DEVICE(PCI_VENDOR_ID_REALTEK,     0x8169), 0, 0, RTL_CFG_0 },
        { PCI_VENDOR_ID_DLINK,                  0x4300,
                PCI_VENDOR_ID_DLINK, 0x4b10,             0, 0, RTL_CFG_1 },
@@ -630,7 +631,7 @@ struct rtl8169_tc_offsets {
 };
 
 enum rtl_flag {
-       RTL_FLAG_TASK_ENABLED,
+       RTL_FLAG_TASK_ENABLED = 0,
        RTL_FLAG_TASK_SLOW_PENDING,
        RTL_FLAG_TASK_RESET_PENDING,
        RTL_FLAG_MAX
@@ -4522,7 +4523,7 @@ static void rtl8169_hw_reset(struct rtl8169_private *tp)
        rtl_hw_reset(tp);
 }
 
-static void rtl_set_rx_tx_config_registers(struct rtl8169_private *tp)
+static void rtl_set_tx_config_registers(struct rtl8169_private *tp)
 {
        /* Set DMA burst size and Interframe Gap Time */
        RTL_W32(tp, TxConfig, (TX_DMA_BURST << TxDMAShift) |
@@ -4633,12 +4634,14 @@ static void rtl_hw_start(struct  rtl8169_private *tp)
 
        rtl_set_rx_max_size(tp);
        rtl_set_rx_tx_desc_registers(tp);
-       rtl_set_rx_tx_config_registers(tp);
        RTL_W8(tp, Cfg9346, Cfg9346_Lock);
 
        /* Initially a 10 us delay. Turned it into a PCI commit. - FR */
        RTL_R8(tp, IntrMask);
        RTL_W8(tp, ChipCmd, CmdTxEnb | CmdRxEnb);
+       rtl_init_rxcfg(tp);
+       rtl_set_tx_config_registers(tp);
+
        rtl_set_rx_mode(tp->dev);
        /* no early-rx interrupts */
        RTL_W16(tp, MultiIntr, RTL_R16(tp, MultiIntr) & 0xf000);
@@ -6652,7 +6655,8 @@ static int rtl8169_close(struct net_device *dev)
        rtl8169_update_counters(tp);
 
        rtl_lock_work(tp);
-       clear_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags);
+       /* Clear all task flags */
+       bitmap_zero(tp->wk.flags, RTL_FLAG_MAX);
 
        rtl8169_down(dev);
        rtl_unlock_work(tp);
@@ -6835,7 +6839,9 @@ static void rtl8169_net_suspend(struct net_device *dev)
 
        rtl_lock_work(tp);
        napi_disable(&tp->napi);
-       clear_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags);
+       /* Clear all task flags */
+       bitmap_zero(tp->wk.flags, RTL_FLAG_MAX);
+
        rtl_unlock_work(tp);
 
        rtl_pll_power_down(tp);
index f3f7477..bb0ebdf 100644 (file)
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
 #
 # Renesas device configuration
 #
index a05102a..f21ab8c 100644 (file)
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
 #
 # Makefile for the Renesas device drivers.
 #
index eede70e..0721b5c 100644 (file)
@@ -1,13 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
 /* PTP 1588 clock using the Renesas Ethernet AVB
  *
  * Copyright (C) 2013-2015 Renesas Electronics Corporation
  * Copyright (C) 2015 Renesas Solutions Corp.
  * Copyright (C) 2015-2016 Cogent Embedded, Inc. <source@cogentembedded.com>
- *
- *  This program is free software; you can redistribute it and/or modify
- *  it under the terms of the GNU General Public License as published by
- *  the Free Software Foundation; either version 2 of the License, or
- *  (at your option) any later version.
  */
 
 #include "ravb.h"
index ad4433d..f27a0dc 100644 (file)
@@ -798,6 +798,41 @@ static struct sh_eth_cpu_data r8a77980_data = {
        .magic          = 1,
        .cexcr          = 1,
 };
+
+/* R7S9210 */
+static struct sh_eth_cpu_data r7s9210_data = {
+       .soft_reset     = sh_eth_soft_reset,
+
+       .set_duplex     = sh_eth_set_duplex,
+       .set_rate       = sh_eth_set_rate_rcar,
+
+       .register_type  = SH_ETH_REG_FAST_SH4,
+
+       .edtrr_trns     = EDTRR_TRNS_ETHER,
+       .ecsr_value     = ECSR_ICD,
+       .ecsipr_value   = ECSIPR_ICDIP,
+       .eesipr_value   = EESIPR_TWBIP | EESIPR_TABTIP | EESIPR_RABTIP |
+                         EESIPR_RFCOFIP | EESIPR_ECIIP | EESIPR_FTCIP |
+                         EESIPR_TDEIP | EESIPR_TFUFIP | EESIPR_FRIP |
+                         EESIPR_RDEIP | EESIPR_RFOFIP | EESIPR_CNDIP |
+                         EESIPR_DLCIP | EESIPR_CDIP | EESIPR_TROIP |
+                         EESIPR_RMAFIP | EESIPR_RRFIP | EESIPR_RTLFIP |
+                         EESIPR_RTSFIP | EESIPR_PREIP | EESIPR_CERFIP,
+
+       .tx_check       = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_TRO,
+       .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE |
+                         EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE,
+
+       .fdr_value      = 0x0000070f,
+
+       .apr            = 1,
+       .mpr            = 1,
+       .tpauser        = 1,
+       .hw_swap        = 1,
+       .rpadir         = 1,
+       .no_ade         = 1,
+       .xdfar_rw       = 1,
+};
 #endif /* CONFIG_OF */
 
 static void sh_eth_set_rate_sh7724(struct net_device *ndev)
@@ -3121,6 +3156,7 @@ static const struct of_device_id sh_eth_match_table[] = {
        { .compatible = "renesas,ether-r8a7794", .data = &rcar_gen2_data },
        { .compatible = "renesas,gether-r8a77980", .data = &r8a77980_data },
        { .compatible = "renesas,ether-r7s72100", .data = &r7s72100_data },
+       { .compatible = "renesas,ether-r7s9210", .data = &r7s9210_data },
        { .compatible = "renesas,rcar-gen1-ether", .data = &rcar_gen1_data },
        { .compatible = "renesas,rcar-gen2-ether", .data = &rcar_gen2_data },
        { }
index bf4aceb..324049e 100644 (file)
@@ -110,7 +110,7 @@ config DWMAC_ROCKCHIP
 
 config DWMAC_SOCFPGA
        tristate "SOCFPGA dwmac support"
-       default ARCH_SOCFPGA
+       default (ARCH_SOCFPGA || ARCH_STRATIX10)
        depends on OF && (ARCH_SOCFPGA || ARCH_STRATIX10 || COMPILE_TEST)
        select MFD_SYSCON
        help
index 76649ad..c0a855b 100644 (file)
@@ -112,7 +112,6 @@ struct stmmac_priv {
        u32 tx_count_frames;
        u32 tx_coal_frames;
        u32 tx_coal_timer;
-       bool tx_timer_armed;
 
        int tx_coalesce;
        int hwts_tx_en;
index ff1ffb4..9f458bb 100644 (file)
@@ -3147,16 +3147,13 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
         * element in case of no SG.
         */
        priv->tx_count_frames += nfrags + 1;
-       if (likely(priv->tx_coal_frames > priv->tx_count_frames) &&
-           !priv->tx_timer_armed) {
+       if (likely(priv->tx_coal_frames > priv->tx_count_frames)) {
                mod_timer(&priv->txtimer,
                          STMMAC_COAL_TIMER(priv->tx_coal_timer));
-               priv->tx_timer_armed = true;
        } else {
                priv->tx_count_frames = 0;
                stmmac_set_tx_ic(priv, desc);
                priv->xstats.tx_set_ic_bit++;
-               priv->tx_timer_armed = false;
        }
 
        skb_tx_timestamp(skb);
index 0c1adad..396e1cd 100644 (file)
@@ -170,10 +170,13 @@ void cpsw_phy_sel(struct device *dev, phy_interface_t phy_mode, int slave)
        struct device_node *node;
        struct cpsw_phy_sel_priv *priv;
 
-       node = of_get_child_by_name(dev->of_node, "cpsw-phy-sel");
+       node = of_parse_phandle(dev->of_node, "cpsw-phy-sel", 0);
        if (!node) {
-               dev_err(dev, "Phy mode driver DT not found\n");
-               return;
+               node = of_get_child_by_name(dev->of_node, "cpsw-phy-sel");
+               if (!node) {
+                       dev_err(dev, "Phy mode driver DT not found\n");
+                       return;
+               }
        }
 
        dev = bus_find_device(&platform_bus_type, NULL, node, match);
index 1121a1e..70921bb 100644 (file)
@@ -2206,6 +2206,16 @@ static int netvsc_probe(struct hv_device *dev,
 
        memcpy(net->dev_addr, device_info.mac_adr, ETH_ALEN);
 
+       /* We must get rtnl lock before scheduling nvdev->subchan_work,
+        * otherwise netvsc_subchan_work() can get rtnl lock first and wait
+        * all subchannels to show up, but that may not happen because
+        * netvsc_probe() can't get rtnl lock and as a result vmbus_onoffer()
+        * -> ... -> device_add() -> ... -> __device_attach() can't get
+        * the device lock, so all the subchannels can't be processed --
+        * finally netvsc_subchan_work() hangs forever.
+        */
+       rtnl_lock();
+
        if (nvdev->num_chn > 1)
                schedule_work(&nvdev->subchan_work);
 
@@ -2224,7 +2234,6 @@ static int netvsc_probe(struct hv_device *dev,
        else
                net->max_mtu = ETH_DATA_LEN;
 
-       rtnl_lock();
        ret = register_netdevice(net);
        if (ret != 0) {
                pr_err("Unable to register netdev.\n");
index 4637d98..52fffb9 100644 (file)
@@ -398,7 +398,6 @@ static umode_t sfp_hwmon_is_visible(const void *data,
        switch (type) {
        case hwmon_temp:
                switch (attr) {
-               case hwmon_temp_input:
                case hwmon_temp_min_alarm:
                case hwmon_temp_max_alarm:
                case hwmon_temp_lcrit_alarm:
@@ -407,13 +406,16 @@ static umode_t sfp_hwmon_is_visible(const void *data,
                case hwmon_temp_max:
                case hwmon_temp_lcrit:
                case hwmon_temp_crit:
+                       if (!(sfp->id.ext.enhopts & SFP_ENHOPTS_ALARMWARN))
+                               return 0;
+                       /* fall through */
+               case hwmon_temp_input:
                        return 0444;
                default:
                        return 0;
                }
        case hwmon_in:
                switch (attr) {
-               case hwmon_in_input:
                case hwmon_in_min_alarm:
                case hwmon_in_max_alarm:
                case hwmon_in_lcrit_alarm:
@@ -422,13 +424,16 @@ static umode_t sfp_hwmon_is_visible(const void *data,
                case hwmon_in_max:
                case hwmon_in_lcrit:
                case hwmon_in_crit:
+                       if (!(sfp->id.ext.enhopts & SFP_ENHOPTS_ALARMWARN))
+                               return 0;
+                       /* fall through */
+               case hwmon_in_input:
                        return 0444;
                default:
                        return 0;
                }
        case hwmon_curr:
                switch (attr) {
-               case hwmon_curr_input:
                case hwmon_curr_min_alarm:
                case hwmon_curr_max_alarm:
                case hwmon_curr_lcrit_alarm:
@@ -437,6 +442,10 @@ static umode_t sfp_hwmon_is_visible(const void *data,
                case hwmon_curr_max:
                case hwmon_curr_lcrit:
                case hwmon_curr_crit:
+                       if (!(sfp->id.ext.enhopts & SFP_ENHOPTS_ALARMWARN))
+                               return 0;
+                       /* fall through */
+               case hwmon_curr_input:
                        return 0444;
                default:
                        return 0;
@@ -452,7 +461,6 @@ static umode_t sfp_hwmon_is_visible(const void *data,
                    channel == 1)
                        return 0;
                switch (attr) {
-               case hwmon_power_input:
                case hwmon_power_min_alarm:
                case hwmon_power_max_alarm:
                case hwmon_power_lcrit_alarm:
@@ -461,6 +469,10 @@ static umode_t sfp_hwmon_is_visible(const void *data,
                case hwmon_power_max:
                case hwmon_power_lcrit:
                case hwmon_power_crit:
+                       if (!(sfp->id.ext.enhopts & SFP_ENHOPTS_ALARMWARN))
+                               return 0;
+                       /* fall through */
+               case hwmon_power_input:
                        return 0444;
                default:
                        return 0;
index cb0cc30..e3270de 100644 (file)
@@ -967,6 +967,13 @@ static const struct usb_device_id products[] = {
                USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0x581d, USB_CLASS_VENDOR_SPEC, 1, 7),
                .driver_info = (unsigned long)&qmi_wwan_info,
        },
+       {       /* Quectel EP06/EG06/EM06 */
+               USB_DEVICE_AND_INTERFACE_INFO(0x2c7c, 0x0306,
+                                             USB_CLASS_VENDOR_SPEC,
+                                             USB_SUBCLASS_VENDOR_SPEC,
+                                             0xff),
+               .driver_info        = (unsigned long)&qmi_wwan_info_quirk_dtr,
+       },
 
        /* 3. Combined interface devices matching on interface number */
        {QMI_FIXED_INTF(0x0408, 0xea42, 4)},    /* Yota / Megafon M100-1 */
@@ -1255,7 +1262,6 @@ static const struct usb_device_id products[] = {
        {QMI_QUIRK_SET_DTR(0x2c7c, 0x0121, 4)}, /* Quectel EC21 Mini PCIe */
        {QMI_QUIRK_SET_DTR(0x2c7c, 0x0191, 4)}, /* Quectel EG91 */
        {QMI_FIXED_INTF(0x2c7c, 0x0296, 4)},    /* Quectel BG96 */
-       {QMI_QUIRK_SET_DTR(0x2c7c, 0x0306, 4)}, /* Quectel EP06 Mini PCIe */
 
        /* 4. Gobi 1000 devices */
        {QMI_GOBI1K_DEVICE(0x05c6, 0x9212)},    /* Acer Gobi Modem Device */
@@ -1331,6 +1337,19 @@ static bool quectel_ec20_detected(struct usb_interface *intf)
        return false;
 }
 
+static bool quectel_ep06_diag_detected(struct usb_interface *intf)
+{
+       struct usb_device *dev = interface_to_usbdev(intf);
+       struct usb_interface_descriptor intf_desc = intf->cur_altsetting->desc;
+
+       if (le16_to_cpu(dev->descriptor.idVendor) == 0x2c7c &&
+           le16_to_cpu(dev->descriptor.idProduct) == 0x0306 &&
+           intf_desc.bNumEndpoints == 2)
+               return true;
+
+       return false;
+}
+
 static int qmi_wwan_probe(struct usb_interface *intf,
                          const struct usb_device_id *prod)
 {
@@ -1365,6 +1384,15 @@ static int qmi_wwan_probe(struct usb_interface *intf,
                return -ENODEV;
        }
 
+       /* Quectel EP06/EM06/EG06 supports dynamic interface configuration, so
+        * we need to match on class/subclass/protocol. These values are
+        * identical for the diagnostic- and QMI-interface, but bNumEndpoints is
+        * different. Ignore the current interface if the number of endpoints
+        * the number for the diag interface (two).
+        */
+       if (quectel_ep06_diag_detected(intf))
+               return -ENODEV;
+
        return usbnet_probe(intf, id);
 }
 
index b4c3a95..73969db 100644 (file)
@@ -985,15 +985,12 @@ iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg,
        const u8 *nvm_chan = cfg->nvm_type == IWL_NVM_EXT ?
                             iwl_ext_nvm_channels : iwl_nvm_channels;
        struct ieee80211_regdomain *regd, *copy_rd;
-       int size_of_regd, regd_to_copy, wmms_to_copy;
-       int size_of_wmms = 0;
+       int size_of_regd, regd_to_copy;
        struct ieee80211_reg_rule *rule;
-       struct ieee80211_wmm_rule *wmm_rule, *d_wmm, *s_wmm;
        struct regdb_ptrs *regdb_ptrs;
        enum nl80211_band band;
        int center_freq, prev_center_freq = 0;
-       int valid_rules = 0, n_wmms = 0;
-       int i;
+       int valid_rules = 0;
        bool new_rule;
        int max_num_ch = cfg->nvm_type == IWL_NVM_EXT ?
                         IWL_NVM_NUM_CHANNELS_EXT : IWL_NVM_NUM_CHANNELS;
@@ -1012,11 +1009,7 @@ iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg,
                sizeof(struct ieee80211_regdomain) +
                num_of_ch * sizeof(struct ieee80211_reg_rule);
 
-       if (geo_info & GEO_WMM_ETSI_5GHZ_INFO)
-               size_of_wmms =
-                       num_of_ch * sizeof(struct ieee80211_wmm_rule);
-
-       regd = kzalloc(size_of_regd + size_of_wmms, GFP_KERNEL);
+       regd = kzalloc(size_of_regd, GFP_KERNEL);
        if (!regd)
                return ERR_PTR(-ENOMEM);
 
@@ -1030,8 +1023,6 @@ iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg,
        regd->alpha2[0] = fw_mcc >> 8;
        regd->alpha2[1] = fw_mcc & 0xff;
 
-       wmm_rule = (struct ieee80211_wmm_rule *)((u8 *)regd + size_of_regd);
-
        for (ch_idx = 0; ch_idx < num_of_ch; ch_idx++) {
                ch_flags = (u16)__le32_to_cpup(channels + ch_idx);
                band = (ch_idx < NUM_2GHZ_CHANNELS) ?
@@ -1085,26 +1076,10 @@ iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg,
                    band == NL80211_BAND_2GHZ)
                        continue;
 
-               if (!reg_query_regdb_wmm(regd->alpha2, center_freq,
-                                        &regdb_ptrs[n_wmms].token, wmm_rule)) {
-                       /* Add only new rules */
-                       for (i = 0; i < n_wmms; i++) {
-                               if (regdb_ptrs[i].token ==
-                                   regdb_ptrs[n_wmms].token) {
-                                       rule->wmm_rule = regdb_ptrs[i].rule;
-                                       break;
-                               }
-                       }
-                       if (i == n_wmms) {
-                               rule->wmm_rule = wmm_rule;
-                               regdb_ptrs[n_wmms++].rule = wmm_rule;
-                               wmm_rule++;
-                       }
-               }
+               reg_query_regdb_wmm(regd->alpha2, center_freq, rule);
        }
 
        regd->n_reg_rules = valid_rules;
-       regd->n_wmm_rules = n_wmms;
 
        /*
         * Narrow down regdom for unused regulatory rules to prevent hole
@@ -1113,28 +1088,13 @@ iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg,
        regd_to_copy = sizeof(struct ieee80211_regdomain) +
                valid_rules * sizeof(struct ieee80211_reg_rule);
 
-       wmms_to_copy = sizeof(struct ieee80211_wmm_rule) * n_wmms;
-
-       copy_rd = kzalloc(regd_to_copy + wmms_to_copy, GFP_KERNEL);
+       copy_rd = kzalloc(regd_to_copy, GFP_KERNEL);
        if (!copy_rd) {
                copy_rd = ERR_PTR(-ENOMEM);
                goto out;
        }
 
        memcpy(copy_rd, regd, regd_to_copy);
-       memcpy((u8 *)copy_rd + regd_to_copy, (u8 *)regd + size_of_regd,
-              wmms_to_copy);
-
-       d_wmm = (struct ieee80211_wmm_rule *)((u8 *)copy_rd + regd_to_copy);
-       s_wmm = (struct ieee80211_wmm_rule *)((u8 *)regd + size_of_regd);
-
-       for (i = 0; i < regd->n_reg_rules; i++) {
-               if (!regd->reg_rules[i].wmm_rule)
-                       continue;
-
-               copy_rd->reg_rules[i].wmm_rule = d_wmm +
-                       (regd->reg_rules[i].wmm_rule - s_wmm);
-       }
 
 out:
        kfree(regdb_ptrs);
index 998dfac..1068757 100644 (file)
@@ -34,6 +34,7 @@
 #include <net/net_namespace.h>
 #include <net/netns/generic.h>
 #include <linux/rhashtable.h>
+#include <linux/nospec.h>
 #include "mac80211_hwsim.h"
 
 #define WARN_QUEUE 100
@@ -2820,9 +2821,6 @@ static int mac80211_hwsim_new_radio(struct genl_info *info,
                                IEEE80211_VHT_CAP_SHORT_GI_80 |
                                IEEE80211_VHT_CAP_SHORT_GI_160 |
                                IEEE80211_VHT_CAP_TXSTBC |
-                               IEEE80211_VHT_CAP_RXSTBC_1 |
-                               IEEE80211_VHT_CAP_RXSTBC_2 |
-                               IEEE80211_VHT_CAP_RXSTBC_3 |
                                IEEE80211_VHT_CAP_RXSTBC_4 |
                                IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK;
                        sband->vht_cap.vht_mcs.rx_mcs_map =
@@ -3317,6 +3315,11 @@ static int hwsim_new_radio_nl(struct sk_buff *msg, struct genl_info *info)
        if (info->attrs[HWSIM_ATTR_CHANNELS])
                param.channels = nla_get_u32(info->attrs[HWSIM_ATTR_CHANNELS]);
 
+       if (param.channels < 1) {
+               GENL_SET_ERR_MSG(info, "must have at least one channel");
+               return -EINVAL;
+       }
+
        if (param.channels > CFG80211_MAX_NUM_DIFFERENT_CHANNELS) {
                GENL_SET_ERR_MSG(info, "too many channels specified");
                return -EINVAL;
@@ -3350,6 +3353,9 @@ static int hwsim_new_radio_nl(struct sk_buff *msg, struct genl_info *info)
                        kfree(hwname);
                        return -EINVAL;
                }
+
+               idx = array_index_nospec(idx,
+                                        ARRAY_SIZE(hwsim_world_regdom_custom));
                param.regd = hwsim_world_regdom_custom[idx];
        }
 
index 73f596a..9407acb 100644 (file)
@@ -87,8 +87,7 @@ struct netfront_cb {
 /* IRQ name is queue name with "-tx" or "-rx" appended */
 #define IRQ_NAME_SIZE (QUEUE_NAME_SIZE + 3)
 
-static DECLARE_WAIT_QUEUE_HEAD(module_load_q);
-static DECLARE_WAIT_QUEUE_HEAD(module_unload_q);
+static DECLARE_WAIT_QUEUE_HEAD(module_wq);
 
 struct netfront_stats {
        u64                     packets;
@@ -1332,11 +1331,11 @@ static struct net_device *xennet_create_dev(struct xenbus_device *dev)
        netif_carrier_off(netdev);
 
        xenbus_switch_state(dev, XenbusStateInitialising);
-       wait_event(module_load_q,
-                          xenbus_read_driver_state(dev->otherend) !=
-                          XenbusStateClosed &&
-                          xenbus_read_driver_state(dev->otherend) !=
-                          XenbusStateUnknown);
+       wait_event(module_wq,
+                  xenbus_read_driver_state(dev->otherend) !=
+                  XenbusStateClosed &&
+                  xenbus_read_driver_state(dev->otherend) !=
+                  XenbusStateUnknown);
        return netdev;
 
  exit:
@@ -2010,15 +2009,14 @@ static void netback_changed(struct xenbus_device *dev,
 
        dev_dbg(&dev->dev, "%s\n", xenbus_strstate(backend_state));
 
+       wake_up_all(&module_wq);
+
        switch (backend_state) {
        case XenbusStateInitialising:
        case XenbusStateInitialised:
        case XenbusStateReconfiguring:
        case XenbusStateReconfigured:
-               break;
-
        case XenbusStateUnknown:
-               wake_up_all(&module_unload_q);
                break;
 
        case XenbusStateInitWait:
@@ -2034,12 +2032,10 @@ static void netback_changed(struct xenbus_device *dev,
                break;
 
        case XenbusStateClosed:
-               wake_up_all(&module_unload_q);
                if (dev->state == XenbusStateClosed)
                        break;
                /* Missed the backend's CLOSING state -- fallthrough */
        case XenbusStateClosing:
-               wake_up_all(&module_unload_q);
                xenbus_frontend_closed(dev);
                break;
        }
@@ -2147,14 +2143,14 @@ static int xennet_remove(struct xenbus_device *dev)
 
        if (xenbus_read_driver_state(dev->otherend) != XenbusStateClosed) {
                xenbus_switch_state(dev, XenbusStateClosing);
-               wait_event(module_unload_q,
+               wait_event(module_wq,
                           xenbus_read_driver_state(dev->otherend) ==
                           XenbusStateClosing ||
                           xenbus_read_driver_state(dev->otherend) ==
                           XenbusStateUnknown);
 
                xenbus_switch_state(dev, XenbusStateClosed);
-               wait_event(module_unload_q,
+               wait_event(module_wq,
                           xenbus_read_driver_state(dev->otherend) ==
                           XenbusStateClosed ||
                           xenbus_read_driver_state(dev->otherend) ==
index 3533e91..bfc4da6 100644 (file)
@@ -66,6 +66,7 @@ struct nvmet_rdma_rsp {
 
        struct nvmet_req        req;
 
+       bool                    allocated;
        u8                      n_rdma;
        u32                     flags;
        u32                     invalidate_rkey;
@@ -174,11 +175,19 @@ nvmet_rdma_get_rsp(struct nvmet_rdma_queue *queue)
        unsigned long flags;
 
        spin_lock_irqsave(&queue->rsps_lock, flags);
-       rsp = list_first_entry(&queue->free_rsps,
+       rsp = list_first_entry_or_null(&queue->free_rsps,
                                struct nvmet_rdma_rsp, free_list);
-       list_del(&rsp->free_list);
+       if (likely(rsp))
+               list_del(&rsp->free_list);
        spin_unlock_irqrestore(&queue->rsps_lock, flags);
 
+       if (unlikely(!rsp)) {
+               rsp = kmalloc(sizeof(*rsp), GFP_KERNEL);
+               if (unlikely(!rsp))
+                       return NULL;
+               rsp->allocated = true;
+       }
+
        return rsp;
 }
 
@@ -187,6 +196,11 @@ nvmet_rdma_put_rsp(struct nvmet_rdma_rsp *rsp)
 {
        unsigned long flags;
 
+       if (rsp->allocated) {
+               kfree(rsp);
+               return;
+       }
+
        spin_lock_irqsave(&rsp->queue->rsps_lock, flags);
        list_add_tail(&rsp->free_list, &rsp->queue->free_rsps);
        spin_unlock_irqrestore(&rsp->queue->rsps_lock, flags);
@@ -776,6 +790,15 @@ static void nvmet_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc)
 
        cmd->queue = queue;
        rsp = nvmet_rdma_get_rsp(queue);
+       if (unlikely(!rsp)) {
+               /*
+                * we get here only under memory pressure,
+                * silently drop and have the host retry
+                * as we can't even fail it.
+                */
+               nvmet_rdma_post_recv(queue->dev, cmd);
+               return;
+       }
        rsp->queue = queue;
        rsp->cmd = cmd;
        rsp->flags = 0;
index 9095b82..74eaedd 100644 (file)
@@ -140,6 +140,9 @@ void of_populate_phandle_cache(void)
                if (np->phandle && np->phandle != OF_PHANDLE_ILLEGAL)
                        phandles++;
 
+       if (!phandles)
+               goto out;
+
        cache_entries = roundup_pow_of_two(phandles);
        phandle_cache_mask = cache_entries - 1;
 
index 7ba90c2..6c59673 100644 (file)
@@ -241,6 +241,10 @@ static struct amba_device *of_amba_device_create(struct device_node *node,
        if (!dev)
                goto err_clear_flag;
 
+       /* AMBA devices only support a single DMA mask */
+       dev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+       dev->dev.dma_mask = &dev->dev.coherent_dma_mask;
+
        /* setup generic device info */
        dev->dev.of_node = of_node_get(node);
        dev->dev.fwnode = &node->fwnode;
index 7136e34..a938abd 100644 (file)
@@ -496,7 +496,7 @@ int pciehp_power_on_slot(struct slot *slot)
        u16 slot_status;
        int retval;
 
-       /* Clear sticky power-fault bit from previous power failures */
+       /* Clear power-fault bit from previous power failures */
        pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &slot_status);
        if (slot_status & PCI_EXP_SLTSTA_PFD)
                pcie_capability_write_word(pdev, PCI_EXP_SLTSTA,
@@ -646,6 +646,14 @@ static irqreturn_t pciehp_ist(int irq, void *dev_id)
                pciehp_handle_button_press(slot);
        }
 
+       /* Check Power Fault Detected */
+       if ((events & PCI_EXP_SLTSTA_PFD) && !ctrl->power_fault_detected) {
+               ctrl->power_fault_detected = 1;
+               ctrl_err(ctrl, "Slot(%s): Power fault\n", slot_name(slot));
+               pciehp_set_attention_status(slot, 1);
+               pciehp_green_led_off(slot);
+       }
+
        /*
         * Disable requests have higher priority than Presence Detect Changed
         * or Data Link Layer State Changed events.
@@ -657,14 +665,6 @@ static irqreturn_t pciehp_ist(int irq, void *dev_id)
                pciehp_handle_presence_or_link_change(slot, events);
        up_read(&ctrl->reset_lock);
 
-       /* Check Power Fault Detected */
-       if ((events & PCI_EXP_SLTSTA_PFD) && !ctrl->power_fault_detected) {
-               ctrl->power_fault_detected = 1;
-               ctrl_err(ctrl, "Slot(%s): Power fault\n", slot_name(slot));
-               pciehp_set_attention_status(slot, 1);
-               pciehp_green_led_off(slot);
-       }
-
        pci_config_pm_runtime_put(pdev);
        wake_up(&ctrl->requester);
        return IRQ_HANDLED;
index 29ff961..1835f3a 100644 (file)
@@ -4547,6 +4547,7 @@ int pci_bridge_secondary_bus_reset(struct pci_dev *dev)
 
        return pci_dev_wait(dev, "bus reset", PCIE_RESET_READY_POLL_MS);
 }
+EXPORT_SYMBOL_GPL(pci_bridge_secondary_bus_reset);
 
 static int pci_parent_bus_reset(struct pci_dev *dev, int probe)
 {
@@ -5200,7 +5201,7 @@ static int __pci_reset_bus(struct pci_bus *bus)
  */
 int pci_reset_bus(struct pci_dev *pdev)
 {
-       return pci_probe_reset_slot(pdev->slot) ?
+       return (!pci_probe_reset_slot(pdev->slot)) ?
            __pci_reset_slot(pdev->slot) : __pci_reset_bus(pdev->bus);
 }
 EXPORT_SYMBOL_GPL(pci_reset_bus);
index ec78400..201f9e5 100644 (file)
@@ -2074,6 +2074,7 @@ static void pci_configure_eetlp_prefix(struct pci_dev *dev)
 {
 #ifdef CONFIG_PCI_PASID
        struct pci_dev *bridge;
+       int pcie_type;
        u32 cap;
 
        if (!pci_is_pcie(dev))
@@ -2083,7 +2084,9 @@ static void pci_configure_eetlp_prefix(struct pci_dev *dev)
        if (!(cap & PCI_EXP_DEVCAP2_EE_PREFIX))
                return;
 
-       if (pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT)
+       pcie_type = pci_pcie_type(dev);
+       if (pcie_type == PCI_EXP_TYPE_ROOT_PORT ||
+           pcie_type == PCI_EXP_TYPE_RC_END)
                dev->eetlp_prefix_path = 1;
        else {
                bridge = pci_upstream_bridge(dev);
index ef7143a..6bc27b7 100644 (file)
@@ -4355,11 +4355,6 @@ static int pci_quirk_qcom_rp_acs(struct pci_dev *dev, u16 acs_flags)
  *
  * 0x9d10-0x9d1b PCI Express Root port #{1-12}
  *
- * The 300 series chipset suffers from the same bug so include those root
- * ports here as well.
- *
- * 0xa32c-0xa343 PCI Express Root port #{0-24}
- *
  * [1] http://www.intel.com/content/www/us/en/chipsets/100-series-chipset-datasheet-vol-2.html
  * [2] http://www.intel.com/content/www/us/en/chipsets/100-series-chipset-datasheet-vol-1.html
  * [3] http://www.intel.com/content/www/us/en/chipsets/100-series-chipset-spec-update.html
@@ -4377,7 +4372,6 @@ static bool pci_quirk_intel_spt_pch_acs_match(struct pci_dev *dev)
        case 0xa110 ... 0xa11f: case 0xa167 ... 0xa16a: /* Sunrise Point */
        case 0xa290 ... 0xa29f: case 0xa2e7 ... 0xa2ee: /* Union Point */
        case 0x9d10 ... 0x9d1b: /* 7th & 8th Gen Mobile */
-       case 0xa32c ... 0xa343:                         /* 300 series */
                return true;
        }
 
index 9940cc7..54a8b30 100644 (file)
@@ -14,6 +14,8 @@
 #include <linux/poll.h>
 #include <linux/wait.h>
 
+#include <linux/nospec.h>
+
 MODULE_DESCRIPTION("Microsemi Switchtec(tm) PCIe Management Driver");
 MODULE_VERSION("0.1");
 MODULE_LICENSE("GPL");
@@ -909,6 +911,8 @@ static int ioctl_port_to_pff(struct switchtec_dev *stdev,
        default:
                if (p.port > ARRAY_SIZE(pcfg->dsp_pff_inst_id))
                        return -EINVAL;
+               p.port = array_index_nospec(p.port,
+                                       ARRAY_SIZE(pcfg->dsp_pff_inst_id) + 1);
                p.pff = ioread32(&pcfg->dsp_pff_inst_id[p.port - 1]);
                break;
        }
index ece41fb..c4f4d90 100644 (file)
@@ -1040,7 +1040,7 @@ static int madera_pin_probe(struct platform_device *pdev)
        }
 
        /* if the configuration is provided through pdata, apply it */
-       if (pdata) {
+       if (pdata && pdata->gpio_configs) {
                ret = pinctrl_register_mappings(pdata->gpio_configs,
                                                pdata->n_gpio_configs);
                if (ret) {
index 6a1b605..628817c 100644 (file)
@@ -793,7 +793,7 @@ static int ingenic_pinctrl_probe(struct platform_device *pdev)
 
                err = pinctrl_generic_add_group(jzpc->pctl, group->name,
                                group->pins, group->num_pins, group->data);
-               if (err) {
+               if (err < 0) {
                        dev_err(dev, "Failed to register group %s\n",
                                        group->name);
                        return err;
@@ -806,7 +806,7 @@ static int ingenic_pinctrl_probe(struct platform_device *pdev)
                err = pinmux_generic_add_function(jzpc->pctl, func->name,
                                func->group_names, func->num_group_names,
                                func->data);
-               if (err) {
+               if (err < 0) {
                        dev_err(dev, "Failed to register function %s\n",
                                        func->name);
                        return err;
index 2155a30..5d72ffa 100644 (file)
@@ -634,6 +634,29 @@ static void msm_gpio_irq_mask(struct irq_data *d)
        raw_spin_lock_irqsave(&pctrl->lock, flags);
 
        val = readl(pctrl->regs + g->intr_cfg_reg);
+       /*
+        * There are two bits that control interrupt forwarding to the CPU. The
+        * RAW_STATUS_EN bit causes the level or edge sensed on the line to be
+        * latched into the interrupt status register when the hardware detects
+        * an irq that it's configured for (either edge for edge type or level
+        * for level type irq). The 'non-raw' status enable bit causes the
+        * hardware to assert the summary interrupt to the CPU if the latched
+        * status bit is set. There's a bug though, the edge detection logic
+        * seems to have a problem where toggling the RAW_STATUS_EN bit may
+        * cause the status bit to latch spuriously when there isn't any edge
+        * so we can't touch that bit for edge type irqs and we have to keep
+        * the bit set anyway so that edges are latched while the line is masked.
+        *
+        * To make matters more complicated, leaving the RAW_STATUS_EN bit
+        * enabled all the time causes level interrupts to re-latch into the
+        * status register because the level is still present on the line after
+        * we ack it. We clear the raw status enable bit during mask here and
+        * set the bit on unmask so the interrupt can't latch into the hardware
+        * while it's masked.
+        */
+       if (irqd_get_trigger_type(d) & IRQ_TYPE_LEVEL_MASK)
+               val &= ~BIT(g->intr_raw_status_bit);
+
        val &= ~BIT(g->intr_enable_bit);
        writel(val, pctrl->regs + g->intr_cfg_reg);
 
@@ -655,6 +678,7 @@ static void msm_gpio_irq_unmask(struct irq_data *d)
        raw_spin_lock_irqsave(&pctrl->lock, flags);
 
        val = readl(pctrl->regs + g->intr_cfg_reg);
+       val |= BIT(g->intr_raw_status_bit);
        val |= BIT(g->intr_enable_bit);
        writel(val, pctrl->regs + g->intr_cfg_reg);
 
index ec891bc..f039266 100644 (file)
@@ -872,8 +872,6 @@ static int hex2bitmap(const char *str, unsigned long *bitmap, int bits)
        if (bits & 0x07)
                return -EINVAL;
 
-       memset(bitmap, 0, bits / 8);
-
        if (str[0] == '0' && str[1] == 'x')
                str++;
        if (*str == 'x')
@@ -895,25 +893,23 @@ static int hex2bitmap(const char *str, unsigned long *bitmap, int bits)
 }
 
 /*
- * str2clrsetmasks() - parse bitmask argument and set the clear and
- * the set bitmap mask. A concatenation (done with ',') of these terms
- * is recognized:
+ * modify_bitmap() - parse bitmask argument and modify an existing
+ * bit mask accordingly. A concatenation (done with ',') of these
+ * terms is recognized:
  *   +<bitnr>[-<bitnr>] or -<bitnr>[-<bitnr>]
  * <bitnr> may be any valid number (hex, decimal or octal) in the range
  * 0...bits-1; the leading + or - is required. Here are some examples:
  *   +0-15,+32,-128,-0xFF
  *   -0-255,+1-16,+0x128
  *   +1,+2,+3,+4,-5,-7-10
- * Returns a clear and a set bitmask. Every positive value in the string
- * results in a bit set in the set mask and every negative value in the
- * string results in a bit SET in the clear mask. As a bit may be touched
- * more than once, the last 'operation' wins: +0-255,-128 = all but bit
- * 128 set in the set mask, only bit 128 set in the clear mask.
+ * Returns the new bitmap after all changes have been applied. Every
+ * positive value in the string will set a bit and every negative value
+ * in the string will clear a bit. As a bit may be touched more than once,
+ * the last 'operation' wins:
+ * +0-255,-128 = first bits 0-255 will be set, then bit 128 will be
+ * cleared again. All other bits are unmodified.
  */
-static int str2clrsetmasks(const char *str,
-                          unsigned long *clrmap,
-                          unsigned long *setmap,
-                          int bits)
+static int modify_bitmap(const char *str, unsigned long *bitmap, int bits)
 {
        int a, i, z;
        char *np, sign;
@@ -922,9 +918,6 @@ static int str2clrsetmasks(const char *str,
        if (bits & 0x07)
                return -EINVAL;
 
-       memset(clrmap, 0, bits / 8);
-       memset(setmap, 0, bits / 8);
-
        while (*str) {
                sign = *str++;
                if (sign != '+' && sign != '-')
@@ -940,13 +933,10 @@ static int str2clrsetmasks(const char *str,
                        str = np;
                }
                for (i = a; i <= z; i++)
-                       if (sign == '+') {
-                               set_bit_inv(i, setmap);
-                               clear_bit_inv(i, clrmap);
-                       } else {
-                               clear_bit_inv(i, setmap);
-                               set_bit_inv(i, clrmap);
-                       }
+                       if (sign == '+')
+                               set_bit_inv(i, bitmap);
+                       else
+                               clear_bit_inv(i, bitmap);
                while (*str == ',' || *str == '\n')
                        str++;
        }
@@ -970,44 +960,34 @@ static int process_mask_arg(const char *str,
                            unsigned long *bitmap, int bits,
                            struct mutex *lock)
 {
-       int i;
+       unsigned long *newmap, size;
+       int rc;
 
        /* bits needs to be a multiple of 8 */
        if (bits & 0x07)
                return -EINVAL;
 
+       size = BITS_TO_LONGS(bits)*sizeof(unsigned long);
+       newmap = kmalloc(size, GFP_KERNEL);
+       if (!newmap)
+               return -ENOMEM;
+       if (mutex_lock_interruptible(lock)) {
+               kfree(newmap);
+               return -ERESTARTSYS;
+       }
+
        if (*str == '+' || *str == '-') {
-               DECLARE_BITMAP(clrm, bits);
-               DECLARE_BITMAP(setm, bits);
-
-               i = str2clrsetmasks(str, clrm, setm, bits);
-               if (i)
-                       return i;
-               if (mutex_lock_interruptible(lock))
-                       return -ERESTARTSYS;
-               for (i = 0; i < bits; i++) {
-                       if (test_bit_inv(i, clrm))
-                               clear_bit_inv(i, bitmap);
-                       if (test_bit_inv(i, setm))
-                               set_bit_inv(i, bitmap);
-               }
+               memcpy(newmap, bitmap, size);
+               rc = modify_bitmap(str, newmap, bits);
        } else {
-               DECLARE_BITMAP(setm, bits);
-
-               i = hex2bitmap(str, setm, bits);
-               if (i)
-                       return i;
-               if (mutex_lock_interruptible(lock))
-                       return -ERESTARTSYS;
-               for (i = 0; i < bits; i++)
-                       if (test_bit_inv(i, setm))
-                               set_bit_inv(i, bitmap);
-                       else
-                               clear_bit_inv(i, bitmap);
+               memset(newmap, 0, size);
+               rc = hex2bitmap(str, newmap, bits);
        }
+       if (rc == 0)
+               memcpy(bitmap, newmap, size);
        mutex_unlock(lock);
-
-       return 0;
+       kfree(newmap);
+       return rc;
 }
 
 /*
index 49f64eb..de82824 100644 (file)
@@ -25,6 +25,7 @@
 #include <linux/netdevice.h>
 #include <linux/netdev_features.h>
 #include <linux/skbuff.h>
+#include <linux/vmalloc.h>
 
 #include <net/iucv/af_iucv.h>
 #include <net/dsfield.h>
@@ -4699,7 +4700,7 @@ static int qeth_query_oat_command(struct qeth_card *card, char __user *udata)
 
        priv.buffer_len = oat_data.buffer_len;
        priv.response_len = 0;
-       priv.buffer =  kzalloc(oat_data.buffer_len, GFP_KERNEL);
+       priv.buffer = vzalloc(oat_data.buffer_len);
        if (!priv.buffer) {
                rc = -ENOMEM;
                goto out;
@@ -4740,7 +4741,7 @@ static int qeth_query_oat_command(struct qeth_card *card, char __user *udata)
                        rc = -EFAULT;
 
 out_free:
-       kfree(priv.buffer);
+       vfree(priv.buffer);
 out:
        return rc;
 }
@@ -5706,6 +5707,8 @@ static struct net_device *qeth_alloc_netdev(struct qeth_card *card)
                dev->priv_flags &= ~IFF_TX_SKB_SHARING;
                dev->hw_features |= NETIF_F_SG;
                dev->vlan_features |= NETIF_F_SG;
+               if (IS_IQD(card))
+                       dev->features |= NETIF_F_SG;
        }
 
        return dev;
@@ -5768,8 +5771,10 @@ static int qeth_core_probe_device(struct ccwgroup_device *gdev)
        qeth_update_from_chp_desc(card);
 
        card->dev = qeth_alloc_netdev(card);
-       if (!card->dev)
+       if (!card->dev) {
+               rc = -ENOMEM;
                goto err_card;
+       }
 
        qeth_determine_capabilities(card);
        enforced_disc = qeth_enforce_discipline(card);
index 710fa74..b5e3853 100644 (file)
@@ -423,7 +423,7 @@ static int qeth_l2_process_inbound_buffer(struct qeth_card *card,
                default:
                        dev_kfree_skb_any(skb);
                        QETH_CARD_TEXT(card, 3, "inbunkno");
-                       QETH_DBF_HEX(CTRL, 3, hdr, QETH_DBF_CTRL_LEN);
+                       QETH_DBF_HEX(CTRL, 3, hdr, sizeof(*hdr));
                        continue;
                }
                work_done++;
index 7175086..ada258c 100644 (file)
@@ -1390,7 +1390,7 @@ static int qeth_l3_process_inbound_buffer(struct qeth_card *card,
                default:
                        dev_kfree_skb_any(skb);
                        QETH_CARD_TEXT(card, 3, "inbunkno");
-                       QETH_DBF_HEX(CTRL, 3, hdr, QETH_DBF_CTRL_LEN);
+                       QETH_DBF_HEX(CTRL, 3, hdr, sizeof(*hdr));
                        continue;
                }
                work_done++;
index 8fc851a..7c09700 100644 (file)
@@ -52,12 +52,12 @@ config SCSI_MQ_DEFAULT
        default y
        depends on SCSI
        ---help---
-         This option enables the new blk-mq based I/O path for SCSI
-         devices by default.  With the option the scsi_mod.use_blk_mq
-         module/boot option defaults to Y, without it to N, but it can
-         still be overridden either way.
+         This option enables the blk-mq based I/O path for SCSI devices by
+         default.  With this option the scsi_mod.use_blk_mq module/boot
+         option defaults to Y, without it to N, but it can still be
+         overridden either way.
 
-         If unsure say N.
+         If unsure say Y.
 
 config SCSI_PROC_FS
        bool "legacy /proc/scsi/ support"
index 29bf1e6..39eb415 100644 (file)
@@ -1346,7 +1346,7 @@ struct fib {
 struct aac_hba_map_info {
        __le32  rmw_nexus;              /* nexus for native HBA devices */
        u8              devtype;        /* device type */
-       u8              reset_state;    /* 0 - no reset, 1..x - */
+       s8              reset_state;    /* 0 - no reset, 1..x - */
                                        /* after xth TM LUN reset */
        u16             qd_limit;
        u32             scan_counter;
index 23d07e9..e519238 100644 (file)
@@ -1601,6 +1601,46 @@ fw_port_cap32_t fwcaps16_to_caps32(fw_port_cap16_t caps16)
        return caps32;
 }
 
+/**
+ *     fwcaps32_to_caps16 - convert 32-bit Port Capabilities to 16-bits
+ *     @caps32: a 32-bit Port Capabilities value
+ *
+ *     Returns the equivalent 16-bit Port Capabilities value.  Note that
+ *     not all 32-bit Port Capabilities can be represented in the 16-bit
+ *     Port Capabilities and some fields/values may not make it.
+ */
+fw_port_cap16_t fwcaps32_to_caps16(fw_port_cap32_t caps32)
+{
+       fw_port_cap16_t caps16 = 0;
+
+       #define CAP32_TO_CAP16(__cap) \
+               do { \
+                       if (caps32 & FW_PORT_CAP32_##__cap) \
+                               caps16 |= FW_PORT_CAP_##__cap; \
+               } while (0)
+
+       CAP32_TO_CAP16(SPEED_100M);
+       CAP32_TO_CAP16(SPEED_1G);
+       CAP32_TO_CAP16(SPEED_10G);
+       CAP32_TO_CAP16(SPEED_25G);
+       CAP32_TO_CAP16(SPEED_40G);
+       CAP32_TO_CAP16(SPEED_100G);
+       CAP32_TO_CAP16(FC_RX);
+       CAP32_TO_CAP16(FC_TX);
+       CAP32_TO_CAP16(802_3_PAUSE);
+       CAP32_TO_CAP16(802_3_ASM_DIR);
+       CAP32_TO_CAP16(ANEG);
+       CAP32_TO_CAP16(FORCE_PAUSE);
+       CAP32_TO_CAP16(MDIAUTO);
+       CAP32_TO_CAP16(MDISTRAIGHT);
+       CAP32_TO_CAP16(FEC_RS);
+       CAP32_TO_CAP16(FEC_BASER_RS);
+
+       #undef CAP32_TO_CAP16
+
+       return caps16;
+}
+
 /**
  *      lstatus_to_fwcap - translate old lstatus to 32-bit Port Capabilities
  *      @lstatus: old FW_PORT_ACTION_GET_PORT_INFO lstatus value
@@ -1759,7 +1799,7 @@ csio_enable_ports(struct csio_hw *hw)
                        val = 1;
 
                        csio_mb_params(hw, mbp, CSIO_MB_DEFAULT_TMO,
-                                      hw->pfn, 0, 1, &param, &val, false,
+                                      hw->pfn, 0, 1, &param, &val, true,
                                       NULL);
 
                        if (csio_mb_issue(hw, mbp)) {
@@ -1769,16 +1809,9 @@ csio_enable_ports(struct csio_hw *hw)
                                return -EINVAL;
                        }
 
-                       csio_mb_process_read_params_rsp(hw, mbp, &retval, 1,
-                                                       &val);
-                       if (retval != FW_SUCCESS) {
-                               csio_err(hw, "FW_PARAMS_CMD(r) port:%d failed: 0x%x\n",
-                                        portid, retval);
-                               mempool_free(mbp, hw->mb_mempool);
-                               return -EINVAL;
-                       }
-
-                       fw_caps = val;
+                       csio_mb_process_read_params_rsp(hw, mbp, &retval,
+                                                       0, NULL);
+                       fw_caps = retval ? FW_CAPS16 : FW_CAPS32;
                }
 
                /* Read PORT information */
@@ -2364,8 +2397,8 @@ bye:
 }
 
 /*
- * Returns -EINVAL if attempts to flash the firmware failed
- * else returns 0,
+ * Returns -EINVAL if attempts to flash the firmware failed,
+ * -ENOMEM if memory allocation failed else returns 0,
  * if flashing was not attempted because the card had the
  * latest firmware ECANCELED is returned
  */
@@ -2393,6 +2426,13 @@ csio_hw_flash_fw(struct csio_hw *hw, int *reset)
                return -EINVAL;
        }
 
+       /* allocate memory to read the header of the firmware on the
+        * card
+        */
+       card_fw = kmalloc(sizeof(*card_fw), GFP_KERNEL);
+       if (!card_fw)
+               return -ENOMEM;
+
        if (csio_is_t5(pci_dev->device & CSIO_HW_CHIP_MASK))
                fw_bin_file = FW_FNAME_T5;
        else
@@ -2406,11 +2446,6 @@ csio_hw_flash_fw(struct csio_hw *hw, int *reset)
                fw_size = fw->size;
        }
 
-       /* allocate memory to read the header of the firmware on the
-        * card
-        */
-       card_fw = kmalloc(sizeof(*card_fw), GFP_KERNEL);
-
        /* upgrade FW logic */
        ret = csio_hw_prep_fw(hw, fw_info, fw_data, fw_size, card_fw,
                         hw->fw_state, reset);
index 9e73ef7..e351af6 100644 (file)
@@ -639,6 +639,7 @@ int csio_handle_intr_status(struct csio_hw *, unsigned int,
 
 fw_port_cap32_t fwcap_to_fwspeed(fw_port_cap32_t acaps);
 fw_port_cap32_t fwcaps16_to_caps32(fw_port_cap16_t caps16);
+fw_port_cap16_t fwcaps32_to_caps16(fw_port_cap32_t caps32);
 fw_port_cap32_t lstatus_to_fwcap(u32 lstatus);
 
 int csio_hw_start(struct csio_hw *);
index c026417..6f13673 100644 (file)
@@ -368,7 +368,7 @@ csio_mb_port(struct csio_hw *hw, struct csio_mb *mbp, uint32_t tmo,
                        FW_CMD_LEN16_V(sizeof(*cmdp) / 16));
 
        if (fw_caps == FW_CAPS16)
-               cmdp->u.l1cfg.rcap = cpu_to_be32(fc);
+               cmdp->u.l1cfg.rcap = cpu_to_be32(fwcaps32_to_caps16(fc));
        else
                cmdp->u.l1cfg32.rcap32 = cpu_to_be32(fc);
 }
@@ -395,8 +395,8 @@ csio_mb_process_read_port_rsp(struct csio_hw *hw, struct csio_mb *mbp,
                        *pcaps = fwcaps16_to_caps32(ntohs(rsp->u.info.pcap));
                        *acaps = fwcaps16_to_caps32(ntohs(rsp->u.info.acap));
                } else {
-                       *pcaps = ntohs(rsp->u.info32.pcaps32);
-                       *acaps = ntohs(rsp->u.info32.acaps32);
+                       *pcaps = be32_to_cpu(rsp->u.info32.pcaps32);
+                       *acaps = be32_to_cpu(rsp->u.info32.acaps32);
                }
        }
 }
index f02dcc8..ea4b0bb 100644 (file)
@@ -563,35 +563,13 @@ struct Scsi_Host *scsi_host_get(struct Scsi_Host *shost)
 }
 EXPORT_SYMBOL(scsi_host_get);
 
-struct scsi_host_mq_in_flight {
-       int cnt;
-};
-
-static void scsi_host_check_in_flight(struct request *rq, void *data,
-               bool reserved)
-{
-       struct scsi_host_mq_in_flight *in_flight = data;
-
-       if (blk_mq_request_started(rq))
-               in_flight->cnt++;
-}
-
 /**
  * scsi_host_busy - Return the host busy counter
  * @shost:     Pointer to Scsi_Host to inc.
  **/
 int scsi_host_busy(struct Scsi_Host *shost)
 {
-       struct scsi_host_mq_in_flight in_flight = {
-               .cnt = 0,
-       };
-
-       if (!shost->use_blk_mq)
-               return atomic_read(&shost->host_busy);
-
-       blk_mq_tagset_busy_iter(&shost->tag_set, scsi_host_check_in_flight,
-                       &in_flight);
-       return in_flight.cnt;
+       return atomic_read(&shost->host_busy);
 }
 EXPORT_SYMBOL(scsi_host_busy);
 
index 58bb70b..c120929 100644 (file)
@@ -976,7 +976,7 @@ static struct scsi_host_template hpsa_driver_template = {
 #endif
        .sdev_attrs = hpsa_sdev_attrs,
        .shost_attrs = hpsa_shost_attrs,
-       .max_sectors = 1024,
+       .max_sectors = 2048,
        .no_write_same = 1,
 };
 
index e0d0da5..43732e8 100644 (file)
@@ -672,7 +672,7 @@ struct lpfc_hba {
 #define LS_NPIV_FAB_SUPPORTED 0x2      /* Fabric supports NPIV */
 #define LS_IGNORE_ERATT       0x4      /* intr handler should ignore ERATT */
 #define LS_MDS_LINK_DOWN      0x8      /* MDS Diagnostics Link Down */
-#define LS_MDS_LOOPBACK      0x16      /* MDS Diagnostics Link Up (Loopback) */
+#define LS_MDS_LOOPBACK      0x10      /* MDS Diagnostics Link Up (Loopback) */
 
        uint32_t hba_flag;      /* hba generic flags */
 #define HBA_ERATT_HANDLED      0x1 /* This flag is set when eratt handled */
index 5a25553..057a60a 100644 (file)
@@ -5122,16 +5122,16 @@ LPFC_ATTR_R(enable_SmartSAN, 0, 0, 1, "Enable SmartSAN functionality");
 
 /*
 # lpfc_fdmi_on: Controls FDMI support.
-#       0       No FDMI support (default)
-#       1       Traditional FDMI support
+#       0       No FDMI support
+#       1       Traditional FDMI support (default)
 # Traditional FDMI support means the driver will assume FDMI-2 support;
 # however, if that fails, it will fallback to FDMI-1.
 # If lpfc_enable_SmartSAN is set to 1, the driver ignores lpfc_fdmi_on.
 # If lpfc_enable_SmartSAN is set 0, the driver uses the current value of
 # lpfc_fdmi_on.
-# Value range [0,1]. Default value is 0.
+# Value range [0,1]. Default value is 1.
 */
-LPFC_ATTR_R(fdmi_on, 0, 0, 1, "Enable FDMI support");
+LPFC_ATTR_R(fdmi_on, 1, 0, 1, "Enable FDMI support");
 
 /*
 # Specifies the maximum number of ELS cmds we can have outstanding (for
index fc3babc..a6f96b3 100644 (file)
@@ -77,6 +77,11 @@ enum qedi_nvm_tgts {
        QEDI_NVM_TGT_SEC,
 };
 
+struct qedi_nvm_iscsi_image {
+       struct nvm_iscsi_cfg iscsi_cfg;
+       u32 crc;
+};
+
 struct qedi_uio_ctrl {
        /* meta data */
        u32 uio_hsi_version;
@@ -294,7 +299,7 @@ struct qedi_ctx {
        void *bdq_pbl_list;
        dma_addr_t bdq_pbl_list_dma;
        u8 bdq_pbl_list_num_entries;
-       struct nvm_iscsi_cfg *iscsi_cfg;
+       struct qedi_nvm_iscsi_image *iscsi_image;
        dma_addr_t nvm_buf_dma;
        void __iomem *bdq_primary_prod;
        void __iomem *bdq_secondary_prod;
index aa96bcc..cc8e64d 100644 (file)
@@ -1346,23 +1346,26 @@ exit_setup_int:
 
 static void qedi_free_nvm_iscsi_cfg(struct qedi_ctx *qedi)
 {
-       if (qedi->iscsi_cfg)
+       if (qedi->iscsi_image)
                dma_free_coherent(&qedi->pdev->dev,
-                                 sizeof(struct nvm_iscsi_cfg),
-                                 qedi->iscsi_cfg, qedi->nvm_buf_dma);
+                                 sizeof(struct qedi_nvm_iscsi_image),
+                                 qedi->iscsi_image, qedi->nvm_buf_dma);
 }
 
 static int qedi_alloc_nvm_iscsi_cfg(struct qedi_ctx *qedi)
 {
-       qedi->iscsi_cfg = dma_zalloc_coherent(&qedi->pdev->dev,
-                                            sizeof(struct nvm_iscsi_cfg),
-                                            &qedi->nvm_buf_dma, GFP_KERNEL);
-       if (!qedi->iscsi_cfg) {
+       struct qedi_nvm_iscsi_image nvm_image;
+
+       qedi->iscsi_image = dma_zalloc_coherent(&qedi->pdev->dev,
+                                               sizeof(nvm_image),
+                                               &qedi->nvm_buf_dma,
+                                               GFP_KERNEL);
+       if (!qedi->iscsi_image) {
                QEDI_ERR(&qedi->dbg_ctx, "Could not allocate NVM BUF.\n");
                return -ENOMEM;
        }
        QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
-                 "NVM BUF addr=0x%p dma=0x%llx.\n", qedi->iscsi_cfg,
+                 "NVM BUF addr=0x%p dma=0x%llx.\n", qedi->iscsi_image,
                  qedi->nvm_buf_dma);
 
        return 0;
@@ -1905,7 +1908,7 @@ qedi_get_nvram_block(struct qedi_ctx *qedi)
        struct nvm_iscsi_block *block;
 
        pf = qedi->dev_info.common.abs_pf_id;
-       block = &qedi->iscsi_cfg->block[0];
+       block = &qedi->iscsi_image->iscsi_cfg.block[0];
        for (i = 0; i < NUM_OF_ISCSI_PF_SUPPORTED; i++, block++) {
                flags = ((block->id) & NVM_ISCSI_CFG_BLK_CTRL_FLAG_MASK) >>
                        NVM_ISCSI_CFG_BLK_CTRL_FLAG_OFFSET;
@@ -2194,15 +2197,14 @@ static void qedi_boot_release(void *data)
 static int qedi_get_boot_info(struct qedi_ctx *qedi)
 {
        int ret = 1;
-       u16 len;
-
-       len = sizeof(struct nvm_iscsi_cfg);
+       struct qedi_nvm_iscsi_image nvm_image;
 
        QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
                  "Get NVM iSCSI CFG image\n");
        ret = qedi_ops->common->nvm_get_image(qedi->cdev,
                                              QED_NVM_IMAGE_ISCSI_CFG,
-                                             (char *)qedi->iscsi_cfg, len);
+                                             (char *)qedi->iscsi_image,
+                                             sizeof(nvm_image));
        if (ret)
                QEDI_ERR(&qedi->dbg_ctx,
                         "Could not get NVM image. ret = %d\n", ret);
index 0adfb3b..eb97d2d 100644 (file)
@@ -345,8 +345,7 @@ static void scsi_dec_host_busy(struct Scsi_Host *shost)
        unsigned long flags;
 
        rcu_read_lock();
-       if (!shost->use_blk_mq)
-               atomic_dec(&shost->host_busy);
+       atomic_dec(&shost->host_busy);
        if (unlikely(scsi_host_in_recovery(shost))) {
                spin_lock_irqsave(shost->host_lock, flags);
                if (shost->host_failed || shost->host_eh_scheduled)
@@ -445,12 +444,7 @@ static inline bool scsi_target_is_busy(struct scsi_target *starget)
 
 static inline bool scsi_host_is_busy(struct Scsi_Host *shost)
 {
-       /*
-        * blk-mq can handle host queue busy efficiently via host-wide driver
-        * tag allocation
-        */
-
-       if (!shost->use_blk_mq && shost->can_queue > 0 &&
+       if (shost->can_queue > 0 &&
            atomic_read(&shost->host_busy) >= shost->can_queue)
                return true;
        if (atomic_read(&shost->host_blocked) > 0)
@@ -1606,10 +1600,7 @@ static inline int scsi_host_queue_ready(struct request_queue *q,
        if (scsi_host_in_recovery(shost))
                return 0;
 
-       if (!shost->use_blk_mq)
-               busy = atomic_inc_return(&shost->host_busy) - 1;
-       else
-               busy = 0;
+       busy = atomic_inc_return(&shost->host_busy) - 1;
        if (atomic_read(&shost->host_blocked) > 0) {
                if (busy)
                        goto starved;
@@ -1625,7 +1616,7 @@ static inline int scsi_host_queue_ready(struct request_queue *q,
                                     "unblocking host at zero depth\n"));
        }
 
-       if (!shost->use_blk_mq && shost->can_queue > 0 && busy >= shost->can_queue)
+       if (shost->can_queue > 0 && busy >= shost->can_queue)
                goto starved;
        if (shost->host_self_blocked)
                goto starved;
@@ -1711,9 +1702,7 @@ static void scsi_kill_request(struct request *req, struct request_queue *q)
         * with the locks as normal issue path does.
         */
        atomic_inc(&sdev->device_busy);
-
-       if (!shost->use_blk_mq)
-               atomic_inc(&shost->host_busy);
+       atomic_inc(&shost->host_busy);
        if (starget->can_queue > 0)
                atomic_inc(&starget->target_busy);
 
index 96f6149..663b755 100644 (file)
@@ -2,7 +2,7 @@
 
 config EROFS_FS
        tristate "EROFS filesystem support"
-       depends on BROKEN
+       depends on BLOCK
        help
          EROFS(Enhanced Read-Only File System) is a lightweight
          read-only file system with modern designs (eg. page-sized
index 1aec509..2df9768 100644 (file)
@@ -340,7 +340,7 @@ static int erofs_read_super(struct super_block *sb,
                goto err_sbread;
 
        sb->s_magic = EROFS_SUPER_MAGIC;
-       sb->s_flags |= MS_RDONLY | MS_NOATIME;
+       sb->s_flags |= SB_RDONLY | SB_NOATIME;
        sb->s_maxbytes = MAX_LFS_FILESIZE;
        sb->s_time_gran = 1;
 
@@ -627,7 +627,7 @@ static int erofs_remount(struct super_block *sb, int *flags, char *data)
 {
        BUG_ON(!sb_rdonly(sb));
 
-       *flags |= MS_RDONLY;
+       *flags |= SB_RDONLY;
        return 0;
 }
 
index 7e64c7e..a9f4802 100644 (file)
@@ -2,3 +2,7 @@
   GPIO descriptor API in <linux/gpio/consumer.h> and look up GPIO
   lines from device tree, ACPI or board files, board files should
   use <linux/gpio/machine.h>
+
+* convert all these over to drm_simple_display_pipe and submit for inclusion
+  into the DRM subsystem under drivers/gpu/drm - fbdev doesn't take any new
+  drivers anymore.
index 6ff8e01..5b1865f 100644 (file)
@@ -1,9 +1,22 @@
 This is a list of things that need to be done to get this driver out of the
 staging directory.
+
+- Implement the gasket framework's functionality through UIO instead of
+  introducing a new user-space drivers framework that is quite similar.
+
+  UIO provides the necessary bits to implement user-space drivers. Meanwhile
+  the gasket APIs adds some extra conveniences like PCI BAR mapping, and
+  MSI interrupts. Add these features to the UIO subsystem, then re-implement
+  the Apex driver as a basic UIO driver instead (include/linux/uio_driver.h)
+
 - Document sysfs files with Documentation/ABI/ entries.
+
 - Use misc interface instead of major number for driver version description.
+
 - Add descriptions of module_param's
+
 - apex_get_status() should actually check status.
+
 - "drivers" should never be dealing with "raw" sysfs calls or mess around with
   kobjects at all. The driver core should handle all of this for you
   automaically. There should not be a need for raw attribute macros.
index da92c49..69cc508 100644 (file)
@@ -59,6 +59,11 @@ static int vbox_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
                ret = PTR_ERR(dev);
                goto err_drv_alloc;
        }
+
+       ret = pci_enable_device(pdev);
+       if (ret)
+               goto err_pci_enable;
+
        dev->pdev = pdev;
        pci_set_drvdata(pdev, dev);
 
@@ -75,6 +80,8 @@ static int vbox_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
  err_drv_dev_register:
        vbox_driver_unload(dev);
  err_vbox_driver_load:
+       pci_disable_device(pdev);
+ err_pci_enable:
        drm_dev_put(dev);
  err_drv_alloc:
        return ret;
index a83eac8..79836c8 100644 (file)
@@ -323,6 +323,11 @@ static int vbox_crtc_page_flip(struct drm_crtc *crtc,
        if (rc)
                return rc;
 
+       mutex_lock(&vbox->hw_mutex);
+       vbox_set_view(crtc);
+       vbox_do_modeset(crtc, &crtc->mode);
+       mutex_unlock(&vbox->hw_mutex);
+
        spin_lock_irqsave(&drm->event_lock, flags);
 
        if (event)
index f7b07c0..ee7e26b 100644 (file)
@@ -1,4 +1,5 @@
 # SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_WILC1000) += wilc1000.o
 
 ccflags-y += -DFIRMWARE_1002=\"atmel/wilc1002_firmware.bin\" \
                -DFIRMWARE_1003=\"atmel/wilc1003_firmware.bin\"
@@ -11,9 +12,7 @@ wilc1000-objs := wilc_wfi_cfgoperations.o linux_wlan.o linux_mon.o \
                        wilc_wlan.o
 
 obj-$(CONFIG_WILC1000_SDIO) += wilc1000-sdio.o
-wilc1000-sdio-objs += $(wilc1000-objs)
 wilc1000-sdio-objs += wilc_sdio.o
 
 obj-$(CONFIG_WILC1000_SPI) += wilc1000-spi.o
-wilc1000-spi-objs += $(wilc1000-objs)
 wilc1000-spi-objs += wilc_spi.o
index 01cf4bd..3b8d237 100644 (file)
@@ -1038,8 +1038,8 @@ void wilc_netdev_cleanup(struct wilc *wilc)
        }
 
        kfree(wilc);
-       wilc_debugfs_remove();
 }
+EXPORT_SYMBOL_GPL(wilc_netdev_cleanup);
 
 static const struct net_device_ops wilc_netdev_ops = {
        .ndo_init = mac_init_fn,
@@ -1062,7 +1062,6 @@ int wilc_netdev_init(struct wilc **wilc, struct device *dev, int io_type,
        if (!wl)
                return -ENOMEM;
 
-       wilc_debugfs_init();
        *wilc = wl;
        wl->io_type = io_type;
        wl->hif_func = ops;
@@ -1124,3 +1123,6 @@ int wilc_netdev_init(struct wilc **wilc, struct device *dev, int io_type,
 
        return 0;
 }
+EXPORT_SYMBOL_GPL(wilc_netdev_init);
+
+MODULE_LICENSE("GPL");
index edc7287..8001df6 100644 (file)
@@ -19,6 +19,7 @@ static struct dentry *wilc_dir;
 
 #define DBG_LEVEL_ALL  (DEBUG | INFO | WRN | ERR)
 static atomic_t WILC_DEBUG_LEVEL = ATOMIC_INIT(ERR);
+EXPORT_SYMBOL_GPL(WILC_DEBUG_LEVEL);
 
 static ssize_t wilc_debug_level_read(struct file *file, char __user *userbuf,
                                     size_t count, loff_t *ppos)
@@ -87,7 +88,7 @@ static struct wilc_debugfs_info_t debugfs_info[] = {
        },
 };
 
-int wilc_debugfs_init(void)
+static int __init wilc_debugfs_init(void)
 {
        int i;
        struct wilc_debugfs_info_t *info;
@@ -103,10 +104,12 @@ int wilc_debugfs_init(void)
        }
        return 0;
 }
+module_init(wilc_debugfs_init);
 
-void wilc_debugfs_remove(void)
+static void __exit wilc_debugfs_remove(void)
 {
        debugfs_remove_recursive(wilc_dir);
 }
+module_exit(wilc_debugfs_remove);
 
 #endif
index 6787b6e..8b184aa 100644 (file)
@@ -417,6 +417,7 @@ void chip_allow_sleep(struct wilc *wilc)
        wilc->hif_func->hif_write_reg(wilc, 0xf0, reg & ~BIT(0));
        wilc->hif_func->hif_write_reg(wilc, 0xfa, 0);
 }
+EXPORT_SYMBOL_GPL(chip_allow_sleep);
 
 void chip_wakeup(struct wilc *wilc)
 {
@@ -471,6 +472,7 @@ void chip_wakeup(struct wilc *wilc)
        }
        chip_ps_state = CHIP_WAKEDUP;
 }
+EXPORT_SYMBOL_GPL(chip_wakeup);
 
 void wilc_chip_sleep_manually(struct wilc *wilc)
 {
@@ -484,6 +486,7 @@ void wilc_chip_sleep_manually(struct wilc *wilc)
        chip_ps_state = CHIP_SLEEPING_MANUAL;
        release_bus(wilc, RELEASE_ONLY);
 }
+EXPORT_SYMBOL_GPL(wilc_chip_sleep_manually);
 
 void host_wakeup_notify(struct wilc *wilc)
 {
@@ -491,6 +494,7 @@ void host_wakeup_notify(struct wilc *wilc)
        wilc->hif_func->hif_write_reg(wilc, 0x10b0, 1);
        release_bus(wilc, RELEASE_ONLY);
 }
+EXPORT_SYMBOL_GPL(host_wakeup_notify);
 
 void host_sleep_notify(struct wilc *wilc)
 {
@@ -498,6 +502,7 @@ void host_sleep_notify(struct wilc *wilc)
        wilc->hif_func->hif_write_reg(wilc, 0x10ac, 1);
        release_bus(wilc, RELEASE_ONLY);
 }
+EXPORT_SYMBOL_GPL(host_sleep_notify);
 
 int wilc_wlan_handle_txq(struct net_device *dev, u32 *txq_count)
 {
@@ -871,6 +876,7 @@ void wilc_handle_isr(struct wilc *wilc)
 
        release_bus(wilc, RELEASE_ALLOW_SLEEP);
 }
+EXPORT_SYMBOL_GPL(wilc_handle_isr);
 
 int wilc_wlan_firmware_download(struct wilc *wilc, const u8 *buffer,
                                u32 buffer_size)
index 00d13b1..b81a73b 100644 (file)
@@ -831,6 +831,4 @@ struct wilc;
 int wilc_wlan_init(struct net_device *dev);
 u32 wilc_get_chipid(struct wilc *wilc, bool update);
 
-int wilc_debugfs_init(void);
-void wilc_debugfs_remove(void);
 #endif
index 768cce0..76a2626 100644 (file)
@@ -207,8 +207,8 @@ cxgbit_ddp_reserve(struct cxgbit_sock *csk, struct cxgbi_task_tag_info *ttinfo,
        ret = dma_map_sg(&ppm->pdev->dev, sgl, sgcnt, DMA_FROM_DEVICE);
        sgl->offset = sg_offset;
        if (!ret) {
-               pr_info("%s: 0x%x, xfer %u, sgl %u dma mapping err.\n",
-                       __func__, 0, xferlen, sgcnt);
+               pr_debug("%s: 0x%x, xfer %u, sgl %u dma mapping err.\n",
+                        __func__, 0, xferlen, sgcnt);
                goto rel_ppods;
        }
 
@@ -250,8 +250,8 @@ cxgbit_get_r2t_ttt(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
 
        ret = cxgbit_ddp_reserve(csk, ttinfo, cmd->se_cmd.data_length);
        if (ret < 0) {
-               pr_info("csk 0x%p, cmd 0x%p, xfer len %u, sgcnt %u no ddp.\n",
-                       csk, cmd, cmd->se_cmd.data_length, ttinfo->nents);
+               pr_debug("csk 0x%p, cmd 0x%p, xfer len %u, sgcnt %u no ddp.\n",
+                        csk, cmd, cmd->se_cmd.data_length, ttinfo->nents);
 
                ttinfo->sgl = NULL;
                ttinfo->nents = 0;
index 94bad43..9cdfccb 100644 (file)
@@ -4208,22 +4208,15 @@ int iscsit_close_connection(
                crypto_free_ahash(tfm);
        }
 
-       free_cpumask_var(conn->conn_cpumask);
-
-       kfree(conn->conn_ops);
-       conn->conn_ops = NULL;
-
        if (conn->sock)
                sock_release(conn->sock);
 
        if (conn->conn_transport->iscsit_free_conn)
                conn->conn_transport->iscsit_free_conn(conn);
 
-       iscsit_put_transport(conn->conn_transport);
-
        pr_debug("Moving to TARG_CONN_STATE_FREE.\n");
        conn->conn_state = TARG_CONN_STATE_FREE;
-       kfree(conn);
+       iscsit_free_conn(conn);
 
        spin_lock_bh(&sess->conn_lock);
        atomic_dec(&sess->nconn);
index 9e74f8b..bb90c80 100644 (file)
@@ -67,45 +67,10 @@ static struct iscsi_login *iscsi_login_init_conn(struct iscsi_conn *conn)
                goto out_req_buf;
        }
 
-       conn->conn_ops = kzalloc(sizeof(struct iscsi_conn_ops), GFP_KERNEL);
-       if (!conn->conn_ops) {
-               pr_err("Unable to allocate memory for"
-                       " struct iscsi_conn_ops.\n");
-               goto out_rsp_buf;
-       }
-
-       init_waitqueue_head(&conn->queues_wq);
-       INIT_LIST_HEAD(&conn->conn_list);
-       INIT_LIST_HEAD(&conn->conn_cmd_list);
-       INIT_LIST_HEAD(&conn->immed_queue_list);
-       INIT_LIST_HEAD(&conn->response_queue_list);
-       init_completion(&conn->conn_post_wait_comp);
-       init_completion(&conn->conn_wait_comp);
-       init_completion(&conn->conn_wait_rcfr_comp);
-       init_completion(&conn->conn_waiting_on_uc_comp);
-       init_completion(&conn->conn_logout_comp);
-       init_completion(&conn->rx_half_close_comp);
-       init_completion(&conn->tx_half_close_comp);
-       init_completion(&conn->rx_login_comp);
-       spin_lock_init(&conn->cmd_lock);
-       spin_lock_init(&conn->conn_usage_lock);
-       spin_lock_init(&conn->immed_queue_lock);
-       spin_lock_init(&conn->nopin_timer_lock);
-       spin_lock_init(&conn->response_queue_lock);
-       spin_lock_init(&conn->state_lock);
-
-       if (!zalloc_cpumask_var(&conn->conn_cpumask, GFP_KERNEL)) {
-               pr_err("Unable to allocate conn->conn_cpumask\n");
-               goto out_conn_ops;
-       }
        conn->conn_login = login;
 
        return login;
 
-out_conn_ops:
-       kfree(conn->conn_ops);
-out_rsp_buf:
-       kfree(login->rsp_buf);
 out_req_buf:
        kfree(login->req_buf);
 out_login:
@@ -310,11 +275,9 @@ static int iscsi_login_zero_tsih_s1(
                return -ENOMEM;
        }
 
-       ret = iscsi_login_set_conn_values(sess, conn, pdu->cid);
-       if (unlikely(ret)) {
-               kfree(sess);
-               return ret;
-       }
+       if (iscsi_login_set_conn_values(sess, conn, pdu->cid))
+               goto free_sess;
+
        sess->init_task_tag     = pdu->itt;
        memcpy(&sess->isid, pdu->isid, 6);
        sess->exp_cmd_sn        = be32_to_cpu(pdu->cmdsn);
@@ -1149,6 +1112,75 @@ iscsit_conn_set_transport(struct iscsi_conn *conn, struct iscsit_transport *t)
        return 0;
 }
 
+static struct iscsi_conn *iscsit_alloc_conn(struct iscsi_np *np)
+{
+       struct iscsi_conn *conn;
+
+       conn = kzalloc(sizeof(struct iscsi_conn), GFP_KERNEL);
+       if (!conn) {
+               pr_err("Could not allocate memory for new connection\n");
+               return NULL;
+       }
+       pr_debug("Moving to TARG_CONN_STATE_FREE.\n");
+       conn->conn_state = TARG_CONN_STATE_FREE;
+
+       init_waitqueue_head(&conn->queues_wq);
+       INIT_LIST_HEAD(&conn->conn_list);
+       INIT_LIST_HEAD(&conn->conn_cmd_list);
+       INIT_LIST_HEAD(&conn->immed_queue_list);
+       INIT_LIST_HEAD(&conn->response_queue_list);
+       init_completion(&conn->conn_post_wait_comp);
+       init_completion(&conn->conn_wait_comp);
+       init_completion(&conn->conn_wait_rcfr_comp);
+       init_completion(&conn->conn_waiting_on_uc_comp);
+       init_completion(&conn->conn_logout_comp);
+       init_completion(&conn->rx_half_close_comp);
+       init_completion(&conn->tx_half_close_comp);
+       init_completion(&conn->rx_login_comp);
+       spin_lock_init(&conn->cmd_lock);
+       spin_lock_init(&conn->conn_usage_lock);
+       spin_lock_init(&conn->immed_queue_lock);
+       spin_lock_init(&conn->nopin_timer_lock);
+       spin_lock_init(&conn->response_queue_lock);
+       spin_lock_init(&conn->state_lock);
+
+       timer_setup(&conn->nopin_response_timer,
+                   iscsit_handle_nopin_response_timeout, 0);
+       timer_setup(&conn->nopin_timer, iscsit_handle_nopin_timeout, 0);
+
+       if (iscsit_conn_set_transport(conn, np->np_transport) < 0)
+               goto free_conn;
+
+       conn->conn_ops = kzalloc(sizeof(struct iscsi_conn_ops), GFP_KERNEL);
+       if (!conn->conn_ops) {
+               pr_err("Unable to allocate memory for struct iscsi_conn_ops.\n");
+               goto put_transport;
+       }
+
+       if (!zalloc_cpumask_var(&conn->conn_cpumask, GFP_KERNEL)) {
+               pr_err("Unable to allocate conn->conn_cpumask\n");
+               goto free_mask;
+       }
+
+       return conn;
+
+free_mask:
+       free_cpumask_var(conn->conn_cpumask);
+put_transport:
+       iscsit_put_transport(conn->conn_transport);
+free_conn:
+       kfree(conn);
+       return NULL;
+}
+
+void iscsit_free_conn(struct iscsi_conn *conn)
+{
+       free_cpumask_var(conn->conn_cpumask);
+       kfree(conn->conn_ops);
+       iscsit_put_transport(conn->conn_transport);
+       kfree(conn);
+}
+
 void iscsi_target_login_sess_out(struct iscsi_conn *conn,
                struct iscsi_np *np, bool zero_tsih, bool new_sess)
 {
@@ -1198,10 +1230,6 @@ old_sess_out:
                crypto_free_ahash(tfm);
        }
 
-       free_cpumask_var(conn->conn_cpumask);
-
-       kfree(conn->conn_ops);
-
        if (conn->param_list) {
                iscsi_release_param_list(conn->param_list);
                conn->param_list = NULL;
@@ -1219,8 +1247,7 @@ old_sess_out:
        if (conn->conn_transport->iscsit_free_conn)
                conn->conn_transport->iscsit_free_conn(conn);
 
-       iscsit_put_transport(conn->conn_transport);
-       kfree(conn);
+       iscsit_free_conn(conn);
 }
 
 static int __iscsi_target_login_thread(struct iscsi_np *np)
@@ -1250,31 +1277,16 @@ static int __iscsi_target_login_thread(struct iscsi_np *np)
        }
        spin_unlock_bh(&np->np_thread_lock);
 
-       conn = kzalloc(sizeof(struct iscsi_conn), GFP_KERNEL);
+       conn = iscsit_alloc_conn(np);
        if (!conn) {
-               pr_err("Could not allocate memory for"
-                       " new connection\n");
                /* Get another socket */
                return 1;
        }
-       pr_debug("Moving to TARG_CONN_STATE_FREE.\n");
-       conn->conn_state = TARG_CONN_STATE_FREE;
-
-       timer_setup(&conn->nopin_response_timer,
-                   iscsit_handle_nopin_response_timeout, 0);
-       timer_setup(&conn->nopin_timer, iscsit_handle_nopin_timeout, 0);
-
-       if (iscsit_conn_set_transport(conn, np->np_transport) < 0) {
-               kfree(conn);
-               return 1;
-       }
 
        rc = np->np_transport->iscsit_accept_np(np, conn);
        if (rc == -ENOSYS) {
                complete(&np->np_restart_comp);
-               iscsit_put_transport(conn->conn_transport);
-               kfree(conn);
-               conn = NULL;
+               iscsit_free_conn(conn);
                goto exit;
        } else if (rc < 0) {
                spin_lock_bh(&np->np_thread_lock);
@@ -1282,17 +1294,13 @@ static int __iscsi_target_login_thread(struct iscsi_np *np)
                        np->np_thread_state = ISCSI_NP_THREAD_ACTIVE;
                        spin_unlock_bh(&np->np_thread_lock);
                        complete(&np->np_restart_comp);
-                       iscsit_put_transport(conn->conn_transport);
-                       kfree(conn);
-                       conn = NULL;
+                       iscsit_free_conn(conn);
                        /* Get another socket */
                        return 1;
                }
                spin_unlock_bh(&np->np_thread_lock);
-               iscsit_put_transport(conn->conn_transport);
-               kfree(conn);
-               conn = NULL;
-               goto out;
+               iscsit_free_conn(conn);
+               return 1;
        }
        /*
         * Perform the remaining iSCSI connection initialization items..
@@ -1442,7 +1450,6 @@ old_sess_out:
                tpg_np = NULL;
        }
 
-out:
        return 1;
 
 exit:
index 74ac3ab..3b8e363 100644 (file)
@@ -19,7 +19,7 @@ extern int iscsi_target_setup_login_socket(struct iscsi_np *,
 extern int iscsit_accept_np(struct iscsi_np *, struct iscsi_conn *);
 extern int iscsit_get_login_rx(struct iscsi_conn *, struct iscsi_login *);
 extern int iscsit_put_login_tx(struct iscsi_conn *, struct iscsi_login *, u32);
-extern void iscsit_free_conn(struct iscsi_np *, struct iscsi_conn *);
+extern void iscsit_free_conn(struct iscsi_conn *);
 extern int iscsit_start_kthreads(struct iscsi_conn *);
 extern void iscsi_post_login_handler(struct iscsi_np *, struct iscsi_conn *, u8);
 extern void iscsi_target_login_sess_out(struct iscsi_conn *, struct iscsi_np *,
index 5414c4a..27284a2 100644 (file)
@@ -522,6 +522,8 @@ static int hvc_write(struct tty_struct *tty, const unsigned char *buf, int count
                return -EIO;
 
        while (count > 0) {
+               int ret = 0;
+
                spin_lock_irqsave(&hp->lock, flags);
 
                rsize = hp->outbuf_size - hp->n_outbuf;
@@ -537,10 +539,13 @@ static int hvc_write(struct tty_struct *tty, const unsigned char *buf, int count
                }
 
                if (hp->n_outbuf > 0)
-                       hvc_push(hp);
+                       ret = hvc_push(hp);
 
                spin_unlock_irqrestore(&hp->lock, flags);
 
+               if (!ret)
+                       break;
+
                if (count) {
                        if (hp->n_outbuf > 0)
                                hvc_flush(hp);
@@ -623,6 +628,15 @@ static int hvc_chars_in_buffer(struct tty_struct *tty)
 #define MAX_TIMEOUT            (2000)
 static u32 timeout = MIN_TIMEOUT;
 
+/*
+ * Maximum number of bytes to get from the console driver if hvc_poll is
+ * called from driver (and can't sleep). Any more than this and we break
+ * and start polling with khvcd. This value was derived from from an OpenBMC
+ * console with the OPAL driver that results in about 0.25ms interrupts off
+ * latency.
+ */
+#define HVC_ATOMIC_READ_MAX    128
+
 #define HVC_POLL_READ  0x00000001
 #define HVC_POLL_WRITE 0x00000002
 
@@ -669,8 +683,8 @@ static int __hvc_poll(struct hvc_struct *hp, bool may_sleep)
        if (!hp->irq_requested)
                poll_mask |= HVC_POLL_READ;
 
+ read_again:
        /* Read data if any */
-
        count = tty_buffer_request_room(&hp->port, N_INBUF);
 
        /* If flip is full, just reschedule a later read */
@@ -717,9 +731,23 @@ static int __hvc_poll(struct hvc_struct *hp, bool may_sleep)
 #endif /* CONFIG_MAGIC_SYSRQ */
                tty_insert_flip_char(&hp->port, buf[i], 0);
        }
-       if (n == count)
-               poll_mask |= HVC_POLL_READ;
-       read_total = n;
+       read_total += n;
+
+       if (may_sleep) {
+               /* Keep going until the flip is full */
+               spin_unlock_irqrestore(&hp->lock, flags);
+               cond_resched();
+               spin_lock_irqsave(&hp->lock, flags);
+               goto read_again;
+       } else if (read_total < HVC_ATOMIC_READ_MAX) {
+               /* Break and defer if it's a large read in atomic */
+               goto read_again;
+       }
+
+       /*
+        * Latency break, schedule another poll immediately.
+        */
+       poll_mask |= HVC_POLL_READ;
 
  out:
        /* Wakeup write queue if necessary */
index 27346d6..f9b40a9 100644 (file)
@@ -780,20 +780,9 @@ static int acm_tty_write(struct tty_struct *tty,
        }
 
        if (acm->susp_count) {
-               if (acm->putbuffer) {
-                       /* now to preserve order */
-                       usb_anchor_urb(acm->putbuffer->urb, &acm->delayed);
-                       acm->putbuffer = NULL;
-               }
                usb_anchor_urb(wb->urb, &acm->delayed);
                spin_unlock_irqrestore(&acm->write_lock, flags);
                return count;
-       } else {
-               if (acm->putbuffer) {
-                       /* at this point there is no good way to handle errors */
-                       acm_start_wb(acm, acm->putbuffer);
-                       acm->putbuffer = NULL;
-               }
        }
 
        stat = acm_start_wb(acm, wb);
@@ -804,66 +793,6 @@ static int acm_tty_write(struct tty_struct *tty,
        return count;
 }
 
-static void acm_tty_flush_chars(struct tty_struct *tty)
-{
-       struct acm *acm = tty->driver_data;
-       struct acm_wb *cur;
-       int err;
-       unsigned long flags;
-
-       spin_lock_irqsave(&acm->write_lock, flags);
-
-       cur = acm->putbuffer;
-       if (!cur) /* nothing to do */
-               goto out;
-
-       acm->putbuffer = NULL;
-       err = usb_autopm_get_interface_async(acm->control);
-       if (err < 0) {
-               cur->use = 0;
-               acm->putbuffer = cur;
-               goto out;
-       }
-
-       if (acm->susp_count)
-               usb_anchor_urb(cur->urb, &acm->delayed);
-       else
-               acm_start_wb(acm, cur);
-out:
-       spin_unlock_irqrestore(&acm->write_lock, flags);
-       return;
-}
-
-static int acm_tty_put_char(struct tty_struct *tty, unsigned char ch)
-{
-       struct acm *acm = tty->driver_data;
-       struct acm_wb *cur;
-       int wbn;
-       unsigned long flags;
-
-overflow:
-       cur = acm->putbuffer;
-       if (!cur) {
-               spin_lock_irqsave(&acm->write_lock, flags);
-               wbn = acm_wb_alloc(acm);
-               if (wbn >= 0) {
-                       cur = &acm->wb[wbn];
-                       acm->putbuffer = cur;
-               }
-               spin_unlock_irqrestore(&acm->write_lock, flags);
-               if (!cur)
-                       return 0;
-       }
-
-       if (cur->len == acm->writesize) {
-               acm_tty_flush_chars(tty);
-               goto overflow;
-       }
-
-       cur->buf[cur->len++] = ch;
-       return 1;
-}
-
 static int acm_tty_write_room(struct tty_struct *tty)
 {
        struct acm *acm = tty->driver_data;
@@ -1987,8 +1916,6 @@ static const struct tty_operations acm_ops = {
        .cleanup =              acm_tty_cleanup,
        .hangup =               acm_tty_hangup,
        .write =                acm_tty_write,
-       .put_char =             acm_tty_put_char,
-       .flush_chars =          acm_tty_flush_chars,
        .write_room =           acm_tty_write_room,
        .ioctl =                acm_tty_ioctl,
        .throttle =             acm_tty_throttle,
index eacc116..ca06b20 100644 (file)
@@ -96,7 +96,6 @@ struct acm {
        unsigned long read_urbs_free;
        struct urb *read_urbs[ACM_NR];
        struct acm_rb read_buffers[ACM_NR];
-       struct acm_wb *putbuffer;                       /* for acm_tty_put_char() */
        int rx_buflimit;
        spinlock_t read_lock;
        u8 *notification_buffer;                        /* to reassemble fragmented notifications */
index bec581f..656d247 100644 (file)
@@ -460,7 +460,7 @@ static int service_outstanding_interrupt(struct wdm_device *desc)
 
        set_bit(WDM_RESPONDING, &desc->flags);
        spin_unlock_irq(&desc->iuspin);
-       rv = usb_submit_urb(desc->response, GFP_KERNEL);
+       rv = usb_submit_urb(desc->response, GFP_ATOMIC);
        spin_lock_irq(&desc->iuspin);
        if (rv) {
                dev_err(&desc->intf->dev,
index 50a2362..48277bb 100644 (file)
@@ -246,6 +246,31 @@ int of_usb_update_otg_caps(struct device_node *np,
 }
 EXPORT_SYMBOL_GPL(of_usb_update_otg_caps);
 
+/**
+ * usb_of_get_companion_dev - Find the companion device
+ * @dev: the device pointer to find a companion
+ *
+ * Find the companion device from platform bus.
+ *
+ * Takes a reference to the returned struct device which needs to be dropped
+ * after use.
+ *
+ * Return: On success, a pointer to the companion device, %NULL on failure.
+ */
+struct device *usb_of_get_companion_dev(struct device *dev)
+{
+       struct device_node *node;
+       struct platform_device *pdev = NULL;
+
+       node = of_parse_phandle(dev->of_node, "companion", 0);
+       if (node)
+               pdev = of_find_device_by_node(node);
+
+       of_node_put(node);
+
+       return pdev ? &pdev->dev : NULL;
+}
+EXPORT_SYMBOL_GPL(usb_of_get_companion_dev);
 #endif
 
 MODULE_LICENSE("GPL");
index 66fe1b7..0343246 100644 (file)
@@ -515,8 +515,6 @@ static int resume_common(struct device *dev, int event)
                                event == PM_EVENT_RESTORE);
                if (retval) {
                        dev_err(dev, "PCI post-resume error %d!\n", retval);
-                       if (hcd->shared_hcd)
-                               usb_hc_died(hcd->shared_hcd);
                        usb_hc_died(hcd);
                }
        }
index 228672f..bfa5eda 100644 (file)
@@ -1341,6 +1341,11 @@ void usb_enable_interface(struct usb_device *dev,
  * is submitted that needs that bandwidth.  Some other operating systems
  * allocate bandwidth early, when a configuration is chosen.
  *
+ * xHCI reserves bandwidth and configures the alternate setting in
+ * usb_hcd_alloc_bandwidth(). If it fails the original interface altsetting
+ * may be disabled. Drivers cannot rely on any particular alternate
+ * setting being in effect after a failure.
+ *
  * This call is synchronous, and may not be used in an interrupt context.
  * Also, drivers must not change altsettings while urbs are scheduled for
  * endpoints in that interface; all such urbs must first be completed
@@ -1376,6 +1381,12 @@ int usb_set_interface(struct usb_device *dev, int interface, int alternate)
                         alternate);
                return -EINVAL;
        }
+       /*
+        * usb3 hosts configure the interface in usb_hcd_alloc_bandwidth,
+        * including freeing dropped endpoint ring buffers.
+        * Make sure the interface endpoints are flushed before that
+        */
+       usb_disable_interface(dev, iface, false);
 
        /* Make sure we have enough bandwidth for this alternate interface.
         * Remove the current alt setting and add the new alt setting.
index fd77442..651708d 100644 (file)
@@ -105,29 +105,3 @@ usb_of_get_interface_node(struct usb_device *udev, u8 config, u8 ifnum)
        return NULL;
 }
 EXPORT_SYMBOL_GPL(usb_of_get_interface_node);
-
-/**
- * usb_of_get_companion_dev - Find the companion device
- * @dev: the device pointer to find a companion
- *
- * Find the companion device from platform bus.
- *
- * Takes a reference to the returned struct device which needs to be dropped
- * after use.
- *
- * Return: On success, a pointer to the companion device, %NULL on failure.
- */
-struct device *usb_of_get_companion_dev(struct device *dev)
-{
-       struct device_node *node;
-       struct platform_device *pdev = NULL;
-
-       node = of_parse_phandle(dev->of_node, "companion", 0);
-       if (node)
-               pdev = of_find_device_by_node(node);
-
-       of_node_put(node);
-
-       return pdev ? &pdev->dev : NULL;
-}
-EXPORT_SYMBOL_GPL(usb_of_get_companion_dev);
index 097057d..e77dfe5 100644 (file)
@@ -178,6 +178,10 @@ static const struct usb_device_id usb_quirk_list[] = {
        /* CBM - Flash disk */
        { USB_DEVICE(0x0204, 0x6025), .driver_info = USB_QUIRK_RESET_RESUME },
 
+       /* WORLDE Controller KS49 or Prodipe MIDI 49C USB controller */
+       { USB_DEVICE(0x0218, 0x0201), .driver_info =
+                       USB_QUIRK_CONFIG_INTF_STRINGS },
+
        /* WORLDE easy key (easykey.25) MIDI controller  */
        { USB_DEVICE(0x0218, 0x0401), .driver_info =
                        USB_QUIRK_CONFIG_INTF_STRINGS },
@@ -406,6 +410,9 @@ static const struct usb_device_id usb_quirk_list[] = {
        { USB_DEVICE(0x2040, 0x7200), .driver_info =
                        USB_QUIRK_CONFIG_INTF_STRINGS },
 
+       /* DJI CineSSD */
+       { USB_DEVICE(0x2ca3, 0x0031), .driver_info = USB_QUIRK_NO_LPM },
+
        /* INTEL VALUE SSD */
        { USB_DEVICE(0x8086, 0xf1a5), .driver_info = USB_QUIRK_RESET_RESUME },
 
index 9a53a58..5776428 100644 (file)
@@ -412,8 +412,6 @@ static int dwc2_driver_probe(struct platform_device *dev)
        dev_dbg(&dev->dev, "mapped PA %08lx to VA %p\n",
                (unsigned long)res->start, hsotg->regs);
 
-       hsotg->needs_byte_swap = dwc2_check_core_endianness(hsotg);
-
        retval = dwc2_lowlevel_hw_init(hsotg);
        if (retval)
                return retval;
@@ -438,6 +436,8 @@ static int dwc2_driver_probe(struct platform_device *dev)
        if (retval)
                return retval;
 
+       hsotg->needs_byte_swap = dwc2_check_core_endianness(hsotg);
+
        retval = dwc2_get_dr_mode(hsotg);
        if (retval)
                goto error;
index 40bf9e0..4c2771c 100644 (file)
@@ -180,8 +180,7 @@ static int dwc3_of_simple_remove(struct platform_device *pdev)
        return 0;
 }
 
-#ifdef CONFIG_PM
-static int dwc3_of_simple_runtime_suspend(struct device *dev)
+static int __maybe_unused dwc3_of_simple_runtime_suspend(struct device *dev)
 {
        struct dwc3_of_simple   *simple = dev_get_drvdata(dev);
        int                     i;
@@ -192,7 +191,7 @@ static int dwc3_of_simple_runtime_suspend(struct device *dev)
        return 0;
 }
 
-static int dwc3_of_simple_runtime_resume(struct device *dev)
+static int __maybe_unused dwc3_of_simple_runtime_resume(struct device *dev)
 {
        struct dwc3_of_simple   *simple = dev_get_drvdata(dev);
        int                     ret;
@@ -210,7 +209,7 @@ static int dwc3_of_simple_runtime_resume(struct device *dev)
        return 0;
 }
 
-static int dwc3_of_simple_suspend(struct device *dev)
+static int __maybe_unused dwc3_of_simple_suspend(struct device *dev)
 {
        struct dwc3_of_simple *simple = dev_get_drvdata(dev);
 
@@ -220,7 +219,7 @@ static int dwc3_of_simple_suspend(struct device *dev)
        return 0;
 }
 
-static int dwc3_of_simple_resume(struct device *dev)
+static int __maybe_unused dwc3_of_simple_resume(struct device *dev)
 {
        struct dwc3_of_simple *simple = dev_get_drvdata(dev);
 
@@ -229,7 +228,6 @@ static int dwc3_of_simple_resume(struct device *dev)
 
        return 0;
 }
-#endif
 
 static const struct dev_pm_ops dwc3_of_simple_dev_pm_ops = {
        SET_SYSTEM_SLEEP_PM_OPS(dwc3_of_simple_suspend, dwc3_of_simple_resume)
index 5edd794..1286076 100644 (file)
@@ -85,8 +85,8 @@ static int dwc3_byt_enable_ulpi_refclock(struct pci_dev *pci)
        u32             value;
 
        reg = pcim_iomap(pci, GP_RWBAR, 0);
-       if (IS_ERR(reg))
-               return PTR_ERR(reg);
+       if (!reg)
+               return -ENOMEM;
 
        value = readl(reg + GP_RWREG1);
        if (!(value & GP_RWREG1_ULPI_REFCLK_DISABLE))
index 032ea7d..2b53194 100644 (file)
@@ -473,7 +473,6 @@ static int dwc3_gadget_set_xfer_resource(struct dwc3_ep *dep)
 
 /**
  * dwc3_gadget_start_config - configure ep resources
- * @dwc: pointer to our controller context structure
  * @dep: endpoint that is being enabled
  *
  * Issue a %DWC3_DEPCMD_DEPSTARTCFG command to @dep. After the command's
index 53a48f5..587c503 100644 (file)
@@ -1063,12 +1063,15 @@ static const struct usb_gadget_ops fotg210_gadget_ops = {
 static int fotg210_udc_remove(struct platform_device *pdev)
 {
        struct fotg210_udc *fotg210 = platform_get_drvdata(pdev);
+       int i;
 
        usb_del_gadget_udc(&fotg210->gadget);
        iounmap(fotg210->reg);
        free_irq(platform_get_irq(pdev, 0), fotg210);
 
        fotg210_ep_free_request(&fotg210->ep[0]->ep, fotg210->ep0_req);
+       for (i = 0; i < FOTG210_MAX_NUM_EP; i++)
+               kfree(fotg210->ep[i]);
        kfree(fotg210);
 
        return 0;
@@ -1099,7 +1102,7 @@ static int fotg210_udc_probe(struct platform_device *pdev)
        /* initialize udc */
        fotg210 = kzalloc(sizeof(struct fotg210_udc), GFP_KERNEL);
        if (fotg210 == NULL)
-               goto err_alloc;
+               goto err;
 
        for (i = 0; i < FOTG210_MAX_NUM_EP; i++) {
                _ep[i] = kzalloc(sizeof(struct fotg210_ep), GFP_KERNEL);
@@ -1111,7 +1114,7 @@ static int fotg210_udc_probe(struct platform_device *pdev)
        fotg210->reg = ioremap(res->start, resource_size(res));
        if (fotg210->reg == NULL) {
                pr_err("ioremap error.\n");
-               goto err_map;
+               goto err_alloc;
        }
 
        spin_lock_init(&fotg210->lock);
@@ -1159,7 +1162,7 @@ static int fotg210_udc_probe(struct platform_device *pdev)
        fotg210->ep0_req = fotg210_ep_alloc_request(&fotg210->ep[0]->ep,
                                GFP_KERNEL);
        if (fotg210->ep0_req == NULL)
-               goto err_req;
+               goto err_map;
 
        fotg210_init(fotg210);
 
@@ -1187,12 +1190,14 @@ err_req:
        fotg210_ep_free_request(&fotg210->ep[0]->ep, fotg210->ep0_req);
 
 err_map:
-       if (fotg210->reg)
-               iounmap(fotg210->reg);
+       iounmap(fotg210->reg);
 
 err_alloc:
+       for (i = 0; i < FOTG210_MAX_NUM_EP; i++)
+               kfree(fotg210->ep[i]);
        kfree(fotg210);
 
+err:
        return ret;
 }
 
index 318246d..b02ab2a 100644 (file)
@@ -1545,11 +1545,14 @@ static int net2280_pullup(struct usb_gadget *_gadget, int is_on)
                writel(tmp | BIT(USB_DETECT_ENABLE), &dev->usb->usbctl);
        } else {
                writel(tmp & ~BIT(USB_DETECT_ENABLE), &dev->usb->usbctl);
-               stop_activity(dev, dev->driver);
+               stop_activity(dev, NULL);
        }
 
        spin_unlock_irqrestore(&dev->lock, flags);
 
+       if (!is_on && dev->driver)
+               dev->driver->disconnect(&dev->gadget);
+
        return 0;
 }
 
@@ -2466,8 +2469,11 @@ static void stop_activity(struct net2280 *dev, struct usb_gadget_driver *driver)
                nuke(&dev->ep[i]);
 
        /* report disconnect; the driver is already quiesced */
-       if (driver)
+       if (driver) {
+               spin_unlock(&dev->lock);
                driver->disconnect(&dev->gadget);
+               spin_lock(&dev->lock);
+       }
 
        usb_reinit(dev);
 }
@@ -3341,6 +3347,8 @@ next_endpoints:
                BIT(PCI_RETRY_ABORT_INTERRUPT))
 
 static void handle_stat1_irqs(struct net2280 *dev, u32 stat)
+__releases(dev->lock)
+__acquires(dev->lock)
 {
        struct net2280_ep       *ep;
        u32                     tmp, num, mask, scratch;
@@ -3381,12 +3389,14 @@ static void handle_stat1_irqs(struct net2280 *dev, u32 stat)
                        if (disconnect || reset) {
                                stop_activity(dev, dev->driver);
                                ep0_start(dev);
+                               spin_unlock(&dev->lock);
                                if (reset)
                                        usb_gadget_udc_reset
                                                (&dev->gadget, dev->driver);
                                else
                                        (dev->driver->disconnect)
                                                (&dev->gadget);
+                               spin_lock(&dev->lock);
                                return;
                        }
                }
@@ -3405,6 +3415,7 @@ static void handle_stat1_irqs(struct net2280 *dev, u32 stat)
        tmp = BIT(SUSPEND_REQUEST_CHANGE_INTERRUPT);
        if (stat & tmp) {
                writel(tmp, &dev->regs->irqstat1);
+               spin_unlock(&dev->lock);
                if (stat & BIT(SUSPEND_REQUEST_INTERRUPT)) {
                        if (dev->driver->suspend)
                                dev->driver->suspend(&dev->gadget);
@@ -3415,6 +3426,7 @@ static void handle_stat1_irqs(struct net2280 *dev, u32 stat)
                                dev->driver->resume(&dev->gadget);
                        /* at high speed, note erratum 0133 */
                }
+               spin_lock(&dev->lock);
                stat &= ~tmp;
        }
 
index 1f879b3..e1656f3 100644 (file)
@@ -812,12 +812,15 @@ static void usb3_irq_epc_int_1_speed(struct renesas_usb3 *usb3)
        switch (speed) {
        case USB_STA_SPEED_SS:
                usb3->gadget.speed = USB_SPEED_SUPER;
+               usb3->gadget.ep0->maxpacket = USB3_EP0_SS_MAX_PACKET_SIZE;
                break;
        case USB_STA_SPEED_HS:
                usb3->gadget.speed = USB_SPEED_HIGH;
+               usb3->gadget.ep0->maxpacket = USB3_EP0_HSFS_MAX_PACKET_SIZE;
                break;
        case USB_STA_SPEED_FS:
                usb3->gadget.speed = USB_SPEED_FULL;
+               usb3->gadget.ep0->maxpacket = USB3_EP0_HSFS_MAX_PACKET_SIZE;
                break;
        default:
                usb3->gadget.speed = USB_SPEED_UNKNOWN;
@@ -2513,7 +2516,7 @@ static int renesas_usb3_init_ep(struct renesas_usb3 *usb3, struct device *dev,
                        /* for control pipe */
                        usb3->gadget.ep0 = &usb3_ep->ep;
                        usb_ep_set_maxpacket_limit(&usb3_ep->ep,
-                                               USB3_EP0_HSFS_MAX_PACKET_SIZE);
+                                               USB3_EP0_SS_MAX_PACKET_SIZE);
                        usb3_ep->ep.caps.type_control = true;
                        usb3_ep->ep.caps.dir_in = true;
                        usb3_ep->ep.caps.dir_out = true;
index 072bd5d..5b8a3d9 100644 (file)
@@ -2555,7 +2555,7 @@ static int u132_get_frame(struct usb_hcd *hcd)
        } else {
                int frame = 0;
                dev_err(&u132->platform_dev->dev, "TODO: u132_get_frame\n");
-               msleep(100);
+               mdelay(100);
                return frame;
        }
 }
index ef350c3..b1f27aa 100644 (file)
@@ -1613,6 +1613,10 @@ void xhci_endpoint_copy(struct xhci_hcd *xhci,
        in_ep_ctx->ep_info2 = out_ep_ctx->ep_info2;
        in_ep_ctx->deq = out_ep_ctx->deq;
        in_ep_ctx->tx_info = out_ep_ctx->tx_info;
+       if (xhci->quirks & XHCI_MTK_HOST) {
+               in_ep_ctx->reserved[0] = out_ep_ctx->reserved[0];
+               in_ep_ctx->reserved[1] = out_ep_ctx->reserved[1];
+       }
 }
 
 /* Copy output xhci_slot_ctx to the input xhci_slot_ctx.
index 8dc77e3..94e9392 100644 (file)
@@ -153,7 +153,7 @@ static int xhci_plat_probe(struct platform_device *pdev)
 {
        const struct xhci_plat_priv *priv_match;
        const struct hc_driver  *driver;
-       struct device           *sysdev;
+       struct device           *sysdev, *tmpdev;
        struct xhci_hcd         *xhci;
        struct resource         *res;
        struct usb_hcd          *hcd;
@@ -273,19 +273,24 @@ static int xhci_plat_probe(struct platform_device *pdev)
                goto disable_clk;
        }
 
-       if (device_property_read_bool(sysdev, "usb2-lpm-disable"))
-               xhci->quirks |= XHCI_HW_LPM_DISABLE;
+       /* imod_interval is the interrupt moderation value in nanoseconds. */
+       xhci->imod_interval = 40000;
 
-       if (device_property_read_bool(sysdev, "usb3-lpm-capable"))
-               xhci->quirks |= XHCI_LPM_SUPPORT;
+       /* Iterate over all parent nodes for finding quirks */
+       for (tmpdev = &pdev->dev; tmpdev; tmpdev = tmpdev->parent) {
 
-       if (device_property_read_bool(&pdev->dev, "quirk-broken-port-ped"))
-               xhci->quirks |= XHCI_BROKEN_PORT_PED;
+               if (device_property_read_bool(tmpdev, "usb2-lpm-disable"))
+                       xhci->quirks |= XHCI_HW_LPM_DISABLE;
 
-       /* imod_interval is the interrupt moderation value in nanoseconds. */
-       xhci->imod_interval = 40000;
-       device_property_read_u32(sysdev, "imod-interval-ns",
-                                &xhci->imod_interval);
+               if (device_property_read_bool(tmpdev, "usb3-lpm-capable"))
+                       xhci->quirks |= XHCI_LPM_SUPPORT;
+
+               if (device_property_read_bool(tmpdev, "quirk-broken-port-ped"))
+                       xhci->quirks |= XHCI_BROKEN_PORT_PED;
+
+               device_property_read_u32(tmpdev, "imod-interval-ns",
+                                        &xhci->imod_interval);
+       }
 
        hcd->usb_phy = devm_usb_get_phy_by_phandle(sysdev, "usb-phy", 0);
        if (IS_ERR(hcd->usb_phy)) {
index 61f48b1..0420eef 100644 (file)
@@ -37,6 +37,21 @@ static unsigned long long quirks;
 module_param(quirks, ullong, S_IRUGO);
 MODULE_PARM_DESC(quirks, "Bit flags for quirks to be enabled as default");
 
+static bool td_on_ring(struct xhci_td *td, struct xhci_ring *ring)
+{
+       struct xhci_segment *seg = ring->first_seg;
+
+       if (!td || !td->start_seg)
+               return false;
+       do {
+               if (seg == td->start_seg)
+                       return true;
+               seg = seg->next;
+       } while (seg && seg != ring->first_seg);
+
+       return false;
+}
+
 /* TODO: copied from ehci-hcd.c - can this be refactored? */
 /*
  * xhci_handshake - spin reading hc until handshake completes or fails
@@ -1571,6 +1586,21 @@ static int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
                goto done;
        }
 
+       /*
+        * check ring is not re-allocated since URB was enqueued. If it is, then
+        * make sure none of the ring related pointers in this URB private data
+        * are touched, such as td_list, otherwise we overwrite freed data
+        */
+       if (!td_on_ring(&urb_priv->td[0], ep_ring)) {
+               xhci_err(xhci, "Canceled URB td not found on endpoint ring");
+               for (i = urb_priv->num_tds_done; i < urb_priv->num_tds; i++) {
+                       td = &urb_priv->td[i];
+                       if (!list_empty(&td->cancelled_td_list))
+                               list_del_init(&td->cancelled_td_list);
+               }
+               goto err_giveback;
+       }
+
        if (xhci->xhc_state & XHCI_STATE_HALTED) {
                xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
                                "HC halted, freeing TD manually.");
index 82f2206..b5d6616 100644 (file)
@@ -369,7 +369,7 @@ static unsigned char parport_uss720_frob_control(struct parport *pp, unsigned ch
        mask &= 0x0f;
        val &= 0x0f;
        d = (priv->reg[1] & (~mask)) ^ val;
-       if (set_1284_register(pp, 2, d, GFP_KERNEL))
+       if (set_1284_register(pp, 2, d, GFP_ATOMIC))
                return 0;
        priv->reg[1] = d;
        return d & 0xf;
@@ -379,7 +379,7 @@ static unsigned char parport_uss720_read_status(struct parport *pp)
 {
        unsigned char ret;
 
-       if (get_1284_register(pp, 1, &ret, GFP_KERNEL))
+       if (get_1284_register(pp, 1, &ret, GFP_ATOMIC))
                return 0;
        return ret & 0xf8;
 }
index 3be40ea..6d9fd5f 100644 (file)
@@ -413,6 +413,9 @@ static ssize_t yurex_read(struct file *file, char __user *buffer, size_t count,
        spin_unlock_irqrestore(&dev->lock, flags);
        mutex_unlock(&dev->io_mutex);
 
+       if (WARN_ON_ONCE(len >= sizeof(in_buffer)))
+               return -EIO;
+
        return simple_read_from_buffer(buffer, count, ppos, in_buffer, len);
 }
 
@@ -421,13 +424,13 @@ static ssize_t yurex_write(struct file *file, const char __user *user_buffer,
 {
        struct usb_yurex *dev;
        int i, set = 0, retval = 0;
-       char buffer[16];
+       char buffer[16 + 1];
        char *data = buffer;
        unsigned long long c, c2 = 0;
        signed long timeout = 0;
        DEFINE_WAIT(wait);
 
-       count = min(sizeof(buffer), count);
+       count = min(sizeof(buffer) - 1, count);
        dev = file->private_data;
 
        /* verify that we actually have some data to write */
@@ -446,6 +449,7 @@ static ssize_t yurex_write(struct file *file, const char __user *user_buffer,
                retval = -EFAULT;
                goto error;
        }
+       buffer[count] = 0;
        memset(dev->cntl_buffer, CMD_PADDING, YUREX_BUF_SIZE);
 
        switch (buffer[0]) {
index eecfd06..d045d84 100644 (file)
@@ -107,8 +107,12 @@ static int mtu3_device_enable(struct mtu3 *mtu)
                (SSUSB_U2_PORT_DIS | SSUSB_U2_PORT_PDN |
                SSUSB_U2_PORT_HOST_SEL));
 
-       if (mtu->ssusb->dr_mode == USB_DR_MODE_OTG)
+       if (mtu->ssusb->dr_mode == USB_DR_MODE_OTG) {
                mtu3_setbits(ibase, SSUSB_U2_CTRL(0), SSUSB_U2_PORT_OTG_SEL);
+               if (mtu->is_u3_ip)
+                       mtu3_setbits(ibase, SSUSB_U3_CTRL(0),
+                                    SSUSB_U3_PORT_DUAL_MODE);
+       }
 
        return ssusb_check_clocks(mtu->ssusb, check_clk);
 }
index 6ee3714..a45bb25 100644 (file)
 
 /* U3D_SSUSB_U3_CTRL_0P */
 #define SSUSB_U3_PORT_SSP_SPEED        BIT(9)
+#define SSUSB_U3_PORT_DUAL_MODE        BIT(7)
 #define SSUSB_U3_PORT_HOST_SEL         BIT(2)
 #define SSUSB_U3_PORT_PDN              BIT(1)
 #define SSUSB_U3_PORT_DIS              BIT(0)
index e53c682..9bbcee3 100644 (file)
@@ -173,7 +173,7 @@ struct ump_interrupt {
 }  __attribute__((packed));
 
 
-#define TIUMP_GET_PORT_FROM_CODE(c)    (((c) >> 4) - 3)
+#define TIUMP_GET_PORT_FROM_CODE(c)    (((c) >> 6) & 0x01)
 #define TIUMP_GET_FUNC_FROM_CODE(c)    ((c) & 0x0f)
 #define TIUMP_INTERRUPT_CODE_LSR       0x03
 #define TIUMP_INTERRUPT_CODE_MSR       0x04
index 3010878..e3c5832 100644 (file)
@@ -1119,7 +1119,7 @@ static void ti_break(struct tty_struct *tty, int break_state)
 
 static int ti_get_port_from_code(unsigned char code)
 {
-       return (code >> 4) - 3;
+       return (code >> 6) & 0x01;
 }
 
 static int ti_get_func_from_code(unsigned char code)
index c267f28..e227bb5 100644 (file)
@@ -376,6 +376,15 @@ static int queuecommand_lck(struct scsi_cmnd *srb,
                return 0;
        }
 
+       if ((us->fflags & US_FL_NO_ATA_1X) &&
+                       (srb->cmnd[0] == ATA_12 || srb->cmnd[0] == ATA_16)) {
+               memcpy(srb->sense_buffer, usb_stor_sense_invalidCDB,
+                      sizeof(usb_stor_sense_invalidCDB));
+               srb->result = SAM_STAT_CHECK_CONDITION;
+               done(srb);
+               return 0;
+       }
+
        /* enqueue the command and wake up the control thread */
        srb->scsi_done = done;
        us->srb = srb;
index 9e9de54..1f7b401 100644 (file)
@@ -842,6 +842,27 @@ static int uas_slave_configure(struct scsi_device *sdev)
                sdev->skip_ms_page_8 = 1;
                sdev->wce_default_on = 1;
        }
+
+       /*
+        * Some disks return the total number of blocks in response
+        * to READ CAPACITY rather than the highest block number.
+        * If this device makes that mistake, tell the sd driver.
+        */
+       if (devinfo->flags & US_FL_FIX_CAPACITY)
+               sdev->fix_capacity = 1;
+
+       /*
+        * Some devices don't like MODE SENSE with page=0x3f,
+        * which is the command used for checking if a device
+        * is write-protected.  Now that we tell the sd driver
+        * to do a 192-byte transfer with this command the
+        * majority of devices work fine, but a few still can't
+        * handle it.  The sd driver will simply assume those
+        * devices are write-enabled.
+        */
+       if (devinfo->flags & US_FL_NO_WP_DETECT)
+               sdev->skip_ms_page_3f = 1;
+
        scsi_change_queue_depth(sdev, devinfo->qdepth - 2);
        return 0;
 }
index 22fcfcc..f7f83b2 100644 (file)
@@ -2288,6 +2288,13 @@ UNUSUAL_DEV(  0x2735, 0x100b, 0x0000, 0x9999,
                USB_SC_DEVICE, USB_PR_DEVICE, NULL,
                US_FL_GO_SLOW ),
 
+/* Reported-by: Tim Anderson <tsa@biglakesoftware.com> */
+UNUSUAL_DEV(  0x2ca3, 0x0031, 0x0000, 0x9999,
+               "DJI",
+               "CineSSD",
+               USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+               US_FL_NO_ATA_1X),
+
 /*
  * Reported by Frederic Marchal <frederic.marchal@wowcompany.com>
  * Mio Moov 330
index 95a2b10..76299b6 100644 (file)
@@ -255,12 +255,13 @@ EXPORT_SYMBOL_GPL(typec_altmode_unregister_driver);
 /* API for the port drivers */
 
 /**
- * typec_match_altmode - Match SVID to an array of alternate modes
+ * typec_match_altmode - Match SVID and mode to an array of alternate modes
  * @altmodes: Array of alternate modes
- * @n: Number of elements in the array, or -1 for NULL termiated arrays
+ * @n: Number of elements in the array, or -1 for NULL terminated arrays
  * @svid: Standard or Vendor ID to match with
+ * @mode: Mode to match with
  *
- * Return pointer to an alternate mode with SVID mathing @svid, or NULL when no
+ * Return pointer to an alternate mode with SVID matching @svid, or NULL when no
  * match is found.
  */
 struct typec_altmode *typec_match_altmode(struct typec_altmode **altmodes,
index c202975..e61dffb 100644 (file)
@@ -1484,7 +1484,6 @@ EXPORT_SYMBOL_GPL(typec_set_mode);
  * typec_port_register_altmode - Register USB Type-C Port Alternate Mode
  * @port: USB Type-C Port that supports the alternate mode
  * @desc: Description of the alternate mode
- * @drvdata: Private pointer to driver specific info
  *
  * This routine is used to register an alternate mode that @port is capable of
  * supporting.
index b459edf..90d387b 100644 (file)
@@ -79,15 +79,19 @@ config XEN_BALLOON_MEMORY_HOTPLUG_LIMIT
          This value is used to allocate enough space in internal
          tables needed for physical memory administration.
 
-config XEN_SCRUB_PAGES
-       bool "Scrub pages before returning them to system"
+config XEN_SCRUB_PAGES_DEFAULT
+       bool "Scrub pages before returning them to system by default"
        depends on XEN_BALLOON
        default y
        help
          Scrub pages before returning them to the system for reuse by
          other domains.  This makes sure that any confidential data
          is not accidentally visible to other domains.  Is it more
-         secure, but slightly less efficient.
+         secure, but slightly less efficient. This can be controlled with
+         xen_scrub_pages=0 parameter and
+         /sys/devices/system/xen_memory/xen_memory0/scrub_pages.
+         This option only sets the default value.
+
          If in doubt, say yes.
 
 config XEN_DEV_EVTCHN
index d4265c8..b1357aa 100644 (file)
@@ -19,15 +19,16 @@ static void enable_hotplug_cpu(int cpu)
 
 static void disable_hotplug_cpu(int cpu)
 {
-       if (cpu_online(cpu)) {
-               lock_device_hotplug();
+       if (!cpu_is_hotpluggable(cpu))
+               return;
+       lock_device_hotplug();
+       if (cpu_online(cpu))
                device_offline(get_cpu_device(cpu));
-               unlock_device_hotplug();
-       }
-       if (cpu_present(cpu))
+       if (!cpu_online(cpu) && cpu_present(cpu)) {
                xen_arch_unregister_cpu(cpu);
-
-       set_cpu_present(cpu, false);
+               set_cpu_present(cpu, false);
+       }
+       unlock_device_hotplug();
 }
 
 static int vcpu_online(unsigned int cpu)
index 08e4af0..e6c1934 100644 (file)
@@ -138,7 +138,7 @@ static int set_evtchn_to_irq(unsigned evtchn, unsigned irq)
                clear_evtchn_to_irq_row(row);
        }
 
-       evtchn_to_irq[EVTCHN_ROW(evtchn)][EVTCHN_COL(evtchn)] = irq;
+       evtchn_to_irq[row][col] = irq;
        return 0;
 }
 
index 57390c7..b0b02a5 100644 (file)
@@ -492,12 +492,19 @@ static bool in_range(struct gntdev_grant_map *map,
        return true;
 }
 
-static void unmap_if_in_range(struct gntdev_grant_map *map,
-                             unsigned long start, unsigned long end)
+static int unmap_if_in_range(struct gntdev_grant_map *map,
+                             unsigned long start, unsigned long end,
+                             bool blockable)
 {
        unsigned long mstart, mend;
        int err;
 
+       if (!in_range(map, start, end))
+               return 0;
+
+       if (!blockable)
+               return -EAGAIN;
+
        mstart = max(start, map->vma->vm_start);
        mend   = min(end,   map->vma->vm_end);
        pr_debug("map %d+%d (%lx %lx), range %lx %lx, mrange %lx %lx\n",
@@ -508,6 +515,8 @@ static void unmap_if_in_range(struct gntdev_grant_map *map,
                                (mstart - map->vma->vm_start) >> PAGE_SHIFT,
                                (mend - mstart) >> PAGE_SHIFT);
        WARN_ON(err);
+
+       return 0;
 }
 
 static int mn_invl_range_start(struct mmu_notifier *mn,
@@ -519,25 +528,20 @@ static int mn_invl_range_start(struct mmu_notifier *mn,
        struct gntdev_grant_map *map;
        int ret = 0;
 
-       /* TODO do we really need a mutex here? */
        if (blockable)
                mutex_lock(&priv->lock);
        else if (!mutex_trylock(&priv->lock))
                return -EAGAIN;
 
        list_for_each_entry(map, &priv->maps, next) {
-               if (in_range(map, start, end)) {
-                       ret = -EAGAIN;
+               ret = unmap_if_in_range(map, start, end, blockable);
+               if (ret)
                        goto out_unlock;
-               }
-               unmap_if_in_range(map, start, end);
        }
        list_for_each_entry(map, &priv->freeable_maps, next) {
-               if (in_range(map, start, end)) {
-                       ret = -EAGAIN;
+               ret = unmap_if_in_range(map, start, end, blockable);
+               if (ret)
                        goto out_unlock;
-               }
-               unmap_if_in_range(map, start, end);
        }
 
 out_unlock:
index c93d8ef..5bb01a6 100644 (file)
@@ -280,9 +280,11 @@ static void sysrq_handler(struct xenbus_watch *watch, const char *path,
                /*
                 * The Xenstore watch fires directly after registering it and
                 * after a suspend/resume cycle. So ENOENT is no error but
-                * might happen in those cases.
+                * might happen in those cases. ERANGE is observed when we get
+                * an empty value (''), this happens when we acknowledge the
+                * request by writing '\0' below.
                 */
-               if (err != -ENOENT)
+               if (err != -ENOENT && err != -ERANGE)
                        pr_err("Error %d reading sysrq code in control/sysrq\n",
                               err);
                xenbus_transaction_end(xbt, 1);
index 084799c..3782cf0 100644 (file)
 
 #include <xen/interface/memory.h>
 #include <xen/mem-reservation.h>
+#include <linux/moduleparam.h>
+
+bool __read_mostly xen_scrub_pages = IS_ENABLED(CONFIG_XEN_SCRUB_PAGES_DEFAULT);
+core_param(xen_scrub_pages, xen_scrub_pages, bool, 0);
 
 /*
  * Use one extent per PAGE_SIZE to avoid to break down the page into
index 294f35c..63c1494 100644 (file)
@@ -44,6 +44,7 @@
 #include <xen/xenbus.h>
 #include <xen/features.h>
 #include <xen/page.h>
+#include <xen/mem-reservation.h>
 
 #define PAGES2KB(_p) ((_p)<<(PAGE_SHIFT-10))
 
@@ -137,6 +138,7 @@ static DEVICE_ULONG_ATTR(schedule_delay, 0444, balloon_stats.schedule_delay);
 static DEVICE_ULONG_ATTR(max_schedule_delay, 0644, balloon_stats.max_schedule_delay);
 static DEVICE_ULONG_ATTR(retry_count, 0444, balloon_stats.retry_count);
 static DEVICE_ULONG_ATTR(max_retry_count, 0644, balloon_stats.max_retry_count);
+static DEVICE_BOOL_ATTR(scrub_pages, 0644, xen_scrub_pages);
 
 static ssize_t show_target_kb(struct device *dev, struct device_attribute *attr,
                              char *buf)
@@ -203,6 +205,7 @@ static struct attribute *balloon_attrs[] = {
        &dev_attr_max_schedule_delay.attr.attr,
        &dev_attr_retry_count.attr.attr,
        &dev_attr_max_retry_count.attr.attr,
+       &dev_attr_scrub_pages.attr.attr,
        NULL
 };
 
index 0c3285c..476dcbb 100644 (file)
@@ -98,13 +98,13 @@ static int afs_proc_cells_write(struct file *file, char *buf, size_t size)
                goto inval;
 
        args = strchr(name, ' ');
-       if (!args)
-               goto inval;
-       do {
-               *args++ = 0;
-       } while(*args == ' ');
-       if (!*args)
-               goto inval;
+       if (args) {
+               do {
+                       *args++ = 0;
+               } while(*args == ' ');
+               if (!*args)
+                       goto inval;
+       }
 
        /* determine command to perform */
        _debug("cmd=%s name=%s args=%s", buf, name, args);
@@ -120,7 +120,6 @@ static int afs_proc_cells_write(struct file *file, char *buf, size_t size)
 
                if (test_and_set_bit(AFS_CELL_FL_NO_GC, &cell->flags))
                        afs_put_cell(net, cell);
-               printk("kAFS: Added new cell '%s'\n", name);
        } else {
                goto inval;
        }
index 53af9f5..2cddfe7 100644 (file)
@@ -1280,6 +1280,7 @@ struct btrfs_root {
        int send_in_progress;
        struct btrfs_subvolume_writers *subv_writers;
        atomic_t will_be_snapshotted;
+       atomic_t snapshot_force_cow;
 
        /* For qgroup metadata reserved space */
        spinlock_t qgroup_meta_rsv_lock;
@@ -3390,9 +3391,9 @@ do {                                                                      \
 #define btrfs_debug(fs_info, fmt, args...) \
        btrfs_no_printk(fs_info, KERN_DEBUG fmt, ##args)
 #define btrfs_debug_in_rcu(fs_info, fmt, args...) \
-       btrfs_no_printk(fs_info, KERN_DEBUG fmt, ##args)
+       btrfs_no_printk_in_rcu(fs_info, KERN_DEBUG fmt, ##args)
 #define btrfs_debug_rl_in_rcu(fs_info, fmt, args...) \
-       btrfs_no_printk(fs_info, KERN_DEBUG fmt, ##args)
+       btrfs_no_printk_in_rcu(fs_info, KERN_DEBUG fmt, ##args)
 #define btrfs_debug_rl(fs_info, fmt, args...) \
        btrfs_no_printk(fs_info, KERN_DEBUG fmt, ##args)
 #endif
@@ -3404,6 +3405,13 @@ do {                                                     \
        rcu_read_unlock();                              \
 } while (0)
 
+#define btrfs_no_printk_in_rcu(fs_info, fmt, args...)  \
+do {                                                   \
+       rcu_read_lock();                                \
+       btrfs_no_printk(fs_info, fmt, ##args);          \
+       rcu_read_unlock();                              \
+} while (0)
+
 #define btrfs_printk_ratelimited(fs_info, fmt, args...)                \
 do {                                                           \
        static DEFINE_RATELIMIT_STATE(_rs,                      \
index 5124c15..05dc3c1 100644 (file)
@@ -1187,6 +1187,7 @@ static void __setup_root(struct btrfs_root *root, struct btrfs_fs_info *fs_info,
        atomic_set(&root->log_batch, 0);
        refcount_set(&root->refs, 1);
        atomic_set(&root->will_be_snapshotted, 0);
+       atomic_set(&root->snapshot_force_cow, 0);
        root->log_transid = 0;
        root->log_transid_committed = -1;
        root->last_log_commit = 0;
index de6f75f..2d90742 100644 (file)
@@ -5800,7 +5800,7 @@ void btrfs_trans_release_chunk_metadata(struct btrfs_trans_handle *trans)
  * root: the root of the parent directory
  * rsv: block reservation
  * items: the number of items that we need do reservation
- * qgroup_reserved: used to return the reserved size in qgroup
+ * use_global_rsv: allow fallback to the global block reservation
  *
  * This function is used to reserve the space for snapshot/subvolume
  * creation and deletion. Those operations are different with the
@@ -5810,10 +5810,10 @@ void btrfs_trans_release_chunk_metadata(struct btrfs_trans_handle *trans)
  * the space reservation mechanism in start_transaction().
  */
 int btrfs_subvolume_reserve_metadata(struct btrfs_root *root,
-                                    struct btrfs_block_rsv *rsv,
-                                    int items,
+                                    struct btrfs_block_rsv *rsv, int items,
                                     bool use_global_rsv)
 {
+       u64 qgroup_num_bytes = 0;
        u64 num_bytes;
        int ret;
        struct btrfs_fs_info *fs_info = root->fs_info;
@@ -5821,12 +5821,11 @@ int btrfs_subvolume_reserve_metadata(struct btrfs_root *root,
 
        if (test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags)) {
                /* One for parent inode, two for dir entries */
-               num_bytes = 3 * fs_info->nodesize;
-               ret = btrfs_qgroup_reserve_meta_prealloc(root, num_bytes, true);
+               qgroup_num_bytes = 3 * fs_info->nodesize;
+               ret = btrfs_qgroup_reserve_meta_prealloc(root,
+                               qgroup_num_bytes, true);
                if (ret)
                        return ret;
-       } else {
-               num_bytes = 0;
        }
 
        num_bytes = btrfs_calc_trans_metadata_size(fs_info, items);
@@ -5838,8 +5837,8 @@ int btrfs_subvolume_reserve_metadata(struct btrfs_root *root,
        if (ret == -ENOSPC && use_global_rsv)
                ret = btrfs_block_rsv_migrate(global_rsv, rsv, num_bytes, 1);
 
-       if (ret && num_bytes)
-               btrfs_qgroup_free_meta_prealloc(root, num_bytes);
+       if (ret && qgroup_num_bytes)
+               btrfs_qgroup_free_meta_prealloc(root, qgroup_num_bytes);
 
        return ret;
 }
index 9357a19..3ea5339 100644 (file)
@@ -1271,7 +1271,7 @@ static noinline int run_delalloc_nocow(struct inode *inode,
        u64 disk_num_bytes;
        u64 ram_bytes;
        int extent_type;
-       int ret, err;
+       int ret;
        int type;
        int nocow;
        int check_prev = 1;
@@ -1403,11 +1403,8 @@ next_slot:
                         * if there are pending snapshots for this root,
                         * we fall into common COW way.
                         */
-                       if (!nolock) {
-                               err = btrfs_start_write_no_snapshotting(root);
-                               if (!err)
-                                       goto out_check;
-                       }
+                       if (!nolock && atomic_read(&root->snapshot_force_cow))
+                               goto out_check;
                        /*
                         * force cow if csum exists in the range.
                         * this ensure that csum for a given extent are
@@ -1416,9 +1413,6 @@ next_slot:
                        ret = csum_exist_in_range(fs_info, disk_bytenr,
                                                  num_bytes);
                        if (ret) {
-                               if (!nolock)
-                                       btrfs_end_write_no_snapshotting(root);
-
                                /*
                                 * ret could be -EIO if the above fails to read
                                 * metadata.
@@ -1431,11 +1425,8 @@ next_slot:
                                WARN_ON_ONCE(nolock);
                                goto out_check;
                        }
-                       if (!btrfs_inc_nocow_writers(fs_info, disk_bytenr)) {
-                               if (!nolock)
-                                       btrfs_end_write_no_snapshotting(root);
+                       if (!btrfs_inc_nocow_writers(fs_info, disk_bytenr))
                                goto out_check;
-                       }
                        nocow = 1;
                } else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
                        extent_end = found_key.offset +
@@ -1448,8 +1439,6 @@ next_slot:
 out_check:
                if (extent_end <= start) {
                        path->slots[0]++;
-                       if (!nolock && nocow)
-                               btrfs_end_write_no_snapshotting(root);
                        if (nocow)
                                btrfs_dec_nocow_writers(fs_info, disk_bytenr);
                        goto next_slot;
@@ -1471,8 +1460,6 @@ out_check:
                                             end, page_started, nr_written, 1,
                                             NULL);
                        if (ret) {
-                               if (!nolock && nocow)
-                                       btrfs_end_write_no_snapshotting(root);
                                if (nocow)
                                        btrfs_dec_nocow_writers(fs_info,
                                                                disk_bytenr);
@@ -1492,8 +1479,6 @@ out_check:
                                          ram_bytes, BTRFS_COMPRESS_NONE,
                                          BTRFS_ORDERED_PREALLOC);
                        if (IS_ERR(em)) {
-                               if (!nolock && nocow)
-                                       btrfs_end_write_no_snapshotting(root);
                                if (nocow)
                                        btrfs_dec_nocow_writers(fs_info,
                                                                disk_bytenr);
@@ -1532,8 +1517,6 @@ out_check:
                                             EXTENT_CLEAR_DATA_RESV,
                                             PAGE_UNLOCK | PAGE_SET_PRIVATE2);
 
-               if (!nolock && nocow)
-                       btrfs_end_write_no_snapshotting(root);
                cur_offset = extent_end;
 
                /*
@@ -6639,6 +6622,8 @@ static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
                drop_inode = 1;
        } else {
                struct dentry *parent = dentry->d_parent;
+               int ret;
+
                err = btrfs_update_inode(trans, root, inode);
                if (err)
                        goto fail;
@@ -6652,7 +6637,12 @@ static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
                                goto fail;
                }
                d_instantiate(dentry, inode);
-               btrfs_log_new_name(trans, BTRFS_I(inode), NULL, parent);
+               ret = btrfs_log_new_name(trans, BTRFS_I(inode), NULL, parent,
+                                        true, NULL);
+               if (ret == BTRFS_NEED_TRANS_COMMIT) {
+                       err = btrfs_commit_transaction(trans);
+                       trans = NULL;
+               }
        }
 
 fail:
@@ -9388,14 +9378,21 @@ static int btrfs_rename_exchange(struct inode *old_dir,
        u64 new_idx = 0;
        u64 root_objectid;
        int ret;
-       int ret2;
        bool root_log_pinned = false;
        bool dest_log_pinned = false;
+       struct btrfs_log_ctx ctx_root;
+       struct btrfs_log_ctx ctx_dest;
+       bool sync_log_root = false;
+       bool sync_log_dest = false;
+       bool commit_transaction = false;
 
        /* we only allow rename subvolume link between subvolumes */
        if (old_ino != BTRFS_FIRST_FREE_OBJECTID && root != dest)
                return -EXDEV;
 
+       btrfs_init_log_ctx(&ctx_root, old_inode);
+       btrfs_init_log_ctx(&ctx_dest, new_inode);
+
        /* close the race window with snapshot create/destroy ioctl */
        if (old_ino == BTRFS_FIRST_FREE_OBJECTID)
                down_read(&fs_info->subvol_sem);
@@ -9542,15 +9539,29 @@ static int btrfs_rename_exchange(struct inode *old_dir,
 
        if (root_log_pinned) {
                parent = new_dentry->d_parent;
-               btrfs_log_new_name(trans, BTRFS_I(old_inode), BTRFS_I(old_dir),
-                               parent);
+               ret = btrfs_log_new_name(trans, BTRFS_I(old_inode),
+                                        BTRFS_I(old_dir), parent,
+                                        false, &ctx_root);
+               if (ret == BTRFS_NEED_LOG_SYNC)
+                       sync_log_root = true;
+               else if (ret == BTRFS_NEED_TRANS_COMMIT)
+                       commit_transaction = true;
+               ret = 0;
                btrfs_end_log_trans(root);
                root_log_pinned = false;
        }
        if (dest_log_pinned) {
-               parent = old_dentry->d_parent;
-               btrfs_log_new_name(trans, BTRFS_I(new_inode), BTRFS_I(new_dir),
-                               parent);
+               if (!commit_transaction) {
+                       parent = old_dentry->d_parent;
+                       ret = btrfs_log_new_name(trans, BTRFS_I(new_inode),
+                                                BTRFS_I(new_dir), parent,
+                                                false, &ctx_dest);
+                       if (ret == BTRFS_NEED_LOG_SYNC)
+                               sync_log_dest = true;
+                       else if (ret == BTRFS_NEED_TRANS_COMMIT)
+                               commit_transaction = true;
+                       ret = 0;
+               }
                btrfs_end_log_trans(dest);
                dest_log_pinned = false;
        }
@@ -9583,8 +9594,26 @@ out_fail:
                        dest_log_pinned = false;
                }
        }
-       ret2 = btrfs_end_transaction(trans);
-       ret = ret ? ret : ret2;
+       if (!ret && sync_log_root && !commit_transaction) {
+               ret = btrfs_sync_log(trans, BTRFS_I(old_inode)->root,
+                                    &ctx_root);
+               if (ret)
+                       commit_transaction = true;
+       }
+       if (!ret && sync_log_dest && !commit_transaction) {
+               ret = btrfs_sync_log(trans, BTRFS_I(new_inode)->root,
+                                    &ctx_dest);
+               if (ret)
+                       commit_transaction = true;
+       }
+       if (commit_transaction) {
+               ret = btrfs_commit_transaction(trans);
+       } else {
+               int ret2;
+
+               ret2 = btrfs_end_transaction(trans);
+               ret = ret ? ret : ret2;
+       }
 out_notrans:
        if (new_ino == BTRFS_FIRST_FREE_OBJECTID)
                up_read(&fs_info->subvol_sem);
@@ -9661,6 +9690,9 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
        int ret;
        u64 old_ino = btrfs_ino(BTRFS_I(old_inode));
        bool log_pinned = false;
+       struct btrfs_log_ctx ctx;
+       bool sync_log = false;
+       bool commit_transaction = false;
 
        if (btrfs_ino(BTRFS_I(new_dir)) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)
                return -EPERM;
@@ -9818,8 +9850,15 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
        if (log_pinned) {
                struct dentry *parent = new_dentry->d_parent;
 
-               btrfs_log_new_name(trans, BTRFS_I(old_inode), BTRFS_I(old_dir),
-                               parent);
+               btrfs_init_log_ctx(&ctx, old_inode);
+               ret = btrfs_log_new_name(trans, BTRFS_I(old_inode),
+                                        BTRFS_I(old_dir), parent,
+                                        false, &ctx);
+               if (ret == BTRFS_NEED_LOG_SYNC)
+                       sync_log = true;
+               else if (ret == BTRFS_NEED_TRANS_COMMIT)
+                       commit_transaction = true;
+               ret = 0;
                btrfs_end_log_trans(root);
                log_pinned = false;
        }
@@ -9856,7 +9895,19 @@ out_fail:
                btrfs_end_log_trans(root);
                log_pinned = false;
        }
-       btrfs_end_transaction(trans);
+       if (!ret && sync_log) {
+               ret = btrfs_sync_log(trans, BTRFS_I(old_inode)->root, &ctx);
+               if (ret)
+                       commit_transaction = true;
+       }
+       if (commit_transaction) {
+               ret = btrfs_commit_transaction(trans);
+       } else {
+               int ret2;
+
+               ret2 = btrfs_end_transaction(trans);
+               ret = ret ? ret : ret2;
+       }
 out_notrans:
        if (old_ino == BTRFS_FIRST_FREE_OBJECTID)
                up_read(&fs_info->subvol_sem);
index 63600dc..d60b6ca 100644 (file)
@@ -747,6 +747,7 @@ static int create_snapshot(struct btrfs_root *root, struct inode *dir,
        struct btrfs_pending_snapshot *pending_snapshot;
        struct btrfs_trans_handle *trans;
        int ret;
+       bool snapshot_force_cow = false;
 
        if (!test_bit(BTRFS_ROOT_REF_COWS, &root->state))
                return -EINVAL;
@@ -763,6 +764,11 @@ static int create_snapshot(struct btrfs_root *root, struct inode *dir,
                goto free_pending;
        }
 
+       /*
+        * Force new buffered writes to reserve space even when NOCOW is
+        * possible. This is to avoid later writeback (running dealloc) to
+        * fallback to COW mode and unexpectedly fail with ENOSPC.
+        */
        atomic_inc(&root->will_be_snapshotted);
        smp_mb__after_atomic();
        /* wait for no snapshot writes */
@@ -773,6 +779,14 @@ static int create_snapshot(struct btrfs_root *root, struct inode *dir,
        if (ret)
                goto dec_and_free;
 
+       /*
+        * All previous writes have started writeback in NOCOW mode, so now
+        * we force future writes to fallback to COW mode during snapshot
+        * creation.
+        */
+       atomic_inc(&root->snapshot_force_cow);
+       snapshot_force_cow = true;
+
        btrfs_wait_ordered_extents(root, U64_MAX, 0, (u64)-1);
 
        btrfs_init_block_rsv(&pending_snapshot->block_rsv,
@@ -837,6 +851,8 @@ static int create_snapshot(struct btrfs_root *root, struct inode *dir,
 fail:
        btrfs_subvolume_release_metadata(fs_info, &pending_snapshot->block_rsv);
 dec_and_free:
+       if (snapshot_force_cow)
+               atomic_dec(&root->snapshot_force_cow);
        if (atomic_dec_and_test(&root->will_be_snapshotted))
                wake_up_var(&root->will_be_snapshotted);
 free_pending:
@@ -3453,6 +3469,25 @@ static int btrfs_extent_same_range(struct inode *src, u64 loff, u64 olen,
 
                same_lock_start = min_t(u64, loff, dst_loff);
                same_lock_len = max_t(u64, loff, dst_loff) + len - same_lock_start;
+       } else {
+               /*
+                * If the source and destination inodes are different, the
+                * source's range end offset matches the source's i_size, that
+                * i_size is not a multiple of the sector size, and the
+                * destination range does not go past the destination's i_size,
+                * we must round down the length to the nearest sector size
+                * multiple. If we don't do this adjustment we end replacing
+                * with zeroes the bytes in the range that starts at the
+                * deduplication range's end offset and ends at the next sector
+                * size multiple.
+                */
+               if (loff + olen == i_size_read(src) &&
+                   dst_loff + len < i_size_read(dst)) {
+                       const u64 sz = BTRFS_I(src)->root->fs_info->sectorsize;
+
+                       len = round_down(i_size_read(src), sz) - loff;
+                       olen = len;
+               }
        }
 
 again:
index 4353bb6..d4917c0 100644 (file)
@@ -1019,10 +1019,9 @@ out_add_root:
        spin_unlock(&fs_info->qgroup_lock);
 
        ret = btrfs_commit_transaction(trans);
-       if (ret) {
-               trans = NULL;
+       trans = NULL;
+       if (ret)
                goto out_free_path;
-       }
 
        ret = qgroup_rescan_init(fs_info, 0, 1);
        if (!ret) {
index 1650dc4..3c2ae0e 100644 (file)
@@ -6025,14 +6025,25 @@ void btrfs_record_snapshot_destroy(struct btrfs_trans_handle *trans,
  * Call this after adding a new name for a file and it will properly
  * update the log to reflect the new name.
  *
- * It will return zero if all goes well, and it will return 1 if a
- * full transaction commit is required.
+ * @ctx can not be NULL when @sync_log is false, and should be NULL when it's
+ * true (because it's not used).
+ *
+ * Return value depends on whether @sync_log is true or false.
+ * When true: returns BTRFS_NEED_TRANS_COMMIT if the transaction needs to be
+ *            committed by the caller, and BTRFS_DONT_NEED_TRANS_COMMIT
+ *            otherwise.
+ * When false: returns BTRFS_DONT_NEED_LOG_SYNC if the caller does not need to
+ *             to sync the log, BTRFS_NEED_LOG_SYNC if it needs to sync the log,
+ *             or BTRFS_NEED_TRANS_COMMIT if the transaction needs to be
+ *             committed (without attempting to sync the log).
  */
 int btrfs_log_new_name(struct btrfs_trans_handle *trans,
                        struct btrfs_inode *inode, struct btrfs_inode *old_dir,
-                       struct dentry *parent)
+                       struct dentry *parent,
+                       bool sync_log, struct btrfs_log_ctx *ctx)
 {
        struct btrfs_fs_info *fs_info = trans->fs_info;
+       int ret;
 
        /*
         * this will force the logging code to walk the dentry chain
@@ -6047,9 +6058,34 @@ int btrfs_log_new_name(struct btrfs_trans_handle *trans,
         */
        if (inode->logged_trans <= fs_info->last_trans_committed &&
            (!old_dir || old_dir->logged_trans <= fs_info->last_trans_committed))
-               return 0;
+               return sync_log ? BTRFS_DONT_NEED_TRANS_COMMIT :
+                       BTRFS_DONT_NEED_LOG_SYNC;
+
+       if (sync_log) {
+               struct btrfs_log_ctx ctx2;
+
+               btrfs_init_log_ctx(&ctx2, &inode->vfs_inode);
+               ret = btrfs_log_inode_parent(trans, inode, parent, 0, LLONG_MAX,
+                                            LOG_INODE_EXISTS, &ctx2);
+               if (ret == BTRFS_NO_LOG_SYNC)
+                       return BTRFS_DONT_NEED_TRANS_COMMIT;
+               else if (ret)
+                       return BTRFS_NEED_TRANS_COMMIT;
+
+               ret = btrfs_sync_log(trans, inode->root, &ctx2);
+               if (ret)
+                       return BTRFS_NEED_TRANS_COMMIT;
+               return BTRFS_DONT_NEED_TRANS_COMMIT;
+       }
+
+       ASSERT(ctx);
+       ret = btrfs_log_inode_parent(trans, inode, parent, 0, LLONG_MAX,
+                                    LOG_INODE_EXISTS, ctx);
+       if (ret == BTRFS_NO_LOG_SYNC)
+               return BTRFS_DONT_NEED_LOG_SYNC;
+       else if (ret)
+               return BTRFS_NEED_TRANS_COMMIT;
 
-       return btrfs_log_inode_parent(trans, inode, parent, 0, LLONG_MAX,
-                                     LOG_INODE_EXISTS, NULL);
+       return BTRFS_NEED_LOG_SYNC;
 }
 
index 122e68b..7ab9bb8 100644 (file)
@@ -71,8 +71,16 @@ void btrfs_record_unlink_dir(struct btrfs_trans_handle *trans,
                             int for_rename);
 void btrfs_record_snapshot_destroy(struct btrfs_trans_handle *trans,
                                   struct btrfs_inode *dir);
+/* Return values for btrfs_log_new_name() */
+enum {
+       BTRFS_DONT_NEED_TRANS_COMMIT,
+       BTRFS_NEED_TRANS_COMMIT,
+       BTRFS_DONT_NEED_LOG_SYNC,
+       BTRFS_NEED_LOG_SYNC,
+};
 int btrfs_log_new_name(struct btrfs_trans_handle *trans,
                        struct btrfs_inode *inode, struct btrfs_inode *old_dir,
-                       struct dentry *parent);
+                       struct dentry *parent,
+                       bool sync_log, struct btrfs_log_ctx *ctx);
 
 #endif
index da86706..f4405e4 100644 (file)
@@ -4491,7 +4491,12 @@ again:
 
        /* Now btrfs_update_device() will change the on-disk size. */
        ret = btrfs_update_device(trans, device);
-       btrfs_end_transaction(trans);
+       if (ret < 0) {
+               btrfs_abort_transaction(trans, ret);
+               btrfs_end_transaction(trans);
+       } else {
+               ret = btrfs_commit_transaction(trans);
+       }
 done:
        btrfs_free_path(path);
        if (ret) {
index 43ca3b7..eab1359 100644 (file)
@@ -602,6 +602,8 @@ static int extra_mon_dispatch(struct ceph_client *client, struct ceph_msg *msg)
 
 /*
  * create a new fs client
+ *
+ * Success or not, this function consumes @fsopt and @opt.
  */
 static struct ceph_fs_client *create_fs_client(struct ceph_mount_options *fsopt,
                                        struct ceph_options *opt)
@@ -609,17 +611,20 @@ static struct ceph_fs_client *create_fs_client(struct ceph_mount_options *fsopt,
        struct ceph_fs_client *fsc;
        int page_count;
        size_t size;
-       int err = -ENOMEM;
+       int err;
 
        fsc = kzalloc(sizeof(*fsc), GFP_KERNEL);
-       if (!fsc)
-               return ERR_PTR(-ENOMEM);
+       if (!fsc) {
+               err = -ENOMEM;
+               goto fail;
+       }
 
        fsc->client = ceph_create_client(opt, fsc);
        if (IS_ERR(fsc->client)) {
                err = PTR_ERR(fsc->client);
                goto fail;
        }
+       opt = NULL; /* fsc->client now owns this */
 
        fsc->client->extra_mon_dispatch = extra_mon_dispatch;
        fsc->client->osdc.abort_on_full = true;
@@ -677,6 +682,9 @@ fail_client:
        ceph_destroy_client(fsc->client);
 fail:
        kfree(fsc);
+       if (opt)
+               ceph_destroy_options(opt);
+       destroy_mount_options(fsopt);
        return ERR_PTR(err);
 }
 
@@ -1042,8 +1050,6 @@ static struct dentry *ceph_mount(struct file_system_type *fs_type,
        fsc = create_fs_client(fsopt, opt);
        if (IS_ERR(fsc)) {
                res = ERR_CAST(fsc);
-               destroy_mount_options(fsopt);
-               ceph_destroy_options(opt);
                goto out_final;
        }
 
index 35c83fe..abcd78e 100644 (file)
@@ -6,6 +6,7 @@ config CIFS
        select CRYPTO_MD4
        select CRYPTO_MD5
        select CRYPTO_SHA256
+       select CRYPTO_SHA512
        select CRYPTO_CMAC
        select CRYPTO_HMAC
        select CRYPTO_ARC4
index b380e08..a2b2355 100644 (file)
@@ -105,9 +105,6 @@ convert_sfm_char(const __u16 src_char, char *target)
        case SFM_LESSTHAN:
                *target = '<';
                break;
-       case SFM_SLASH:
-               *target = '\\';
-               break;
        case SFM_SPACE:
                *target = ' ';
                break;
index dc2f4cf..5657b79 100644 (file)
@@ -601,10 +601,15 @@ CIFSSMBNegotiate(const unsigned int xid, struct cifs_ses *ses)
        }
 
        count = 0;
+       /*
+        * We know that all the name entries in the protocols array
+        * are short (< 16 bytes anyway) and are NUL terminated.
+        */
        for (i = 0; i < CIFS_NUM_PROT; i++) {
-               strncpy(pSMB->DialectsArray+count, protocols[i].name, 16);
-               count += strlen(protocols[i].name) + 1;
-               /* null at end of source and target buffers anyway */
+               size_t len = strlen(protocols[i].name) + 1;
+
+               memcpy(pSMB->DialectsArray+count, protocols[i].name, len);
+               count += len;
        }
        inc_rfc1001_len(pSMB, count);
        pSMB->ByteCount = cpu_to_le16(count);
index c832a8a..7aa08db 100644 (file)
@@ -2547,7 +2547,7 @@ cifs_setup_ipc(struct cifs_ses *ses, struct smb_vol *volume_info)
        if (tcon == NULL)
                return -ENOMEM;
 
-       snprintf(unc, sizeof(unc), "\\\\%s\\IPC$", ses->serverName);
+       snprintf(unc, sizeof(unc), "\\\\%s\\IPC$", ses->server->hostname);
 
        /* cannot fail */
        nls_codepage = load_nls_default();
index d32eaa4..6e8765f 100644 (file)
@@ -467,6 +467,8 @@ cifs_sfu_type(struct cifs_fattr *fattr, const char *path,
        oparms.cifs_sb = cifs_sb;
        oparms.desired_access = GENERIC_READ;
        oparms.create_options = CREATE_NOT_DIR;
+       if (backup_cred(cifs_sb))
+               oparms.create_options |= CREATE_OPEN_BACKUP_INTENT;
        oparms.disposition = FILE_OPEN;
        oparms.path = path;
        oparms.fid = &fid;
index dacb2c0..6926685 100644 (file)
@@ -402,9 +402,17 @@ is_valid_oplock_break(char *buffer, struct TCP_Server_Info *srv)
                        (struct smb_com_transaction_change_notify_rsp *)buf;
                struct file_notify_information *pnotify;
                __u32 data_offset = 0;
+               size_t len = srv->total_read - sizeof(pSMBr->hdr.smb_buf_length);
+
                if (get_bcc(buf) > sizeof(struct file_notify_information)) {
                        data_offset = le32_to_cpu(pSMBr->DataOffset);
 
+                       if (data_offset >
+                           len - sizeof(struct file_notify_information)) {
+                               cifs_dbg(FYI, "invalid data_offset %u\n",
+                                        data_offset);
+                               return true;
+                       }
                        pnotify = (struct file_notify_information *)
                                ((char *)&pSMBr->hdr.Protocol + data_offset);
                        cifs_dbg(FYI, "dnotify on %s Action: 0x%x\n",
index eeab81c..e169e1a 100644 (file)
@@ -376,8 +376,15 @@ static char *nxt_dir_entry(char *old_entry, char *end_of_smb, int level)
 
                new_entry = old_entry + sizeof(FIND_FILE_STANDARD_INFO) +
                                pfData->FileNameLength;
-       } else
-               new_entry = old_entry + le32_to_cpu(pDirInfo->NextEntryOffset);
+       } else {
+               u32 next_offset = le32_to_cpu(pDirInfo->NextEntryOffset);
+
+               if (old_entry + next_offset < old_entry) {
+                       cifs_dbg(VFS, "invalid offset %u\n", next_offset);
+                       return NULL;
+               }
+               new_entry = old_entry + next_offset;
+       }
        cifs_dbg(FYI, "new entry %p old entry %p\n", new_entry, old_entry);
        /* validate that new_entry is not past end of SMB */
        if (new_entry >= end_of_smb) {
index db04536..6a9c475 100644 (file)
@@ -248,16 +248,20 @@ smb2_check_message(char *buf, unsigned int len, struct TCP_Server_Info *srvr)
                 * MacOS server pads after SMB2.1 write response with 3 bytes
                 * of junk. Other servers match RFC1001 len to actual
                 * SMB2/SMB3 frame length (header + smb2 response specific data)
-                * Some windows servers do too when compounding is used.
-                * Log the server error (once), but allow it and continue
+                * Some windows servers also pad up to 8 bytes when compounding.
+                * If pad is longer than eight bytes, log the server behavior
+                * (once), since may indicate a problem but allow it and continue
                 * since the frame is parseable.
                 */
                if (clc_len < len) {
-                       printk_once(KERN_WARNING
-                               "SMB2 server sent bad RFC1001 len %d not %d\n",
-                               len, clc_len);
+                       pr_warn_once(
+                            "srv rsp padded more than expected. Length %d not %d for cmd:%d mid:%llu\n",
+                            len, clc_len, command, mid);
                        return 0;
                }
+               pr_warn_once(
+                       "srv rsp too short, len %d not %d. cmd:%d mid:%llu\n",
+                       len, clc_len, command, mid);
 
                return 1;
        }
index 247a98e..d954ce3 100644 (file)
@@ -630,7 +630,10 @@ smb2_is_path_accessible(const unsigned int xid, struct cifs_tcon *tcon,
        oparms.tcon = tcon;
        oparms.desired_access = FILE_READ_ATTRIBUTES;
        oparms.disposition = FILE_OPEN;
-       oparms.create_options = 0;
+       if (backup_cred(cifs_sb))
+               oparms.create_options = CREATE_OPEN_BACKUP_INTENT;
+       else
+               oparms.create_options = 0;
        oparms.fid = &fid;
        oparms.reconnect = false;
 
@@ -779,7 +782,10 @@ smb2_query_eas(const unsigned int xid, struct cifs_tcon *tcon,
        oparms.tcon = tcon;
        oparms.desired_access = FILE_READ_EA;
        oparms.disposition = FILE_OPEN;
-       oparms.create_options = 0;
+       if (backup_cred(cifs_sb))
+               oparms.create_options = CREATE_OPEN_BACKUP_INTENT;
+       else
+               oparms.create_options = 0;
        oparms.fid = &fid;
        oparms.reconnect = false;
 
@@ -858,7 +864,10 @@ smb2_set_ea(const unsigned int xid, struct cifs_tcon *tcon,
        oparms.tcon = tcon;
        oparms.desired_access = FILE_WRITE_EA;
        oparms.disposition = FILE_OPEN;
-       oparms.create_options = 0;
+       if (backup_cred(cifs_sb))
+               oparms.create_options = CREATE_OPEN_BACKUP_INTENT;
+       else
+               oparms.create_options = 0;
        oparms.fid = &fid;
        oparms.reconnect = false;
 
@@ -1453,7 +1462,10 @@ smb2_query_dir_first(const unsigned int xid, struct cifs_tcon *tcon,
        oparms.tcon = tcon;
        oparms.desired_access = FILE_READ_ATTRIBUTES | FILE_READ_DATA;
        oparms.disposition = FILE_OPEN;
-       oparms.create_options = 0;
+       if (backup_cred(cifs_sb))
+               oparms.create_options = CREATE_OPEN_BACKUP_INTENT;
+       else
+               oparms.create_options = 0;
        oparms.fid = fid;
        oparms.reconnect = false;
 
@@ -1857,7 +1869,10 @@ smb2_query_symlink(const unsigned int xid, struct cifs_tcon *tcon,
        oparms.tcon = tcon;
        oparms.desired_access = FILE_READ_ATTRIBUTES;
        oparms.disposition = FILE_OPEN;
-       oparms.create_options = 0;
+       if (backup_cred(cifs_sb))
+               oparms.create_options = CREATE_OPEN_BACKUP_INTENT;
+       else
+               oparms.create_options = 0;
        oparms.fid = &fid;
        oparms.reconnect = false;
 
@@ -3639,7 +3654,7 @@ struct smb_version_values smb21_values = {
 struct smb_version_values smb3any_values = {
        .version_string = SMB3ANY_VERSION_STRING,
        .protocol_id = SMB302_PROT_ID, /* doesn't matter, send protocol array */
-       .req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION,
+       .req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION | SMB2_GLOBAL_CAP_DIRECTORY_LEASING,
        .large_lock_type = 0,
        .exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE_LOCK,
        .shared_lock_type = SMB2_LOCKFLAG_SHARED_LOCK,
@@ -3660,7 +3675,7 @@ struct smb_version_values smb3any_values = {
 struct smb_version_values smbdefault_values = {
        .version_string = SMBDEFAULT_VERSION_STRING,
        .protocol_id = SMB302_PROT_ID, /* doesn't matter, send protocol array */
-       .req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION,
+       .req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION | SMB2_GLOBAL_CAP_DIRECTORY_LEASING,
        .large_lock_type = 0,
        .exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE_LOCK,
        .shared_lock_type = SMB2_LOCKFLAG_SHARED_LOCK,
@@ -3681,7 +3696,7 @@ struct smb_version_values smbdefault_values = {
 struct smb_version_values smb30_values = {
        .version_string = SMB30_VERSION_STRING,
        .protocol_id = SMB30_PROT_ID,
-       .req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION,
+       .req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION | SMB2_GLOBAL_CAP_DIRECTORY_LEASING,
        .large_lock_type = 0,
        .exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE_LOCK,
        .shared_lock_type = SMB2_LOCKFLAG_SHARED_LOCK,
@@ -3702,7 +3717,7 @@ struct smb_version_values smb30_values = {
 struct smb_version_values smb302_values = {
        .version_string = SMB302_VERSION_STRING,
        .protocol_id = SMB302_PROT_ID,
-       .req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION,
+       .req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION | SMB2_GLOBAL_CAP_DIRECTORY_LEASING,
        .large_lock_type = 0,
        .exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE_LOCK,
        .shared_lock_type = SMB2_LOCKFLAG_SHARED_LOCK,
@@ -3723,7 +3738,7 @@ struct smb_version_values smb302_values = {
 struct smb_version_values smb311_values = {
        .version_string = SMB311_VERSION_STRING,
        .protocol_id = SMB311_PROT_ID,
-       .req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION,
+       .req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION | SMB2_GLOBAL_CAP_DIRECTORY_LEASING,
        .large_lock_type = 0,
        .exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE_LOCK,
        .shared_lock_type = SMB2_LOCKFLAG_SHARED_LOCK,
index 5740aa8..f54d07b 100644 (file)
@@ -2178,6 +2178,9 @@ SMB2_open_init(struct cifs_tcon *tcon, struct smb_rqst *rqst, __u8 *oplock,
        if (!(server->capabilities & SMB2_GLOBAL_CAP_LEASING) ||
            *oplock == SMB2_OPLOCK_LEVEL_NONE)
                req->RequestedOplockLevel = *oplock;
+       else if (!(server->capabilities & SMB2_GLOBAL_CAP_DIRECTORY_LEASING) &&
+                 (oparms->create_options & CREATE_NOT_FILE))
+               req->RequestedOplockLevel = *oplock; /* no srv lease support */
        else {
                rc = add_lease_context(server, iov, &n_iov,
                                       oparms->fid->lease_key, oplock);
@@ -2456,14 +2459,14 @@ SMB2_ioctl(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
        /* We check for obvious errors in the output buffer length and offset */
        if (*plen == 0)
                goto ioctl_exit; /* server returned no data */
-       else if (*plen > 0xFF00) {
+       else if (*plen > rsp_iov.iov_len || *plen > 0xFF00) {
                cifs_dbg(VFS, "srv returned invalid ioctl length: %d\n", *plen);
                *plen = 0;
                rc = -EIO;
                goto ioctl_exit;
        }
 
-       if (rsp_iov.iov_len < le32_to_cpu(rsp->OutputOffset) + *plen) {
+       if (rsp_iov.iov_len - *plen < le32_to_cpu(rsp->OutputOffset)) {
                cifs_dbg(VFS, "Malformed ioctl resp: len %d offset %d\n", *plen,
                        le32_to_cpu(rsp->OutputOffset));
                *plen = 0;
@@ -3574,33 +3577,38 @@ num_entries(char *bufstart, char *end_of_buf, char **lastentry, size_t size)
        int len;
        unsigned int entrycount = 0;
        unsigned int next_offset = 0;
-       FILE_DIRECTORY_INFO *entryptr;
+       char *entryptr;
+       FILE_DIRECTORY_INFO *dir_info;
 
        if (bufstart == NULL)
                return 0;
 
-       entryptr = (FILE_DIRECTORY_INFO *)bufstart;
+       entryptr = bufstart;
 
        while (1) {
-               entryptr = (FILE_DIRECTORY_INFO *)
-                                       ((char *)entryptr + next_offset);
-
-               if ((char *)entryptr + size > end_of_buf) {
+               if (entryptr + next_offset < entryptr ||
+                   entryptr + next_offset > end_of_buf ||
+                   entryptr + next_offset + size > end_of_buf) {
                        cifs_dbg(VFS, "malformed search entry would overflow\n");
                        break;
                }
 
-               len = le32_to_cpu(entryptr->FileNameLength);
-               if ((char *)entryptr + len + size > end_of_buf) {
+               entryptr = entryptr + next_offset;
+               dir_info = (FILE_DIRECTORY_INFO *)entryptr;
+
+               len = le32_to_cpu(dir_info->FileNameLength);
+               if (entryptr + len < entryptr ||
+                   entryptr + len > end_of_buf ||
+                   entryptr + len + size > end_of_buf) {
                        cifs_dbg(VFS, "directory entry name would overflow frame end of buf %p\n",
                                 end_of_buf);
                        break;
                }
 
-               *lastentry = (char *)entryptr;
+               *lastentry = entryptr;
                entrycount++;
 
-               next_offset = le32_to_cpu(entryptr->NextEntryOffset);
+               next_offset = le32_to_cpu(dir_info->NextEntryOffset);
                if (!next_offset)
                        break;
        }
index 34830f6..8220a16 100644 (file)
@@ -1637,6 +1637,14 @@ static void nfs_state_set_delegation(struct nfs4_state *state,
        write_sequnlock(&state->seqlock);
 }
 
+static void nfs_state_clear_delegation(struct nfs4_state *state)
+{
+       write_seqlock(&state->seqlock);
+       nfs4_stateid_copy(&state->stateid, &state->open_stateid);
+       clear_bit(NFS_DELEGATED_STATE, &state->flags);
+       write_sequnlock(&state->seqlock);
+}
+
 static int update_open_stateid(struct nfs4_state *state,
                const nfs4_stateid *open_stateid,
                const nfs4_stateid *delegation,
@@ -2145,10 +2153,7 @@ int nfs4_open_delegation_recall(struct nfs_open_context *ctx,
        if (IS_ERR(opendata))
                return PTR_ERR(opendata);
        nfs4_stateid_copy(&opendata->o_arg.u.delegation, stateid);
-       write_seqlock(&state->seqlock);
-       nfs4_stateid_copy(&state->stateid, &state->open_stateid);
-       write_sequnlock(&state->seqlock);
-       clear_bit(NFS_DELEGATED_STATE, &state->flags);
+       nfs_state_clear_delegation(state);
        switch (type & (FMODE_READ|FMODE_WRITE)) {
        case FMODE_READ|FMODE_WRITE:
        case FMODE_WRITE:
@@ -2601,10 +2606,7 @@ static void nfs_finish_clear_delegation_stateid(struct nfs4_state *state,
                const nfs4_stateid *stateid)
 {
        nfs_remove_bad_delegation(state->inode, stateid);
-       write_seqlock(&state->seqlock);
-       nfs4_stateid_copy(&state->stateid, &state->open_stateid);
-       write_sequnlock(&state->seqlock);
-       clear_bit(NFS_DELEGATED_STATE, &state->flags);
+       nfs_state_clear_delegation(state);
 }
 
 static void nfs40_clear_delegation_stateid(struct nfs4_state *state)
@@ -2672,15 +2674,20 @@ static void nfs41_check_delegation_stateid(struct nfs4_state *state)
        delegation = rcu_dereference(NFS_I(state->inode)->delegation);
        if (delegation == NULL) {
                rcu_read_unlock();
+               nfs_state_clear_delegation(state);
                return;
        }
 
        nfs4_stateid_copy(&stateid, &delegation->stateid);
-       if (test_bit(NFS_DELEGATION_REVOKED, &delegation->flags) ||
-               !test_and_clear_bit(NFS_DELEGATION_TEST_EXPIRED,
-                       &delegation->flags)) {
+       if (test_bit(NFS_DELEGATION_REVOKED, &delegation->flags)) {
+               rcu_read_unlock();
+               nfs_state_clear_delegation(state);
+               return;
+       }
+
+       if (!test_and_clear_bit(NFS_DELEGATION_TEST_EXPIRED,
+                               &delegation->flags)) {
                rcu_read_unlock();
-               nfs_finish_clear_delegation_stateid(state, &stateid);
                return;
        }
 
index 3df0eb5..40a08cd 100644 (file)
@@ -1390,6 +1390,8 @@ int nfs4_schedule_stateid_recovery(const struct nfs_server *server, struct nfs4_
 
        if (!nfs4_state_mark_reclaim_nograce(clp, state))
                return -EBADF;
+       nfs_inode_find_delegation_state_and_recover(state->inode,
+                       &state->stateid);
        dprintk("%s: scheduling stateid recovery for server %s\n", __func__,
                        clp->cl_hostname);
        nfs4_schedule_state_manager(clp);
index a275fba..b1483b3 100644 (file)
@@ -1137,7 +1137,7 @@ DECLARE_EVENT_CLASS(nfs4_inode_callback_event,
                TP_fast_assign(
                        __entry->error = error;
                        __entry->fhandle = nfs_fhandle_hash(fhandle);
-                       if (inode != NULL) {
+                       if (!IS_ERR_OR_NULL(inode)) {
                                __entry->fileid = NFS_FILEID(inode);
                                __entry->dev = inode->i_sb->s_dev;
                        } else {
@@ -1194,7 +1194,7 @@ DECLARE_EVENT_CLASS(nfs4_inode_stateid_callback_event,
                TP_fast_assign(
                        __entry->error = error;
                        __entry->fhandle = nfs_fhandle_hash(fhandle);
-                       if (inode != NULL) {
+                       if (!IS_ERR_OR_NULL(inode)) {
                                __entry->fileid = NFS_FILEID(inode);
                                __entry->dev = inode->i_sb->s_dev;
                        } else {
index e8f232d..7d9a51e 100644 (file)
@@ -1740,16 +1740,16 @@ static bool pnfs_within_mdsthreshold(struct nfs_open_context *ctx,
        return ret;
 }
 
-static bool pnfs_prepare_to_retry_layoutget(struct pnfs_layout_hdr *lo)
+static int pnfs_prepare_to_retry_layoutget(struct pnfs_layout_hdr *lo)
 {
        /*
         * send layoutcommit as it can hold up layoutreturn due to lseg
         * reference
         */
        pnfs_layoutcommit_inode(lo->plh_inode, false);
-       return !wait_on_bit_action(&lo->plh_flags, NFS_LAYOUT_RETURN,
+       return wait_on_bit_action(&lo->plh_flags, NFS_LAYOUT_RETURN,
                                   nfs_wait_bit_killable,
-                                  TASK_UNINTERRUPTIBLE);
+                                  TASK_KILLABLE);
 }
 
 static void nfs_layoutget_begin(struct pnfs_layout_hdr *lo)
@@ -1830,7 +1830,9 @@ pnfs_update_layout(struct inode *ino,
        }
 
 lookup_again:
-       nfs4_client_recover_expired_lease(clp);
+       lseg = ERR_PTR(nfs4_client_recover_expired_lease(clp));
+       if (IS_ERR(lseg))
+               goto out;
        first = false;
        spin_lock(&ino->i_lock);
        lo = pnfs_find_alloc_layout(ino, ctx, gfp_flags);
@@ -1863,9 +1865,9 @@ lookup_again:
        if (list_empty(&lo->plh_segs) &&
            atomic_read(&lo->plh_outstanding) != 0) {
                spin_unlock(&ino->i_lock);
-               if (wait_var_event_killable(&lo->plh_outstanding,
-                                       atomic_read(&lo->plh_outstanding) == 0
-                                       || !list_empty(&lo->plh_segs)))
+               lseg = ERR_PTR(wait_var_event_killable(&lo->plh_outstanding,
+                                       atomic_read(&lo->plh_outstanding)));
+               if (IS_ERR(lseg) || !list_empty(&lo->plh_segs))
                        goto out_put_layout_hdr;
                pnfs_put_layout_hdr(lo);
                goto lookup_again;
@@ -1898,8 +1900,11 @@ lookup_again:
                if (test_and_set_bit(NFS_LAYOUT_FIRST_LAYOUTGET,
                                     &lo->plh_flags)) {
                        spin_unlock(&ino->i_lock);
-                       wait_on_bit(&lo->plh_flags, NFS_LAYOUT_FIRST_LAYOUTGET,
-                                   TASK_UNINTERRUPTIBLE);
+                       lseg = ERR_PTR(wait_on_bit(&lo->plh_flags,
+                                               NFS_LAYOUT_FIRST_LAYOUTGET,
+                                               TASK_KILLABLE));
+                       if (IS_ERR(lseg))
+                               goto out_put_layout_hdr;
                        pnfs_put_layout_hdr(lo);
                        dprintk("%s retrying\n", __func__);
                        goto lookup_again;
@@ -1925,7 +1930,8 @@ lookup_again:
        if (test_bit(NFS_LAYOUT_RETURN, &lo->plh_flags)) {
                spin_unlock(&ino->i_lock);
                dprintk("%s wait for layoutreturn\n", __func__);
-               if (pnfs_prepare_to_retry_layoutget(lo)) {
+               lseg = ERR_PTR(pnfs_prepare_to_retry_layoutget(lo));
+               if (!IS_ERR(lseg)) {
                        if (first)
                                pnfs_clear_first_layoutget(lo);
                        pnfs_put_layout_hdr(lo);
index 03b8ba9..235b959 100644 (file)
@@ -1,18 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  * alloc.c - NILFS dat/inode allocator
  *
  * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation.
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
  * Originally written by Koji Sato.
  * Two allocators were unified by Ryusuke Konishi and Amagai Yoshiji.
  */
index 05149e6..0303c39 100644 (file)
@@ -1,18 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
 /*
  * alloc.h - persistent object (dat entry/disk inode) allocator/deallocator
  *
  * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation.
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
  * Originally written by Koji Sato.
  * Two allocators were unified by Ryusuke Konishi and Amagai Yoshiji.
  */
index 01fb183..fb5a9a8 100644 (file)
@@ -1,18 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  * bmap.c - NILFS block mapping.
  *
  * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation.
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
  * Written by Koji Sato.
  */
 
index 2b6ffbe..2c63858 100644 (file)
@@ -1,18 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
 /*
  * bmap.h - NILFS block mapping.
  *
  * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation.
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
  * Written by Koji Sato.
  */
 
index dec98ca..ebb24a3 100644 (file)
@@ -1,18 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  * btnode.c - NILFS B-tree node cache
  *
  * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
  * Originally written by Seiji Kihara.
  * Fully revised by Ryusuke Konishi for stabilization and simplification.
  *
index 4e8aaa1..0f88dbc 100644 (file)
@@ -1,18 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
 /*
  * btnode.h - NILFS B-tree node cache
  *
  * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
  * Written by Seiji Kihara.
  * Revised by Ryusuke Konishi.
  */
index 16a7a67..23e043e 100644 (file)
@@ -1,18 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  * btree.c - NILFS B-tree.
  *
  * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
  * Written by Koji Sato.
  */
 
index 2184e47..d1421b6 100644 (file)
@@ -1,18 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
 /*
  * btree.h - NILFS B-tree.
  *
  * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
  * Written by Koji Sato.
  */
 
index a15a160..8d41311 100644 (file)
@@ -1,18 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  * cpfile.c - NILFS checkpoint file.
  *
  * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation.
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
  * Written by Koji Sato.
  */
 
index 6eca972..6336222 100644 (file)
@@ -1,18 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
 /*
  * cpfile.h - NILFS checkpoint file.
  *
  * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation.
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
  * Written by Koji Sato.
  */
 
index dffedb2..6f40666 100644 (file)
@@ -1,18 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  * dat.c - NILFS disk address translation.
  *
  * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation.
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
  * Written by Koji Sato.
  */
 
index 57dc6cf..b17ee34 100644 (file)
@@ -1,18 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
 /*
  * dat.h - NILFS disk address translation.
  *
  * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation.
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
  * Written by Koji Sato.
  */
 
index 582831a..81394e2 100644 (file)
@@ -1,18 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  * dir.c - NILFS directory entry operations
  *
  * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
  * Modified for NILFS by Amagai Yoshiji.
  */
 /*
index 96e3ed0..533e24e 100644 (file)
@@ -1,18 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  * direct.c - NILFS direct block pointer.
  *
  * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation.
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
  * Written by Koji Sato.
  */
 
index cfe85e8..ec9a23c 100644 (file)
@@ -1,18 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
 /*
  * direct.h - NILFS direct block pointer.
  *
  * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation.
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
  * Written by Koji Sato.
  */
 
index 7da0fac..64bc813 100644 (file)
@@ -1,18 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  * file.c - NILFS regular file handling primitives including fsync().
  *
  * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
  * Written by Amagai Yoshiji and Ryusuke Konishi.
  */
 
index 853a831..aa3c328 100644 (file)
@@ -1,18 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  * gcinode.c - dummy inodes to buffer blocks for garbage collection
  *
  * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
  * Written by Seiji Kihara, Amagai Yoshiji, and Ryusuke Konishi.
  * Revised by Ryusuke Konishi.
  *
index b8fa45c..4140d23 100644 (file)
@@ -1,18 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  * ifile.c - NILFS inode file
  *
  * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation.
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
  * Written by Amagai Yoshiji.
  * Revised by Ryusuke Konishi.
  *
index 188b94f..a1e1e57 100644 (file)
@@ -1,18 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
 /*
  * ifile.h - NILFS inode file
  *
  * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation.
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
  * Written by Amagai Yoshiji.
  * Revised by Ryusuke Konishi.
  *
index 6a612d8..6710855 100644 (file)
@@ -1,18 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  * inode.c - NILFS inode operations.
  *
  * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
  * Written by Ryusuke Konishi.
  *
  */
index 1d2c3d7..9b96d79 100644 (file)
@@ -1,18 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  * ioctl.c - NILFS ioctl operations.
  *
  * Copyright (C) 2007, 2008 Nippon Telegraph and Telephone Corporation.
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
  * Written by Koji Sato.
  */
 
index c6bc103..700870a 100644 (file)
@@ -1,18 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  * mdt.c - meta data file for NILFS
  *
  * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
  * Written by Ryusuke Konishi.
  */
 
index 3f67f39..e77aea4 100644 (file)
@@ -1,18 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
 /*
  * mdt.h - NILFS meta data file prototype and definitions
  *
  * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
  * Written by Ryusuke Konishi.
  */
 
index dd52d3f..9fe6d4a 100644 (file)
@@ -1,18 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  * namei.c - NILFS pathname lookup operations.
  *
  * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
  * Modified for NILFS by Amagai Yoshiji and Ryusuke Konishi.
  */
 /*
index 33f8c8f..a2f247b 100644 (file)
@@ -1,18 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
 /*
  * nilfs.h - NILFS local header file.
  *
  * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
  * Written by Koji Sato and Ryusuke Konishi.
  */
 
index 4cb850a..329a056 100644 (file)
@@ -1,18 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  * page.c - buffer/page management specific to NILFS
  *
  * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
  * Written by Ryusuke Konishi and Seiji Kihara.
  */
 
index f3687c9..62b9bb4 100644 (file)
@@ -1,18 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
 /*
  * page.h - buffer/page management specific to NILFS
  *
  * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
  * Written by Ryusuke Konishi and Seiji Kihara.
  */
 
index 5139efe..140b663 100644 (file)
@@ -1,18 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  * recovery.c - NILFS recovery logic
  *
  * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
  * Written by Ryusuke Konishi.
  */
 
index 68cb9e4..20c479b 100644 (file)
@@ -1,18 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  * segbuf.c - NILFS segment buffer
  *
  * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
  * Written by Ryusuke Konishi.
  *
  */
index 10e1693..9bea1bd 100644 (file)
@@ -1,18 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
 /*
  * segbuf.h - NILFS Segment buffer prototypes and definitions
  *
  * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
  * Written by Ryusuke Konishi.
  *
  */
index 0953635..445eef4 100644 (file)
@@ -1,18 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  * segment.c - NILFS segment constructor.
  *
  * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
  * Written by Ryusuke Konishi.
  *
  */
index 04634e3..f5cf530 100644 (file)
@@ -1,18 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
 /*
  * segment.h - NILFS Segment constructor prototypes and definitions
  *
  * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
  * Written by Ryusuke Konishi.
  *
  */
index c7fa139..bf3f8f0 100644 (file)
@@ -1,18 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  * sufile.c - NILFS segment usage file.
  *
  * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation.
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
  * Written by Koji Sato.
  * Revised by Ryusuke Konishi.
  */
index 673a891..c4e2c7a 100644 (file)
@@ -1,18 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
 /*
  * sufile.h - NILFS segment usage file.
  *
  * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation.
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
  * Written by Koji Sato.
  */
 
index 1b9067c..26290aa 100644 (file)
@@ -1,18 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  * super.c - NILFS module and super block management.
  *
  * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
  * Written by Ryusuke Konishi.
  */
 /*
index 4b25837..e60be7b 100644 (file)
@@ -1,19 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  * sysfs.c - sysfs support implementation.
  *
  * Copyright (C) 2005-2014 Nippon Telegraph and Telephone Corporation.
  * Copyright (C) 2014 HGST, Inc., a Western Digital Company.
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
  * Written by Vyacheslav Dubeyko <Vyacheslav.Dubeyko@hgst.com>
  */
 
index 648cedf..d001eb8 100644 (file)
@@ -1,19 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
 /*
  * sysfs.h - sysfs support declarations.
  *
  * Copyright (C) 2005-2014 Nippon Telegraph and Telephone Corporation.
  * Copyright (C) 2014 HGST, Inc., a Western Digital Company.
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
  * Written by Vyacheslav Dubeyko <Vyacheslav.Dubeyko@hgst.com>
  */
 
index 1a85317..484785c 100644 (file)
@@ -1,18 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  * the_nilfs.c - the_nilfs shared structure.
  *
  * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
  * Written by Ryusuke Konishi.
  *
  */
index 36da177..380a543 100644 (file)
@@ -1,18 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
 /*
  * the_nilfs.h - the_nilfs shared structure.
  *
  * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
  * Written by Ryusuke Konishi.
  *
  */
index f174397..ababdbf 100644 (file)
@@ -351,16 +351,9 @@ int fsnotify(struct inode *to_tell, __u32 mask, const void *data, int data_is,
 
        iter_info.srcu_idx = srcu_read_lock(&fsnotify_mark_srcu);
 
-       if ((mask & FS_MODIFY) ||
-           (test_mask & to_tell->i_fsnotify_mask)) {
-               iter_info.marks[FSNOTIFY_OBJ_TYPE_INODE] =
-                       fsnotify_first_mark(&to_tell->i_fsnotify_marks);
-       }
-
-       if (mnt && ((mask & FS_MODIFY) ||
-                   (test_mask & mnt->mnt_fsnotify_mask))) {
-               iter_info.marks[FSNOTIFY_OBJ_TYPE_INODE] =
-                       fsnotify_first_mark(&to_tell->i_fsnotify_marks);
+       iter_info.marks[FSNOTIFY_OBJ_TYPE_INODE] =
+               fsnotify_first_mark(&to_tell->i_fsnotify_marks);
+       if (mnt) {
                iter_info.marks[FSNOTIFY_OBJ_TYPE_VFSMOUNT] =
                        fsnotify_first_mark(&mnt->mnt_fsnotify_marks);
        }
index 32e9282..aeaefd2 100644 (file)
@@ -131,9 +131,6 @@ static int ovl_open(struct inode *inode, struct file *file)
        if (IS_ERR(realfile))
                return PTR_ERR(realfile);
 
-       /* For O_DIRECT dentry_open() checks f_mapping->a_ops->direct_IO */
-       file->f_mapping = realfile->f_mapping;
-
        file->private_data = realfile;
 
        return 0;
@@ -334,6 +331,25 @@ static long ovl_fallocate(struct file *file, int mode, loff_t offset, loff_t len
        return ret;
 }
 
+static int ovl_fadvise(struct file *file, loff_t offset, loff_t len, int advice)
+{
+       struct fd real;
+       const struct cred *old_cred;
+       int ret;
+
+       ret = ovl_real_fdget(file, &real);
+       if (ret)
+               return ret;
+
+       old_cred = ovl_override_creds(file_inode(file)->i_sb);
+       ret = vfs_fadvise(real.file, offset, len, advice);
+       revert_creds(old_cred);
+
+       fdput(real);
+
+       return ret;
+}
+
 static long ovl_real_ioctl(struct file *file, unsigned int cmd,
                           unsigned long arg)
 {
@@ -502,6 +518,7 @@ const struct file_operations ovl_file_operations = {
        .fsync          = ovl_fsync,
        .mmap           = ovl_mmap,
        .fallocate      = ovl_fallocate,
+       .fadvise        = ovl_fadvise,
        .unlocked_ioctl = ovl_ioctl,
        .compat_ioctl   = ovl_compat_ioctl,
 
index e0bb217..b6ac545 100644 (file)
@@ -467,6 +467,10 @@ static int ovl_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
                return -EOPNOTSUPP;
 
        old_cred = ovl_override_creds(inode->i_sb);
+
+       if (fieinfo->fi_flags & FIEMAP_FLAG_SYNC)
+               filemap_write_and_wait(realinode->i_mapping);
+
        err = realinode->i_op->fiemap(realinode, fieinfo, start, len);
        revert_creds(old_cred);
 
@@ -500,6 +504,11 @@ static const struct inode_operations ovl_special_inode_operations = {
        .update_time    = ovl_update_time,
 };
 
+const struct address_space_operations ovl_aops = {
+       /* For O_DIRECT dentry_open() checks f_mapping->a_ops->direct_IO */
+       .direct_IO              = noop_direct_IO,
+};
+
 /*
  * It is possible to stack overlayfs instance on top of another
  * overlayfs instance as lower layer. We need to annonate the
@@ -571,6 +580,7 @@ static void ovl_fill_inode(struct inode *inode, umode_t mode, dev_t rdev,
        case S_IFREG:
                inode->i_op = &ovl_file_inode_operations;
                inode->i_fop = &ovl_file_operations;
+               inode->i_mapping->a_ops = &ovl_aops;
                break;
 
        case S_IFDIR:
index 2e0fc93..30adc9d 100644 (file)
@@ -982,16 +982,6 @@ static int ovl_get_upper(struct ovl_fs *ofs, struct path *upperpath)
        if (err)
                goto out;
 
-       err = -EBUSY;
-       if (ovl_inuse_trylock(upperpath->dentry)) {
-               ofs->upperdir_locked = true;
-       } else if (ofs->config.index) {
-               pr_err("overlayfs: upperdir is in-use by another mount, mount with '-o index=off' to override exclusive upperdir protection.\n");
-               goto out;
-       } else {
-               pr_warn("overlayfs: upperdir is in-use by another mount, accessing files from both mounts will result in undefined behavior.\n");
-       }
-
        upper_mnt = clone_private_mount(upperpath);
        err = PTR_ERR(upper_mnt);
        if (IS_ERR(upper_mnt)) {
@@ -1002,6 +992,17 @@ static int ovl_get_upper(struct ovl_fs *ofs, struct path *upperpath)
        /* Don't inherit atime flags */
        upper_mnt->mnt_flags &= ~(MNT_NOATIME | MNT_NODIRATIME | MNT_RELATIME);
        ofs->upper_mnt = upper_mnt;
+
+       err = -EBUSY;
+       if (ovl_inuse_trylock(ofs->upper_mnt->mnt_root)) {
+               ofs->upperdir_locked = true;
+       } else if (ofs->config.index) {
+               pr_err("overlayfs: upperdir is in-use by another mount, mount with '-o index=off' to override exclusive upperdir protection.\n");
+               goto out;
+       } else {
+               pr_warn("overlayfs: upperdir is in-use by another mount, accessing files from both mounts will result in undefined behavior.\n");
+       }
+
        err = 0;
 out:
        return err;
@@ -1101,8 +1102,10 @@ static int ovl_get_workdir(struct ovl_fs *ofs, struct path *upperpath)
                goto out;
        }
 
+       ofs->workbasedir = dget(workpath.dentry);
+
        err = -EBUSY;
-       if (ovl_inuse_trylock(workpath.dentry)) {
+       if (ovl_inuse_trylock(ofs->workbasedir)) {
                ofs->workdir_locked = true;
        } else if (ofs->config.index) {
                pr_err("overlayfs: workdir is in-use by another mount, mount with '-o index=off' to override exclusive workdir protection.\n");
@@ -1111,7 +1114,6 @@ static int ovl_get_workdir(struct ovl_fs *ofs, struct path *upperpath)
                pr_warn("overlayfs: workdir is in-use by another mount, accessing files from both mounts will result in undefined behavior.\n");
        }
 
-       ofs->workbasedir = dget(workpath.dentry);
        err = ovl_make_workdir(ofs, &workpath);
        if (err)
                goto out;
index 951a14e..0792595 100644 (file)
@@ -429,7 +429,12 @@ static void *persistent_ram_vmap(phys_addr_t start, size_t size,
        vaddr = vmap(pages, page_count, VM_MAP, prot);
        kfree(pages);
 
-       return vaddr;
+       /*
+        * Since vmap() uses page granularity, we must add the offset
+        * into the page here, to get the byte granularity address
+        * into the mapping to represent the actual "start" location.
+        */
+       return vaddr + offset_in_page(start);
 }
 
 static void *persistent_ram_iomap(phys_addr_t start, size_t size,
@@ -448,6 +453,11 @@ static void *persistent_ram_iomap(phys_addr_t start, size_t size,
        else
                va = ioremap_wc(start, size);
 
+       /*
+        * Since request_mem_region() and ioremap() are byte-granularity
+        * there is no need handle anything special like we do when the
+        * vmap() case in persistent_ram_vmap() above.
+        */
        return va;
 }
 
@@ -468,7 +478,7 @@ static int persistent_ram_buffer_map(phys_addr_t start, phys_addr_t size,
                return -ENOMEM;
        }
 
-       prz->buffer = prz->vaddr + offset_in_page(start);
+       prz->buffer = prz->vaddr;
        prz->buffer_size = size - sizeof(struct persistent_ram_buffer);
 
        return 0;
@@ -515,7 +525,8 @@ void persistent_ram_free(struct persistent_ram_zone *prz)
 
        if (prz->vaddr) {
                if (pfn_valid(prz->paddr >> PAGE_SHIFT)) {
-                       vunmap(prz->vaddr);
+                       /* We must vunmap() at page-granularity. */
+                       vunmap(prz->vaddr - offset_in_page(prz->paddr));
                } else {
                        iounmap(prz->vaddr);
                        release_mem_region(prz->paddr, prz->size);
index 66d1d45..d356f80 100644 (file)
@@ -1026,7 +1026,8 @@ static inline void __iomem *ioremap_wt(phys_addr_t offset, size_t size)
 #define ioport_map ioport_map
 static inline void __iomem *ioport_map(unsigned long port, unsigned int nr)
 {
-       return PCI_IOBASE + (port & MMIO_UPPER_LIMIT);
+       port &= IO_SPACE_LIMIT;
+       return (port > MMIO_UPPER_LIMIT) ? NULL : PCI_IOBASE + port;
 }
 #endif
 
index 34aec30..6d766a1 100644 (file)
@@ -56,6 +56,7 @@ struct blkcg {
        struct list_head                all_blkcgs_node;
 #ifdef CONFIG_CGROUP_WRITEBACK
        struct list_head                cgwb_list;
+       refcount_t                      cgwb_refcnt;
 #endif
 };
 
@@ -89,7 +90,6 @@ struct blkg_policy_data {
        /* the blkg and policy id this per-policy data belongs to */
        struct blkcg_gq                 *blkg;
        int                             plid;
-       bool                            offline;
 };
 
 /*
@@ -387,6 +387,49 @@ static inline struct blkcg *cpd_to_blkcg(struct blkcg_policy_data *cpd)
        return cpd ? cpd->blkcg : NULL;
 }
 
+extern void blkcg_destroy_blkgs(struct blkcg *blkcg);
+
+#ifdef CONFIG_CGROUP_WRITEBACK
+
+/**
+ * blkcg_cgwb_get - get a reference for blkcg->cgwb_list
+ * @blkcg: blkcg of interest
+ *
+ * This is used to track the number of active wb's related to a blkcg.
+ */
+static inline void blkcg_cgwb_get(struct blkcg *blkcg)
+{
+       refcount_inc(&blkcg->cgwb_refcnt);
+}
+
+/**
+ * blkcg_cgwb_put - put a reference for @blkcg->cgwb_list
+ * @blkcg: blkcg of interest
+ *
+ * This is used to track the number of active wb's related to a blkcg.
+ * When this count goes to zero, all active wb has finished so the
+ * blkcg can continue destruction by calling blkcg_destroy_blkgs().
+ * This work may occur in cgwb_release_workfn() on the cgwb_release
+ * workqueue.
+ */
+static inline void blkcg_cgwb_put(struct blkcg *blkcg)
+{
+       if (refcount_dec_and_test(&blkcg->cgwb_refcnt))
+               blkcg_destroy_blkgs(blkcg);
+}
+
+#else
+
+static inline void blkcg_cgwb_get(struct blkcg *blkcg) { }
+
+static inline void blkcg_cgwb_put(struct blkcg *blkcg)
+{
+       /* wb isn't being accounted, so trigger destruction right away */
+       blkcg_destroy_blkgs(blkcg);
+}
+
+#endif
+
 /**
  * blkg_path - format cgroup path of blkg
  * @blkg: blkg of interest
index d6869e0..6980014 100644 (file)
@@ -54,7 +54,7 @@ struct blk_stat_callback;
  * Maximum number of blkcg policies allowed to be registered concurrently.
  * Defined here to simplify include dependency.
  */
-#define BLKCG_MAX_POLS         3
+#define BLKCG_MAX_POLS         5
 
 typedef void (rq_end_io_fn)(struct request *, blk_status_t);
 
index 3332270..6c0b4a1 100644 (file)
@@ -1763,6 +1763,7 @@ struct file_operations {
                        u64);
        int (*dedupe_file_range)(struct file *, loff_t, struct file *, loff_t,
                        u64);
+       int (*fadvise)(struct file *, loff_t, loff_t, int);
 } __randomize_layout;
 
 struct inode_operations {
@@ -3459,4 +3460,8 @@ static inline bool dir_relax_shared(struct inode *inode)
 extern bool path_noexec(const struct path *path);
 extern void inode_nohighmem(struct inode *inode);
 
+/* mm/fadvise.c */
+extern int vfs_fadvise(struct file *file, loff_t offset, loff_t len,
+                      int advice);
+
 #endif /* _LINUX_FS_H */
index 834e646..d44a783 100644 (file)
@@ -526,6 +526,7 @@ struct hid_input {
        const char *name;
        bool registered;
        struct list_head reports;       /* the list of reports */
+       unsigned int application;       /* application usage for this input */
 };
 
 enum hid_type {
index 7a45271..66d94b4 100644 (file)
@@ -362,8 +362,8 @@ struct mlx5_frag_buf {
 struct mlx5_frag_buf_ctrl {
        struct mlx5_frag_buf    frag_buf;
        u32                     sz_m1;
-       u32                     frag_sz_m1;
-       u32                     strides_offset;
+       u16                     frag_sz_m1;
+       u16                     strides_offset;
        u8                      log_sz;
        u8                      log_stride;
        u8                      log_frag_strides;
@@ -995,7 +995,7 @@ static inline u32 mlx5_base_mkey(const u32 key)
 }
 
 static inline void mlx5_fill_fbc_offset(u8 log_stride, u8 log_sz,
-                                       u32 strides_offset,
+                                       u16 strides_offset,
                                        struct mlx5_frag_buf_ctrl *fbc)
 {
        fbc->log_stride = log_stride;
@@ -1052,7 +1052,7 @@ int mlx5_cmd_free_uar(struct mlx5_core_dev *dev, u32 uarn);
 void mlx5_health_cleanup(struct mlx5_core_dev *dev);
 int mlx5_health_init(struct mlx5_core_dev *dev);
 void mlx5_start_health_poll(struct mlx5_core_dev *dev);
-void mlx5_stop_health_poll(struct mlx5_core_dev *dev);
+void mlx5_stop_health_poll(struct mlx5_core_dev *dev, bool disable_health);
 void mlx5_drain_health_wq(struct mlx5_core_dev *dev);
 void mlx5_trigger_health_work(struct mlx5_core_dev *dev);
 void mlx5_drain_health_recovery(struct mlx5_core_dev *dev);
index cd2bc93..5ed8f62 100644 (file)
@@ -341,7 +341,7 @@ struct mm_struct {
        struct {
                struct vm_area_struct *mmap;            /* list of VMAs */
                struct rb_root mm_rb;
-               u32 vmacache_seqnum;                   /* per-thread vmacache */
+               u64 vmacache_seqnum;                   /* per-thread vmacache */
 #ifdef CONFIG_MMU
                unsigned long (*get_unmapped_area) (struct file *filp,
                                unsigned long addr, unsigned long len,
index 5fe8768..d7016dc 100644 (file)
@@ -32,7 +32,7 @@
 #define VMACACHE_MASK (VMACACHE_SIZE - 1)
 
 struct vmacache {
-       u32 seqnum;
+       u64 seqnum;
        struct vm_area_struct *vmas[VMACACHE_SIZE];
 };
 
index 1298a7d..01797cb 100644 (file)
@@ -754,6 +754,7 @@ struct tb_service_id {
  * struct typec_device_id - USB Type-C alternate mode identifiers
  * @svid: Standard or Vendor ID
  * @mode: Mode index
+ * @driver_data: Driver specific data
  */
 struct typec_device_id {
        __u16 svid;
index e72ca8d..6925828 100644 (file)
@@ -1235,6 +1235,9 @@ void pci_bus_remove_resources(struct pci_bus *bus);
 int devm_request_pci_bus_resources(struct device *dev,
                                   struct list_head *resources);
 
+/* Temporary until new and working PCI SBR API in place */
+int pci_bridge_secondary_bus_reset(struct pci_dev *dev);
+
 #define pci_bus_for_each_resource(bus, res, i)                         \
        for (i = 0;                                                     \
            (res = pci_bus_resource_n(bus, i)) || i < PCI_BRIDGE_RESOURCE_NUM; \
index 99d366c..d157983 100644 (file)
 
 #define PCI_VENDOR_ID_OCZ              0x1b85
 
+#define PCI_VENDOR_ID_NCUBE            0x10ff
+
 #endif /* _LINUX_PCI_IDS_H */
index 5d73880..a5a3cfc 100644 (file)
@@ -258,8 +258,8 @@ extern void ktime_get_snapshot(struct system_time_snapshot *systime_snapshot);
 extern int persistent_clock_is_local;
 
 extern void read_persistent_clock64(struct timespec64 *ts);
-void read_persistent_clock_and_boot_offset(struct timespec64 *wall_clock,
-                                          struct timespec64 *boot_offset);
+void read_persistent_wall_and_boot_offset(struct timespec64 *wall_clock,
+                                         struct timespec64 *boot_offset);
 extern int update_persistent_clock64(struct timespec64 now);
 
 /*
index 7f2e16e..041f7e5 100644 (file)
@@ -158,8 +158,10 @@ extern void syscall_unregfunc(void);
                 * For rcuidle callers, use srcu since sched-rcu        \
                 * doesn't work from the idle path.                     \
                 */                                                     \
-               if (rcuidle)                                            \
+               if (rcuidle) {                                          \
                        idx = srcu_read_lock_notrace(&tracepoint_srcu); \
+                       rcu_irq_enter_irqson();                         \
+               }                                                       \
                                                                        \
                it_func_ptr = rcu_dereference_raw((tp)->funcs);         \
                                                                        \
@@ -171,8 +173,10 @@ extern void syscall_unregfunc(void);
                        } while ((++it_func_ptr)->func);                \
                }                                                       \
                                                                        \
-               if (rcuidle)                                            \
+               if (rcuidle) {                                          \
+                       rcu_irq_exit_irqson();                          \
                        srcu_read_unlock_notrace(&tracepoint_srcu, idx);\
+               }                                                       \
                                                                        \
                preempt_enable_notrace();                               \
        } while (0)
index 5c7f010..47a3441 100644 (file)
@@ -105,7 +105,6 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
 #ifdef CONFIG_DEBUG_VM_VMACACHE
                VMACACHE_FIND_CALLS,
                VMACACHE_FIND_HITS,
-               VMACACHE_FULL_FLUSHES,
 #endif
 #ifdef CONFIG_SWAP
                SWAP_RA,
index 3e9a963..6fce268 100644 (file)
@@ -10,7 +10,6 @@ static inline void vmacache_flush(struct task_struct *tsk)
        memset(tsk->vmacache.vmas, 0, sizeof(tsk->vmacache.vmas));
 }
 
-extern void vmacache_flush_all(struct mm_struct *mm);
 extern void vmacache_update(unsigned long addr, struct vm_area_struct *newvma);
 extern struct vm_area_struct *vmacache_find(struct mm_struct *mm,
                                                    unsigned long addr);
@@ -24,10 +23,6 @@ extern struct vm_area_struct *vmacache_find_exact(struct mm_struct *mm,
 static inline void vmacache_invalidate(struct mm_struct *mm)
 {
        mm->vmacache_seqnum++;
-
-       /* deal with overflows */
-       if (unlikely(mm->vmacache_seqnum == 0))
-               vmacache_flush_all(mm);
 }
 
 #endif /* __LINUX_VMACACHE_H */
index 9a85097..8ebabc9 100644 (file)
@@ -4865,8 +4865,8 @@ const char *reg_initiator_name(enum nl80211_reg_initiator initiator);
  *
  * Return: 0 on success. -ENODATA.
  */
-int reg_query_regdb_wmm(char *alpha2, int freq, u32 *ptr,
-                       struct ieee80211_wmm_rule *rule);
+int reg_query_regdb_wmm(char *alpha2, int freq,
+                       struct ieee80211_reg_rule *rule);
 
 /*
  * callbacks for asynchronous cfg80211 methods, notification
index d5f62cc..3394d75 100644 (file)
@@ -30,7 +30,7 @@ struct nf_conn_timeout {
 };
 
 static inline unsigned int *
-nf_ct_timeout_data(struct nf_conn_timeout *t)
+nf_ct_timeout_data(const struct nf_conn_timeout *t)
 {
        struct nf_ct_timeout *timeout;
 
index 60f8cc8..3469750 100644 (file)
@@ -217,15 +217,15 @@ struct ieee80211_wmm_rule {
 struct ieee80211_reg_rule {
        struct ieee80211_freq_range freq_range;
        struct ieee80211_power_rule power_rule;
-       struct ieee80211_wmm_rule *wmm_rule;
+       struct ieee80211_wmm_rule wmm_rule;
        u32 flags;
        u32 dfs_cac_ms;
+       bool has_wmm;
 };
 
 struct ieee80211_regdomain {
        struct rcu_head rcu_head;
        u32 n_reg_rules;
-       u32 n_wmm_rules;
        char alpha2[3];
        enum nl80211_dfs_regions dfs_region;
        struct ieee80211_reg_rule reg_rules[];
index 7b8c9e1..910cc43 100644 (file)
@@ -65,7 +65,7 @@
 
 /* keyctl structures */
 struct keyctl_dh_params {
-       __s32 private;
+       __s32 dh_private;
        __s32 prime;
        __s32 base;
 };
index eeb787b..f35eb72 100644 (file)
@@ -144,7 +144,7 @@ enum perf_event_sample_format {
 
        PERF_SAMPLE_MAX = 1U << 20,             /* non-ABI */
 
-       __PERF_SAMPLE_CALLCHAIN_EARLY           = 1ULL << 63,
+       __PERF_SAMPLE_CALLCHAIN_EARLY           = 1ULL << 63, /* non-ABI; internal use */
 };
 
 /*
index dc520e1..8b73cb6 100644 (file)
@@ -37,6 +37,7 @@
 
 #include <linux/types.h>
 #include <linux/socket.h>              /* For __kernel_sockaddr_storage. */
+#include <linux/in6.h>                 /* For struct in6_addr. */
 
 #define RDS_IB_ABI_VERSION             0x301
 
index b1e22c4..84c3de8 100644 (file)
@@ -176,7 +176,7 @@ struct vhost_memory {
 #define VHOST_BACKEND_F_IOTLB_MSG_V2 0x1
 
 #define VHOST_SET_BACKEND_FEATURES _IOW(VHOST_VIRTIO, 0x25, __u64)
-#define VHOST_GET_BACKEND_FEATURES _IOW(VHOST_VIRTIO, 0x26, __u64)
+#define VHOST_GET_BACKEND_FEATURES _IOR(VHOST_VIRTIO, 0x26, __u64)
 
 /* VHOST_NET specific defines */
 
index 80b52b4..a2ab516 100644 (file)
 
 #include <xen/page.h>
 
+extern bool xen_scrub_pages;
+
 static inline void xenmem_reservation_scrub_page(struct page *page)
 {
-#ifdef CONFIG_XEN_SCRUB_PAGES
-       clear_highpage(page);
-#endif
+       if (xen_scrub_pages)
+               clear_highpage(page);
 }
 
 #ifdef CONFIG_XEN_HAVE_PVMMU
index b0eb375..4cd402e 100644 (file)
--- a/ipc/shm.c
+++ b/ipc/shm.c
@@ -199,6 +199,7 @@ static inline struct shmid_kernel *shm_lock(struct ipc_namespace *ns, int id)
        }
 
        ipc_unlock_object(ipcp);
+       ipcp = ERR_PTR(-EIDRM);
 err:
        rcu_read_unlock();
        /*
index cf5195c..488ef96 100644 (file)
@@ -236,7 +236,7 @@ static int bpf_tcp_init(struct sock *sk)
 }
 
 static void smap_release_sock(struct smap_psock *psock, struct sock *sock);
-static int free_start_sg(struct sock *sk, struct sk_msg_buff *md);
+static int free_start_sg(struct sock *sk, struct sk_msg_buff *md, bool charge);
 
 static void bpf_tcp_release(struct sock *sk)
 {
@@ -248,7 +248,7 @@ static void bpf_tcp_release(struct sock *sk)
                goto out;
 
        if (psock->cork) {
-               free_start_sg(psock->sock, psock->cork);
+               free_start_sg(psock->sock, psock->cork, true);
                kfree(psock->cork);
                psock->cork = NULL;
        }
@@ -330,14 +330,14 @@ static void bpf_tcp_close(struct sock *sk, long timeout)
        close_fun = psock->save_close;
 
        if (psock->cork) {
-               free_start_sg(psock->sock, psock->cork);
+               free_start_sg(psock->sock, psock->cork, true);
                kfree(psock->cork);
                psock->cork = NULL;
        }
 
        list_for_each_entry_safe(md, mtmp, &psock->ingress, list) {
                list_del(&md->list);
-               free_start_sg(psock->sock, md);
+               free_start_sg(psock->sock, md, true);
                kfree(md);
        }
 
@@ -369,7 +369,7 @@ static void bpf_tcp_close(struct sock *sk, long timeout)
                        /* If another thread deleted this object skip deletion.
                         * The refcnt on psock may or may not be zero.
                         */
-                       if (l) {
+                       if (l && l == link) {
                                hlist_del_rcu(&link->hash_node);
                                smap_release_sock(psock, link->sk);
                                free_htab_elem(htab, link);
@@ -570,14 +570,16 @@ static void free_bytes_sg(struct sock *sk, int bytes,
        md->sg_start = i;
 }
 
-static int free_sg(struct sock *sk, int start, struct sk_msg_buff *md)
+static int free_sg(struct sock *sk, int start,
+                  struct sk_msg_buff *md, bool charge)
 {
        struct scatterlist *sg = md->sg_data;
        int i = start, free = 0;
 
        while (sg[i].length) {
                free += sg[i].length;
-               sk_mem_uncharge(sk, sg[i].length);
+               if (charge)
+                       sk_mem_uncharge(sk, sg[i].length);
                if (!md->skb)
                        put_page(sg_page(&sg[i]));
                sg[i].length = 0;
@@ -594,9 +596,9 @@ static int free_sg(struct sock *sk, int start, struct sk_msg_buff *md)
        return free;
 }
 
-static int free_start_sg(struct sock *sk, struct sk_msg_buff *md)
+static int free_start_sg(struct sock *sk, struct sk_msg_buff *md, bool charge)
 {
-       int free = free_sg(sk, md->sg_start, md);
+       int free = free_sg(sk, md->sg_start, md, charge);
 
        md->sg_start = md->sg_end;
        return free;
@@ -604,7 +606,7 @@ static int free_start_sg(struct sock *sk, struct sk_msg_buff *md)
 
 static int free_curr_sg(struct sock *sk, struct sk_msg_buff *md)
 {
-       return free_sg(sk, md->sg_curr, md);
+       return free_sg(sk, md->sg_curr, md, true);
 }
 
 static int bpf_map_msg_verdict(int _rc, struct sk_msg_buff *md)
@@ -718,7 +720,7 @@ static int bpf_tcp_ingress(struct sock *sk, int apply_bytes,
                list_add_tail(&r->list, &psock->ingress);
                sk->sk_data_ready(sk);
        } else {
-               free_start_sg(sk, r);
+               free_start_sg(sk, r, true);
                kfree(r);
        }
 
@@ -752,14 +754,10 @@ static int bpf_tcp_sendmsg_do_redirect(struct sock *sk, int send,
                release_sock(sk);
        }
        smap_release_sock(psock, sk);
-       if (unlikely(err))
-               goto out;
-       return 0;
+       return err;
 out_rcu:
        rcu_read_unlock();
-out:
-       free_bytes_sg(NULL, send, md, false);
-       return err;
+       return 0;
 }
 
 static inline void bpf_md_init(struct smap_psock *psock)
@@ -822,7 +820,7 @@ more_data:
        case __SK_PASS:
                err = bpf_tcp_push(sk, send, m, flags, true);
                if (unlikely(err)) {
-                       *copied -= free_start_sg(sk, m);
+                       *copied -= free_start_sg(sk, m, true);
                        break;
                }
 
@@ -845,16 +843,17 @@ more_data:
                lock_sock(sk);
 
                if (unlikely(err < 0)) {
-                       free_start_sg(sk, m);
+                       int free = free_start_sg(sk, m, false);
+
                        psock->sg_size = 0;
                        if (!cork)
-                               *copied -= send;
+                               *copied -= free;
                } else {
                        psock->sg_size -= send;
                }
 
                if (cork) {
-                       free_start_sg(sk, m);
+                       free_start_sg(sk, m, true);
                        psock->sg_size = 0;
                        kfree(m);
                        m = NULL;
@@ -912,6 +911,8 @@ static int bpf_tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
 
        if (unlikely(flags & MSG_ERRQUEUE))
                return inet_recv_error(sk, msg, len, addr_len);
+       if (!skb_queue_empty(&sk->sk_receive_queue))
+               return tcp_recvmsg(sk, msg, len, nonblock, flags, addr_len);
 
        rcu_read_lock();
        psock = smap_psock_sk(sk);
@@ -922,9 +923,6 @@ static int bpf_tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
                goto out;
        rcu_read_unlock();
 
-       if (!skb_queue_empty(&sk->sk_receive_queue))
-               return tcp_recvmsg(sk, msg, len, nonblock, flags, addr_len);
-
        lock_sock(sk);
 bytes_ready:
        while (copied != len) {
@@ -1122,7 +1120,7 @@ wait_for_memory:
                err = sk_stream_wait_memory(sk, &timeo);
                if (err) {
                        if (m && m != psock->cork)
-                               free_start_sg(sk, m);
+                               free_start_sg(sk, m, true);
                        goto out_err;
                }
        }
@@ -1464,10 +1462,16 @@ static void smap_destroy_psock(struct rcu_head *rcu)
        schedule_work(&psock->gc_work);
 }
 
+static bool psock_is_smap_sk(struct sock *sk)
+{
+       return inet_csk(sk)->icsk_ulp_ops == &bpf_tcp_ulp_ops;
+}
+
 static void smap_release_sock(struct smap_psock *psock, struct sock *sock)
 {
        if (refcount_dec_and_test(&psock->refcnt)) {
-               tcp_cleanup_ulp(sock);
+               if (psock_is_smap_sk(sock))
+                       tcp_cleanup_ulp(sock);
                write_lock_bh(&sock->sk_callback_lock);
                smap_stop_sock(psock, sock);
                write_unlock_bh(&sock->sk_callback_lock);
@@ -1581,13 +1585,13 @@ static void smap_gc_work(struct work_struct *w)
                bpf_prog_put(psock->bpf_tx_msg);
 
        if (psock->cork) {
-               free_start_sg(psock->sock, psock->cork);
+               free_start_sg(psock->sock, psock->cork, true);
                kfree(psock->cork);
        }
 
        list_for_each_entry_safe(md, mtmp, &psock->ingress, list) {
                list_del(&md->list);
-               free_start_sg(psock->sock, md);
+               free_start_sg(psock->sock, md, true);
                kfree(md);
        }
 
@@ -1894,6 +1898,10 @@ static int __sock_map_ctx_update_elem(struct bpf_map *map,
         * doesn't update user data.
         */
        if (psock) {
+               if (!psock_is_smap_sk(sock)) {
+                       err = -EBUSY;
+                       goto out_progs;
+               }
                if (READ_ONCE(psock->bpf_parse) && parse) {
                        err = -EBUSY;
                        goto out_progs;
index aa7fe85..0097ace 100644 (file)
@@ -607,15 +607,15 @@ static void cpuhp_thread_fun(unsigned int cpu)
        bool bringup = st->bringup;
        enum cpuhp_state state;
 
+       if (WARN_ON_ONCE(!st->should_run))
+               return;
+
        /*
         * ACQUIRE for the cpuhp_should_run() load of ->should_run. Ensures
         * that if we see ->should_run we also see the rest of the state.
         */
        smp_mb();
 
-       if (WARN_ON_ONCE(!st->should_run))
-               return;
-
        cpuhp_lock_acquire(bringup);
 
        if (st->single) {
@@ -916,7 +916,8 @@ static int cpuhp_down_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
                ret = cpuhp_invoke_callback(cpu, st->state, false, NULL, NULL);
                if (ret) {
                        st->target = prev_state;
-                       undo_cpu_down(cpu, st);
+                       if (st->state < prev_state)
+                               undo_cpu_down(cpu, st);
                        break;
                }
        }
@@ -969,7 +970,7 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen,
         * to do the further cleanups.
         */
        ret = cpuhp_down_callbacks(cpu, st, target);
-       if (ret && st->state > CPUHP_TEARDOWN_CPU && st->state < prev_state) {
+       if (ret && st->state == CPUHP_TEARDOWN_CPU && st->state < prev_state) {
                cpuhp_reset_state(st, prev_state);
                __cpuhp_kick_ap(st);
        }
index 1c35b7b..de87b02 100644 (file)
@@ -168,7 +168,7 @@ int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
 int dma_direct_supported(struct device *dev, u64 mask)
 {
 #ifdef CONFIG_ZONE_DMA
-       if (mask < DMA_BIT_MASK(ARCH_ZONE_DMA_BITS))
+       if (mask < phys_to_dma(dev, DMA_BIT_MASK(ARCH_ZONE_DMA_BITS)))
                return 0;
 #else
        /*
@@ -177,7 +177,7 @@ int dma_direct_supported(struct device *dev, u64 mask)
         * memory, or by providing a ZONE_DMA32.  If neither is the case, the
         * architecture needs to use an IOMMU instead of the direct mapping.
         */
-       if (mask < DMA_BIT_MASK(32))
+       if (mask < phys_to_dma(dev, DMA_BIT_MASK(32)))
                return 0;
 #endif
        /*
index 2a62b96..c80549b 100644 (file)
@@ -2867,16 +2867,11 @@ static int perf_event_modify_breakpoint(struct perf_event *bp,
        _perf_event_disable(bp);
 
        err = modify_user_hw_breakpoint_check(bp, attr, true);
-       if (err) {
-               if (!bp->attr.disabled)
-                       _perf_event_enable(bp);
 
-               return err;
-       }
-
-       if (!attr->disabled)
+       if (!bp->attr.disabled)
                _perf_event_enable(bp);
-       return 0;
+
+       return err;
 }
 
 static int perf_event_modify_attr(struct perf_event *event,
@@ -5948,6 +5943,7 @@ perf_output_sample_ustack(struct perf_output_handle *handle, u64 dump_size,
                unsigned long sp;
                unsigned int rem;
                u64 dyn_size;
+               mm_segment_t fs;
 
                /*
                 * We dump:
@@ -5965,7 +5961,10 @@ perf_output_sample_ustack(struct perf_output_handle *handle, u64 dump_size,
 
                /* Data. */
                sp = perf_user_stack_pointer(regs);
+               fs = get_fs();
+               set_fs(USER_DS);
                rem = __output_copy_user(handle, (void *) sp, dump_size);
+               set_fs(fs);
                dyn_size = dump_size - rem;
 
                perf_output_skip(handle, rem);
index b3814fc..d6b5618 100644 (file)
@@ -509,6 +509,8 @@ modify_user_hw_breakpoint_check(struct perf_event *bp, struct perf_event_attr *a
  */
 int modify_user_hw_breakpoint(struct perf_event *bp, struct perf_event_attr *attr)
 {
+       int err;
+
        /*
         * modify_user_hw_breakpoint can be invoked with IRQs disabled and hence it
         * will not be possible to raise IPIs that invoke __perf_event_disable.
@@ -520,15 +522,12 @@ int modify_user_hw_breakpoint(struct perf_event *bp, struct perf_event_attr *att
        else
                perf_event_disable(bp);
 
-       if (!attr->disabled) {
-               int err = modify_user_hw_breakpoint_check(bp, attr, false);
+       err = modify_user_hw_breakpoint_check(bp, attr, false);
 
-               if (err)
-                       return err;
+       if (!bp->attr.disabled)
                perf_event_enable(bp);
-               bp->attr.disabled = 0;
-       }
-       return 0;
+
+       return err;
 }
 EXPORT_SYMBOL_GPL(modify_user_hw_breakpoint);
 
index d896e9c..f0b5847 100644 (file)
@@ -550,8 +550,7 @@ static __latent_entropy int dup_mmap(struct mm_struct *mm,
                        goto out;
        }
        /* a new mm has just been created */
-       arch_dup_mmap(oldmm, mm);
-       retval = 0;
+       retval = arch_dup_mmap(oldmm, mm);
 out:
        up_write(&mm->mmap_sem);
        flush_tlb_mm(oldmm);
index 01ebdf1..2e62503 100644 (file)
@@ -678,7 +678,7 @@ jump_label_module_notify(struct notifier_block *self, unsigned long val,
        case MODULE_STATE_COMING:
                ret = jump_label_add_module(mod);
                if (ret) {
-                       WARN(1, "Failed to allocatote memory: jump_label may not work properly.\n");
+                       WARN(1, "Failed to allocate memory: jump_label may not work properly.\n");
                        jump_label_del_module(mod);
                }
                break;
index e406c5f..dd13f86 100644 (file)
@@ -55,7 +55,6 @@
 
 #include "lockdep_internals.h"
 
-#include <trace/events/preemptirq.h>
 #define CREATE_TRACE_POINTS
 #include <trace/events/lock.h>
 
index 1a81a12..3f8a351 100644 (file)
@@ -389,7 +389,7 @@ static bool __ww_mutex_wound(struct mutex *lock,
                /*
                 * wake_up_process() paired with set_current_state()
                 * inserts sufficient barriers to make sure @owner either sees
-                * it's wounded in __ww_mutex_lock_check_stamp() or has a
+                * it's wounded in __ww_mutex_check_kill() or has a
                 * wakeup pending to re-read the wounded state.
                 */
                if (owner != current)
@@ -946,7 +946,6 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
        }
 
        debug_mutex_lock_common(lock, &waiter);
-       debug_mutex_add_waiter(lock, &waiter, current);
 
        lock_contended(&lock->dep_map, ip);
 
index 5b915b3..0be047d 100644 (file)
@@ -324,7 +324,7 @@ static int __test_cycle(unsigned int nthreads)
                if (!cycle->result)
                        continue;
 
-               pr_err("cylic deadlock not resolved, ret[%d/%d] = %d\n",
+               pr_err("cyclic deadlock not resolved, ret[%d/%d] = %d\n",
                       n, nthreads, cycle->result);
                ret = -EINVAL;
                break;
index fd6f8ed..9bf5404 100644 (file)
@@ -351,7 +351,6 @@ static int console_msg_format = MSG_FORMAT_DEFAULT;
  */
 
 enum log_flags {
-       LOG_NOCONS      = 1,    /* suppress print, do not print to console */
        LOG_NEWLINE     = 2,    /* text ended with a newline */
        LOG_PREFIX      = 4,    /* text started with a prefix */
        LOG_CONT        = 8,    /* text is a fragment of a continuation line */
@@ -1881,9 +1880,6 @@ int vprintk_store(int facility, int level,
        if (dict)
                lflags |= LOG_PREFIX|LOG_NEWLINE;
 
-       if (suppress_message_printing(level))
-               lflags |= LOG_NOCONS;
-
        return log_output(facility, level, lflags,
                          dict, dictlen, text, text_len);
 }
@@ -2032,6 +2028,7 @@ static void call_console_drivers(const char *ext_text, size_t ext_len,
                                 const char *text, size_t len) {}
 static size_t msg_print_text(const struct printk_log *msg,
                             bool syslog, char *buf, size_t size) { return 0; }
+static bool suppress_message_printing(int level) { return false; }
 
 #endif /* CONFIG_PRINTK */
 
@@ -2368,10 +2365,11 @@ skip:
                        break;
 
                msg = log_from_idx(console_idx);
-               if (msg->flags & LOG_NOCONS) {
+               if (suppress_message_printing(msg->level)) {
                        /*
-                        * Skip record if !ignore_loglevel, and
-                        * record has level above the console loglevel.
+                        * Skip record we have buffered and already printed
+                        * directly to the console when we received it, and
+                        * record that has level above the console loglevel.
                         */
                        console_idx = log_next(console_idx);
                        console_seq++;
index a0a74c5..0913b4d 100644 (file)
@@ -306,12 +306,12 @@ static __printf(1, 0) int vprintk_nmi(const char *fmt, va_list args)
        return printk_safe_log_store(s, fmt, args);
 }
 
-void printk_nmi_enter(void)
+void notrace printk_nmi_enter(void)
 {
        this_cpu_or(printk_context, PRINTK_NMI_CONTEXT_MASK);
 }
 
-void printk_nmi_exit(void)
+void notrace printk_nmi_exit(void)
 {
        this_cpu_and(printk_context, ~PRINTK_NMI_CONTEXT_MASK);
 }
index 60caf1f..6383aa6 100644 (file)
@@ -89,12 +89,12 @@ struct static_key sched_feat_keys[__SCHED_FEAT_NR] = {
 
 static void sched_feat_disable(int i)
 {
-       static_key_disable(&sched_feat_keys[i]);
+       static_key_disable_cpuslocked(&sched_feat_keys[i]);
 }
 
 static void sched_feat_enable(int i)
 {
-       static_key_enable(&sched_feat_keys[i]);
+       static_key_enable_cpuslocked(&sched_feat_keys[i]);
 }
 #else
 static void sched_feat_disable(int i) { };
@@ -146,9 +146,11 @@ sched_feat_write(struct file *filp, const char __user *ubuf,
 
        /* Ensure the static_key remains in a consistent state */
        inode = file_inode(filp);
+       cpus_read_lock();
        inode_lock(inode);
        ret = sched_feat_set(cmp);
        inode_unlock(inode);
+       cpus_read_unlock();
        if (ret < 0)
                return ret;
 
index b39fb59..f808ddf 100644 (file)
@@ -3362,6 +3362,7 @@ update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq)
  * attach_entity_load_avg - attach this entity to its cfs_rq load avg
  * @cfs_rq: cfs_rq to attach to
  * @se: sched_entity to attach
+ * @flags: migration hints
  *
  * Must call update_cfs_rq_load_avg() before this, since we rely on
  * cfs_rq->avg.last_update_time being current.
@@ -7263,6 +7264,7 @@ static void update_blocked_averages(int cpu)
 {
        struct rq *rq = cpu_rq(cpu);
        struct cfs_rq *cfs_rq, *pos;
+       const struct sched_class *curr_class;
        struct rq_flags rf;
        bool done = true;
 
@@ -7299,8 +7301,10 @@ static void update_blocked_averages(int cpu)
                if (cfs_rq_has_blocked(cfs_rq))
                        done = false;
        }
-       update_rt_rq_load_avg(rq_clock_task(rq), rq, 0);
-       update_dl_rq_load_avg(rq_clock_task(rq), rq, 0);
+
+       curr_class = rq->curr->sched_class;
+       update_rt_rq_load_avg(rq_clock_task(rq), rq, curr_class == &rt_sched_class);
+       update_dl_rq_load_avg(rq_clock_task(rq), rq, curr_class == &dl_sched_class);
        update_irq_load_avg(rq, 0);
        /* Don't need periodic decay once load/util_avg are null */
        if (others_have_blocked(rq))
@@ -7365,13 +7369,16 @@ static inline void update_blocked_averages(int cpu)
 {
        struct rq *rq = cpu_rq(cpu);
        struct cfs_rq *cfs_rq = &rq->cfs;
+       const struct sched_class *curr_class;
        struct rq_flags rf;
 
        rq_lock_irqsave(rq, &rf);
        update_rq_clock(rq);
        update_cfs_rq_load_avg(cfs_rq_clock_task(cfs_rq), cfs_rq);
-       update_rt_rq_load_avg(rq_clock_task(rq), rq, 0);
-       update_dl_rq_load_avg(rq_clock_task(rq), rq, 0);
+
+       curr_class = rq->curr->sched_class;
+       update_rt_rq_load_avg(rq_clock_task(rq), rq, curr_class == &rt_sched_class);
+       update_dl_rq_load_avg(rq_clock_task(rq), rq, curr_class == &dl_sched_class);
        update_irq_load_avg(rq, 0);
 #ifdef CONFIG_NO_HZ_COMMON
        rq->last_blocked_load_update_tick = jiffies;
@@ -7482,10 +7489,10 @@ static inline int get_sd_load_idx(struct sched_domain *sd,
        return load_idx;
 }
 
-static unsigned long scale_rt_capacity(int cpu)
+static unsigned long scale_rt_capacity(struct sched_domain *sd, int cpu)
 {
        struct rq *rq = cpu_rq(cpu);
-       unsigned long max = arch_scale_cpu_capacity(NULL, cpu);
+       unsigned long max = arch_scale_cpu_capacity(sd, cpu);
        unsigned long used, free;
        unsigned long irq;
 
@@ -7507,7 +7514,7 @@ static unsigned long scale_rt_capacity(int cpu)
 
 static void update_cpu_capacity(struct sched_domain *sd, int cpu)
 {
-       unsigned long capacity = scale_rt_capacity(cpu);
+       unsigned long capacity = scale_rt_capacity(sd, cpu);
        struct sched_group *sdg = sd->groups;
 
        cpu_rq(cpu)->cpu_capacity_orig = arch_scale_cpu_capacity(sd, cpu);
@@ -8269,7 +8276,7 @@ static struct sched_group *find_busiest_group(struct lb_env *env)
 force_balance:
        /* Looks like there is an imbalance. Compute it */
        calculate_imbalance(env, &sds);
-       return sds.busiest;
+       return env->imbalance ? sds.busiest : NULL;
 
 out_balanced:
        env->imbalance = 0;
@@ -9638,7 +9645,8 @@ static inline bool vruntime_normalized(struct task_struct *p)
         * - A task which has been woken up by try_to_wake_up() and
         *   waiting for actually being woken up by sched_ttwu_pending().
         */
-       if (!se->sum_exec_runtime || p->state == TASK_WAKING)
+       if (!se->sum_exec_runtime ||
+           (p->state == TASK_WAKING && p->sched_remote_wakeup))
                return true;
 
        return false;
index 56a0fed..505a41c 100644 (file)
@@ -1295,7 +1295,7 @@ static void init_numa_topology_type(void)
 
        n = sched_max_numa_distance;
 
-       if (sched_domains_numa_levels <= 1) {
+       if (sched_domains_numa_levels <= 2) {
                sched_numa_topology_type = NUMA_DIRECT;
                return;
        }
@@ -1380,9 +1380,6 @@ void sched_init_numa(void)
                        break;
        }
 
-       if (!level)
-               return;
-
        /*
         * 'level' contains the number of unique distances
         *
index f74fb00..0e6e97a 100644 (file)
@@ -133,19 +133,40 @@ static void inline clocksource_watchdog_unlock(unsigned long *flags)
        spin_unlock_irqrestore(&watchdog_lock, *flags);
 }
 
+static int clocksource_watchdog_kthread(void *data);
+static void __clocksource_change_rating(struct clocksource *cs, int rating);
+
 /*
  * Interval: 0.5sec Threshold: 0.0625s
  */
 #define WATCHDOG_INTERVAL (HZ >> 1)
 #define WATCHDOG_THRESHOLD (NSEC_PER_SEC >> 4)
 
+static void clocksource_watchdog_work(struct work_struct *work)
+{
+       /*
+        * We cannot directly run clocksource_watchdog_kthread() here, because
+        * clocksource_select() calls timekeeping_notify() which uses
+        * stop_machine(). One cannot use stop_machine() from a workqueue() due
+        * lock inversions wrt CPU hotplug.
+        *
+        * Also, we only ever run this work once or twice during the lifetime
+        * of the kernel, so there is no point in creating a more permanent
+        * kthread for this.
+        *
+        * If kthread_run fails the next watchdog scan over the
+        * watchdog_list will find the unstable clock again.
+        */
+       kthread_run(clocksource_watchdog_kthread, NULL, "kwatchdog");
+}
+
 static void __clocksource_unstable(struct clocksource *cs)
 {
        cs->flags &= ~(CLOCK_SOURCE_VALID_FOR_HRES | CLOCK_SOURCE_WATCHDOG);
        cs->flags |= CLOCK_SOURCE_UNSTABLE;
 
        /*
-        * If the clocksource is registered clocksource_watchdog_work() will
+        * If the clocksource is registered clocksource_watchdog_kthread() will
         * re-rate and re-select.
         */
        if (list_empty(&cs->list)) {
@@ -156,7 +177,7 @@ static void __clocksource_unstable(struct clocksource *cs)
        if (cs->mark_unstable)
                cs->mark_unstable(cs);
 
-       /* kick clocksource_watchdog_work() */
+       /* kick clocksource_watchdog_kthread() */
        if (finished_booting)
                schedule_work(&watchdog_work);
 }
@@ -166,7 +187,7 @@ static void __clocksource_unstable(struct clocksource *cs)
  * @cs:                clocksource to be marked unstable
  *
  * This function is called by the x86 TSC code to mark clocksources as unstable;
- * it defers demotion and re-selection to a work.
+ * it defers demotion and re-selection to a kthread.
  */
 void clocksource_mark_unstable(struct clocksource *cs)
 {
@@ -391,9 +412,7 @@ static void clocksource_dequeue_watchdog(struct clocksource *cs)
        }
 }
 
-static void __clocksource_change_rating(struct clocksource *cs, int rating);
-
-static int __clocksource_watchdog_work(void)
+static int __clocksource_watchdog_kthread(void)
 {
        struct clocksource *cs, *tmp;
        unsigned long flags;
@@ -418,12 +437,13 @@ static int __clocksource_watchdog_work(void)
        return select;
 }
 
-static void clocksource_watchdog_work(struct work_struct *work)
+static int clocksource_watchdog_kthread(void *data)
 {
        mutex_lock(&clocksource_mutex);
-       if (__clocksource_watchdog_work())
+       if (__clocksource_watchdog_kthread())
                clocksource_select();
        mutex_unlock(&clocksource_mutex);
+       return 0;
 }
 
 static bool clocksource_is_watchdog(struct clocksource *cs)
@@ -442,7 +462,7 @@ static void clocksource_enqueue_watchdog(struct clocksource *cs)
 static void clocksource_select_watchdog(bool fallback) { }
 static inline void clocksource_dequeue_watchdog(struct clocksource *cs) { }
 static inline void clocksource_resume_watchdog(void) { }
-static inline int __clocksource_watchdog_work(void) { return 0; }
+static inline int __clocksource_watchdog_kthread(void) { return 0; }
 static bool clocksource_is_watchdog(struct clocksource *cs) { return false; }
 void clocksource_mark_unstable(struct clocksource *cs) { }
 
@@ -810,7 +830,7 @@ static int __init clocksource_done_booting(void)
        /*
         * Run the watchdog first to eliminate unstable clock sources
         */
-       __clocksource_watchdog_work();
+       __clocksource_watchdog_kthread();
        clocksource_select();
        mutex_unlock(&clocksource_mutex);
        return 0;
index 6133167..4966c4f 100644 (file)
@@ -1277,13 +1277,13 @@ config WARN_ALL_UNSEEDED_RANDOM
          time.  This is really bad from a security perspective, and
          so architecture maintainers really need to do what they can
          to get the CRNG seeded sooner after the system is booted.
-         However, since users can not do anything actionble to
+         However, since users cannot do anything actionable to
          address this, by default the kernel will issue only a single
          warning for the first use of unseeded randomness.
 
          Say Y here if you want to receive warnings for all uses of
          unseeded randomness.  This will be of use primarily for
-         those developers interersted in improving the security of
+         those developers interested in improving the security of
          Linux kernels running on their architecture (or
          subarchitecture).
 
index 8716bda..26ef77a 100644 (file)
@@ -32,7 +32,7 @@ ifdef CONFIG_CROSS_MEMORY_ATTACH
 mmu-$(CONFIG_MMU)      += process_vm_access.o
 endif
 
-obj-y                  := filemap.o mempool.o oom_kill.o \
+obj-y                  := filemap.o mempool.o oom_kill.o fadvise.o \
                           maccess.o page_alloc.o page-writeback.o \
                           readahead.o swap.o truncate.o vmscan.o shmem.o \
                           util.o mmzone.o vmstat.o backing-dev.o \
@@ -49,7 +49,6 @@ else
        obj-y           += bootmem.o
 endif
 
-obj-$(CONFIG_ADVISE_SYSCALLS)  += fadvise.o
 ifdef CONFIG_MMU
        obj-$(CONFIG_ADVISE_SYSCALLS)   += madvise.o
 endif
index f5981e9..8a8bb87 100644 (file)
@@ -491,6 +491,7 @@ static void cgwb_release_workfn(struct work_struct *work)
 {
        struct bdi_writeback *wb = container_of(work, struct bdi_writeback,
                                                release_work);
+       struct blkcg *blkcg = css_to_blkcg(wb->blkcg_css);
 
        mutex_lock(&wb->bdi->cgwb_release_mutex);
        wb_shutdown(wb);
@@ -499,6 +500,9 @@ static void cgwb_release_workfn(struct work_struct *work)
        css_put(wb->blkcg_css);
        mutex_unlock(&wb->bdi->cgwb_release_mutex);
 
+       /* triggers blkg destruction if cgwb_refcnt becomes zero */
+       blkcg_cgwb_put(blkcg);
+
        fprop_local_destroy_percpu(&wb->memcg_completions);
        percpu_ref_exit(&wb->refcnt);
        wb_exit(wb);
@@ -597,6 +601,7 @@ static int cgwb_create(struct backing_dev_info *bdi,
                        list_add_tail_rcu(&wb->bdi_node, &bdi->wb_list);
                        list_add(&wb->memcg_node, memcg_cgwb_list);
                        list_add(&wb->blkcg_node, blkcg_cgwb_list);
+                       blkcg_cgwb_get(blkcg);
                        css_get(memcg_css);
                        css_get(blkcg_css);
                }
index 38c9265..bd10aad 100644 (file)
@@ -114,7 +114,7 @@ EXPORT_SYMBOL(dump_vma);
 
 void dump_mm(const struct mm_struct *mm)
 {
-       pr_emerg("mm %px mmap %px seqnum %d task_size %lu\n"
+       pr_emerg("mm %px mmap %px seqnum %llu task_size %lu\n"
 #ifdef CONFIG_MMU
                "get_unmapped_area %px\n"
 #endif
@@ -142,7 +142,7 @@ void dump_mm(const struct mm_struct *mm)
                "tlb_flush_pending %d\n"
                "def_flags: %#lx(%pGv)\n",
 
-               mm, mm->mmap, mm->vmacache_seqnum, mm->task_size,
+               mm, mm->mmap, (long long) mm->vmacache_seqnum, mm->task_size,
 #ifdef CONFIG_MMU
                mm->get_unmapped_area,
 #endif
index 2d8376e..467bcd0 100644 (file)
@@ -27,9 +27,9 @@
  * deactivate the pages and clear PG_Referenced.
  */
 
-int ksys_fadvise64_64(int fd, loff_t offset, loff_t len, int advice)
+static int generic_fadvise(struct file *file, loff_t offset, loff_t len,
+                          int advice)
 {
-       struct fd f = fdget(fd);
        struct inode *inode;
        struct address_space *mapping;
        struct backing_dev_info *bdi;
@@ -37,22 +37,14 @@ int ksys_fadvise64_64(int fd, loff_t offset, loff_t len, int advice)
        pgoff_t start_index;
        pgoff_t end_index;
        unsigned long nrpages;
-       int ret = 0;
-
-       if (!f.file)
-               return -EBADF;
 
-       inode = file_inode(f.file);
-       if (S_ISFIFO(inode->i_mode)) {
-               ret = -ESPIPE;
-               goto out;
-       }
+       inode = file_inode(file);
+       if (S_ISFIFO(inode->i_mode))
+               return -ESPIPE;
 
-       mapping = f.file->f_mapping;
-       if (!mapping || len < 0) {
-               ret = -EINVAL;
-               goto out;
-       }
+       mapping = file->f_mapping;
+       if (!mapping || len < 0)
+               return -EINVAL;
 
        bdi = inode_to_bdi(mapping->host);
 
@@ -67,9 +59,9 @@ int ksys_fadvise64_64(int fd, loff_t offset, loff_t len, int advice)
                        /* no bad return value, but ignore advice */
                        break;
                default:
-                       ret = -EINVAL;
+                       return -EINVAL;
                }
-               goto out;
+               return 0;
        }
 
        /*
@@ -85,21 +77,21 @@ int ksys_fadvise64_64(int fd, loff_t offset, loff_t len, int advice)
 
        switch (advice) {
        case POSIX_FADV_NORMAL:
-               f.file->f_ra.ra_pages = bdi->ra_pages;
-               spin_lock(&f.file->f_lock);
-               f.file->f_mode &= ~FMODE_RANDOM;
-               spin_unlock(&f.file->f_lock);
+               file->f_ra.ra_pages = bdi->ra_pages;
+               spin_lock(&file->f_lock);
+               file->f_mode &= ~FMODE_RANDOM;
+               spin_unlock(&file->f_lock);
                break;
        case POSIX_FADV_RANDOM:
-               spin_lock(&f.file->f_lock);
-               f.file->f_mode |= FMODE_RANDOM;
-               spin_unlock(&f.file->f_lock);
+               spin_lock(&file->f_lock);
+               file->f_mode |= FMODE_RANDOM;
+               spin_unlock(&file->f_lock);
                break;
        case POSIX_FADV_SEQUENTIAL:
-               f.file->f_ra.ra_pages = bdi->ra_pages * 2;
-               spin_lock(&f.file->f_lock);
-               f.file->f_mode &= ~FMODE_RANDOM;
-               spin_unlock(&f.file->f_lock);
+               file->f_ra.ra_pages = bdi->ra_pages * 2;
+               spin_lock(&file->f_lock);
+               file->f_mode &= ~FMODE_RANDOM;
+               spin_unlock(&file->f_lock);
                break;
        case POSIX_FADV_WILLNEED:
                /* First and last PARTIAL page! */
@@ -115,8 +107,7 @@ int ksys_fadvise64_64(int fd, loff_t offset, loff_t len, int advice)
                 * Ignore return value because fadvise() shall return
                 * success even if filesystem can't retrieve a hint,
                 */
-               force_page_cache_readahead(mapping, f.file, start_index,
-                                          nrpages);
+               force_page_cache_readahead(mapping, file, start_index, nrpages);
                break;
        case POSIX_FADV_NOREUSE:
                break;
@@ -183,9 +174,32 @@ int ksys_fadvise64_64(int fd, loff_t offset, loff_t len, int advice)
                }
                break;
        default:
-               ret = -EINVAL;
+               return -EINVAL;
        }
-out:
+       return 0;
+}
+
+int vfs_fadvise(struct file *file, loff_t offset, loff_t len, int advice)
+{
+       if (file->f_op->fadvise)
+               return file->f_op->fadvise(file, offset, len, advice);
+
+       return generic_fadvise(file, offset, len, advice);
+}
+EXPORT_SYMBOL(vfs_fadvise);
+
+#ifdef CONFIG_ADVISE_SYSCALLS
+
+int ksys_fadvise64_64(int fd, loff_t offset, loff_t len, int advice)
+{
+       struct fd f = fdget(fd);
+       int ret;
+
+       if (!f.file)
+               return -EBADF;
+
+       ret = vfs_fadvise(f.file, offset, len, advice);
+
        fdput(f);
        return ret;
 }
@@ -203,3 +217,4 @@ SYSCALL_DEFINE4(fadvise64, int, fd, loff_t, offset, size_t, len, int, advice)
 }
 
 #endif
+#endif
index c3bc7e9..533f9b0 100644 (file)
@@ -821,11 +821,11 @@ vm_fault_t vmf_insert_pfn_pud(struct vm_area_struct *vma, unsigned long addr,
         * but we need to be consistent with PTEs and architectures that
         * can't support a 'special' bit.
         */
-       BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)));
+       BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) &&
+                       !pfn_t_devmap(pfn));
        BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) ==
                                                (VM_PFNMAP|VM_MIXEDMAP));
        BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags));
-       BUG_ON(!pfn_t_devmap(pfn));
 
        if (addr < vma->vm_start || addr >= vma->vm_end)
                return VM_FAULT_SIGBUS;
index 9a085d5..17dd883 100644 (file)
@@ -2097,6 +2097,11 @@ static int __init kmemleak_late_init(void)
 
        kmemleak_initialized = 1;
 
+       dentry = debugfs_create_file("kmemleak", 0644, NULL, NULL,
+                                    &kmemleak_fops);
+       if (!dentry)
+               pr_warn("Failed to create the debugfs kmemleak file\n");
+
        if (kmemleak_error) {
                /*
                 * Some error occurred and kmemleak was disabled. There is a
@@ -2108,10 +2113,6 @@ static int __init kmemleak_late_init(void)
                return -ENOMEM;
        }
 
-       dentry = debugfs_create_file("kmemleak", 0644, NULL, NULL,
-                                    &kmemleak_fops);
-       if (!dentry)
-               pr_warn("Failed to create the debugfs kmemleak file\n");
        mutex_lock(&scan_mutex);
        start_scan_thread();
        mutex_unlock(&scan_mutex);
index 4ead5a4..e79cb59 100644 (file)
@@ -1701,8 +1701,6 @@ static enum oom_status mem_cgroup_oom(struct mem_cgroup *memcg, gfp_t mask, int
        if (mem_cgroup_out_of_memory(memcg, mask, order))
                return OOM_SUCCESS;
 
-       WARN(1,"Memory cgroup charge failed because of no reclaimable memory! "
-               "This looks like a misconfiguration or a kernel bug.");
        return OOM_FAILED;
 }
 
index 9eea6e8..38d94b7 100644 (file)
@@ -1333,7 +1333,8 @@ static unsigned long scan_movable_pages(unsigned long start, unsigned long end)
                        if (__PageMovable(page))
                                return pfn;
                        if (PageHuge(page)) {
-                               if (page_huge_active(page))
+                               if (hugepage_migration_supported(page_hstate(page)) &&
+                                   page_huge_active(page))
                                        return pfn;
                                else
                                        pfn = round_up(pfn + 1,
index b5b25e4..f10aa53 100644 (file)
@@ -522,6 +522,7 @@ bool __oom_reap_task_mm(struct mm_struct *mm)
 
                        tlb_gather_mmu(&tlb, mm, start, end);
                        if (mmu_notifier_invalidate_range_start_nonblock(mm, start, end)) {
+                               tlb_finish_mmu(&tlb, start, end);
                                ret = false;
                                continue;
                        }
@@ -1103,10 +1104,17 @@ bool out_of_memory(struct oom_control *oc)
        }
 
        select_bad_process(oc);
-       /* Found nothing?!?! Either we hang forever, or we panic. */
-       if (!oc->chosen && !is_sysrq_oom(oc) && !is_memcg_oom(oc)) {
+       /* Found nothing?!?! */
+       if (!oc->chosen) {
                dump_header(oc, NULL);
-               panic("Out of memory and no killable processes...\n");
+               pr_warn("Out of memory and no killable processes...\n");
+               /*
+                * If we got here due to an actual allocation at the
+                * system level, we cannot survive this and will enter
+                * an endless loop in the allocator. Bail out now.
+                */
+               if (!is_sysrq_oom(oc) && !is_memcg_oom(oc))
+                       panic("System is deadlocked on memory\n");
        }
        if (oc->chosen && oc->chosen != (void *)-1UL)
                oom_kill_process(oc, !is_memcg_oom(oc) ? "Out of memory" :
index 05e983f..89d2a2a 100644 (file)
@@ -7708,6 +7708,10 @@ bool has_unmovable_pages(struct zone *zone, struct page *page, int count,
                 * handle each tail page individually in migration.
                 */
                if (PageHuge(page)) {
+
+                       if (!hugepage_migration_supported(page_hstate(page)))
+                               goto unmovable;
+
                        iter = round_up(iter + 1, 1<<compound_order(page)) - 1;
                        continue;
                }
index a59ea70..4e63014 100644 (file)
@@ -20,6 +20,7 @@
 #include <linux/file.h>
 #include <linux/mm_inline.h>
 #include <linux/blk-cgroup.h>
+#include <linux/fadvise.h>
 
 #include "internal.h"
 
@@ -575,24 +576,6 @@ page_cache_async_readahead(struct address_space *mapping,
 }
 EXPORT_SYMBOL_GPL(page_cache_async_readahead);
 
-static ssize_t
-do_readahead(struct address_space *mapping, struct file *filp,
-            pgoff_t index, unsigned long nr)
-{
-       if (!mapping || !mapping->a_ops)
-               return -EINVAL;
-
-       /*
-        * Readahead doesn't make sense for DAX inodes, but we don't want it
-        * to report a failure either.  Instead, we just return success and
-        * don't do any work.
-        */
-       if (dax_mapping(mapping))
-               return 0;
-
-       return force_page_cache_readahead(mapping, filp, index, nr);
-}
-
 ssize_t ksys_readahead(int fd, loff_t offset, size_t count)
 {
        ssize_t ret;
@@ -600,16 +583,22 @@ ssize_t ksys_readahead(int fd, loff_t offset, size_t count)
 
        ret = -EBADF;
        f = fdget(fd);
-       if (f.file) {
-               if (f.file->f_mode & FMODE_READ) {
-                       struct address_space *mapping = f.file->f_mapping;
-                       pgoff_t start = offset >> PAGE_SHIFT;
-                       pgoff_t end = (offset + count - 1) >> PAGE_SHIFT;
-                       unsigned long len = end - start + 1;
-                       ret = do_readahead(mapping, f.file, start, len);
-               }
-               fdput(f);
-       }
+       if (!f.file || !(f.file->f_mode & FMODE_READ))
+               goto out;
+
+       /*
+        * The readahead() syscall is intended to run only on files
+        * that can execute readahead. If readahead is not possible
+        * on this file, then we must return -EINVAL.
+        */
+       ret = -EINVAL;
+       if (!f.file->f_mapping || !f.file->f_mapping->a_ops ||
+           !S_ISREG(file_inode(f.file)->i_mode))
+               goto out;
+
+       ret = vfs_fadvise(f.file, offset, count, POSIX_FADV_WILLNEED);
+out:
+       fdput(f);
        return ret;
 }
 
index d2890a4..9e3ebd2 100644 (file)
--- a/mm/util.c
+++ b/mm/util.c
@@ -435,11 +435,14 @@ void *kvmalloc_node(size_t size, gfp_t flags, int node)
 EXPORT_SYMBOL(kvmalloc_node);
 
 /**
- * kvfree - free memory allocated with kvmalloc
- * @addr: pointer returned by kvmalloc
+ * kvfree() - Free memory.
+ * @addr: Pointer to allocated memory.
  *
- * If the memory is allocated from vmalloc area it is freed with vfree().
- * Otherwise kfree() is used.
+ * kvfree frees memory allocated by any of vmalloc(), kmalloc() or kvmalloc().
+ * It is slightly more efficient to use kfree() or vfree() if you are certain
+ * that you know which one to use.
+ *
+ * Context: Any context except NMI.
  */
 void kvfree(const void *addr)
 {
index ea517be..cdc32a3 100644 (file)
 #endif
 #define VMACACHE_HASH(addr) ((addr >> VMACACHE_SHIFT) & VMACACHE_MASK)
 
-/*
- * Flush vma caches for threads that share a given mm.
- *
- * The operation is safe because the caller holds the mmap_sem
- * exclusively and other threads accessing the vma cache will
- * have mmap_sem held at least for read, so no extra locking
- * is required to maintain the vma cache.
- */
-void vmacache_flush_all(struct mm_struct *mm)
-{
-       struct task_struct *g, *p;
-
-       count_vm_vmacache_event(VMACACHE_FULL_FLUSHES);
-
-       /*
-        * Single threaded tasks need not iterate the entire
-        * list of process. We can avoid the flushing as well
-        * since the mm's seqnum was increased and don't have
-        * to worry about other threads' seqnum. Current's
-        * flush will occur upon the next lookup.
-        */
-       if (atomic_read(&mm->mm_users) == 1)
-               return;
-
-       rcu_read_lock();
-       for_each_process_thread(g, p) {
-               /*
-                * Only flush the vmacache pointers as the
-                * mm seqnum is already set and curr's will
-                * be set upon invalidation when the next
-                * lookup is done.
-                */
-               if (mm == p->mm)
-                       vmacache_flush(p);
-       }
-       rcu_read_unlock();
-}
-
 /*
  * This task may be accessing a foreign mm via (for example)
  * get_user_pages()->find_vma().  The vmacache is task-local and this
index c25eb36..aecdeba 100644 (file)
@@ -2282,14 +2282,21 @@ static const struct bpf_func_proto bpf_msg_cork_bytes_proto = {
        .arg2_type      = ARG_ANYTHING,
 };
 
+#define sk_msg_iter_var(var)                   \
+       do {                                    \
+               var++;                          \
+               if (var == MAX_SKB_FRAGS)       \
+                       var = 0;                \
+       } while (0)
+
 BPF_CALL_4(bpf_msg_pull_data,
           struct sk_msg_buff *, msg, u32, start, u32, end, u64, flags)
 {
-       unsigned int len = 0, offset = 0, copy = 0;
+       unsigned int len = 0, offset = 0, copy = 0, poffset = 0;
+       int bytes = end - start, bytes_sg_total;
        struct scatterlist *sg = msg->sg_data;
        int first_sg, last_sg, i, shift;
        unsigned char *p, *to, *from;
-       int bytes = end - start;
        struct page *page;
 
        if (unlikely(flags || end <= start))
@@ -2299,21 +2306,22 @@ BPF_CALL_4(bpf_msg_pull_data,
        i = msg->sg_start;
        do {
                len = sg[i].length;
-               offset += len;
                if (start < offset + len)
                        break;
-               i++;
-               if (i == MAX_SKB_FRAGS)
-                       i = 0;
+               offset += len;
+               sk_msg_iter_var(i);
        } while (i != msg->sg_end);
 
        if (unlikely(start >= offset + len))
                return -EINVAL;
 
-       if (!msg->sg_copy[i] && bytes <= len)
-               goto out;
-
        first_sg = i;
+       /* The start may point into the sg element so we need to also
+        * account for the headroom.
+        */
+       bytes_sg_total = start - offset + bytes;
+       if (!msg->sg_copy[i] && bytes_sg_total <= len)
+               goto out;
 
        /* At this point we need to linearize multiple scatterlist
         * elements or a single shared page. Either way we need to
@@ -2327,37 +2335,32 @@ BPF_CALL_4(bpf_msg_pull_data,
         */
        do {
                copy += sg[i].length;
-               i++;
-               if (i == MAX_SKB_FRAGS)
-                       i = 0;
-               if (bytes < copy)
+               sk_msg_iter_var(i);
+               if (bytes_sg_total <= copy)
                        break;
        } while (i != msg->sg_end);
        last_sg = i;
 
-       if (unlikely(copy < end - start))
+       if (unlikely(bytes_sg_total > copy))
                return -EINVAL;
 
        page = alloc_pages(__GFP_NOWARN | GFP_ATOMIC, get_order(copy));
        if (unlikely(!page))
                return -ENOMEM;
        p = page_address(page);
-       offset = 0;
 
        i = first_sg;
        do {
                from = sg_virt(&sg[i]);
                len = sg[i].length;
-               to = p + offset;
+               to = p + poffset;
 
                memcpy(to, from, len);
-               offset += len;
+               poffset += len;
                sg[i].length = 0;
                put_page(sg_page(&sg[i]));
 
-               i++;
-               if (i == MAX_SKB_FRAGS)
-                       i = 0;
+               sk_msg_iter_var(i);
        } while (i != last_sg);
 
        sg[first_sg].length = copy;
@@ -2367,11 +2370,15 @@ BPF_CALL_4(bpf_msg_pull_data,
         * had a single entry though we can just replace it and
         * be done. Otherwise walk the ring and shift the entries.
         */
-       shift = last_sg - first_sg - 1;
+       WARN_ON_ONCE(last_sg == first_sg);
+       shift = last_sg > first_sg ?
+               last_sg - first_sg - 1 :
+               MAX_SKB_FRAGS - first_sg + last_sg - 1;
        if (!shift)
                goto out;
 
-       i = first_sg + 1;
+       i = first_sg;
+       sk_msg_iter_var(i);
        do {
                int move_from;
 
@@ -2388,15 +2395,13 @@ BPF_CALL_4(bpf_msg_pull_data,
                sg[move_from].page_link = 0;
                sg[move_from].offset = 0;
 
-               i++;
-               if (i == MAX_SKB_FRAGS)
-                       i = 0;
+               sk_msg_iter_var(i);
        } while (1);
        msg->sg_end -= shift;
        if (msg->sg_end < 0)
                msg->sg_end += MAX_SKB_FRAGS;
 out:
-       msg->data = sg_virt(&sg[i]) + start - offset;
+       msg->data = sg_virt(&sg[first_sg]) + start - offset;
        msg->data_end = msg->data + bytes;
 
        return 0;
@@ -7281,7 +7286,7 @@ static u32 sk_reuseport_convert_ctx_access(enum bpf_access_type type,
                break;
 
        case offsetof(struct sk_reuseport_md, ip_protocol):
-               BUILD_BUG_ON(hweight_long(SK_FL_PROTO_MASK) != BITS_PER_BYTE);
+               BUILD_BUG_ON(HWEIGHT32(SK_FL_PROTO_MASK) != BITS_PER_BYTE);
                SK_REUSEPORT_LOAD_SK_FIELD_SIZE_OFF(__sk_flags_offset,
                                                    BPF_W, 0);
                *insn++ = BPF_ALU32_IMM(BPF_AND, si->dst_reg, SK_FL_PROTO_MASK);
index 24431e5..60c9288 100644 (file)
@@ -324,6 +324,10 @@ void rtnl_unregister_all(int protocol)
 
        rtnl_lock();
        tab = rtnl_msg_handlers[protocol];
+       if (!tab) {
+               rtnl_unlock();
+               return;
+       }
        RCU_INIT_POINTER(rtnl_msg_handlers[protocol], NULL);
        for (msgindex = 0; msgindex < RTM_NR_MSGTYPES; msgindex++) {
                link = tab[msgindex];
index c996c09..b2c807f 100644 (file)
@@ -939,9 +939,6 @@ struct ubuf_info *sock_zerocopy_alloc(struct sock *sk, size_t size)
 
        WARN_ON_ONCE(!in_task());
 
-       if (!sock_flag(sk, SOCK_ZEROCOPY))
-               return NULL;
-
        skb = sock_omalloc(sk, 0, GFP_KERNEL);
        if (!skb)
                return NULL;
index e63c554..9f3209f 100644 (file)
 #include <linux/of_mdio.h>
 #include <linux/of_platform.h>
 #include <linux/of_net.h>
-#include <linux/of_gpio.h>
 #include <linux/netdevice.h>
 #include <linux/sysfs.h>
 #include <linux/phy_fixed.h>
 #include <linux/ptp_classify.h>
-#include <linux/gpio/consumer.h>
 #include <linux/etherdevice.h>
 
 #include "dsa_priv.h"
index cf75f89..4da3944 100644 (file)
@@ -820,10 +820,9 @@ static void igmp_timer_expire(struct timer_list *t)
        spin_lock(&im->lock);
        im->tm_running = 0;
 
-       if (im->unsolicit_count) {
-               im->unsolicit_count--;
+       if (im->unsolicit_count && --im->unsolicit_count)
                igmp_start_timer(im, unsolicited_report_interval(in_dev));
-       }
+
        im->reporter = 1;
        spin_unlock(&im->lock);
 
@@ -1308,6 +1307,8 @@ static void igmp_group_added(struct ip_mc_list *im)
 
        if (in_dev->dead)
                return;
+
+       im->unsolicit_count = net->ipv4.sysctl_igmp_qrv;
        if (IGMP_V1_SEEN(in_dev) || IGMP_V2_SEEN(in_dev)) {
                spin_lock_bh(&im->lock);
                igmp_start_timer(im, IGMP_INITIAL_REPORT_DELAY);
@@ -1391,9 +1392,6 @@ static void __ip_mc_inc_group(struct in_device *in_dev, __be32 addr,
                              unsigned int mode)
 {
        struct ip_mc_list *im;
-#ifdef CONFIG_IP_MULTICAST
-       struct net *net = dev_net(in_dev->dev);
-#endif
 
        ASSERT_RTNL();
 
@@ -1420,7 +1418,6 @@ static void __ip_mc_inc_group(struct in_device *in_dev, __be32 addr,
        spin_lock_init(&im->lock);
 #ifdef CONFIG_IP_MULTICAST
        timer_setup(&im->timer, igmp_timer_expire, 0);
-       im->unsolicit_count = net->ipv4.sysctl_igmp_qrv;
 #endif
 
        im->next_rcu = in_dev->mc_list;
index 88281fb..e722712 100644 (file)
@@ -599,6 +599,7 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *skb,
                        nextp = &fp->next;
                        fp->prev = NULL;
                        memset(&fp->rbnode, 0, sizeof(fp->rbnode));
+                       fp->sk = NULL;
                        head->data_len += fp->len;
                        head->len += fp->len;
                        if (head->ip_summed != fp->ip_summed)
index 51a5d06..8cce0e9 100644 (file)
@@ -178,6 +178,9 @@ static void ipgre_err(struct sk_buff *skb, u32 info,
 
        if (tpi->proto == htons(ETH_P_TEB))
                itn = net_generic(net, gre_tap_net_id);
+       else if (tpi->proto == htons(ETH_P_ERSPAN) ||
+                tpi->proto == htons(ETH_P_ERSPAN2))
+               itn = net_generic(net, erspan_net_id);
        else
                itn = net_generic(net, ipgre_net_id);
 
@@ -328,6 +331,8 @@ static int erspan_rcv(struct sk_buff *skb, struct tnl_ptk_info *tpi,
                ip_tunnel_rcv(tunnel, skb, tpi, tun_dst, log_ecn_error);
                return PACKET_RCVD;
        }
+       return PACKET_REJECT;
+
 drop:
        kfree_skb(skb);
        return PACKET_RCVD;
@@ -1508,11 +1513,14 @@ nla_put_failure:
 
 static void erspan_setup(struct net_device *dev)
 {
+       struct ip_tunnel *t = netdev_priv(dev);
+
        ether_setup(dev);
        dev->netdev_ops = &erspan_netdev_ops;
        dev->priv_flags &= ~IFF_TX_SKB_SHARING;
        dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
        ip_tunnel_setup(dev, erspan_net_id);
+       t->erspan_ver = 1;
 }
 
 static const struct nla_policy ipgre_policy[IFLA_GRE_MAX + 1] = {
index d9504ad..184bf2e 100644 (file)
@@ -106,6 +106,10 @@ config NF_NAT_IPV4
 
 if NF_NAT_IPV4
 
+config NF_NAT_MASQUERADE_IPV4
+       bool
+
+if NF_TABLES
 config NFT_CHAIN_NAT_IPV4
        depends on NF_TABLES_IPV4
        tristate "IPv4 nf_tables nat chain support"
@@ -115,9 +119,6 @@ config NFT_CHAIN_NAT_IPV4
          packet transformations such as the source, destination address and
          source and destination ports.
 
-config NF_NAT_MASQUERADE_IPV4
-       bool
-
 config NFT_MASQ_IPV4
        tristate "IPv4 masquerading support for nf_tables"
        depends on NF_TABLES_IPV4
@@ -135,6 +136,7 @@ config NFT_REDIR_IPV4
        help
          This is the expression that provides IPv4 redirect support for
          nf_tables.
+endif # NF_TABLES
 
 config NF_NAT_SNMP_BASIC
        tristate "Basic SNMP-ALG support"
index b8af2fe..10c6246 100644 (file)
@@ -1185,7 +1185,7 @@ int tcp_sendmsg_locked(struct sock *sk, struct msghdr *msg, size_t size)
 
        flags = msg->msg_flags;
 
-       if (flags & MSG_ZEROCOPY && size) {
+       if (flags & MSG_ZEROCOPY && size && sock_flag(sk, SOCK_ZEROCOPY)) {
                if (sk->sk_state != TCP_ESTABLISHED) {
                        err = -EINVAL;
                        goto out_err;
index 4c2dd9f..4cf2f7b 100644 (file)
@@ -6367,8 +6367,8 @@ static bool tcp_syn_flood_action(const struct sock *sk,
        if (!queue->synflood_warned &&
            net->ipv4.sysctl_tcp_syncookies != 2 &&
            xchg(&queue->synflood_warned, 1) == 0)
-               pr_info("%s: Possible SYN flooding on port %d. %s.  Check SNMP counters.\n",
-                       proto, ntohs(tcp_hdr(skb)->dest), msg);
+               net_info_ratelimited("%s: Possible SYN flooding on port %d. %s.  Check SNMP counters.\n",
+                                    proto, ntohs(tcp_hdr(skb)->dest), msg);
 
        return want_cookie;
 }
index 75ef332..12affb7 100644 (file)
@@ -184,8 +184,9 @@ kill:
                                inet_twsk_deschedule_put(tw);
                                return TCP_TW_SUCCESS;
                        }
+               } else {
+                       inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN);
                }
-               inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN);
 
                if (tmp_opt.saw_tstamp) {
                        tcptw->tw_ts_recent       = tmp_opt.rcv_tsval;
index 673bba3..9a4261e 100644 (file)
@@ -938,14 +938,14 @@ static int __init inet6_init(void)
 
        err = proto_register(&pingv6_prot, 1);
        if (err)
-               goto out_unregister_ping_proto;
+               goto out_unregister_raw_proto;
 
        /* We MUST register RAW sockets before we create the ICMP6,
         * IGMP6, or NDISC control sockets.
         */
        err = rawv6_init();
        if (err)
-               goto out_unregister_raw_proto;
+               goto out_unregister_ping_proto;
 
        /* Register the family here so that the init calls below will
         * be able to create sockets. (?? is this dangerous ??)
@@ -1113,11 +1113,11 @@ netfilter_fail:
 igmp_fail:
        ndisc_cleanup();
 ndisc_fail:
-       ip6_mr_cleanup();
+       icmpv6_cleanup();
 icmp_fail:
-       unregister_pernet_subsys(&inet6_net_ops);
+       ip6_mr_cleanup();
 ipmr_fail:
-       icmpv6_cleanup();
+       unregister_pernet_subsys(&inet6_net_ops);
 register_pernet_fail:
        sock_unregister(PF_INET6);
        rtnl_unregister_all(PF_INET6);
index c861a6d..5516f55 100644 (file)
@@ -989,7 +989,10 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct fib6_info *rt,
                                        fib6_clean_expires(iter);
                                else
                                        fib6_set_expires(iter, rt->expires);
-                               fib6_metric_set(iter, RTAX_MTU, rt->fib6_pmtu);
+
+                               if (rt->fib6_pmtu)
+                                       fib6_metric_set(iter, RTAX_MTU,
+                                                       rt->fib6_pmtu);
                                return -EEXIST;
                        }
                        /* If we have the same destination and the same metric,
index 18a3794..e493b04 100644 (file)
@@ -1778,6 +1778,7 @@ static void ip6gre_netlink_parms(struct nlattr *data[],
        if (data[IFLA_GRE_COLLECT_METADATA])
                parms->collect_md = true;
 
+       parms->erspan_ver = 1;
        if (data[IFLA_GRE_ERSPAN_VER])
                parms->erspan_ver = nla_get_u8(data[IFLA_GRE_ERSPAN_VER]);
 
index 5df2a58..419960b 100644 (file)
@@ -1188,7 +1188,15 @@ route_lookup:
                init_tel_txopt(&opt, encap_limit);
                ipv6_push_frag_opts(skb, &opt.ops, &proto);
        }
-       hop_limit = hop_limit ? : ip6_dst_hoplimit(dst);
+
+       if (hop_limit == 0) {
+               if (skb->protocol == htons(ETH_P_IP))
+                       hop_limit = ip_hdr(skb)->ttl;
+               else if (skb->protocol == htons(ETH_P_IPV6))
+                       hop_limit = ipv6_hdr(skb)->hop_limit;
+               else
+                       hop_limit = ip6_dst_hoplimit(dst);
+       }
 
        /* Calculate max headroom for all the headers and adjust
         * needed_headroom if necessary.
index 5095367..eeaf745 100644 (file)
@@ -481,7 +481,7 @@ vti6_xmit(struct sk_buff *skb, struct net_device *dev, struct flowi *fl)
        }
 
        mtu = dst_mtu(dst);
-       if (!skb->ignore_df && skb->len > mtu) {
+       if (skb->len > mtu) {
                skb_dst_update_pmtu(skb, mtu);
 
                if (skb->protocol == htons(ETH_P_IPV6)) {
index 2a14d8b..8f68a51 100644 (file)
@@ -445,6 +445,7 @@ nf_ct_frag6_reasm(struct frag_queue *fq, struct sk_buff *prev,  struct net_devic
                else if (head->ip_summed == CHECKSUM_COMPLETE)
                        head->csum = csum_add(head->csum, fp->csum);
                head->truesize += fp->truesize;
+               fp->sk = NULL;
        }
        sub_frag_mem_limit(fq->q.net, head->truesize);
 
index c4ea13e..18e00ce 100644 (file)
@@ -996,7 +996,6 @@ static void ip6_rt_copy_init(struct rt6_info *rt, struct fib6_info *ort)
        rt->rt6i_src = ort->fib6_src;
 #endif
        rt->rt6i_prefsrc = ort->fib6_prefsrc;
-       rt->dst.lwtstate = lwtstate_get(ort->fib6_nh.nh_lwtstate);
 }
 
 static struct fib6_node* fib6_backtrack(struct fib6_node *fn,
index a21d8ed..e2f16a0 100644 (file)
@@ -351,20 +351,28 @@ static int afiucv_hs_send(struct iucv_message *imsg, struct sock *sock,
                memcpy(&phs_hdr->iucv_hdr, imsg, sizeof(struct iucv_message));
 
        skb->dev = iucv->hs_dev;
-       if (!skb->dev)
-               return -ENODEV;
-       if (!(skb->dev->flags & IFF_UP) || !netif_carrier_ok(skb->dev))
-               return -ENETDOWN;
+       if (!skb->dev) {
+               err = -ENODEV;
+               goto err_free;
+       }
+       if (!(skb->dev->flags & IFF_UP) || !netif_carrier_ok(skb->dev)) {
+               err = -ENETDOWN;
+               goto err_free;
+       }
        if (skb->len > skb->dev->mtu) {
-               if (sock->sk_type == SOCK_SEQPACKET)
-                       return -EMSGSIZE;
-               else
-                       skb_trim(skb, skb->dev->mtu);
+               if (sock->sk_type == SOCK_SEQPACKET) {
+                       err = -EMSGSIZE;
+                       goto err_free;
+               }
+               skb_trim(skb, skb->dev->mtu);
        }
        skb->protocol = cpu_to_be16(ETH_P_AF_IUCV);
        nskb = skb_clone(skb, GFP_ATOMIC);
-       if (!nskb)
-               return -ENOMEM;
+       if (!nskb) {
+               err = -ENOMEM;
+               goto err_free;
+       }
+
        skb_queue_tail(&iucv->send_skb_q, nskb);
        err = dev_queue_xmit(skb);
        if (net_xmit_eval(err)) {
@@ -375,6 +383,10 @@ static int afiucv_hs_send(struct iucv_message *imsg, struct sock *sock,
                WARN_ON(atomic_read(&iucv->msg_recv) < 0);
        }
        return net_xmit_eval(err);
+
+err_free:
+       kfree_skb(skb);
+       return err;
 }
 
 static struct sock *__iucv_get_sock_by_name(char *nm)
@@ -1167,7 +1179,7 @@ static int iucv_sock_sendmsg(struct socket *sock, struct msghdr *msg,
                err = afiucv_hs_send(&txmsg, sk, skb, 0);
                if (err) {
                        atomic_dec(&iucv->msg_sent);
-                       goto fail;
+                       goto out;
                }
        } else { /* Classic VM IUCV transport */
                skb_queue_tail(&iucv->send_skb_q, skb);
@@ -2155,8 +2167,8 @@ static int afiucv_hs_rcv(struct sk_buff *skb, struct net_device *dev,
        struct sock *sk;
        struct iucv_sock *iucv;
        struct af_iucv_trans_hdr *trans_hdr;
+       int err = NET_RX_SUCCESS;
        char nullstring[8];
-       int err = 0;
 
        if (skb->len < (ETH_HLEN + sizeof(struct af_iucv_trans_hdr))) {
                WARN_ONCE(1, "AF_IUCV too short skb, len=%d, min=%d",
@@ -2254,7 +2266,7 @@ static int afiucv_hs_rcv(struct sk_buff *skb, struct net_device *dev,
                err = afiucv_hs_callback_rx(sk, skb);
                break;
        default:
-               ;
+               kfree_skb(skb);
        }
 
        return err;
index 8f7ef16..eb502c6 100644 (file)
@@ -1874,7 +1874,7 @@ static void iucv_pm_complete(struct device *dev)
  * Returns 0 if there are still iucv pathes defined
  *        1 if there are no iucv pathes defined
  */
-int iucv_path_table_empty(void)
+static int iucv_path_table_empty(void)
 {
        int i;
 
index 6449a1c..f0f5fed 100644 (file)
@@ -947,8 +947,8 @@ static void ieee80211_rx_mgmt_deauth_ibss(struct ieee80211_sub_if_data *sdata,
        if (len < IEEE80211_DEAUTH_FRAME_LEN)
                return;
 
-       ibss_dbg(sdata, "RX DeAuth SA=%pM DA=%pM BSSID=%pM (reason: %d)\n",
-                mgmt->sa, mgmt->da, mgmt->bssid, reason);
+       ibss_dbg(sdata, "RX DeAuth SA=%pM DA=%pM\n", mgmt->sa, mgmt->da);
+       ibss_dbg(sdata, "\tBSSID=%pM (reason: %d)\n", mgmt->bssid, reason);
        sta_info_destroy_addr(sdata, mgmt->sa);
 }
 
@@ -966,9 +966,9 @@ static void ieee80211_rx_mgmt_auth_ibss(struct ieee80211_sub_if_data *sdata,
        auth_alg = le16_to_cpu(mgmt->u.auth.auth_alg);
        auth_transaction = le16_to_cpu(mgmt->u.auth.auth_transaction);
 
-       ibss_dbg(sdata,
-                "RX Auth SA=%pM DA=%pM BSSID=%pM (auth_transaction=%d)\n",
-                mgmt->sa, mgmt->da, mgmt->bssid, auth_transaction);
+       ibss_dbg(sdata, "RX Auth SA=%pM DA=%pM\n", mgmt->sa, mgmt->da);
+       ibss_dbg(sdata, "\tBSSID=%pM (auth_transaction=%d)\n",
+                mgmt->bssid, auth_transaction);
 
        if (auth_alg != WLAN_AUTH_OPEN || auth_transaction != 1)
                return;
@@ -1175,10 +1175,10 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
                rx_timestamp = drv_get_tsf(local, sdata);
        }
 
-       ibss_dbg(sdata,
-                "RX beacon SA=%pM BSSID=%pM TSF=0x%llx BCN=0x%llx diff=%lld @%lu\n",
+       ibss_dbg(sdata, "RX beacon SA=%pM BSSID=%pM TSF=0x%llx\n",
                 mgmt->sa, mgmt->bssid,
-                (unsigned long long)rx_timestamp,
+                (unsigned long long)rx_timestamp);
+       ibss_dbg(sdata, "\tBCN=0x%llx diff=%lld @%lu\n",
                 (unsigned long long)beacon_timestamp,
                 (unsigned long long)(rx_timestamp - beacon_timestamp),
                 jiffies);
@@ -1537,9 +1537,9 @@ static void ieee80211_rx_mgmt_probe_req(struct ieee80211_sub_if_data *sdata,
 
        tx_last_beacon = drv_tx_last_beacon(local);
 
-       ibss_dbg(sdata,
-                "RX ProbeReq SA=%pM DA=%pM BSSID=%pM (tx_last_beacon=%d)\n",
-                mgmt->sa, mgmt->da, mgmt->bssid, tx_last_beacon);
+       ibss_dbg(sdata, "RX ProbeReq SA=%pM DA=%pM\n", mgmt->sa, mgmt->da);
+       ibss_dbg(sdata, "\tBSSID=%pM (tx_last_beacon=%d)\n",
+                mgmt->bssid, tx_last_beacon);
 
        if (!tx_last_beacon && is_multicast_ether_addr(mgmt->da))
                return;
index 4fb2709..5136278 100644 (file)
@@ -256,8 +256,27 @@ static void ieee80211_restart_work(struct work_struct *work)
 
        flush_work(&local->radar_detected_work);
        rtnl_lock();
-       list_for_each_entry(sdata, &local->interfaces, list)
+       list_for_each_entry(sdata, &local->interfaces, list) {
+               /*
+                * XXX: there may be more work for other vif types and even
+                * for station mode: a good thing would be to run most of
+                * the iface type's dependent _stop (ieee80211_mg_stop,
+                * ieee80211_ibss_stop) etc...
+                * For now, fix only the specific bug that was seen: race
+                * between csa_connection_drop_work and us.
+                */
+               if (sdata->vif.type == NL80211_IFTYPE_STATION) {
+                       /*
+                        * This worker is scheduled from the iface worker that
+                        * runs on mac80211's workqueue, so we can't be
+                        * scheduling this worker after the cancel right here.
+                        * The exception is ieee80211_chswitch_done.
+                        * Then we can have a race...
+                        */
+                       cancel_work_sync(&sdata->u.mgd.csa_connection_drop_work);
+               }
                flush_delayed_work(&sdata->dec_tailroom_needed_wk);
+       }
        ieee80211_scan_cancel(local);
 
        /* make sure any new ROC will consider local->in_reconfig */
@@ -471,10 +490,7 @@ static const struct ieee80211_vht_cap mac80211_vht_capa_mod_mask = {
                cpu_to_le32(IEEE80211_VHT_CAP_RXLDPC |
                            IEEE80211_VHT_CAP_SHORT_GI_80 |
                            IEEE80211_VHT_CAP_SHORT_GI_160 |
-                           IEEE80211_VHT_CAP_RXSTBC_1 |
-                           IEEE80211_VHT_CAP_RXSTBC_2 |
-                           IEEE80211_VHT_CAP_RXSTBC_3 |
-                           IEEE80211_VHT_CAP_RXSTBC_4 |
+                           IEEE80211_VHT_CAP_RXSTBC_MASK |
                            IEEE80211_VHT_CAP_TXSTBC |
                            IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE |
                            IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE |
@@ -1208,6 +1224,7 @@ void ieee80211_unregister_hw(struct ieee80211_hw *hw)
 #if IS_ENABLED(CONFIG_IPV6)
        unregister_inet6addr_notifier(&local->ifa6_notifier);
 #endif
+       ieee80211_txq_teardown_flows(local);
 
        rtnl_lock();
 
@@ -1236,7 +1253,6 @@ void ieee80211_unregister_hw(struct ieee80211_hw *hw)
        skb_queue_purge(&local->skb_queue);
        skb_queue_purge(&local->skb_queue_unreliable);
        skb_queue_purge(&local->skb_queue_tdls_chsw);
-       ieee80211_txq_teardown_flows(local);
 
        destroy_workqueue(local->workqueue);
        wiphy_unregister(local->hw.wiphy);
index 35ad398..daf9db3 100644 (file)
@@ -572,6 +572,10 @@ static void hwmp_preq_frame_process(struct ieee80211_sub_if_data *sdata,
                forward = false;
                reply = true;
                target_metric = 0;
+
+               if (SN_GT(target_sn, ifmsh->sn))
+                       ifmsh->sn = target_sn;
+
                if (time_after(jiffies, ifmsh->last_sn_update +
                                        net_traversal_jiffies(sdata)) ||
                    time_before(jiffies, ifmsh->last_sn_update)) {
index 7fb9957..3dbecae 100644 (file)
@@ -1073,6 +1073,10 @@ static void ieee80211_chswitch_work(struct work_struct *work)
         */
 
        if (sdata->reserved_chanctx) {
+               struct ieee80211_supported_band *sband = NULL;
+               struct sta_info *mgd_sta = NULL;
+               enum ieee80211_sta_rx_bandwidth bw = IEEE80211_STA_RX_BW_20;
+
                /*
                 * with multi-vif csa driver may call ieee80211_csa_finish()
                 * many times while waiting for other interfaces to use their
@@ -1081,6 +1085,48 @@ static void ieee80211_chswitch_work(struct work_struct *work)
                if (sdata->reserved_ready)
                        goto out;
 
+               if (sdata->vif.bss_conf.chandef.width !=
+                   sdata->csa_chandef.width) {
+                       /*
+                        * For managed interface, we need to also update the AP
+                        * station bandwidth and align the rate scale algorithm
+                        * on the bandwidth change. Here we only consider the
+                        * bandwidth of the new channel definition (as channel
+                        * switch flow does not have the full HT/VHT/HE
+                        * information), assuming that if additional changes are
+                        * required they would be done as part of the processing
+                        * of the next beacon from the AP.
+                        */
+                       switch (sdata->csa_chandef.width) {
+                       case NL80211_CHAN_WIDTH_20_NOHT:
+                       case NL80211_CHAN_WIDTH_20:
+                       default:
+                               bw = IEEE80211_STA_RX_BW_20;
+                               break;
+                       case NL80211_CHAN_WIDTH_40:
+                               bw = IEEE80211_STA_RX_BW_40;
+                               break;
+                       case NL80211_CHAN_WIDTH_80:
+                               bw = IEEE80211_STA_RX_BW_80;
+                               break;
+                       case NL80211_CHAN_WIDTH_80P80:
+                       case NL80211_CHAN_WIDTH_160:
+                               bw = IEEE80211_STA_RX_BW_160;
+                               break;
+                       }
+
+                       mgd_sta = sta_info_get(sdata, ifmgd->bssid);
+                       sband =
+                               local->hw.wiphy->bands[sdata->csa_chandef.chan->band];
+               }
+
+               if (sdata->vif.bss_conf.chandef.width >
+                   sdata->csa_chandef.width) {
+                       mgd_sta->sta.bandwidth = bw;
+                       rate_control_rate_update(local, sband, mgd_sta,
+                                                IEEE80211_RC_BW_CHANGED);
+               }
+
                ret = ieee80211_vif_use_reserved_context(sdata);
                if (ret) {
                        sdata_info(sdata,
@@ -1091,6 +1137,13 @@ static void ieee80211_chswitch_work(struct work_struct *work)
                        goto out;
                }
 
+               if (sdata->vif.bss_conf.chandef.width <
+                   sdata->csa_chandef.width) {
+                       mgd_sta->sta.bandwidth = bw;
+                       rate_control_rate_update(local, sband, mgd_sta,
+                                                IEEE80211_RC_BW_CHANGED);
+               }
+
                goto out;
        }
 
@@ -1312,6 +1365,16 @@ ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata,
                                         cbss->beacon_interval));
        return;
  drop_connection:
+       /*
+        * This is just so that the disconnect flow will know that
+        * we were trying to switch channel and failed. In case the
+        * mode is 1 (we are not allowed to Tx), we will know not to
+        * send a deauthentication frame. Those two fields will be
+        * reset when the disconnection worker runs.
+        */
+       sdata->vif.csa_active = true;
+       sdata->csa_block_tx = csa_ie.mode;
+
        ieee80211_queue_work(&local->hw, &ifmgd->csa_connection_drop_work);
        mutex_unlock(&local->chanctx_mtx);
        mutex_unlock(&local->mtx);
@@ -2522,6 +2585,7 @@ static void __ieee80211_disconnect(struct ieee80211_sub_if_data *sdata)
        struct ieee80211_local *local = sdata->local;
        struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
        u8 frame_buf[IEEE80211_DEAUTH_FRAME_LEN];
+       bool tx;
 
        sdata_lock(sdata);
        if (!ifmgd->associated) {
@@ -2529,6 +2593,8 @@ static void __ieee80211_disconnect(struct ieee80211_sub_if_data *sdata)
                return;
        }
 
+       tx = !sdata->csa_block_tx;
+
        /* AP is probably out of range (or not reachable for another reason) so
         * remove the bss struct for that AP.
         */
@@ -2536,7 +2602,7 @@ static void __ieee80211_disconnect(struct ieee80211_sub_if_data *sdata)
 
        ieee80211_set_disassoc(sdata, IEEE80211_STYPE_DEAUTH,
                               WLAN_REASON_DISASSOC_DUE_TO_INACTIVITY,
-                              true, frame_buf);
+                              tx, frame_buf);
        mutex_lock(&local->mtx);
        sdata->vif.csa_active = false;
        ifmgd->csa_waiting_bcn = false;
@@ -2547,7 +2613,7 @@ static void __ieee80211_disconnect(struct ieee80211_sub_if_data *sdata)
        }
        mutex_unlock(&local->mtx);
 
-       ieee80211_report_disconnect(sdata, frame_buf, sizeof(frame_buf), true,
+       ieee80211_report_disconnect(sdata, frame_buf, sizeof(frame_buf), tx,
                                    WLAN_REASON_DISASSOC_DUE_TO_INACTIVITY);
 
        sdata_unlock(sdata);
index 64742f2..96611d5 100644 (file)
@@ -1728,6 +1728,7 @@ ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx)
         */
        if (!ieee80211_hw_check(&sta->local->hw, AP_LINK_PS) &&
            !ieee80211_has_morefrags(hdr->frame_control) &&
+           !is_multicast_ether_addr(hdr->addr1) &&
            (ieee80211_is_mgmt(hdr->frame_control) ||
             ieee80211_is_data(hdr->frame_control)) &&
            !(status->rx_flags & IEEE80211_RX_DEFERRED_RELEASE) &&
index cd332e3..f353d9d 100644 (file)
@@ -3078,27 +3078,18 @@ void ieee80211_clear_fast_xmit(struct sta_info *sta)
 }
 
 static bool ieee80211_amsdu_realloc_pad(struct ieee80211_local *local,
-                                       struct sk_buff *skb, int headroom,
-                                       int *subframe_len)
+                                       struct sk_buff *skb, int headroom)
 {
-       int amsdu_len = *subframe_len + sizeof(struct ethhdr);
-       int padding = (4 - amsdu_len) & 3;
-
-       if (skb_headroom(skb) < headroom || skb_tailroom(skb) < padding) {
+       if (skb_headroom(skb) < headroom) {
                I802_DEBUG_INC(local->tx_expand_skb_head);
 
-               if (pskb_expand_head(skb, headroom, padding, GFP_ATOMIC)) {
+               if (pskb_expand_head(skb, headroom, 0, GFP_ATOMIC)) {
                        wiphy_debug(local->hw.wiphy,
                                    "failed to reallocate TX buffer\n");
                        return false;
                }
        }
 
-       if (padding) {
-               *subframe_len += padding;
-               skb_put_zero(skb, padding);
-       }
-
        return true;
 }
 
@@ -3122,8 +3113,7 @@ static bool ieee80211_amsdu_prepare_head(struct ieee80211_sub_if_data *sdata,
        if (info->control.flags & IEEE80211_TX_CTRL_AMSDU)
                return true;
 
-       if (!ieee80211_amsdu_realloc_pad(local, skb, sizeof(*amsdu_hdr),
-                                        &subframe_len))
+       if (!ieee80211_amsdu_realloc_pad(local, skb, sizeof(*amsdu_hdr)))
                return false;
 
        data = skb_push(skb, sizeof(*amsdu_hdr));
@@ -3189,7 +3179,8 @@ static bool ieee80211_amsdu_aggregate(struct ieee80211_sub_if_data *sdata,
        void *data;
        bool ret = false;
        unsigned int orig_len;
-       int n = 1, nfrags;
+       int n = 2, nfrags, pad = 0;
+       u16 hdrlen;
 
        if (!ieee80211_hw_check(&local->hw, TX_AMSDU))
                return false;
@@ -3222,9 +3213,6 @@ static bool ieee80211_amsdu_aggregate(struct ieee80211_sub_if_data *sdata,
        if (skb->len + head->len > max_amsdu_len)
                goto out;
 
-       if (!ieee80211_amsdu_prepare_head(sdata, fast_tx, head))
-               goto out;
-
        nfrags = 1 + skb_shinfo(skb)->nr_frags;
        nfrags += 1 + skb_shinfo(head)->nr_frags;
        frag_tail = &skb_shinfo(head)->frag_list;
@@ -3240,10 +3228,24 @@ static bool ieee80211_amsdu_aggregate(struct ieee80211_sub_if_data *sdata,
        if (max_frags && nfrags > max_frags)
                goto out;
 
-       if (!ieee80211_amsdu_realloc_pad(local, skb, sizeof(rfc1042_header) + 2,
-                                        &subframe_len))
+       if (!ieee80211_amsdu_prepare_head(sdata, fast_tx, head))
                goto out;
 
+       /*
+        * Pad out the previous subframe to a multiple of 4 by adding the
+        * padding to the next one, that's being added. Note that head->len
+        * is the length of the full A-MSDU, but that works since each time
+        * we add a new subframe we pad out the previous one to a multiple
+        * of 4 and thus it no longer matters in the next round.
+        */
+       hdrlen = fast_tx->hdr_len - sizeof(rfc1042_header);
+       if ((head->len - hdrlen) & 3)
+               pad = 4 - ((head->len - hdrlen) & 3);
+
+       if (!ieee80211_amsdu_realloc_pad(local, skb, sizeof(rfc1042_header) +
+                                                    2 + pad))
+               goto out_recalc;
+
        ret = true;
        data = skb_push(skb, ETH_ALEN + 2);
        memmove(data, data + ETH_ALEN + 2, 2 * ETH_ALEN);
@@ -3253,15 +3255,19 @@ static bool ieee80211_amsdu_aggregate(struct ieee80211_sub_if_data *sdata,
        memcpy(data, &len, 2);
        memcpy(data + 2, rfc1042_header, sizeof(rfc1042_header));
 
+       memset(skb_push(skb, pad), 0, pad);
+
        head->len += skb->len;
        head->data_len += skb->len;
        *frag_tail = skb;
 
-       flow->backlog += head->len - orig_len;
-       tin->backlog_bytes += head->len - orig_len;
-
-       fq_recalc_backlog(fq, tin, flow);
+out_recalc:
+       if (head->len != orig_len) {
+               flow->backlog += head->len - orig_len;
+               tin->backlog_bytes += head->len - orig_len;
 
+               fq_recalc_backlog(fq, tin, flow);
+       }
 out:
        spin_unlock_bh(&fq->lock);
 
index 88efda7..716cd64 100644 (file)
@@ -1135,7 +1135,7 @@ void ieee80211_regulatory_limit_wmm_params(struct ieee80211_sub_if_data *sdata,
 {
        struct ieee80211_chanctx_conf *chanctx_conf;
        const struct ieee80211_reg_rule *rrule;
-       struct ieee80211_wmm_ac *wmm_ac;
+       const struct ieee80211_wmm_ac *wmm_ac;
        u16 center_freq = 0;
 
        if (sdata->vif.type != NL80211_IFTYPE_AP &&
@@ -1154,20 +1154,19 @@ void ieee80211_regulatory_limit_wmm_params(struct ieee80211_sub_if_data *sdata,
 
        rrule = freq_reg_info(sdata->wdev.wiphy, MHZ_TO_KHZ(center_freq));
 
-       if (IS_ERR_OR_NULL(rrule) || !rrule->wmm_rule) {
+       if (IS_ERR_OR_NULL(rrule) || !rrule->has_wmm) {
                rcu_read_unlock();
                return;
        }
 
        if (sdata->vif.type == NL80211_IFTYPE_AP)
-               wmm_ac = &rrule->wmm_rule->ap[ac];
+               wmm_ac = &rrule->wmm_rule.ap[ac];
        else
-               wmm_ac = &rrule->wmm_rule->client[ac];
+               wmm_ac = &rrule->wmm_rule.client[ac];
        qparam->cw_min = max_t(u16, qparam->cw_min, wmm_ac->cw_min);
        qparam->cw_max = max_t(u16, qparam->cw_max, wmm_ac->cw_max);
        qparam->aifs = max_t(u8, qparam->aifs, wmm_ac->aifsn);
-       qparam->txop = !qparam->txop ? wmm_ac->cot / 32 :
-               min_t(u16, qparam->txop, wmm_ac->cot / 32);
+       qparam->txop = min_t(u16, qparam->txop, wmm_ac->cot / 32);
        rcu_read_unlock();
 }
 
index 71709c1..f61c306 100644 (file)
@@ -771,13 +771,13 @@ config NETFILTER_XT_TARGET_CHECKSUM
        depends on NETFILTER_ADVANCED
        ---help---
          This option adds a `CHECKSUM' target, which can be used in the iptables mangle
-         table.
+         table to work around buggy DHCP clients in virtualized environments.
 
-         You can use this target to compute and fill in the checksum in
-         a packet that lacks a checksum.  This is particularly useful,
-         if you need to work around old applications such as dhcp clients,
-         that do not work well with checksum offloads, but don't want to disable
-         checksum offload in your device.
+         Some old DHCP clients drop packets because they are not aware
+         that the checksum would normally be offloaded to hardware and
+         thus should be considered valid.
+         This target can be used to fill in the checksum using iptables
+         when such packets are sent via a virtual network device.
 
          To compile it as a module, choose M here.  If unsure, say N.
 
index 9f14b0d..51c5d7e 100644 (file)
@@ -776,9 +776,26 @@ static const struct nf_hook_ops ipv6_conntrack_ops[] = {
 };
 #endif
 
+static int nf_ct_tcp_fixup(struct nf_conn *ct, void *_nfproto)
+{
+       u8 nfproto = (unsigned long)_nfproto;
+
+       if (nf_ct_l3num(ct) != nfproto)
+               return 0;
+
+       if (nf_ct_protonum(ct) == IPPROTO_TCP &&
+           ct->proto.tcp.state == TCP_CONNTRACK_ESTABLISHED) {
+               ct->proto.tcp.seen[0].td_maxwin = 0;
+               ct->proto.tcp.seen[1].td_maxwin = 0;
+       }
+
+       return 0;
+}
+
 static int nf_ct_netns_do_get(struct net *net, u8 nfproto)
 {
        struct nf_conntrack_net *cnet = net_generic(net, nf_conntrack_net_id);
+       bool fixup_needed = false;
        int err = 0;
 
        mutex_lock(&nf_ct_proto_mutex);
@@ -798,6 +815,8 @@ static int nf_ct_netns_do_get(struct net *net, u8 nfproto)
                                            ARRAY_SIZE(ipv4_conntrack_ops));
                if (err)
                        cnet->users4 = 0;
+               else
+                       fixup_needed = true;
                break;
 #if IS_ENABLED(CONFIG_IPV6)
        case NFPROTO_IPV6:
@@ -814,6 +833,8 @@ static int nf_ct_netns_do_get(struct net *net, u8 nfproto)
                                            ARRAY_SIZE(ipv6_conntrack_ops));
                if (err)
                        cnet->users6 = 0;
+               else
+                       fixup_needed = true;
                break;
 #endif
        default:
@@ -822,6 +843,11 @@ static int nf_ct_netns_do_get(struct net *net, u8 nfproto)
        }
  out_unlock:
        mutex_unlock(&nf_ct_proto_mutex);
+
+       if (fixup_needed)
+               nf_ct_iterate_cleanup_net(net, nf_ct_tcp_fixup,
+                                         (void *)(unsigned long)nfproto, 0, 0);
+
        return err;
 }
 
index 8c58f96..f3f91ed 100644 (file)
@@ -675,7 +675,7 @@ static int nlattr_to_dccp(struct nlattr *cda[], struct nf_conn *ct)
 }
 #endif
 
-#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
+#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
 
 #include <linux/netfilter/nfnetlink.h>
 #include <linux/netfilter/nfnetlink_cttimeout.h>
@@ -697,6 +697,8 @@ static int dccp_timeout_nlattr_to_obj(struct nlattr *tb[],
                        timeouts[i] = ntohl(nla_get_be32(tb[i])) * HZ;
                }
        }
+
+       timeouts[CTA_TIMEOUT_DCCP_UNSPEC] = timeouts[CTA_TIMEOUT_DCCP_REQUEST];
        return 0;
 }
 
@@ -726,7 +728,7 @@ dccp_timeout_nla_policy[CTA_TIMEOUT_DCCP_MAX+1] = {
        [CTA_TIMEOUT_DCCP_CLOSING]      = { .type = NLA_U32 },
        [CTA_TIMEOUT_DCCP_TIMEWAIT]     = { .type = NLA_U32 },
 };
-#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
+#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
 
 #ifdef CONFIG_SYSCTL
 /* template, data assigned later */
@@ -827,6 +829,11 @@ static int dccp_init_net(struct net *net, u_int16_t proto)
                dn->dccp_timeout[CT_DCCP_CLOSEREQ]      = 64 * HZ;
                dn->dccp_timeout[CT_DCCP_CLOSING]       = 64 * HZ;
                dn->dccp_timeout[CT_DCCP_TIMEWAIT]      = 2 * DCCP_MSL;
+
+               /* timeouts[0] is unused, make it same as SYN_SENT so
+                * ->timeouts[0] contains 'new' timeout, like udp or icmp.
+                */
+               dn->dccp_timeout[CT_DCCP_NONE] = dn->dccp_timeout[CT_DCCP_REQUEST];
        }
 
        return dccp_kmemdup_sysctl_table(net, pn, dn);
@@ -856,7 +863,7 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_dccp4 = {
        .nlattr_to_tuple        = nf_ct_port_nlattr_to_tuple,
        .nla_policy             = nf_ct_port_nla_policy,
 #endif
-#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
+#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
        .ctnl_timeout           = {
                .nlattr_to_obj  = dccp_timeout_nlattr_to_obj,
                .obj_to_nlattr  = dccp_timeout_obj_to_nlattr,
@@ -864,7 +871,7 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_dccp4 = {
                .obj_size       = sizeof(unsigned int) * CT_DCCP_MAX,
                .nla_policy     = dccp_timeout_nla_policy,
        },
-#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
+#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
        .init_net               = dccp_init_net,
        .get_net_proto          = dccp_get_net_proto,
 };
@@ -889,7 +896,7 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_dccp6 = {
        .nlattr_to_tuple        = nf_ct_port_nlattr_to_tuple,
        .nla_policy             = nf_ct_port_nla_policy,
 #endif
-#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
+#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
        .ctnl_timeout           = {
                .nlattr_to_obj  = dccp_timeout_nlattr_to_obj,
                .obj_to_nlattr  = dccp_timeout_obj_to_nlattr,
@@ -897,7 +904,7 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_dccp6 = {
                .obj_size       = sizeof(unsigned int) * CT_DCCP_MAX,
                .nla_policy     = dccp_timeout_nla_policy,
        },
-#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
+#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
        .init_net               = dccp_init_net,
        .get_net_proto          = dccp_get_net_proto,
 };
index ac4a0b2..1df3244 100644 (file)
@@ -70,7 +70,7 @@ static bool generic_new(struct nf_conn *ct, const struct sk_buff *skb,
        return ret;
 }
 
-#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
+#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
 
 #include <linux/netfilter/nfnetlink.h>
 #include <linux/netfilter/nfnetlink_cttimeout.h>
@@ -113,7 +113,7 @@ static const struct nla_policy
 generic_timeout_nla_policy[CTA_TIMEOUT_GENERIC_MAX+1] = {
        [CTA_TIMEOUT_GENERIC_TIMEOUT]   = { .type = NLA_U32 },
 };
-#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
+#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
 
 #ifdef CONFIG_SYSCTL
 static struct ctl_table generic_sysctl_table[] = {
@@ -164,7 +164,7 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_generic =
        .pkt_to_tuple           = generic_pkt_to_tuple,
        .packet                 = generic_packet,
        .new                    = generic_new,
-#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
+#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
        .ctnl_timeout           = {
                .nlattr_to_obj  = generic_timeout_nlattr_to_obj,
                .obj_to_nlattr  = generic_timeout_obj_to_nlattr,
@@ -172,7 +172,7 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_generic =
                .obj_size       = sizeof(unsigned int),
                .nla_policy     = generic_timeout_nla_policy,
        },
-#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
+#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
        .init_net               = generic_init_net,
        .get_net_proto          = generic_get_net_proto,
 };
index d163225..650eb4f 100644 (file)
@@ -285,7 +285,7 @@ static void gre_destroy(struct nf_conn *ct)
                nf_ct_gre_keymap_destroy(master);
 }
 
-#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
+#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
 
 #include <linux/netfilter/nfnetlink.h>
 #include <linux/netfilter/nfnetlink_cttimeout.h>
@@ -334,7 +334,7 @@ gre_timeout_nla_policy[CTA_TIMEOUT_GRE_MAX+1] = {
        [CTA_TIMEOUT_GRE_UNREPLIED]     = { .type = NLA_U32 },
        [CTA_TIMEOUT_GRE_REPLIED]       = { .type = NLA_U32 },
 };
-#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
+#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
 
 static int gre_init_net(struct net *net, u_int16_t proto)
 {
@@ -367,7 +367,7 @@ static const struct nf_conntrack_l4proto nf_conntrack_l4proto_gre4 = {
        .nlattr_to_tuple = nf_ct_port_nlattr_to_tuple,
        .nla_policy      = nf_ct_port_nla_policy,
 #endif
-#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
+#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
        .ctnl_timeout    = {
                .nlattr_to_obj  = gre_timeout_nlattr_to_obj,
                .obj_to_nlattr  = gre_timeout_obj_to_nlattr,
@@ -375,7 +375,7 @@ static const struct nf_conntrack_l4proto nf_conntrack_l4proto_gre4 = {
                .obj_size       = sizeof(unsigned int) * GRE_CT_MAX,
                .nla_policy     = gre_timeout_nla_policy,
        },
-#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
+#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
        .net_id         = &proto_gre_net_id,
        .init_net       = gre_init_net,
 };
index 036670b..43c7e1a 100644 (file)
@@ -273,7 +273,7 @@ static unsigned int icmp_nlattr_tuple_size(void)
 }
 #endif
 
-#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
+#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
 
 #include <linux/netfilter/nfnetlink.h>
 #include <linux/netfilter/nfnetlink_cttimeout.h>
@@ -313,7 +313,7 @@ static const struct nla_policy
 icmp_timeout_nla_policy[CTA_TIMEOUT_ICMP_MAX+1] = {
        [CTA_TIMEOUT_ICMP_TIMEOUT]      = { .type = NLA_U32 },
 };
-#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
+#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
 
 #ifdef CONFIG_SYSCTL
 static struct ctl_table icmp_sysctl_table[] = {
@@ -374,7 +374,7 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_icmp =
        .nlattr_to_tuple        = icmp_nlattr_to_tuple,
        .nla_policy             = icmp_nla_policy,
 #endif
-#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
+#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
        .ctnl_timeout           = {
                .nlattr_to_obj  = icmp_timeout_nlattr_to_obj,
                .obj_to_nlattr  = icmp_timeout_obj_to_nlattr,
@@ -382,7 +382,7 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_icmp =
                .obj_size       = sizeof(unsigned int),
                .nla_policy     = icmp_timeout_nla_policy,
        },
-#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
+#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
        .init_net               = icmp_init_net,
        .get_net_proto          = icmp_get_net_proto,
 };
index bed07b9..97e40f7 100644 (file)
@@ -274,7 +274,7 @@ static unsigned int icmpv6_nlattr_tuple_size(void)
 }
 #endif
 
-#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
+#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
 
 #include <linux/netfilter/nfnetlink.h>
 #include <linux/netfilter/nfnetlink_cttimeout.h>
@@ -314,7 +314,7 @@ static const struct nla_policy
 icmpv6_timeout_nla_policy[CTA_TIMEOUT_ICMPV6_MAX+1] = {
        [CTA_TIMEOUT_ICMPV6_TIMEOUT]    = { .type = NLA_U32 },
 };
-#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
+#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
 
 #ifdef CONFIG_SYSCTL
 static struct ctl_table icmpv6_sysctl_table[] = {
@@ -373,7 +373,7 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_icmpv6 =
        .nlattr_to_tuple        = icmpv6_nlattr_to_tuple,
        .nla_policy             = icmpv6_nla_policy,
 #endif
-#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
+#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
        .ctnl_timeout           = {
                .nlattr_to_obj  = icmpv6_timeout_nlattr_to_obj,
                .obj_to_nlattr  = icmpv6_timeout_obj_to_nlattr,
@@ -381,7 +381,7 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_icmpv6 =
                .obj_size       = sizeof(unsigned int),
                .nla_policy     = icmpv6_timeout_nla_policy,
        },
-#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
+#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
        .init_net               = icmpv6_init_net,
        .get_net_proto          = icmpv6_get_net_proto,
 };
index 8d1e085..e4d738d 100644 (file)
@@ -591,7 +591,7 @@ static int nlattr_to_sctp(struct nlattr *cda[], struct nf_conn *ct)
 }
 #endif
 
-#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
+#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
 
 #include <linux/netfilter/nfnetlink.h>
 #include <linux/netfilter/nfnetlink_cttimeout.h>
@@ -613,6 +613,8 @@ static int sctp_timeout_nlattr_to_obj(struct nlattr *tb[],
                        timeouts[i] = ntohl(nla_get_be32(tb[i])) * HZ;
                }
        }
+
+       timeouts[CTA_TIMEOUT_SCTP_UNSPEC] = timeouts[CTA_TIMEOUT_SCTP_CLOSED];
        return 0;
 }
 
@@ -644,7 +646,7 @@ sctp_timeout_nla_policy[CTA_TIMEOUT_SCTP_MAX+1] = {
        [CTA_TIMEOUT_SCTP_HEARTBEAT_SENT]       = { .type = NLA_U32 },
        [CTA_TIMEOUT_SCTP_HEARTBEAT_ACKED]      = { .type = NLA_U32 },
 };
-#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
+#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
 
 
 #ifdef CONFIG_SYSCTL
@@ -743,6 +745,11 @@ static int sctp_init_net(struct net *net, u_int16_t proto)
 
                for (i = 0; i < SCTP_CONNTRACK_MAX; i++)
                        sn->timeouts[i] = sctp_timeouts[i];
+
+               /* timeouts[0] is unused, init it so ->timeouts[0] contains
+                * 'new' timeout, like udp or icmp.
+                */
+               sn->timeouts[0] = sctp_timeouts[SCTP_CONNTRACK_CLOSED];
        }
 
        return sctp_kmemdup_sysctl_table(pn, sn);
@@ -773,7 +780,7 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_sctp4 = {
        .nlattr_to_tuple        = nf_ct_port_nlattr_to_tuple,
        .nla_policy             = nf_ct_port_nla_policy,
 #endif
-#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
+#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
        .ctnl_timeout           = {
                .nlattr_to_obj  = sctp_timeout_nlattr_to_obj,
                .obj_to_nlattr  = sctp_timeout_obj_to_nlattr,
@@ -781,7 +788,7 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_sctp4 = {
                .obj_size       = sizeof(unsigned int) * SCTP_CONNTRACK_MAX,
                .nla_policy     = sctp_timeout_nla_policy,
        },
-#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
+#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
        .init_net               = sctp_init_net,
        .get_net_proto          = sctp_get_net_proto,
 };
@@ -806,7 +813,8 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_sctp6 = {
        .nlattr_tuple_size      = nf_ct_port_nlattr_tuple_size,
        .nlattr_to_tuple        = nf_ct_port_nlattr_to_tuple,
        .nla_policy             = nf_ct_port_nla_policy,
-#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
+#endif
+#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
        .ctnl_timeout           = {
                .nlattr_to_obj  = sctp_timeout_nlattr_to_obj,
                .obj_to_nlattr  = sctp_timeout_obj_to_nlattr,
@@ -814,8 +822,7 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_sctp6 = {
                .obj_size       = sizeof(unsigned int) * SCTP_CONNTRACK_MAX,
                .nla_policy     = sctp_timeout_nla_policy,
        },
-#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
-#endif
+#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
        .init_net               = sctp_init_net,
        .get_net_proto          = sctp_get_net_proto,
 };
index d80d322..b4bdf9e 100644 (file)
@@ -1279,7 +1279,7 @@ static unsigned int tcp_nlattr_tuple_size(void)
 }
 #endif
 
-#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
+#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
 
 #include <linux/netfilter/nfnetlink.h>
 #include <linux/netfilter/nfnetlink_cttimeout.h>
@@ -1301,6 +1301,7 @@ static int tcp_timeout_nlattr_to_obj(struct nlattr *tb[],
                timeouts[TCP_CONNTRACK_SYN_SENT] =
                        ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_SYN_SENT]))*HZ;
        }
+
        if (tb[CTA_TIMEOUT_TCP_SYN_RECV]) {
                timeouts[TCP_CONNTRACK_SYN_RECV] =
                        ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_SYN_RECV]))*HZ;
@@ -1341,6 +1342,8 @@ static int tcp_timeout_nlattr_to_obj(struct nlattr *tb[],
                timeouts[TCP_CONNTRACK_UNACK] =
                        ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_UNACK]))*HZ;
        }
+
+       timeouts[CTA_TIMEOUT_TCP_UNSPEC] = timeouts[CTA_TIMEOUT_TCP_SYN_SENT];
        return 0;
 }
 
@@ -1391,7 +1394,7 @@ static const struct nla_policy tcp_timeout_nla_policy[CTA_TIMEOUT_TCP_MAX+1] = {
        [CTA_TIMEOUT_TCP_RETRANS]       = { .type = NLA_U32 },
        [CTA_TIMEOUT_TCP_UNACK]         = { .type = NLA_U32 },
 };
-#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
+#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
 
 #ifdef CONFIG_SYSCTL
 static struct ctl_table tcp_sysctl_table[] = {
@@ -1518,6 +1521,10 @@ static int tcp_init_net(struct net *net, u_int16_t proto)
                for (i = 0; i < TCP_CONNTRACK_TIMEOUT_MAX; i++)
                        tn->timeouts[i] = tcp_timeouts[i];
 
+               /* timeouts[0] is unused, make it same as SYN_SENT so
+                * ->timeouts[0] contains 'new' timeout, like udp or icmp.
+                */
+               tn->timeouts[0] = tcp_timeouts[TCP_CONNTRACK_SYN_SENT];
                tn->tcp_loose = nf_ct_tcp_loose;
                tn->tcp_be_liberal = nf_ct_tcp_be_liberal;
                tn->tcp_max_retrans = nf_ct_tcp_max_retrans;
@@ -1551,7 +1558,7 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp4 =
        .nlattr_size            = TCP_NLATTR_SIZE,
        .nla_policy             = nf_ct_port_nla_policy,
 #endif
-#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
+#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
        .ctnl_timeout           = {
                .nlattr_to_obj  = tcp_timeout_nlattr_to_obj,
                .obj_to_nlattr  = tcp_timeout_obj_to_nlattr,
@@ -1560,7 +1567,7 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp4 =
                                        TCP_CONNTRACK_TIMEOUT_MAX,
                .nla_policy     = tcp_timeout_nla_policy,
        },
-#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
+#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
        .init_net               = tcp_init_net,
        .get_net_proto          = tcp_get_net_proto,
 };
@@ -1586,7 +1593,7 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp6 =
        .nlattr_tuple_size      = tcp_nlattr_tuple_size,
        .nla_policy             = nf_ct_port_nla_policy,
 #endif
-#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
+#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
        .ctnl_timeout           = {
                .nlattr_to_obj  = tcp_timeout_nlattr_to_obj,
                .obj_to_nlattr  = tcp_timeout_obj_to_nlattr,
@@ -1595,7 +1602,7 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp6 =
                                        TCP_CONNTRACK_TIMEOUT_MAX,
                .nla_policy     = tcp_timeout_nla_policy,
        },
-#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
+#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
        .init_net               = tcp_init_net,
        .get_net_proto          = tcp_get_net_proto,
 };
index 7a1b898..3065fb8 100644 (file)
@@ -171,7 +171,7 @@ static int udp_error(struct net *net, struct nf_conn *tmpl, struct sk_buff *skb,
        return NF_ACCEPT;
 }
 
-#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
+#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
 
 #include <linux/netfilter/nfnetlink.h>
 #include <linux/netfilter/nfnetlink_cttimeout.h>
@@ -221,7 +221,7 @@ udp_timeout_nla_policy[CTA_TIMEOUT_UDP_MAX+1] = {
        [CTA_TIMEOUT_UDP_UNREPLIED]     = { .type = NLA_U32 },
        [CTA_TIMEOUT_UDP_REPLIED]       = { .type = NLA_U32 },
 };
-#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
+#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
 
 #ifdef CONFIG_SYSCTL
 static struct ctl_table udp_sysctl_table[] = {
@@ -292,7 +292,7 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_udp4 =
        .nlattr_tuple_size      = nf_ct_port_nlattr_tuple_size,
        .nla_policy             = nf_ct_port_nla_policy,
 #endif
-#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
+#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
        .ctnl_timeout           = {
                .nlattr_to_obj  = udp_timeout_nlattr_to_obj,
                .obj_to_nlattr  = udp_timeout_obj_to_nlattr,
@@ -300,7 +300,7 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_udp4 =
                .obj_size       = sizeof(unsigned int) * CTA_TIMEOUT_UDP_MAX,
                .nla_policy     = udp_timeout_nla_policy,
        },
-#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
+#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
        .init_net               = udp_init_net,
        .get_net_proto          = udp_get_net_proto,
 };
@@ -321,7 +321,7 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_udplite4 =
        .nlattr_tuple_size      = nf_ct_port_nlattr_tuple_size,
        .nla_policy             = nf_ct_port_nla_policy,
 #endif
-#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
+#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
        .ctnl_timeout           = {
                .nlattr_to_obj  = udp_timeout_nlattr_to_obj,
                .obj_to_nlattr  = udp_timeout_obj_to_nlattr,
@@ -329,7 +329,7 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_udplite4 =
                .obj_size       = sizeof(unsigned int) * CTA_TIMEOUT_UDP_MAX,
                .nla_policy     = udp_timeout_nla_policy,
        },
-#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
+#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
        .init_net               = udp_init_net,
        .get_net_proto          = udp_get_net_proto,
 };
@@ -350,7 +350,7 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_udp6 =
        .nlattr_tuple_size      = nf_ct_port_nlattr_tuple_size,
        .nla_policy             = nf_ct_port_nla_policy,
 #endif
-#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
+#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
        .ctnl_timeout           = {
                .nlattr_to_obj  = udp_timeout_nlattr_to_obj,
                .obj_to_nlattr  = udp_timeout_obj_to_nlattr,
@@ -358,7 +358,7 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_udp6 =
                .obj_size       = sizeof(unsigned int) * CTA_TIMEOUT_UDP_MAX,
                .nla_policy     = udp_timeout_nla_policy,
        },
-#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
+#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
        .init_net               = udp_init_net,
        .get_net_proto          = udp_get_net_proto,
 };
@@ -379,7 +379,7 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_udplite6 =
        .nlattr_tuple_size      = nf_ct_port_nlattr_tuple_size,
        .nla_policy             = nf_ct_port_nla_policy,
 #endif
-#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
+#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
        .ctnl_timeout           = {
                .nlattr_to_obj  = udp_timeout_nlattr_to_obj,
                .obj_to_nlattr  = udp_timeout_obj_to_nlattr,
@@ -387,10 +387,9 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_udplite6 =
                .obj_size       = sizeof(unsigned int) * CTA_TIMEOUT_UDP_MAX,
                .nla_policy     = udp_timeout_nla_policy,
        },
-#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
+#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
        .init_net               = udp_init_net,
        .get_net_proto          = udp_get_net_proto,
 };
 EXPORT_SYMBOL_GPL(nf_conntrack_l4proto_udplite6);
 #endif
-#include <net/netfilter/nf_conntrack_timeout.h>
index 1dca568..2cfb173 100644 (file)
@@ -4637,6 +4637,7 @@ static int nft_flush_set(const struct nft_ctx *ctx,
        }
        set->ndeact++;
 
+       nft_set_elem_deactivate(ctx->net, set, elem);
        nft_trans_elem_set(trans) = set;
        nft_trans_elem(trans) = *elem;
        list_add_tail(&trans->list, &ctx->net->nft.commit_list);
index d46a236..a30f8ba 100644 (file)
@@ -489,8 +489,8 @@ err:
        return err;
 }
 
-static struct ctnl_timeout *
-ctnl_timeout_find_get(struct net *net, const char *name)
+static struct nf_ct_timeout *ctnl_timeout_find_get(struct net *net,
+                                                  const char *name)
 {
        struct ctnl_timeout *timeout, *matching = NULL;
 
@@ -509,7 +509,7 @@ ctnl_timeout_find_get(struct net *net, const char *name)
                break;
        }
 err:
-       return matching;
+       return matching ? &matching->timeout : NULL;
 }
 
 static void ctnl_timeout_put(struct nf_ct_timeout *t)
index ea4ba55..d33094f 100644 (file)
@@ -233,6 +233,7 @@ static void nfqnl_reinject(struct nf_queue_entry *entry, unsigned int verdict)
        int err;
 
        if (verdict == NF_ACCEPT ||
+           verdict == NF_REPEAT ||
            verdict == NF_STOP) {
                rcu_read_lock();
                ct_hook = rcu_dereference(nf_ct_hook);
index 26a8bae..5dd8774 100644 (file)
@@ -799,7 +799,7 @@ err:
 }
 
 struct nft_ct_timeout_obj {
-       struct nf_conn          *tmpl;
+       struct nf_ct_timeout    *timeout;
        u8                      l4proto;
 };
 
@@ -809,26 +809,42 @@ static void nft_ct_timeout_obj_eval(struct nft_object *obj,
 {
        const struct nft_ct_timeout_obj *priv = nft_obj_data(obj);
        struct nf_conn *ct = (struct nf_conn *)skb_nfct(pkt->skb);
-       struct sk_buff *skb = pkt->skb;
+       struct nf_conn_timeout *timeout;
+       const unsigned int *values;
+
+       if (priv->l4proto != pkt->tprot)
+               return;
 
-       if (ct ||
-           priv->l4proto != pkt->tprot)
+       if (!ct || nf_ct_is_template(ct) || nf_ct_is_confirmed(ct))
                return;
 
-       nf_ct_set(skb, priv->tmpl, IP_CT_NEW);
+       timeout = nf_ct_timeout_find(ct);
+       if (!timeout) {
+               timeout = nf_ct_timeout_ext_add(ct, priv->timeout, GFP_ATOMIC);
+               if (!timeout) {
+                       regs->verdict.code = NF_DROP;
+                       return;
+               }
+       }
+
+       rcu_assign_pointer(timeout->timeout, priv->timeout);
+
+       /* adjust the timeout as per 'new' state. ct is unconfirmed,
+        * so the current timestamp must not be added.
+        */
+       values = nf_ct_timeout_data(timeout);
+       if (values)
+               nf_ct_refresh(ct, pkt->skb, values[0]);
 }
 
 static int nft_ct_timeout_obj_init(const struct nft_ctx *ctx,
                                   const struct nlattr * const tb[],
                                   struct nft_object *obj)
 {
-       const struct nf_conntrack_zone *zone = &nf_ct_zone_dflt;
        struct nft_ct_timeout_obj *priv = nft_obj_data(obj);
        const struct nf_conntrack_l4proto *l4proto;
-       struct nf_conn_timeout *timeout_ext;
        struct nf_ct_timeout *timeout;
        int l3num = ctx->family;
-       struct nf_conn *tmpl;
        __u8 l4num;
        int ret;
 
@@ -863,28 +879,14 @@ static int nft_ct_timeout_obj_init(const struct nft_ctx *ctx,
 
        timeout->l3num = l3num;
        timeout->l4proto = l4proto;
-       tmpl = nf_ct_tmpl_alloc(ctx->net, zone, GFP_ATOMIC);
-       if (!tmpl) {
-               ret = -ENOMEM;
-               goto err_free_timeout;
-       }
-
-       timeout_ext = nf_ct_timeout_ext_add(tmpl, timeout, GFP_ATOMIC);
-       if (!timeout_ext) {
-               ret = -ENOMEM;
-               goto err_free_tmpl;
-       }
 
        ret = nf_ct_netns_get(ctx->net, ctx->family);
        if (ret < 0)
-               goto err_free_tmpl;
-
-       priv->tmpl = tmpl;
+               goto err_free_timeout;
 
+       priv->timeout = timeout;
        return 0;
 
-err_free_tmpl:
-       nf_ct_tmpl_free(tmpl);
 err_free_timeout:
        kfree(timeout);
 err_proto_put:
@@ -896,22 +898,19 @@ static void nft_ct_timeout_obj_destroy(const struct nft_ctx *ctx,
                                       struct nft_object *obj)
 {
        struct nft_ct_timeout_obj *priv = nft_obj_data(obj);
-       struct nf_conn_timeout *t = nf_ct_timeout_find(priv->tmpl);
-       struct nf_ct_timeout *timeout;
+       struct nf_ct_timeout *timeout = priv->timeout;
 
-       timeout = rcu_dereference_raw(t->timeout);
        nf_ct_untimeout(ctx->net, timeout);
        nf_ct_l4proto_put(timeout->l4proto);
        nf_ct_netns_put(ctx->net, ctx->family);
-       nf_ct_tmpl_free(priv->tmpl);
+       kfree(priv->timeout);
 }
 
 static int nft_ct_timeout_obj_dump(struct sk_buff *skb,
                                   struct nft_object *obj, bool reset)
 {
        const struct nft_ct_timeout_obj *priv = nft_obj_data(obj);
-       const struct nf_conn_timeout *t = nf_ct_timeout_find(priv->tmpl);
-       const struct nf_ct_timeout *timeout = rcu_dereference_raw(t->timeout);
+       const struct nf_ct_timeout *timeout = priv->timeout;
        struct nlattr *nest_params;
        int ret;
 
index 9f4151e..6c7aa6a 100644 (file)
@@ -16,6 +16,9 @@
 #include <linux/netfilter/x_tables.h>
 #include <linux/netfilter/xt_CHECKSUM.h>
 
+#include <linux/netfilter_ipv4/ip_tables.h>
+#include <linux/netfilter_ipv6/ip6_tables.h>
+
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Michael S. Tsirkin <mst@redhat.com>");
 MODULE_DESCRIPTION("Xtables: checksum modification");
@@ -25,7 +28,7 @@ MODULE_ALIAS("ip6t_CHECKSUM");
 static unsigned int
 checksum_tg(struct sk_buff *skb, const struct xt_action_param *par)
 {
-       if (skb->ip_summed == CHECKSUM_PARTIAL)
+       if (skb->ip_summed == CHECKSUM_PARTIAL && !skb_is_gso(skb))
                skb_checksum_help(skb);
 
        return XT_CONTINUE;
@@ -34,6 +37,8 @@ checksum_tg(struct sk_buff *skb, const struct xt_action_param *par)
 static int checksum_tg_check(const struct xt_tgchk_param *par)
 {
        const struct xt_CHECKSUM_info *einfo = par->targinfo;
+       const struct ip6t_ip6 *i6 = par->entryinfo;
+       const struct ipt_ip *i4 = par->entryinfo;
 
        if (einfo->operation & ~XT_CHECKSUM_OP_FILL) {
                pr_info_ratelimited("unsupported CHECKSUM operation %x\n",
@@ -43,6 +48,21 @@ static int checksum_tg_check(const struct xt_tgchk_param *par)
        if (!einfo->operation)
                return -EINVAL;
 
+       switch (par->family) {
+       case NFPROTO_IPV4:
+               if (i4->proto == IPPROTO_UDP &&
+                   (i4->invflags & XT_INV_PROTO) == 0)
+                       return 0;
+               break;
+       case NFPROTO_IPV6:
+               if ((i6->flags & IP6T_F_PROTO) &&
+                   i6->proto == IPPROTO_UDP &&
+                   (i6->invflags & XT_INV_PROTO) == 0)
+                       return 0;
+               break;
+       }
+
+       pr_warn_once("CHECKSUM should be avoided.  If really needed, restrict with \"-p udp\" and only use in OUTPUT\n");
        return 0;
 }
 
index dfbdbb2..51d0c25 100644 (file)
@@ -125,6 +125,7 @@ xt_cluster_mt(const struct sk_buff *skb, struct xt_action_param *par)
 static int xt_cluster_mt_checkentry(const struct xt_mtchk_param *par)
 {
        struct xt_cluster_match_info *info = par->matchinfo;
+       int ret;
 
        if (info->total_nodes > XT_CLUSTER_NODES_MAX) {
                pr_info_ratelimited("you have exceeded the maximum number of cluster nodes (%u > %u)\n",
@@ -135,7 +136,17 @@ static int xt_cluster_mt_checkentry(const struct xt_mtchk_param *par)
                pr_info_ratelimited("node mask cannot exceed total number of nodes\n");
                return -EDOM;
        }
-       return 0;
+
+       ret = nf_ct_netns_get(par->net, par->family);
+       if (ret < 0)
+               pr_info_ratelimited("cannot load conntrack support for proto=%u\n",
+                                   par->family);
+       return ret;
+}
+
+static void xt_cluster_mt_destroy(const struct xt_mtdtor_param *par)
+{
+       nf_ct_netns_put(par->net, par->family);
 }
 
 static struct xt_match xt_cluster_match __read_mostly = {
@@ -144,6 +155,7 @@ static struct xt_match xt_cluster_match __read_mostly = {
        .match          = xt_cluster_mt,
        .checkentry     = xt_cluster_mt_checkentry,
        .matchsize      = sizeof(struct xt_cluster_match_info),
+       .destroy        = xt_cluster_mt_destroy,
        .me             = THIS_MODULE,
 };
 
index 9b16402..3e7d259 100644 (file)
@@ -1057,7 +1057,7 @@ static struct xt_match hashlimit_mt_reg[] __read_mostly = {
 static void *dl_seq_start(struct seq_file *s, loff_t *pos)
        __acquires(htable->lock)
 {
-       struct xt_hashlimit_htable *htable = PDE_DATA(file_inode(s->private));
+       struct xt_hashlimit_htable *htable = PDE_DATA(file_inode(s->file));
        unsigned int *bucket;
 
        spin_lock_bh(&htable->lock);
@@ -1074,7 +1074,7 @@ static void *dl_seq_start(struct seq_file *s, loff_t *pos)
 
 static void *dl_seq_next(struct seq_file *s, void *v, loff_t *pos)
 {
-       struct xt_hashlimit_htable *htable = PDE_DATA(file_inode(s->private));
+       struct xt_hashlimit_htable *htable = PDE_DATA(file_inode(s->file));
        unsigned int *bucket = v;
 
        *pos = ++(*bucket);
@@ -1088,7 +1088,7 @@ static void *dl_seq_next(struct seq_file *s, void *v, loff_t *pos)
 static void dl_seq_stop(struct seq_file *s, void *v)
        __releases(htable->lock)
 {
-       struct xt_hashlimit_htable *htable = PDE_DATA(file_inode(s->private));
+       struct xt_hashlimit_htable *htable = PDE_DATA(file_inode(s->file));
        unsigned int *bucket = v;
 
        if (!IS_ERR(bucket))
@@ -1130,7 +1130,7 @@ static void dl_seq_print(struct dsthash_ent *ent, u_int8_t family,
 static int dl_seq_real_show_v2(struct dsthash_ent *ent, u_int8_t family,
                               struct seq_file *s)
 {
-       struct xt_hashlimit_htable *ht = PDE_DATA(file_inode(s->private));
+       struct xt_hashlimit_htable *ht = PDE_DATA(file_inode(s->file));
 
        spin_lock(&ent->lock);
        /* recalculate to show accurate numbers */
@@ -1145,7 +1145,7 @@ static int dl_seq_real_show_v2(struct dsthash_ent *ent, u_int8_t family,
 static int dl_seq_real_show_v1(struct dsthash_ent *ent, u_int8_t family,
                               struct seq_file *s)
 {
-       struct xt_hashlimit_htable *ht = PDE_DATA(file_inode(s->private));
+       struct xt_hashlimit_htable *ht = PDE_DATA(file_inode(s->file));
 
        spin_lock(&ent->lock);
        /* recalculate to show accurate numbers */
@@ -1160,7 +1160,7 @@ static int dl_seq_real_show_v1(struct dsthash_ent *ent, u_int8_t family,
 static int dl_seq_real_show(struct dsthash_ent *ent, u_int8_t family,
                            struct seq_file *s)
 {
-       struct xt_hashlimit_htable *ht = PDE_DATA(file_inode(s->private));
+       struct xt_hashlimit_htable *ht = PDE_DATA(file_inode(s->file));
 
        spin_lock(&ent->lock);
        /* recalculate to show accurate numbers */
@@ -1174,7 +1174,7 @@ static int dl_seq_real_show(struct dsthash_ent *ent, u_int8_t family,
 
 static int dl_seq_show_v2(struct seq_file *s, void *v)
 {
-       struct xt_hashlimit_htable *htable = PDE_DATA(file_inode(s->private));
+       struct xt_hashlimit_htable *htable = PDE_DATA(file_inode(s->file));
        unsigned int *bucket = (unsigned int *)v;
        struct dsthash_ent *ent;
 
@@ -1188,7 +1188,7 @@ static int dl_seq_show_v2(struct seq_file *s, void *v)
 
 static int dl_seq_show_v1(struct seq_file *s, void *v)
 {
-       struct xt_hashlimit_htable *htable = PDE_DATA(file_inode(s->private));
+       struct xt_hashlimit_htable *htable = PDE_DATA(file_inode(s->file));
        unsigned int *bucket = v;
        struct dsthash_ent *ent;
 
@@ -1202,7 +1202,7 @@ static int dl_seq_show_v1(struct seq_file *s, void *v)
 
 static int dl_seq_show(struct seq_file *s, void *v)
 {
-       struct xt_hashlimit_htable *htable = PDE_DATA(file_inode(s->private));
+       struct xt_hashlimit_htable *htable = PDE_DATA(file_inode(s->file));
        unsigned int *bucket = v;
        struct dsthash_ent *ent;
 
index 5610061..75c92a8 100644 (file)
@@ -4137,36 +4137,52 @@ static const struct vm_operations_struct packet_mmap_ops = {
        .close  =       packet_mm_close,
 };
 
-static void free_pg_vec(struct pgv *pg_vec, unsigned int len)
+static void free_pg_vec(struct pgv *pg_vec, unsigned int order,
+                       unsigned int len)
 {
        int i;
 
        for (i = 0; i < len; i++) {
                if (likely(pg_vec[i].buffer)) {
-                       kvfree(pg_vec[i].buffer);
+                       if (is_vmalloc_addr(pg_vec[i].buffer))
+                               vfree(pg_vec[i].buffer);
+                       else
+                               free_pages((unsigned long)pg_vec[i].buffer,
+                                          order);
                        pg_vec[i].buffer = NULL;
                }
        }
        kfree(pg_vec);
 }
 
-static char *alloc_one_pg_vec_page(unsigned long size)
+static char *alloc_one_pg_vec_page(unsigned long order)
 {
        char *buffer;
+       gfp_t gfp_flags = GFP_KERNEL | __GFP_COMP |
+                         __GFP_ZERO | __GFP_NOWARN | __GFP_NORETRY;
 
-       buffer = kvzalloc(size, GFP_KERNEL);
+       buffer = (char *) __get_free_pages(gfp_flags, order);
        if (buffer)
                return buffer;
 
-       buffer = kvzalloc(size, GFP_KERNEL | __GFP_RETRY_MAYFAIL);
+       /* __get_free_pages failed, fall back to vmalloc */
+       buffer = vzalloc(array_size((1 << order), PAGE_SIZE));
+       if (buffer)
+               return buffer;
 
-       return buffer;
+       /* vmalloc failed, lets dig into swap here */
+       gfp_flags &= ~__GFP_NORETRY;
+       buffer = (char *) __get_free_pages(gfp_flags, order);
+       if (buffer)
+               return buffer;
+
+       /* complete and utter failure */
+       return NULL;
 }
 
-static struct pgv *alloc_pg_vec(struct tpacket_req *req)
+static struct pgv *alloc_pg_vec(struct tpacket_req *req, int order)
 {
        unsigned int block_nr = req->tp_block_nr;
-       unsigned long size = req->tp_block_size;
        struct pgv *pg_vec;
        int i;
 
@@ -4175,7 +4191,7 @@ static struct pgv *alloc_pg_vec(struct tpacket_req *req)
                goto out;
 
        for (i = 0; i < block_nr; i++) {
-               pg_vec[i].buffer = alloc_one_pg_vec_page(size);
+               pg_vec[i].buffer = alloc_one_pg_vec_page(order);
                if (unlikely(!pg_vec[i].buffer))
                        goto out_free_pgvec;
        }
@@ -4184,7 +4200,7 @@ out:
        return pg_vec;
 
 out_free_pgvec:
-       free_pg_vec(pg_vec, block_nr);
+       free_pg_vec(pg_vec, order, block_nr);
        pg_vec = NULL;
        goto out;
 }
@@ -4194,9 +4210,9 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
 {
        struct pgv *pg_vec = NULL;
        struct packet_sock *po = pkt_sk(sk);
+       int was_running, order = 0;
        struct packet_ring_buffer *rb;
        struct sk_buff_head *rb_queue;
-       int was_running;
        __be16 num;
        int err = -EINVAL;
        /* Added to avoid minimal code churn */
@@ -4258,7 +4274,8 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
                        goto out;
 
                err = -ENOMEM;
-               pg_vec = alloc_pg_vec(req);
+               order = get_order(req->tp_block_size);
+               pg_vec = alloc_pg_vec(req, order);
                if (unlikely(!pg_vec))
                        goto out;
                switch (po->tp_version) {
@@ -4312,6 +4329,7 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
                rb->frame_size = req->tp_frame_size;
                spin_unlock_bh(&rb_queue->lock);
 
+               swap(rb->pg_vec_order, order);
                swap(rb->pg_vec_len, req->tp_block_nr);
 
                rb->pg_vec_pages = req->tp_block_size/PAGE_SIZE;
@@ -4337,7 +4355,7 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
        }
 
        if (pg_vec)
-               free_pg_vec(pg_vec, req->tp_block_nr);
+               free_pg_vec(pg_vec, order, req->tp_block_nr);
 out:
        return err;
 }
index 8f50036..3bb7c5f 100644 (file)
@@ -64,6 +64,7 @@ struct packet_ring_buffer {
        unsigned int            frame_size;
        unsigned int            frame_max;
 
+       unsigned int            pg_vec_order;
        unsigned int            pg_vec_pages;
        unsigned int            pg_vec_len;
 
index 01b3bd6..b909211 100644 (file)
@@ -1,6 +1,6 @@
 
 config RDS
-       tristate "The RDS Protocol"
+       tristate "The Reliable Datagram Sockets Protocol"
        depends on INET
        ---help---
          The RDS (Reliable Datagram Sockets) protocol provides reliable,
index 3ab5578..762d2c6 100644 (file)
@@ -76,11 +76,13 @@ struct rds_sock *rds_find_bound(const struct in6_addr *addr, __be16 port,
        struct rds_sock *rs;
 
        __rds_create_bind_key(key, addr, port, scope_id);
-       rs = rhashtable_lookup_fast(&bind_hash_table, key, ht_parms);
+       rcu_read_lock();
+       rs = rhashtable_lookup(&bind_hash_table, key, ht_parms);
        if (rs && !sock_flag(rds_rs_to_sk(rs), SOCK_DEAD))
                rds_sock_addref(rs);
        else
                rs = NULL;
+       rcu_read_unlock();
 
        rdsdebug("returning rs %p for %pI6c:%u\n", rs, addr,
                 ntohs(port));
@@ -235,6 +237,7 @@ int rds_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
                goto out;
        }
 
+       sock_set_flag(sk, SOCK_RCU_FREE);
        ret = rds_add_bound(rs, binding_addr, &port, scope_id);
        if (ret)
                goto out;
index c1d9764..eba75c1 100644 (file)
@@ -341,15 +341,10 @@ static int rds6_ib_conn_info_visitor(struct rds_connection *conn,
 
        if (rds_conn_state(conn) == RDS_CONN_UP) {
                struct rds_ib_device *rds_ibdev;
-               struct rdma_dev_addr *dev_addr;
 
                ic = conn->c_transport_data;
-               dev_addr = &ic->i_cm_id->route.addr.dev_addr;
-               rdma_addr_get_sgid(dev_addr,
-                                  (union ib_gid *)&iinfo6->src_gid);
-               rdma_addr_get_dgid(dev_addr,
-                                  (union ib_gid *)&iinfo6->dst_gid);
-
+               rdma_read_gids(ic->i_cm_id, (union ib_gid *)&iinfo6->src_gid,
+                              (union ib_gid *)&iinfo6->dst_gid);
                rds_ibdev = ic->rds_ibdev;
                iinfo6->max_send_wr = ic->i_send_ring.w_nr;
                iinfo6->max_recv_wr = ic->i_recv_ring.w_nr;
index 00192a9..0f84658 100644 (file)
@@ -20,6 +20,7 @@
 #include <linux/init.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
+#include <linux/mod_devicetable.h>
 #include <linux/rfkill.h>
 #include <linux/platform_device.h>
 #include <linux/clk.h>
index db83dac..e12f8ef 100644 (file)
@@ -662,6 +662,13 @@ int tcf_action_destroy(struct tc_action *actions[], int bind)
        return ret;
 }
 
+static int tcf_action_destroy_1(struct tc_action *a, int bind)
+{
+       struct tc_action *actions[] = { a, NULL };
+
+       return tcf_action_destroy(actions, bind);
+}
+
 static int tcf_action_put(struct tc_action *p)
 {
        return __tcf_action_put(p, false);
@@ -881,17 +888,16 @@ struct tc_action *tcf_action_init_1(struct net *net, struct tcf_proto *tp,
        if (TC_ACT_EXT_CMP(a->tcfa_action, TC_ACT_GOTO_CHAIN)) {
                err = tcf_action_goto_chain_init(a, tp);
                if (err) {
-                       struct tc_action *actions[] = { a, NULL };
-
-                       tcf_action_destroy(actions, bind);
+                       tcf_action_destroy_1(a, bind);
                        NL_SET_ERR_MSG(extack, "Failed to init TC action chain");
                        return ERR_PTR(err);
                }
        }
 
        if (!tcf_action_valid(a->tcfa_action)) {
-               NL_SET_ERR_MSG(extack, "invalid action value, using TC_ACT_UNSPEC instead");
-               a->tcfa_action = TC_ACT_UNSPEC;
+               tcf_action_destroy_1(a, bind);
+               NL_SET_ERR_MSG(extack, "Invalid control action value");
+               return ERR_PTR(-EINVAL);
        }
 
        return a;
@@ -1173,6 +1179,7 @@ static int tcf_action_delete(struct net *net, struct tc_action *actions[])
                struct tcf_idrinfo *idrinfo = a->idrinfo;
                u32 act_index = a->tcfa_index;
 
+               actions[i] = NULL;
                if (tcf_action_put(a)) {
                        /* last reference, action was deleted concurrently */
                        module_put(ops->owner);
@@ -1184,7 +1191,6 @@ static int tcf_action_delete(struct net *net, struct tc_action *actions[])
                        if (ret < 0)
                                return ret;
                }
-               actions[i] = NULL;
        }
        return 0;
 }
index 196430a..06a3d48 100644 (file)
@@ -326,6 +326,20 @@ static int __add_metainfo(const struct tcf_meta_ops *ops,
        return ret;
 }
 
+static int add_metainfo_and_get_ops(const struct tcf_meta_ops *ops,
+                                   struct tcf_ife_info *ife, u32 metaid,
+                                   bool exists)
+{
+       int ret;
+
+       if (!try_module_get(ops->owner))
+               return -ENOENT;
+       ret = __add_metainfo(ops, ife, metaid, NULL, 0, true, exists);
+       if (ret)
+               module_put(ops->owner);
+       return ret;
+}
+
 static int add_metainfo(struct tcf_ife_info *ife, u32 metaid, void *metaval,
                        int len, bool exists)
 {
@@ -349,7 +363,7 @@ static int use_all_metadata(struct tcf_ife_info *ife, bool exists)
 
        read_lock(&ife_mod_lock);
        list_for_each_entry(o, &ifeoplist, list) {
-               rc = __add_metainfo(o, ife, o->metaid, NULL, 0, true, exists);
+               rc = add_metainfo_and_get_ops(o, ife, o->metaid, exists);
                if (rc == 0)
                        installed += 1;
        }
@@ -400,7 +414,6 @@ static void _tcf_ife_cleanup(struct tc_action *a)
        struct tcf_meta_info *e, *n;
 
        list_for_each_entry_safe(e, n, &ife->metalist, metalist) {
-               module_put(e->ops->owner);
                list_del(&e->metalist);
                if (e->metaval) {
                        if (e->ops->release)
@@ -408,6 +421,7 @@ static void _tcf_ife_cleanup(struct tc_action *a)
                        else
                                kfree(e->metaval);
                }
+               module_put(e->ops->owner);
                kfree(e);
        }
 }
index 1070340..ad99a99 100644 (file)
@@ -109,16 +109,18 @@ static int tcf_pedit_key_ex_dump(struct sk_buff *skb,
 {
        struct nlattr *keys_start = nla_nest_start(skb, TCA_PEDIT_KEYS_EX);
 
+       if (!keys_start)
+               goto nla_failure;
        for (; n > 0; n--) {
                struct nlattr *key_start;
 
                key_start = nla_nest_start(skb, TCA_PEDIT_KEY_EX);
+               if (!key_start)
+                       goto nla_failure;
 
                if (nla_put_u16(skb, TCA_PEDIT_KEY_EX_HTYPE, keys_ex->htype) ||
-                   nla_put_u16(skb, TCA_PEDIT_KEY_EX_CMD, keys_ex->cmd)) {
-                       nlmsg_trim(skb, keys_start);
-                       return -EINVAL;
-               }
+                   nla_put_u16(skb, TCA_PEDIT_KEY_EX_CMD, keys_ex->cmd))
+                       goto nla_failure;
 
                nla_nest_end(skb, key_start);
 
@@ -128,6 +130,9 @@ static int tcf_pedit_key_ex_dump(struct sk_buff *skb,
        nla_nest_end(skb, keys_start);
 
        return 0;
+nla_failure:
+       nla_nest_cancel(skb, keys_start);
+       return -EINVAL;
 }
 
 static int tcf_pedit_init(struct net *net, struct nlattr *nla,
@@ -418,7 +423,10 @@ static int tcf_pedit_dump(struct sk_buff *skb, struct tc_action *a,
        opt->bindcnt = atomic_read(&p->tcf_bindcnt) - bind;
 
        if (p->tcfp_keys_ex) {
-               tcf_pedit_key_ex_dump(skb, p->tcfp_keys_ex, p->tcfp_nkeys);
+               if (tcf_pedit_key_ex_dump(skb,
+                                         p->tcfp_keys_ex,
+                                         p->tcfp_nkeys))
+                       goto nla_put_failure;
 
                if (nla_put(skb, TCA_PEDIT_PARMS_EX, s, opt))
                        goto nla_put_failure;
index 4207591..681f6f0 100644 (file)
@@ -317,7 +317,7 @@ static int tunnel_key_init(struct net *net, struct nlattr *nla,
                                                  &metadata->u.tun_info,
                                                  opts_len, extack);
                        if (ret < 0)
-                               goto err_out;
+                               goto release_tun_meta;
                }
 
                metadata->u.tun_info.mode |= IP_TUNNEL_INFO_TX;
@@ -333,23 +333,24 @@ static int tunnel_key_init(struct net *net, struct nlattr *nla,
                                     &act_tunnel_key_ops, bind, true);
                if (ret) {
                        NL_SET_ERR_MSG(extack, "Cannot create TC IDR");
-                       goto err_out;
+                       goto release_tun_meta;
                }
 
                ret = ACT_P_CREATED;
        } else if (!ovr) {
-               tcf_idr_release(*a, bind);
                NL_SET_ERR_MSG(extack, "TC IDR already exists");
-               return -EEXIST;
+               ret = -EEXIST;
+               goto release_tun_meta;
        }
 
        t = to_tunnel_key(*a);
 
        params_new = kzalloc(sizeof(*params_new), GFP_KERNEL);
        if (unlikely(!params_new)) {
-               tcf_idr_release(*a, bind);
                NL_SET_ERR_MSG(extack, "Cannot allocate tunnel key parameters");
-               return -ENOMEM;
+               ret = -ENOMEM;
+               exists = true;
+               goto release_tun_meta;
        }
        params_new->tcft_action = parm->t_action;
        params_new->tcft_enc_metadata = metadata;
@@ -367,6 +368,9 @@ static int tunnel_key_init(struct net *net, struct nlattr *nla,
 
        return ret;
 
+release_tun_meta:
+       dst_release(&metadata->dst);
+
 err_out:
        if (exists)
                tcf_idr_release(*a, bind);
@@ -408,8 +412,10 @@ static int tunnel_key_geneve_opts_dump(struct sk_buff *skb,
                    nla_put_u8(skb, TCA_TUNNEL_KEY_ENC_OPT_GENEVE_TYPE,
                               opt->type) ||
                    nla_put(skb, TCA_TUNNEL_KEY_ENC_OPT_GENEVE_DATA,
-                           opt->length * 4, opt + 1))
+                           opt->length * 4, opt + 1)) {
+                       nla_nest_cancel(skb, start);
                        return -EMSGSIZE;
+               }
 
                len -= sizeof(struct geneve_opt) + opt->length * 4;
                src += sizeof(struct geneve_opt) + opt->length * 4;
@@ -423,7 +429,7 @@ static int tunnel_key_opts_dump(struct sk_buff *skb,
                                const struct ip_tunnel_info *info)
 {
        struct nlattr *start;
-       int err;
+       int err = -EINVAL;
 
        if (!info->options_len)
                return 0;
@@ -435,9 +441,11 @@ static int tunnel_key_opts_dump(struct sk_buff *skb,
        if (info->key.tun_flags & TUNNEL_GENEVE_OPT) {
                err = tunnel_key_geneve_opts_dump(skb, info);
                if (err)
-                       return err;
+                       goto err_out;
        } else {
-               return -EINVAL;
+err_out:
+               nla_nest_cancel(skb, start);
+               return err;
        }
 
        nla_nest_end(skb, start);
index 31bd143..1a67af8 100644 (file)
@@ -1252,7 +1252,7 @@ replay:
        }
        chain = tcf_chain_get(block, chain_index, true);
        if (!chain) {
-               NL_SET_ERR_MSG(extack, "Cannot find specified filter chain");
+               NL_SET_ERR_MSG(extack, "Cannot create specified filter chain");
                err = -ENOMEM;
                goto errout;
        }
@@ -1399,7 +1399,7 @@ static int tc_del_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
                        goto errout;
                }
                NL_SET_ERR_MSG(extack, "Cannot find specified filter chain");
-               err = -EINVAL;
+               err = -ENOENT;
                goto errout;
        }
 
index ef5c9a8..a644292 100644 (file)
@@ -215,7 +215,6 @@ static const struct seq_operations sctp_eps_ops = {
 struct sctp_ht_iter {
        struct seq_net_private p;
        struct rhashtable_iter hti;
-       int start_fail;
 };
 
 static void *sctp_transport_seq_start(struct seq_file *seq, loff_t *pos)
@@ -224,7 +223,6 @@ static void *sctp_transport_seq_start(struct seq_file *seq, loff_t *pos)
 
        sctp_transport_walk_start(&iter->hti);
 
-       iter->start_fail = 0;
        return sctp_transport_get_idx(seq_file_net(seq), &iter->hti, *pos);
 }
 
@@ -232,8 +230,6 @@ static void sctp_transport_seq_stop(struct seq_file *seq, void *v)
 {
        struct sctp_ht_iter *iter = seq->private;
 
-       if (iter->start_fail)
-               return;
        sctp_transport_walk_stop(&iter->hti);
 }
 
@@ -264,8 +260,6 @@ static int sctp_assocs_seq_show(struct seq_file *seq, void *v)
        }
 
        transport = (struct sctp_transport *)v;
-       if (!sctp_transport_hold(transport))
-               return 0;
        assoc = transport->asoc;
        epb = &assoc->base;
        sk = epb->sk;
@@ -322,8 +316,6 @@ static int sctp_remaddr_seq_show(struct seq_file *seq, void *v)
        }
 
        transport = (struct sctp_transport *)v;
-       if (!sctp_transport_hold(transport))
-               return 0;
        assoc = transport->asoc;
 
        list_for_each_entry_rcu(tsp, &assoc->peer.transport_addr_list,
index e96b15a..f73e9d3 100644 (file)
@@ -2658,20 +2658,23 @@ static int sctp_apply_peer_addr_params(struct sctp_paddrparams *params,
        }
 
        if (params->spp_flags & SPP_IPV6_FLOWLABEL) {
-               if (trans && trans->ipaddr.sa.sa_family == AF_INET6) {
-                       trans->flowlabel = params->spp_ipv6_flowlabel &
-                                          SCTP_FLOWLABEL_VAL_MASK;
-                       trans->flowlabel |= SCTP_FLOWLABEL_SET_MASK;
-               } else if (asoc) {
-                       list_for_each_entry(trans,
-                                           &asoc->peer.transport_addr_list,
-                                           transports) {
-                               if (trans->ipaddr.sa.sa_family != AF_INET6)
-                                       continue;
+               if (trans) {
+                       if (trans->ipaddr.sa.sa_family == AF_INET6) {
                                trans->flowlabel = params->spp_ipv6_flowlabel &
                                                   SCTP_FLOWLABEL_VAL_MASK;
                                trans->flowlabel |= SCTP_FLOWLABEL_SET_MASK;
                        }
+               } else if (asoc) {
+                       struct sctp_transport *t;
+
+                       list_for_each_entry(t, &asoc->peer.transport_addr_list,
+                                           transports) {
+                               if (t->ipaddr.sa.sa_family != AF_INET6)
+                                       continue;
+                               t->flowlabel = params->spp_ipv6_flowlabel &
+                                              SCTP_FLOWLABEL_VAL_MASK;
+                               t->flowlabel |= SCTP_FLOWLABEL_SET_MASK;
+                       }
                        asoc->flowlabel = params->spp_ipv6_flowlabel &
                                          SCTP_FLOWLABEL_VAL_MASK;
                        asoc->flowlabel |= SCTP_FLOWLABEL_SET_MASK;
@@ -2687,12 +2690,13 @@ static int sctp_apply_peer_addr_params(struct sctp_paddrparams *params,
                        trans->dscp = params->spp_dscp & SCTP_DSCP_VAL_MASK;
                        trans->dscp |= SCTP_DSCP_SET_MASK;
                } else if (asoc) {
-                       list_for_each_entry(trans,
-                                           &asoc->peer.transport_addr_list,
+                       struct sctp_transport *t;
+
+                       list_for_each_entry(t, &asoc->peer.transport_addr_list,
                                            transports) {
-                               trans->dscp = params->spp_dscp &
-                                             SCTP_DSCP_VAL_MASK;
-                               trans->dscp |= SCTP_DSCP_SET_MASK;
+                               t->dscp = params->spp_dscp &
+                                         SCTP_DSCP_VAL_MASK;
+                               t->dscp |= SCTP_DSCP_SET_MASK;
                        }
                        asoc->dscp = params->spp_dscp & SCTP_DSCP_VAL_MASK;
                        asoc->dscp |= SCTP_DSCP_SET_MASK;
@@ -5005,9 +5009,14 @@ struct sctp_transport *sctp_transport_get_next(struct net *net,
                        break;
                }
 
+               if (!sctp_transport_hold(t))
+                       continue;
+
                if (net_eq(sock_net(t->asoc->base.sk), net) &&
                    t->asoc->peer.primary_path == t)
                        break;
+
+               sctp_transport_put(t);
        }
 
        return t;
@@ -5017,13 +5026,18 @@ struct sctp_transport *sctp_transport_get_idx(struct net *net,
                                              struct rhashtable_iter *iter,
                                              int pos)
 {
-       void *obj = SEQ_START_TOKEN;
+       struct sctp_transport *t;
 
-       while (pos && (obj = sctp_transport_get_next(net, iter)) &&
-              !IS_ERR(obj))
-               pos--;
+       if (!pos)
+               return SEQ_START_TOKEN;
 
-       return obj;
+       while ((t = sctp_transport_get_next(net, iter)) && !IS_ERR(t)) {
+               if (!--pos)
+                       break;
+               sctp_transport_put(t);
+       }
+
+       return t;
 }
 
 int sctp_for_each_endpoint(int (*cb)(struct sctp_endpoint *, void *),
@@ -5082,8 +5096,6 @@ again:
 
        tsp = sctp_transport_get_idx(net, &hti, *pos + 1);
        for (; !IS_ERR_OR_NULL(tsp); tsp = sctp_transport_get_next(net, &hti)) {
-               if (!sctp_transport_hold(tsp))
-                       continue;
                ret = cb(tsp, p);
                if (ret)
                        break;
index 9ee6cfe..d802654 100644 (file)
@@ -51,12 +51,12 @@ const char tipc_bclink_name[] = "broadcast-link";
  * struct tipc_bc_base - base structure for keeping broadcast send state
  * @link: broadcast send link structure
  * @inputq: data input queue; will only carry SOCK_WAKEUP messages
- * @dest: array keeping number of reachable destinations per bearer
+ * @dests: array keeping number of reachable destinations per bearer
  * @primary_bearer: a bearer having links to all broadcast destinations, if any
  * @bcast_support: indicates if primary bearer, if any, supports broadcast
  * @rcast_support: indicates if all peer nodes support replicast
  * @rc_ratio: dest count as percentage of cluster size where send method changes
- * @bc_threshold: calculated drom rc_ratio; if dests > threshold use broadcast
+ * @bc_threshold: calculated from rc_ratio; if dests > threshold use broadcast
  */
 struct tipc_bc_base {
        struct tipc_link *link;
index aaabb0b..73137f4 100644 (file)
@@ -84,7 +84,9 @@ static int tipc_sock_diag_handler_dump(struct sk_buff *skb,
 
        if (h->nlmsg_flags & NLM_F_DUMP) {
                struct netlink_dump_control c = {
+                       .start = tipc_dump_start,
                        .dump = tipc_diag_dump,
+                       .done = tipc_dump_done,
                };
                netlink_dump_start(net->diag_nlsk, skb, h, &c);
                return 0;
index 88f027b..66d5b2c 100644 (file)
@@ -980,20 +980,17 @@ int tipc_nl_name_table_dump(struct sk_buff *skb, struct netlink_callback *cb)
 
 struct tipc_dest *tipc_dest_find(struct list_head *l, u32 node, u32 port)
 {
-       u64 value = (u64)node << 32 | port;
        struct tipc_dest *dst;
 
        list_for_each_entry(dst, l, list) {
-               if (dst->value != value)
-                       continue;
-               return dst;
+               if (dst->node == node && dst->port == port)
+                       return dst;
        }
        return NULL;
 }
 
 bool tipc_dest_push(struct list_head *l, u32 node, u32 port)
 {
-       u64 value = (u64)node << 32 | port;
        struct tipc_dest *dst;
 
        if (tipc_dest_find(l, node, port))
@@ -1002,7 +999,8 @@ bool tipc_dest_push(struct list_head *l, u32 node, u32 port)
        dst = kmalloc(sizeof(*dst), GFP_ATOMIC);
        if (unlikely(!dst))
                return false;
-       dst->value = value;
+       dst->node = node;
+       dst->port = port;
        list_add(&dst->list, l);
        return true;
 }
index 0febba4..892bd75 100644 (file)
@@ -133,13 +133,8 @@ void tipc_nametbl_stop(struct net *net);
 
 struct tipc_dest {
        struct list_head list;
-       union {
-               struct {
-                       u32 port;
-                       u32 node;
-               };
-               u64 value;
-       };
+       u32 port;
+       u32 node;
 };
 
 struct tipc_dest *tipc_dest_find(struct list_head *l, u32 node, u32 port);
index 6ff2254..99ee419 100644 (file)
@@ -167,7 +167,9 @@ static const struct genl_ops tipc_genl_v2_ops[] = {
        },
        {
                .cmd    = TIPC_NL_SOCK_GET,
+               .start = tipc_dump_start,
                .dumpit = tipc_nl_sk_dump,
+               .done   = tipc_dump_done,
                .policy = tipc_nl_policy,
        },
        {
index a2f7674..6376467 100644 (file)
@@ -185,6 +185,10 @@ static int __tipc_nl_compat_dumpit(struct tipc_nl_compat_cmd_dump *cmd,
                return -ENOMEM;
 
        buf->sk = msg->dst_sk;
+       if (__tipc_dump_start(&cb, msg->net)) {
+               kfree_skb(buf);
+               return -ENOMEM;
+       }
 
        do {
                int rem;
@@ -216,6 +220,7 @@ static int __tipc_nl_compat_dumpit(struct tipc_nl_compat_cmd_dump *cmd,
        err = 0;
 
 err_out:
+       tipc_dump_done(&cb);
        kfree_skb(buf);
 
        if (err == -EMSGSIZE) {
index c1e93c9..3f03ddd 100644 (file)
@@ -576,6 +576,7 @@ static int tipc_release(struct socket *sock)
        sk_stop_timer(sk, &sk->sk_timer);
        tipc_sk_remove(tsk);
 
+       sock_orphan(sk);
        /* Reject any messages that accumulated in backlog queue */
        release_sock(sk);
        tipc_dest_list_purge(&tsk->cong_links);
@@ -2672,6 +2673,8 @@ void tipc_sk_reinit(struct net *net)
 
                rhashtable_walk_stop(&iter);
        } while (tsk == ERR_PTR(-EAGAIN));
+
+       rhashtable_walk_exit(&iter);
 }
 
 static struct tipc_sock *tipc_sk_lookup(struct net *net, u32 portid)
@@ -3227,45 +3230,74 @@ int tipc_nl_sk_walk(struct sk_buff *skb, struct netlink_callback *cb,
                                       struct netlink_callback *cb,
                                       struct tipc_sock *tsk))
 {
-       struct net *net = sock_net(skb->sk);
-       struct tipc_net *tn = tipc_net(net);
-       const struct bucket_table *tbl;
-       u32 prev_portid = cb->args[1];
-       u32 tbl_id = cb->args[0];
-       struct rhash_head *pos;
+       struct rhashtable_iter *iter = (void *)cb->args[4];
        struct tipc_sock *tsk;
        int err;
 
-       rcu_read_lock();
-       tbl = rht_dereference_rcu((&tn->sk_rht)->tbl, &tn->sk_rht);
-       for (; tbl_id < tbl->size; tbl_id++) {
-               rht_for_each_entry_rcu(tsk, pos, tbl, tbl_id, node) {
-                       spin_lock_bh(&tsk->sk.sk_lock.slock);
-                       if (prev_portid && prev_portid != tsk->portid) {
-                               spin_unlock_bh(&tsk->sk.sk_lock.slock);
+       rhashtable_walk_start(iter);
+       while ((tsk = rhashtable_walk_next(iter)) != NULL) {
+               if (IS_ERR(tsk)) {
+                       err = PTR_ERR(tsk);
+                       if (err == -EAGAIN) {
+                               err = 0;
                                continue;
                        }
+                       break;
+               }
 
-                       err = skb_handler(skb, cb, tsk);
-                       if (err) {
-                               prev_portid = tsk->portid;
-                               spin_unlock_bh(&tsk->sk.sk_lock.slock);
-                               goto out;
-                       }
-
-                       prev_portid = 0;
-                       spin_unlock_bh(&tsk->sk.sk_lock.slock);
+               sock_hold(&tsk->sk);
+               rhashtable_walk_stop(iter);
+               lock_sock(&tsk->sk);
+               err = skb_handler(skb, cb, tsk);
+               if (err) {
+                       release_sock(&tsk->sk);
+                       sock_put(&tsk->sk);
+                       goto out;
                }
+               release_sock(&tsk->sk);
+               rhashtable_walk_start(iter);
+               sock_put(&tsk->sk);
        }
+       rhashtable_walk_stop(iter);
 out:
-       rcu_read_unlock();
-       cb->args[0] = tbl_id;
-       cb->args[1] = prev_portid;
-
        return skb->len;
 }
 EXPORT_SYMBOL(tipc_nl_sk_walk);
 
+int tipc_dump_start(struct netlink_callback *cb)
+{
+       return __tipc_dump_start(cb, sock_net(cb->skb->sk));
+}
+EXPORT_SYMBOL(tipc_dump_start);
+
+int __tipc_dump_start(struct netlink_callback *cb, struct net *net)
+{
+       /* tipc_nl_name_table_dump() uses cb->args[0...3]. */
+       struct rhashtable_iter *iter = (void *)cb->args[4];
+       struct tipc_net *tn = tipc_net(net);
+
+       if (!iter) {
+               iter = kmalloc(sizeof(*iter), GFP_KERNEL);
+               if (!iter)
+                       return -ENOMEM;
+
+               cb->args[4] = (long)iter;
+       }
+
+       rhashtable_walk_enter(&tn->sk_rht, iter);
+       return 0;
+}
+
+int tipc_dump_done(struct netlink_callback *cb)
+{
+       struct rhashtable_iter *hti = (void *)cb->args[4];
+
+       rhashtable_walk_exit(hti);
+       kfree(hti);
+       return 0;
+}
+EXPORT_SYMBOL(tipc_dump_done);
+
 int tipc_sk_fill_sock_diag(struct sk_buff *skb, struct netlink_callback *cb,
                           struct tipc_sock *tsk, u32 sk_filter_state,
                           u64 (*tipc_diag_gen_cookie)(struct sock *sk))
index aff9b2a..5e575f2 100644 (file)
@@ -68,4 +68,7 @@ int tipc_nl_sk_walk(struct sk_buff *skb, struct netlink_callback *cb,
                    int (*skb_handler)(struct sk_buff *skb,
                                       struct netlink_callback *cb,
                                       struct tipc_sock *tsk));
+int tipc_dump_start(struct netlink_callback *cb);
+int __tipc_dump_start(struct netlink_callback *cb, struct net *net);
+int tipc_dump_done(struct netlink_callback *cb);
 #endif
index c8e34ef..2627b5d 100644 (file)
@@ -313,8 +313,8 @@ static void tipc_conn_send_work(struct work_struct *work)
        conn_put(con);
 }
 
-/* tipc_conn_queue_evt() - interrupt level call from a subscription instance
- * The queued work is launched into tipc_send_work()->tipc_send_to_sock()
+/* tipc_topsrv_queue_evt() - interrupt level call from a subscription instance
+ * The queued work is launched into tipc_conn_send_work()->tipc_conn_send_to_sock()
  */
 void tipc_topsrv_queue_evt(struct net *net, int conid,
                           u32 event, struct tipc_event *evt)
index 52fbe72..e28a6ff 100644 (file)
@@ -125,6 +125,9 @@ static int alloc_encrypted_sg(struct sock *sk, int len)
                         &ctx->sg_encrypted_num_elem,
                         &ctx->sg_encrypted_size, 0);
 
+       if (rc == -ENOSPC)
+               ctx->sg_encrypted_num_elem = ARRAY_SIZE(ctx->sg_encrypted_data);
+
        return rc;
 }
 
@@ -138,6 +141,9 @@ static int alloc_plaintext_sg(struct sock *sk, int len)
                         &ctx->sg_plaintext_num_elem, &ctx->sg_plaintext_size,
                         tls_ctx->pending_open_record_frags);
 
+       if (rc == -ENOSPC)
+               ctx->sg_plaintext_num_elem = ARRAY_SIZE(ctx->sg_plaintext_data);
+
        return rc;
 }
 
index 5fb9b7d..4b8ec65 100644 (file)
@@ -669,13 +669,13 @@ static int nl80211_msg_put_wmm_rules(struct sk_buff *msg,
                        goto nla_put_failure;
 
                if (nla_put_u16(msg, NL80211_WMMR_CW_MIN,
-                               rule->wmm_rule->client[j].cw_min) ||
+                               rule->wmm_rule.client[j].cw_min) ||
                    nla_put_u16(msg, NL80211_WMMR_CW_MAX,
-                               rule->wmm_rule->client[j].cw_max) ||
+                               rule->wmm_rule.client[j].cw_max) ||
                    nla_put_u8(msg, NL80211_WMMR_AIFSN,
-                              rule->wmm_rule->client[j].aifsn) ||
-                   nla_put_u8(msg, NL80211_WMMR_TXOP,
-                              rule->wmm_rule->client[j].cot))
+                              rule->wmm_rule.client[j].aifsn) ||
+                   nla_put_u16(msg, NL80211_WMMR_TXOP,
+                               rule->wmm_rule.client[j].cot))
                        goto nla_put_failure;
 
                nla_nest_end(msg, nl_wmm_rule);
@@ -766,9 +766,9 @@ static int nl80211_msg_put_channel(struct sk_buff *msg, struct wiphy *wiphy,
 
        if (large) {
                const struct ieee80211_reg_rule *rule =
-                       freq_reg_info(wiphy, chan->center_freq);
+                       freq_reg_info(wiphy, MHZ_TO_KHZ(chan->center_freq));
 
-               if (!IS_ERR(rule) && rule->wmm_rule) {
+               if (!IS_ERR_OR_NULL(rule) && rule->has_wmm) {
                        if (nl80211_msg_put_wmm_rules(msg, rule))
                                goto nla_put_failure;
                }
@@ -12205,6 +12205,7 @@ static int nl80211_update_ft_ies(struct sk_buff *skb, struct genl_info *info)
                return -EOPNOTSUPP;
 
        if (!info->attrs[NL80211_ATTR_MDID] ||
+           !info->attrs[NL80211_ATTR_IE] ||
            !is_valid_ie_attr(info->attrs[NL80211_ATTR_IE]))
                return -EINVAL;
 
index 4fc66a1..2f702ad 100644 (file)
@@ -425,36 +425,23 @@ static const struct ieee80211_regdomain *
 reg_copy_regd(const struct ieee80211_regdomain *src_regd)
 {
        struct ieee80211_regdomain *regd;
-       int size_of_regd, size_of_wmms;
+       int size_of_regd;
        unsigned int i;
-       struct ieee80211_wmm_rule *d_wmm, *s_wmm;
 
        size_of_regd =
                sizeof(struct ieee80211_regdomain) +
                src_regd->n_reg_rules * sizeof(struct ieee80211_reg_rule);
-       size_of_wmms = src_regd->n_wmm_rules *
-               sizeof(struct ieee80211_wmm_rule);
 
-       regd = kzalloc(size_of_regd + size_of_wmms, GFP_KERNEL);
+       regd = kzalloc(size_of_regd, GFP_KERNEL);
        if (!regd)
                return ERR_PTR(-ENOMEM);
 
        memcpy(regd, src_regd, sizeof(struct ieee80211_regdomain));
 
-       d_wmm = (struct ieee80211_wmm_rule *)((u8 *)regd + size_of_regd);
-       s_wmm = (struct ieee80211_wmm_rule *)((u8 *)src_regd + size_of_regd);
-       memcpy(d_wmm, s_wmm, size_of_wmms);
-
-       for (i = 0; i < src_regd->n_reg_rules; i++) {
+       for (i = 0; i < src_regd->n_reg_rules; i++)
                memcpy(&regd->reg_rules[i], &src_regd->reg_rules[i],
                       sizeof(struct ieee80211_reg_rule));
-               if (!src_regd->reg_rules[i].wmm_rule)
-                       continue;
 
-               regd->reg_rules[i].wmm_rule = d_wmm +
-                       (src_regd->reg_rules[i].wmm_rule - s_wmm) /
-                       sizeof(struct ieee80211_wmm_rule);
-       }
        return regd;
 }
 
@@ -860,9 +847,10 @@ static bool valid_regdb(const u8 *data, unsigned int size)
        return true;
 }
 
-static void set_wmm_rule(struct ieee80211_wmm_rule *rule,
+static void set_wmm_rule(struct ieee80211_reg_rule *rrule,
                         struct fwdb_wmm_rule *wmm)
 {
+       struct ieee80211_wmm_rule *rule = &rrule->wmm_rule;
        unsigned int i;
 
        for (i = 0; i < IEEE80211_NUM_ACS; i++) {
@@ -876,11 +864,13 @@ static void set_wmm_rule(struct ieee80211_wmm_rule *rule,
                rule->ap[i].aifsn = wmm->ap[i].aifsn;
                rule->ap[i].cot = 1000 * be16_to_cpu(wmm->ap[i].cot);
        }
+
+       rrule->has_wmm = true;
 }
 
 static int __regdb_query_wmm(const struct fwdb_header *db,
                             const struct fwdb_country *country, int freq,
-                            u32 *dbptr, struct ieee80211_wmm_rule *rule)
+                            struct ieee80211_reg_rule *rule)
 {
        unsigned int ptr = be16_to_cpu(country->coll_ptr) << 2;
        struct fwdb_collection *coll = (void *)((u8 *)db + ptr);
@@ -901,8 +891,6 @@ static int __regdb_query_wmm(const struct fwdb_header *db,
                        wmm_ptr = be16_to_cpu(rrule->wmm_ptr) << 2;
                        wmm = (void *)((u8 *)db + wmm_ptr);
                        set_wmm_rule(rule, wmm);
-                       if (dbptr)
-                               *dbptr = wmm_ptr;
                        return 0;
                }
        }
@@ -910,8 +898,7 @@ static int __regdb_query_wmm(const struct fwdb_header *db,
        return -ENODATA;
 }
 
-int reg_query_regdb_wmm(char *alpha2, int freq, u32 *dbptr,
-                       struct ieee80211_wmm_rule *rule)
+int reg_query_regdb_wmm(char *alpha2, int freq, struct ieee80211_reg_rule *rule)
 {
        const struct fwdb_header *hdr = regdb;
        const struct fwdb_country *country;
@@ -925,8 +912,7 @@ int reg_query_regdb_wmm(char *alpha2, int freq, u32 *dbptr,
        country = &hdr->country[0];
        while (country->coll_ptr) {
                if (alpha2_equal(alpha2, country->alpha2))
-                       return __regdb_query_wmm(regdb, country, freq, dbptr,
-                                                rule);
+                       return __regdb_query_wmm(regdb, country, freq, rule);
 
                country++;
        }
@@ -935,32 +921,13 @@ int reg_query_regdb_wmm(char *alpha2, int freq, u32 *dbptr,
 }
 EXPORT_SYMBOL(reg_query_regdb_wmm);
 
-struct wmm_ptrs {
-       struct ieee80211_wmm_rule *rule;
-       u32 ptr;
-};
-
-static struct ieee80211_wmm_rule *find_wmm_ptr(struct wmm_ptrs *wmm_ptrs,
-                                              u32 wmm_ptr, int n_wmms)
-{
-       int i;
-
-       for (i = 0; i < n_wmms; i++) {
-               if (wmm_ptrs[i].ptr == wmm_ptr)
-                       return wmm_ptrs[i].rule;
-       }
-       return NULL;
-}
-
 static int regdb_query_country(const struct fwdb_header *db,
                               const struct fwdb_country *country)
 {
        unsigned int ptr = be16_to_cpu(country->coll_ptr) << 2;
        struct fwdb_collection *coll = (void *)((u8 *)db + ptr);
        struct ieee80211_regdomain *regdom;
-       struct ieee80211_regdomain *tmp_rd;
-       unsigned int size_of_regd, i, n_wmms = 0;
-       struct wmm_ptrs *wmm_ptrs;
+       unsigned int size_of_regd, i;
 
        size_of_regd = sizeof(struct ieee80211_regdomain) +
                coll->n_rules * sizeof(struct ieee80211_reg_rule);
@@ -969,12 +936,6 @@ static int regdb_query_country(const struct fwdb_header *db,
        if (!regdom)
                return -ENOMEM;
 
-       wmm_ptrs = kcalloc(coll->n_rules, sizeof(*wmm_ptrs), GFP_KERNEL);
-       if (!wmm_ptrs) {
-               kfree(regdom);
-               return -ENOMEM;
-       }
-
        regdom->n_reg_rules = coll->n_rules;
        regdom->alpha2[0] = country->alpha2[0];
        regdom->alpha2[1] = country->alpha2[1];
@@ -1013,37 +974,11 @@ static int regdb_query_country(const struct fwdb_header *db,
                                1000 * be16_to_cpu(rule->cac_timeout);
                if (rule->len >= offsetofend(struct fwdb_rule, wmm_ptr)) {
                        u32 wmm_ptr = be16_to_cpu(rule->wmm_ptr) << 2;
-                       struct ieee80211_wmm_rule *wmm_pos =
-                               find_wmm_ptr(wmm_ptrs, wmm_ptr, n_wmms);
-                       struct fwdb_wmm_rule *wmm;
-                       struct ieee80211_wmm_rule *wmm_rule;
-
-                       if (wmm_pos) {
-                               rrule->wmm_rule = wmm_pos;
-                               continue;
-                       }
-                       wmm = (void *)((u8 *)db + wmm_ptr);
-                       tmp_rd = krealloc(regdom, size_of_regd + (n_wmms + 1) *
-                                         sizeof(struct ieee80211_wmm_rule),
-                                         GFP_KERNEL);
-
-                       if (!tmp_rd) {
-                               kfree(regdom);
-                               kfree(wmm_ptrs);
-                               return -ENOMEM;
-                       }
-                       regdom = tmp_rd;
-
-                       wmm_rule = (struct ieee80211_wmm_rule *)
-                               ((u8 *)regdom + size_of_regd + n_wmms *
-                               sizeof(struct ieee80211_wmm_rule));
+                       struct fwdb_wmm_rule *wmm = (void *)((u8 *)db + wmm_ptr);
 
-                       set_wmm_rule(wmm_rule, wmm);
-                       wmm_ptrs[n_wmms].ptr = wmm_ptr;
-                       wmm_ptrs[n_wmms++].rule = wmm_rule;
+                       set_wmm_rule(rrule, wmm);
                }
        }
-       kfree(wmm_ptrs);
 
        return reg_schedule_apply(regdom);
 }
index e0825a0..959ed3a 100644 (file)
@@ -1456,7 +1456,7 @@ bool ieee80211_chandef_to_operating_class(struct cfg80211_chan_def *chandef,
                                          u8 *op_class)
 {
        u8 vht_opclass;
-       u16 freq = chandef->center_freq1;
+       u32 freq = chandef->center_freq1;
 
        if (freq >= 2412 && freq <= 2472) {
                if (chandef->width > NL80211_CHAN_WIDTH_40)
index 5219280..161b022 100755 (executable)
@@ -380,6 +380,7 @@ our $Attribute      = qr{
                        __noclone|
                        __deprecated|
                        __read_mostly|
+                       __ro_after_init|
                        __kprobes|
                        $InitAttribute|
                        ____cacheline_aligned|
@@ -3311,7 +3312,7 @@ sub process {
                        # known declaration macros
                      $sline =~ /^\+\s+$declaration_macros/ ||
                        # start of struct or union or enum
-                     $sline =~ /^\+\s+(?:union|struct|enum|typedef)\b/ ||
+                     $sline =~ /^\+\s+(?:static\s+)?(?:const\s+)?(?:union|struct|enum|typedef)\b/ ||
                        # start or end of block or continuation of declaration
                      $sline =~ /^\+\s+(?:$|[\{\}\.\#\"\?\:\(\[])/ ||
                        # bitfield continuation
index 999d585..e083bca 100755 (executable)
@@ -11,13 +11,14 @@ DEPMOD=$1
 KERNELRELEASE=$2
 
 if ! test -r System.map ; then
+       echo "Warning: modules_install: missing 'System.map' file. Skipping depmod." >&2
        exit 0
 fi
 
 if [ -z $(command -v $DEPMOD) ]; then
-       echo "'make modules_install' requires $DEPMOD. Please install it." >&2
+       echo "Warning: 'make modules_install' requires $DEPMOD. Please install it." >&2
        echo "This is probably in the kmod package." >&2
-       exit 1
+       exit 0
 fi
 
 # older versions of depmod require the version string to start with three
index 4a7bd21..67ed9f6 100644 (file)
@@ -221,7 +221,6 @@ $(obj)/zconf.tab.o: $(obj)/zconf.lex.c
 
 # check if necessary packages are available, and configure build flags
 define filechk_conf_cfg
-       $(CONFIG_SHELL) $(srctree)/scripts/kconfig/check-pkgconfig.sh; \
        $(CONFIG_SHELL) $<
 endef
 
diff --git a/scripts/kconfig/check-pkgconfig.sh b/scripts/kconfig/check-pkgconfig.sh
deleted file mode 100644 (file)
index 7a1c40b..0000000
+++ /dev/null
@@ -1,8 +0,0 @@
-#!/bin/sh
-# SPDX-License-Identifier: GPL-2.0
-# Check for pkg-config presence
-
-if [ -z $(command -v pkg-config) ]; then
-       echo "'make *config' requires 'pkg-config'. Please install it." 1>&2
-       exit 1
-fi
index 533b3d8..480ecd8 100755 (executable)
@@ -3,6 +3,13 @@
 
 PKG="gtk+-2.0 gmodule-2.0 libglade-2.0"
 
+if [ -z "$(command -v pkg-config)" ]; then
+       echo >&2 "*"
+       echo >&2 "* 'make gconfig' requires 'pkg-config'. Please install it."
+       echo >&2 "*"
+       exit 1
+fi
+
 if ! pkg-config --exists $PKG; then
        echo >&2 "*"
        echo >&2 "* Unable to find the GTK+ installation. Please make sure that"
index e6f9fac..c812872 100755 (executable)
@@ -4,20 +4,23 @@
 PKG="ncursesw"
 PKG2="ncurses"
 
-if pkg-config --exists $PKG; then
-       echo cflags=\"$(pkg-config --cflags $PKG)\"
-       echo libs=\"$(pkg-config --libs $PKG)\"
-       exit 0
-fi
+if [ -n "$(command -v pkg-config)" ]; then
+       if pkg-config --exists $PKG; then
+               echo cflags=\"$(pkg-config --cflags $PKG)\"
+               echo libs=\"$(pkg-config --libs $PKG)\"
+               exit 0
+       fi
 
-if pkg-config --exists $PKG2; then
-       echo cflags=\"$(pkg-config --cflags $PKG2)\"
-       echo libs=\"$(pkg-config --libs $PKG2)\"
-       exit 0
+       if pkg-config --exists $PKG2; then
+               echo cflags=\"$(pkg-config --cflags $PKG2)\"
+               echo libs=\"$(pkg-config --libs $PKG2)\"
+               exit 0
+       fi
 fi
 
-# Unfortunately, some distributions (e.g. openSUSE) cannot find ncurses
-# by pkg-config.
+# Check the default paths in case pkg-config is not installed.
+# (Even if it is installed, some distributions such as openSUSE cannot
+# find ncurses by pkg-config.)
 if [ -f /usr/include/ncursesw/ncurses.h ]; then
        echo cflags=\"-D_GNU_SOURCE -I/usr/include/ncursesw\"
        echo libs=\"-lncursesw\"
index 83b5836..143c05f 100644 (file)
@@ -490,7 +490,6 @@ static void build_conf(struct menu *menu)
                        switch (prop->type) {
                        case P_MENU:
                                child_count++;
-                               prompt = prompt;
                                if (single_menu_mode) {
                                        item_make("%s%*c%s",
                                                  menu->data ? "-->" : "++>",
index 42f5ac7..001559e 100644 (file)
@@ -4,20 +4,23 @@
 PKG="ncursesw menuw panelw"
 PKG2="ncurses menu panel"
 
-if pkg-config --exists $PKG; then
-       echo cflags=\"$(pkg-config --cflags $PKG)\"
-       echo libs=\"$(pkg-config --libs $PKG)\"
-       exit 0
-fi
+if [ -n "$(command -v pkg-config)" ]; then
+       if pkg-config --exists $PKG; then
+               echo cflags=\"$(pkg-config --cflags $PKG)\"
+               echo libs=\"$(pkg-config --libs $PKG)\"
+               exit 0
+       fi
 
-if pkg-config --exists $PKG2; then
-       echo cflags=\"$(pkg-config --cflags $PKG2)\"
-       echo libs=\"$(pkg-config --libs $PKG2)\"
-       exit 0
+       if pkg-config --exists $PKG2; then
+               echo cflags=\"$(pkg-config --cflags $PKG2)\"
+               echo libs=\"$(pkg-config --libs $PKG2)\"
+               exit 0
+       fi
 fi
 
-# Unfortunately, some distributions (e.g. openSUSE) cannot find ncurses
-# by pkg-config.
+# Check the default paths in case pkg-config is not installed.
+# (Even if it is installed, some distributions such as openSUSE cannot
+# find ncurses by pkg-config.)
 if [ -f /usr/include/ncursesw/ncurses.h ]; then
        echo cflags=\"-D_GNU_SOURCE -I/usr/include/ncursesw\"
        echo libs=\"-lncursesw -lmenuw -lpanelw\"
index 0862e15..02ccc0a 100755 (executable)
@@ -4,6 +4,13 @@
 PKG="Qt5Core Qt5Gui Qt5Widgets"
 PKG2="QtCore QtGui"
 
+if [ -z "$(command -v pkg-config)" ]; then
+       echo >&2 "*"
+       echo >&2 "* 'make xconfig' requires 'pkg-config'. Please install it."
+       echo >&2 "*"
+       exit 1
+fi
+
 if pkg-config --exists $PKG; then
        echo cflags=\"-std=c++11 -fPIC $(pkg-config --cflags Qt5Core Qt5Gui Qt5Widgets)\"
        echo libs=\"$(pkg-config --libs $PKG)\"
index fe06e77..f599031 100755 (executable)
@@ -389,6 +389,9 @@ if ($arch eq "x86_64") {
     $mcount_regex = "^\\s*([0-9a-fA-F]+):\\sR_RISCV_CALL\\s_mcount\$";
     $type = ".quad";
     $alignment = 2;
+} elsif ($arch eq "nds32") {
+    $mcount_regex = "^\\s*([0-9a-fA-F]+):\\s*R_NDS32_HI20_RELA\\s+_mcount\$";
+    $alignment = 2;
 } else {
     die "Arch $arch is not supported with CONFIG_FTRACE_MCOUNT_RECORD";
 }
index 71f3941..79f7dd5 100755 (executable)
@@ -74,7 +74,7 @@ scm_version()
                fi
 
                # Check for uncommitted changes
-               if git diff-index --name-only HEAD | grep -qv "^scripts/package"; then
+               if git status -uno --porcelain | grep -qv '^.. scripts/package'; then
                        printf '%s' -dirty
                fi
 
index 27d8b26..d9aa521 100644 (file)
@@ -57,7 +57,7 @@ config SECURITY_NETWORK
 config PAGE_TABLE_ISOLATION
        bool "Remove the kernel mapping in user mode"
        default y
-       depends on X86 && !UML
+       depends on (X86_64 || X86_PAE) && !UML
        help
          This feature reduces the number of hardware side channels by
          ensuring that the majority of kernel addresses are not mapped
index f2f22d0..4ccec1b 100644 (file)
@@ -79,7 +79,6 @@ int apparmor_secid_to_secctx(u32 secid, char **secdata, u32 *seclen)
        struct aa_label *label = aa_secid_to_label(secid);
        int len;
 
-       AA_BUG(!secdata);
        AA_BUG(!seclen);
 
        if (!label)
index 711e89d..3b602a1 100644 (file)
@@ -300,7 +300,7 @@ long __keyctl_dh_compute(struct keyctl_dh_params __user *params,
        }
        dh_inputs.g_size = dlen;
 
-       dlen = dh_data_from_key(pcopy.private, &dh_inputs.key);
+       dlen = dh_data_from_key(pcopy.dh_private, &dh_inputs.key);
        if (dlen < 0) {
                ret = dlen;
                goto out2;
index 69517e1..08d5662 100644 (file)
@@ -129,7 +129,7 @@ static int snd_rawmidi_runtime_create(struct snd_rawmidi_substream *substream)
                runtime->avail = 0;
        else
                runtime->avail = runtime->buffer_size;
-       runtime->buffer = kvmalloc(runtime->buffer_size, GFP_KERNEL);
+       runtime->buffer = kvzalloc(runtime->buffer_size, GFP_KERNEL);
        if (!runtime->buffer) {
                kfree(runtime);
                return -ENOMEM;
@@ -655,7 +655,7 @@ static int resize_runtime_buffer(struct snd_rawmidi_runtime *runtime,
        if (params->avail_min < 1 || params->avail_min > params->buffer_size)
                return -EINVAL;
        if (params->buffer_size != runtime->buffer_size) {
-               newbuf = kvmalloc(params->buffer_size, GFP_KERNEL);
+               newbuf = kvzalloc(params->buffer_size, GFP_KERNEL);
                if (!newbuf)
                        return -ENOMEM;
                spin_lock_irq(&runtime->lock);
index 1bd2757..a835558 100644 (file)
@@ -146,7 +146,8 @@ EXPORT_SYMBOL_GPL(snd_hdac_ext_stream_decouple);
  */
 void snd_hdac_ext_link_stream_start(struct hdac_ext_stream *stream)
 {
-       snd_hdac_updatel(stream->pplc_addr, AZX_REG_PPLCCTL, 0, AZX_PPLCCTL_RUN);
+       snd_hdac_updatel(stream->pplc_addr, AZX_REG_PPLCCTL,
+                        AZX_PPLCCTL_RUN, AZX_PPLCCTL_RUN);
 }
 EXPORT_SYMBOL_GPL(snd_hdac_ext_link_stream_start);
 
@@ -171,7 +172,8 @@ void snd_hdac_ext_link_stream_reset(struct hdac_ext_stream *stream)
 
        snd_hdac_ext_link_stream_clear(stream);
 
-       snd_hdac_updatel(stream->pplc_addr, AZX_REG_PPLCCTL, 0, AZX_PPLCCTL_STRST);
+       snd_hdac_updatel(stream->pplc_addr, AZX_REG_PPLCCTL,
+                        AZX_PPLCCTL_STRST, AZX_PPLCCTL_STRST);
        udelay(3);
        timeout = 50;
        do {
@@ -242,7 +244,7 @@ EXPORT_SYMBOL_GPL(snd_hdac_ext_link_set_stream_id);
 void snd_hdac_ext_link_clear_stream_id(struct hdac_ext_link *link,
                                 int stream)
 {
-       snd_hdac_updatew(link->ml_addr, AZX_REG_ML_LOSIDV, 0, (1 << stream));
+       snd_hdac_updatew(link->ml_addr, AZX_REG_ML_LOSIDV, (1 << stream), 0);
 }
 EXPORT_SYMBOL_GPL(snd_hdac_ext_link_clear_stream_id);
 
@@ -415,7 +417,6 @@ void snd_hdac_ext_stream_spbcap_enable(struct hdac_bus *bus,
                                 bool enable, int index)
 {
        u32 mask = 0;
-       u32 register_mask = 0;
 
        if (!bus->spbcap) {
                dev_err(bus->dev, "Address of SPB capability is NULL\n");
@@ -424,12 +425,8 @@ void snd_hdac_ext_stream_spbcap_enable(struct hdac_bus *bus,
 
        mask |= (1 << index);
 
-       register_mask = readl(bus->spbcap + AZX_REG_SPB_SPBFCCTL);
-
-       mask |= register_mask;
-
        if (enable)
-               snd_hdac_updatel(bus->spbcap, AZX_REG_SPB_SPBFCCTL, 0, mask);
+               snd_hdac_updatel(bus->spbcap, AZX_REG_SPB_SPBFCCTL, mask, mask);
        else
                snd_hdac_updatel(bus->spbcap, AZX_REG_SPB_SPBFCCTL, mask, 0);
 }
@@ -503,7 +500,6 @@ void snd_hdac_ext_stream_drsm_enable(struct hdac_bus *bus,
                                bool enable, int index)
 {
        u32 mask = 0;
-       u32 register_mask = 0;
 
        if (!bus->drsmcap) {
                dev_err(bus->dev, "Address of DRSM capability is NULL\n");
@@ -512,12 +508,8 @@ void snd_hdac_ext_stream_drsm_enable(struct hdac_bus *bus,
 
        mask |= (1 << index);
 
-       register_mask = readl(bus->drsmcap + AZX_REG_SPB_SPBFCCTL);
-
-       mask |= register_mask;
-
        if (enable)
-               snd_hdac_updatel(bus->drsmcap, AZX_REG_DRSM_CTL, 0, mask);
+               snd_hdac_updatel(bus->drsmcap, AZX_REG_DRSM_CTL, mask, mask);
        else
                snd_hdac_updatel(bus->drsmcap, AZX_REG_DRSM_CTL, mask, 0);
 }
index 0a50855..26d348b 100644 (file)
@@ -3935,7 +3935,8 @@ void snd_hda_bus_reset_codecs(struct hda_bus *bus)
 
        list_for_each_codec(codec, bus) {
                /* FIXME: maybe a better way needed for forced reset */
-               cancel_delayed_work_sync(&codec->jackpoll_work);
+               if (current_work() != &codec->jackpoll_work.work)
+                       cancel_delayed_work_sync(&codec->jackpoll_work);
 #ifdef CONFIG_PM
                if (hda_codec_is_power_on(codec)) {
                        hda_call_codec_suspend(codec);
index 16e006f..4602464 100644 (file)
@@ -27,6 +27,7 @@
 #define __KVM_HAVE_GUEST_DEBUG
 #define __KVM_HAVE_IRQ_LINE
 #define __KVM_HAVE_READONLY_MEM
+#define __KVM_HAVE_VCPU_EVENTS
 
 #define KVM_COALESCED_MMIO_PAGE_OFFSET 1
 
@@ -125,6 +126,18 @@ struct kvm_sync_regs {
 struct kvm_arch_memory_slot {
 };
 
+/* for KVM_GET/SET_VCPU_EVENTS */
+struct kvm_vcpu_events {
+       struct {
+               __u8 serror_pending;
+               __u8 serror_has_esr;
+               /* Align it to 8 bytes */
+               __u8 pad[6];
+               __u64 serror_esr;
+       } exception;
+       __u32 reserved[12];
+};
+
 /* If you need to interpret the index values, here is the key: */
 #define KVM_REG_ARM_COPROC_MASK                0x000000000FFF0000
 #define KVM_REG_ARM_COPROC_SHIFT       16
index 4e76630..97c3478 100644 (file)
@@ -39,6 +39,7 @@
 #define __KVM_HAVE_GUEST_DEBUG
 #define __KVM_HAVE_IRQ_LINE
 #define __KVM_HAVE_READONLY_MEM
+#define __KVM_HAVE_VCPU_EVENTS
 
 #define KVM_COALESCED_MMIO_PAGE_OFFSET 1
 
@@ -154,6 +155,18 @@ struct kvm_sync_regs {
 struct kvm_arch_memory_slot {
 };
 
+/* for KVM_GET/SET_VCPU_EVENTS */
+struct kvm_vcpu_events {
+       struct {
+               __u8 serror_pending;
+               __u8 serror_has_esr;
+               /* Align it to 8 bytes */
+               __u8 pad[6];
+               __u64 serror_esr;
+       } exception;
+       __u32 reserved[12];
+};
+
 /* If you need to interpret the index values, here is the key: */
 #define KVM_REG_ARM_COPROC_MASK                0x000000000FFF0000
 #define KVM_REG_ARM_COPROC_SHIFT       16
index 4cdaa55..9a50f02 100644 (file)
@@ -4,7 +4,7 @@
 /*
  * KVM s390 specific structures and definitions
  *
- * Copyright IBM Corp. 2008
+ * Copyright IBM Corp. 2008, 2018
  *
  *    Author(s): Carsten Otte <cotte@de.ibm.com>
  *               Christian Borntraeger <borntraeger@de.ibm.com>
@@ -225,6 +225,7 @@ struct kvm_guest_debug_arch {
 #define KVM_SYNC_FPRS   (1UL << 8)
 #define KVM_SYNC_GSCB   (1UL << 9)
 #define KVM_SYNC_BPBC   (1UL << 10)
+#define KVM_SYNC_ETOKEN (1UL << 11)
 /* length and alignment of the sdnx as a power of two */
 #define SDNXC 8
 #define SDNXL (1UL << SDNXC)
@@ -258,6 +259,8 @@ struct kvm_sync_regs {
                struct {
                        __u64 reserved1[2];
                        __u64 gscb[4];
+                       __u64 etoken;
+                       __u64 etoken_extension;
                };
        };
 };
index c535c2f..86299ef 100644 (file)
@@ -378,4 +378,41 @@ struct kvm_sync_regs {
 #define KVM_X86_QUIRK_LINT0_REENABLED  (1 << 0)
 #define KVM_X86_QUIRK_CD_NW_CLEARED    (1 << 1)
 
+#define KVM_STATE_NESTED_GUEST_MODE    0x00000001
+#define KVM_STATE_NESTED_RUN_PENDING   0x00000002
+
+#define KVM_STATE_NESTED_SMM_GUEST_MODE        0x00000001
+#define KVM_STATE_NESTED_SMM_VMXON     0x00000002
+
+struct kvm_vmx_nested_state {
+       __u64 vmxon_pa;
+       __u64 vmcs_pa;
+
+       struct {
+               __u16 flags;
+       } smm;
+};
+
+/* for KVM_CAP_NESTED_STATE */
+struct kvm_nested_state {
+       /* KVM_STATE_* flags */
+       __u16 flags;
+
+       /* 0 for VMX, 1 for SVM.  */
+       __u16 format;
+
+       /* 128 for SVM, 128 + VMCS size for VMX.  */
+       __u32 size;
+
+       union {
+               /* VMXON, VMCS */
+               struct kvm_vmx_nested_state vmx;
+
+               /* Pad the header to 128 bytes.  */
+               __u8 pad[120];
+       };
+
+       __u8 data[0];
+};
+
 #endif /* _ASM_X86_KVM_H */
index b2ec20e..b455930 100644 (file)
@@ -68,6 +68,7 @@ static const char * const map_type_name[] = {
        [BPF_MAP_TYPE_DEVMAP]           = "devmap",
        [BPF_MAP_TYPE_SOCKMAP]          = "sockmap",
        [BPF_MAP_TYPE_CPUMAP]           = "cpumap",
+       [BPF_MAP_TYPE_XSKMAP]           = "xskmap",
        [BPF_MAP_TYPE_SOCKHASH]         = "sockhash",
        [BPF_MAP_TYPE_CGROUP_STORAGE]   = "cgroup_storage",
 };
index dbf6e8b..bbb2a8e 100644 (file)
@@ -286,7 +286,7 @@ static int kvp_key_delete(int pool, const __u8 *key, int key_size)
                 * Found a match; just move the remaining
                 * entries up.
                 */
-               if (i == num_records) {
+               if (i == (num_records - 1)) {
                        kvp_file_info[pool].num_records--;
                        kvp_update_file(pool);
                        return 0;
index 6b0c36a..e569972 100644 (file)
@@ -30,9 +30,12 @@ struct task_struct {
        struct held_lock held_locks[MAX_LOCK_DEPTH];
        gfp_t lockdep_reclaim_gfp;
        int pid;
+       int state;
        char comm[17];
 };
 
+#define TASK_RUNNING 0
+
 extern struct task_struct *__curr(void);
 
 #define current (__curr())
diff --git a/tools/include/linux/nmi.h b/tools/include/linux/nmi.h
new file mode 100644 (file)
index 0000000..e69de29
index 4299067..df4bedb 100644 (file)
@@ -734,9 +734,11 @@ __SYSCALL(__NR_pkey_free,     sys_pkey_free)
 __SYSCALL(__NR_statx,     sys_statx)
 #define __NR_io_pgetevents 292
 __SC_COMP(__NR_io_pgetevents, sys_io_pgetevents, compat_sys_io_pgetevents)
+#define __NR_rseq 293
+__SYSCALL(__NR_rseq, sys_rseq)
 
 #undef __NR_syscalls
-#define __NR_syscalls 293
+#define __NR_syscalls 294
 
 /*
  * 32 bit systems traditionally used different
index 9c660e1..300f336 100644 (file)
@@ -687,6 +687,15 @@ struct drm_get_cap {
  */
 #define DRM_CLIENT_CAP_ASPECT_RATIO    4
 
+/**
+ * DRM_CLIENT_CAP_WRITEBACK_CONNECTORS
+ *
+ * If set to 1, the DRM core will expose special connectors to be used for
+ * writing back to memory the scene setup in the commit. Depends on client
+ * also supporting DRM_CLIENT_CAP_ATOMIC
+ */
+#define DRM_CLIENT_CAP_WRITEBACK_CONNECTORS    5
+
 /** DRM_IOCTL_SET_CLIENT_CAP ioctl argument type */
 struct drm_set_client_cap {
        __u64 capability;
index cf01b68..43391e2 100644 (file)
@@ -164,6 +164,8 @@ enum {
        IFLA_CARRIER_UP_COUNT,
        IFLA_CARRIER_DOWN_COUNT,
        IFLA_NEW_IFINDEX,
+       IFLA_MIN_MTU,
+       IFLA_MAX_MTU,
        __IFLA_MAX
 };
 
@@ -334,6 +336,7 @@ enum {
        IFLA_BRPORT_GROUP_FWD_MASK,
        IFLA_BRPORT_NEIGH_SUPPRESS,
        IFLA_BRPORT_ISOLATED,
+       IFLA_BRPORT_BACKUP_PORT,
        __IFLA_BRPORT_MAX
 };
 #define IFLA_BRPORT_MAX (__IFLA_BRPORT_MAX - 1)
@@ -459,6 +462,16 @@ enum {
 
 #define IFLA_MACSEC_MAX (__IFLA_MACSEC_MAX - 1)
 
+/* XFRM section */
+enum {
+       IFLA_XFRM_UNSPEC,
+       IFLA_XFRM_LINK,
+       IFLA_XFRM_IF_ID,
+       __IFLA_XFRM_MAX
+};
+
+#define IFLA_XFRM_MAX (__IFLA_XFRM_MAX - 1)
+
 enum macsec_validation_type {
        MACSEC_VALIDATE_DISABLED = 0,
        MACSEC_VALIDATE_CHECK = 1,
@@ -920,6 +933,7 @@ enum {
        XDP_ATTACHED_DRV,
        XDP_ATTACHED_SKB,
        XDP_ATTACHED_HW,
+       XDP_ATTACHED_MULTI,
 };
 
 enum {
@@ -928,6 +942,9 @@ enum {
        IFLA_XDP_ATTACHED,
        IFLA_XDP_FLAGS,
        IFLA_XDP_PROG_ID,
+       IFLA_XDP_DRV_PROG_ID,
+       IFLA_XDP_SKB_PROG_ID,
+       IFLA_XDP_HW_PROG_ID,
        __IFLA_XDP_MAX,
 };
 
index b6270a3..07548de 100644 (file)
@@ -949,6 +949,9 @@ struct kvm_ppc_resize_hpt {
 #define KVM_CAP_GET_MSR_FEATURES 153
 #define KVM_CAP_HYPERV_EVENTFD 154
 #define KVM_CAP_HYPERV_TLBFLUSH 155
+#define KVM_CAP_S390_HPAGE_1M 156
+#define KVM_CAP_NESTED_STATE 157
+#define KVM_CAP_ARM_INJECT_SERROR_ESR 158
 
 #ifdef KVM_CAP_IRQ_ROUTING
 
@@ -1391,6 +1394,9 @@ struct kvm_enc_region {
 /* Available with KVM_CAP_HYPERV_EVENTFD */
 #define KVM_HYPERV_EVENTFD        _IOW(KVMIO,  0xbd, struct kvm_hyperv_eventfd)
 
+/* Available with KVM_CAP_NESTED_STATE */
+#define KVM_GET_NESTED_STATE         _IOWR(KVMIO, 0xbe, struct kvm_nested_state)
+#define KVM_SET_NESTED_STATE         _IOW(KVMIO,  0xbf, struct kvm_nested_state)
 
 /* Secure Encrypted Virtualization command */
 enum sev_cmd_id {
index eeb787b..f35eb72 100644 (file)
@@ -144,7 +144,7 @@ enum perf_event_sample_format {
 
        PERF_SAMPLE_MAX = 1U << 20,             /* non-ABI */
 
-       __PERF_SAMPLE_CALLCHAIN_EARLY           = 1ULL << 63,
+       __PERF_SAMPLE_CALLCHAIN_EARLY           = 1ULL << 63, /* non-ABI; internal use */
 };
 
 /*
index c51f8e5..84c3de8 100644 (file)
@@ -65,6 +65,7 @@ struct vhost_iotlb_msg {
 };
 
 #define VHOST_IOTLB_MSG 0x1
+#define VHOST_IOTLB_MSG_V2 0x2
 
 struct vhost_msg {
        int type;
@@ -74,6 +75,15 @@ struct vhost_msg {
        };
 };
 
+struct vhost_msg_v2 {
+       __u32 type;
+       __u32 reserved;
+       union {
+               struct vhost_iotlb_msg iotlb;
+               __u8 padding[64];
+       };
+};
+
 struct vhost_memory_region {
        __u64 guest_phys_addr;
        __u64 memory_size; /* bytes */
@@ -160,6 +170,14 @@ struct vhost_memory {
 #define VHOST_GET_VRING_BUSYLOOP_TIMEOUT _IOW(VHOST_VIRTIO, 0x24,      \
                                         struct vhost_vring_state)
 
+/* Set or get vhost backend capability */
+
+/* Use message type V2 */
+#define VHOST_BACKEND_F_IOTLB_MSG_V2 0x1
+
+#define VHOST_SET_BACKEND_FEATURES _IOW(VHOST_VIRTIO, 0x25, __u64)
+#define VHOST_GET_BACKEND_FEATURES _IOR(VHOST_VIRTIO, 0x26, __u64)
+
 /* VHOST_NET specific defines */
 
 /* Attach virtio net ring to a raw socket, or tap device.
index b3d1b12..5224ade 100644 (file)
@@ -777,14 +777,12 @@ endif
        $(call QUIET_INSTALL, libexec) \
                $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)'
 ifndef NO_LIBBPF
-       $(call QUIET_INSTALL, lib) \
-               $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perf_include_instdir_SQ)/bpf'
-       $(call QUIET_INSTALL, include/bpf) \
-               $(INSTALL) include/bpf/*.h '$(DESTDIR_SQ)$(perf_include_instdir_SQ)/bpf'
-       $(call QUIET_INSTALL, lib) \
-               $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perf_examples_instdir_SQ)/bpf'
-       $(call QUIET_INSTALL, examples/bpf) \
-               $(INSTALL) examples/bpf/*.c '$(DESTDIR_SQ)$(perf_examples_instdir_SQ)/bpf'
+       $(call QUIET_INSTALL, bpf-headers) \
+               $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perf_include_instdir_SQ)/bpf'; \
+               $(INSTALL) include/bpf/*.h -t '$(DESTDIR_SQ)$(perf_include_instdir_SQ)/bpf'
+       $(call QUIET_INSTALL, bpf-examples) \
+               $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perf_examples_instdir_SQ)/bpf'; \
+               $(INSTALL) examples/bpf/*.c -t '$(DESTDIR_SQ)$(perf_examples_instdir_SQ)/bpf'
 endif
        $(call QUIET_INSTALL, perf-archive) \
                $(INSTALL) $(OUTPUT)perf-archive -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)'
index f013b11..dbef716 100644 (file)
@@ -11,7 +11,8 @@ PERF_HAVE_ARCH_REGS_QUERY_REGISTER_OFFSET := 1
 
 out    := $(OUTPUT)arch/arm64/include/generated/asm
 header := $(out)/syscalls.c
-sysdef := $(srctree)/tools/include/uapi/asm-generic/unistd.h
+incpath := $(srctree)/tools
+sysdef := $(srctree)/tools/arch/arm64/include/uapi/asm/unistd.h
 sysprf := $(srctree)/tools/perf/arch/arm64/entry/syscalls/
 systbl := $(sysprf)/mksyscalltbl
 
@@ -19,7 +20,7 @@ systbl := $(sysprf)/mksyscalltbl
 _dummy := $(shell [ -d '$(out)' ] || mkdir -p '$(out)')
 
 $(header): $(sysdef) $(systbl)
-       $(Q)$(SHELL) '$(systbl)' '$(CC)' '$(HOSTCC)' $(sysdef) > $@
+       $(Q)$(SHELL) '$(systbl)' '$(CC)' '$(HOSTCC)' $(incpath) $(sysdef) > $@
 
 clean::
        $(call QUIET_CLEAN, arm64) $(RM) $(header)
index 52e1973..2dbb8ca 100755 (executable)
@@ -11,7 +11,8 @@
 
 gcc=$1
 hostcc=$2
-input=$3
+incpath=$3
+input=$4
 
 if ! test -r $input; then
        echo "Could not read input file" >&2
@@ -28,7 +29,6 @@ create_table_from_c()
 
        cat <<-_EoHEADER
                #include <stdio.h>
-               #define __ARCH_WANT_RENAMEAT
                #include "$input"
                int main(int argc, char *argv[])
                {
@@ -42,7 +42,7 @@ create_table_from_c()
        printf "%s\n" " printf(\"#define SYSCALLTBL_ARM64_MAX_ID %d\\n\", __NR_$last_sc);"
        printf "}\n"
 
-       } | $hostcc -o $create_table_exe -x c -
+       } | $hostcc -I $incpath/include/uapi -o $create_table_exe -x c -
 
        $create_table_exe
 
index 20e7d74..10a44e9 100644 (file)
@@ -22,15 +22,16 @@ bool elf__needs_adjust_symbols(GElf_Ehdr ehdr)
 
 #endif
 
-#if !defined(_CALL_ELF) || _CALL_ELF != 2
 int arch__choose_best_symbol(struct symbol *syma,
                             struct symbol *symb __maybe_unused)
 {
        char *sym = syma->name;
 
+#if !defined(_CALL_ELF) || _CALL_ELF != 2
        /* Skip over any initial dot */
        if (*sym == '.')
                sym++;
+#endif
 
        /* Avoid "SyS" kernel syscall aliases */
        if (strlen(sym) >= 3 && !strncmp(sym, "SyS", 3))
@@ -41,6 +42,7 @@ int arch__choose_best_symbol(struct symbol *syma,
        return SYMBOL_A;
 }
 
+#if !defined(_CALL_ELF) || _CALL_ELF != 2
 /* Allow matching against dot variants */
 int arch__compare_symbol_names(const char *namea, const char *nameb)
 {
index c1bd979..613709c 100644 (file)
@@ -9,6 +9,7 @@ struct test;
 int test__rdpmc(struct test *test __maybe_unused, int subtest);
 int test__perf_time_to_tsc(struct test *test __maybe_unused, int subtest);
 int test__insn_x86(struct test *test __maybe_unused, int subtest);
+int test__bp_modify(struct test *test, int subtest);
 
 #ifdef HAVE_DWARF_UNWIND_SUPPORT
 struct thread;
index 8e2c5a3..586849f 100644 (file)
@@ -5,3 +5,4 @@ libperf-y += arch-tests.o
 libperf-y += rdpmc.o
 libperf-y += perf-time-to-tsc.o
 libperf-$(CONFIG_AUXTRACE) += insn-x86.o
+libperf-$(CONFIG_X86_64) += bp-modify.o
index cc1802f..d47d3f8 100644 (file)
@@ -23,6 +23,12 @@ struct test arch_tests[] = {
                .desc = "x86 instruction decoder - new instructions",
                .func = test__insn_x86,
        },
+#endif
+#if defined(__x86_64__)
+       {
+               .desc = "x86 bp modify",
+               .func = test__bp_modify,
+       },
 #endif
        {
                .func = NULL,
diff --git a/tools/perf/arch/x86/tests/bp-modify.c b/tools/perf/arch/x86/tests/bp-modify.c
new file mode 100644 (file)
index 0000000..f53e440
--- /dev/null
@@ -0,0 +1,213 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/compiler.h>
+#include <sys/types.h>
+#include <sys/wait.h>
+#include <sys/user.h>
+#include <syscall.h>
+#include <unistd.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <sys/ptrace.h>
+#include <asm/ptrace.h>
+#include <errno.h>
+#include "debug.h"
+#include "tests/tests.h"
+#include "arch-tests.h"
+
+static noinline int bp_1(void)
+{
+       pr_debug("in %s\n", __func__);
+       return 0;
+}
+
+static noinline int bp_2(void)
+{
+       pr_debug("in %s\n", __func__);
+       return 0;
+}
+
+static int spawn_child(void)
+{
+       int child = fork();
+
+       if (child == 0) {
+               /*
+                * The child sets itself for as tracee and
+                * waits in signal for parent to trace it,
+                * then it calls bp_1 and quits.
+                */
+               int err = ptrace(PTRACE_TRACEME, 0, NULL, NULL);
+
+               if (err) {
+                       pr_debug("failed to PTRACE_TRACEME\n");
+                       exit(1);
+               }
+
+               raise(SIGCONT);
+               bp_1();
+               exit(0);
+       }
+
+       return child;
+}
+
+/*
+ * This tests creates HW breakpoint, tries to
+ * change it and checks it was properly changed.
+ */
+static int bp_modify1(void)
+{
+       pid_t child;
+       int status;
+       unsigned long rip = 0, dr7 = 1;
+
+       child = spawn_child();
+
+       waitpid(child, &status, 0);
+       if (WIFEXITED(status)) {
+               pr_debug("tracee exited prematurely 1\n");
+               return TEST_FAIL;
+       }
+
+       /*
+        * The parent does following steps:
+        *  - creates a new breakpoint (id 0) for bp_2 function
+        *  - changes that breakponit to bp_1 function
+        *  - waits for the breakpoint to hit and checks
+        *    it has proper rip of bp_1 function
+        *  - detaches the child
+        */
+       if (ptrace(PTRACE_POKEUSER, child,
+                  offsetof(struct user, u_debugreg[0]), bp_2)) {
+               pr_debug("failed to set breakpoint, 1st time: %s\n",
+                        strerror(errno));
+               goto out;
+       }
+
+       if (ptrace(PTRACE_POKEUSER, child,
+                  offsetof(struct user, u_debugreg[0]), bp_1)) {
+               pr_debug("failed to set breakpoint, 2nd time: %s\n",
+                        strerror(errno));
+               goto out;
+       }
+
+       if (ptrace(PTRACE_POKEUSER, child,
+                  offsetof(struct user, u_debugreg[7]), dr7)) {
+               pr_debug("failed to set dr7: %s\n", strerror(errno));
+               goto out;
+       }
+
+       if (ptrace(PTRACE_CONT, child, NULL, NULL)) {
+               pr_debug("failed to PTRACE_CONT: %s\n", strerror(errno));
+               goto out;
+       }
+
+       waitpid(child, &status, 0);
+       if (WIFEXITED(status)) {
+               pr_debug("tracee exited prematurely 2\n");
+               return TEST_FAIL;
+       }
+
+       rip = ptrace(PTRACE_PEEKUSER, child,
+                    offsetof(struct user_regs_struct, rip), NULL);
+       if (rip == (unsigned long) -1) {
+               pr_debug("failed to PTRACE_PEEKUSER: %s\n",
+                        strerror(errno));
+               goto out;
+       }
+
+       pr_debug("rip %lx, bp_1 %p\n", rip, bp_1);
+
+out:
+       if (ptrace(PTRACE_DETACH, child, NULL, NULL)) {
+               pr_debug("failed to PTRACE_DETACH: %s", strerror(errno));
+               return TEST_FAIL;
+       }
+
+       return rip == (unsigned long) bp_1 ? TEST_OK : TEST_FAIL;
+}
+
+/*
+ * This tests creates HW breakpoint, tries to
+ * change it to bogus value and checks the original
+ * breakpoint is hit.
+ */
+static int bp_modify2(void)
+{
+       pid_t child;
+       int status;
+       unsigned long rip = 0, dr7 = 1;
+
+       child = spawn_child();
+
+       waitpid(child, &status, 0);
+       if (WIFEXITED(status)) {
+               pr_debug("tracee exited prematurely 1\n");
+               return TEST_FAIL;
+       }
+
+       /*
+        * The parent does following steps:
+        *  - creates a new breakpoint (id 0) for bp_1 function
+        *  - tries to change that breakpoint to (-1) address
+        *  - waits for the breakpoint to hit and checks
+        *    it has proper rip of bp_1 function
+        *  - detaches the child
+        */
+       if (ptrace(PTRACE_POKEUSER, child,
+                  offsetof(struct user, u_debugreg[0]), bp_1)) {
+               pr_debug("failed to set breakpoint: %s\n",
+                        strerror(errno));
+               goto out;
+       }
+
+       if (ptrace(PTRACE_POKEUSER, child,
+                  offsetof(struct user, u_debugreg[7]), dr7)) {
+               pr_debug("failed to set dr7: %s\n", strerror(errno));
+               goto out;
+       }
+
+       if (!ptrace(PTRACE_POKEUSER, child,
+                  offsetof(struct user, u_debugreg[0]), (unsigned long) (-1))) {
+               pr_debug("failed, breakpoint set to bogus address\n");
+               goto out;
+       }
+
+       if (ptrace(PTRACE_CONT, child, NULL, NULL)) {
+               pr_debug("failed to PTRACE_CONT: %s\n", strerror(errno));
+               goto out;
+       }
+
+       waitpid(child, &status, 0);
+       if (WIFEXITED(status)) {
+               pr_debug("tracee exited prematurely 2\n");
+               return TEST_FAIL;
+       }
+
+       rip = ptrace(PTRACE_PEEKUSER, child,
+                    offsetof(struct user_regs_struct, rip), NULL);
+       if (rip == (unsigned long) -1) {
+               pr_debug("failed to PTRACE_PEEKUSER: %s\n",
+                        strerror(errno));
+               goto out;
+       }
+
+       pr_debug("rip %lx, bp_1 %p\n", rip, bp_1);
+
+out:
+       if (ptrace(PTRACE_DETACH, child, NULL, NULL)) {
+               pr_debug("failed to PTRACE_DETACH: %s", strerror(errno));
+               return TEST_FAIL;
+       }
+
+       return rip == (unsigned long) bp_1 ? TEST_OK : TEST_FAIL;
+}
+
+int test__bp_modify(struct test *test __maybe_unused,
+                   int subtest __maybe_unused)
+{
+       TEST_ASSERT_VAL("modify test 1 failed\n", !bp_modify1());
+       TEST_ASSERT_VAL("modify test 2 failed\n", !bp_modify2());
+
+       return 0;
+}
index 20061cf..28cd6a1 100644 (file)
@@ -246,8 +246,14 @@ find_target:
 
 indirect_call:
        tok = strchr(endptr, '*');
-       if (tok != NULL)
-               ops->target.addr = strtoull(tok + 1, NULL, 16);
+       if (tok != NULL) {
+               endptr++;
+
+               /* Indirect call can use a non-rip register and offset: callq  *0x8(%rbx).
+                * Do not parse such instruction.  */
+               if (strstr(endptr, "(%r") == NULL)
+                       ops->target.addr = strtoull(endptr, NULL, 16);
+       }
        goto find_target;
 }
 
@@ -276,7 +282,19 @@ bool ins__is_call(const struct ins *ins)
        return ins->ops == &call_ops || ins->ops == &s390_call_ops;
 }
 
-static int jump__parse(struct arch *arch __maybe_unused, struct ins_operands *ops, struct map_symbol *ms)
+/*
+ * Prevents from matching commas in the comment section, e.g.:
+ * ffff200008446e70:       b.cs    ffff2000084470f4 <generic_exec_single+0x314>  // b.hs, b.nlast
+ */
+static inline const char *validate_comma(const char *c, struct ins_operands *ops)
+{
+       if (ops->raw_comment && c > ops->raw_comment)
+               return NULL;
+
+       return c;
+}
+
+static int jump__parse(struct arch *arch, struct ins_operands *ops, struct map_symbol *ms)
 {
        struct map *map = ms->map;
        struct symbol *sym = ms->sym;
@@ -285,6 +303,10 @@ static int jump__parse(struct arch *arch __maybe_unused, struct ins_operands *op
        };
        const char *c = strchr(ops->raw, ',');
        u64 start, end;
+
+       ops->raw_comment = strchr(ops->raw, arch->objdump.comment_char);
+       c = validate_comma(c, ops);
+
        /*
         * Examples of lines to parse for the _cpp_lex_token@@Base
         * function:
@@ -304,6 +326,7 @@ static int jump__parse(struct arch *arch __maybe_unused, struct ins_operands *op
                ops->target.addr = strtoull(c, NULL, 16);
                if (!ops->target.addr) {
                        c = strchr(c, ',');
+                       c = validate_comma(c, ops);
                        if (c++ != NULL)
                                ops->target.addr = strtoull(c, NULL, 16);
                }
@@ -361,9 +384,12 @@ static int jump__scnprintf(struct ins *ins, char *bf, size_t size,
                return scnprintf(bf, size, "%-6s %s", ins->name, ops->target.sym->name);
 
        c = strchr(ops->raw, ',');
+       c = validate_comma(c, ops);
+
        if (c != NULL) {
                const char *c2 = strchr(c + 1, ',');
 
+               c2 = validate_comma(c2, ops);
                /* check for 3-op insn */
                if (c2 != NULL)
                        c = c2;
index 005a5fe..5399ba2 100644 (file)
@@ -22,6 +22,7 @@ struct ins {
 
 struct ins_operands {
        char    *raw;
+       char    *raw_comment;
        struct {
                char    *raw;
                char    *name;
index c980bbf..1a61628 100644 (file)
@@ -251,8 +251,9 @@ struct perf_evsel *perf_evsel__new_idx(struct perf_event_attr *attr, int idx)
 {
        struct perf_evsel *evsel = zalloc(perf_evsel__object.size);
 
-       if (evsel != NULL)
-               perf_evsel__init(evsel, attr, idx);
+       if (!evsel)
+               return NULL;
+       perf_evsel__init(evsel, attr, idx);
 
        if (perf_evsel__is_bpf_output(evsel)) {
                evsel->attr.sample_type |= (PERF_SAMPLE_RAW | PERF_SAMPLE_TIME |
index 36d0763..6a6929f 100644 (file)
@@ -576,6 +576,13 @@ struct symbol *map_groups__find_symbol(struct map_groups *mg,
        return NULL;
 }
 
+static bool map__contains_symbol(struct map *map, struct symbol *sym)
+{
+       u64 ip = map->unmap_ip(map, sym->start);
+
+       return ip >= map->start && ip < map->end;
+}
+
 struct symbol *maps__find_symbol_by_name(struct maps *maps, const char *name,
                                         struct map **mapp)
 {
@@ -591,6 +598,10 @@ struct symbol *maps__find_symbol_by_name(struct maps *maps, const char *name,
 
                if (sym == NULL)
                        continue;
+               if (!map__contains_symbol(pos, sym)) {
+                       sym = NULL;
+                       continue;
+               }
                if (mapp != NULL)
                        *mapp = pos;
                goto out;
index c85d0d1..7b0ca7c 100644 (file)
@@ -377,7 +377,7 @@ out:
 
 static int record_saved_cmdline(void)
 {
-       unsigned int size;
+       unsigned long long size;
        char *path;
        struct stat st;
        int ret, err = 0;
index 920b1d5..e76214f 100644 (file)
@@ -164,16 +164,15 @@ void parse_ftrace_printk(struct tep_handle *pevent,
 void parse_saved_cmdline(struct tep_handle *pevent,
                         char *file, unsigned int size __maybe_unused)
 {
-       char *comm;
+       char comm[17]; /* Max comm length in the kernel is 16. */
        char *line;
        char *next = NULL;
        int pid;
 
        line = strtok_r(file, "\n", &next);
        while (line) {
-               sscanf(line, "%d %ms", &pid, &comm);
-               tep_register_comm(pevent, comm, pid);
-               free(comm);
+               if (sscanf(line, "%d %16s", &pid, comm) == 2)
+                       tep_register_comm(pevent, comm, pid);
                line = strtok_r(NULL, "\n", &next);
        }
 }
index f8cc38a..32a194e 100755 (executable)
@@ -46,6 +46,9 @@
 # Kselftest framework requirement - SKIP code is 4.
 ksft_skip=4
 
+# Some systems don't have a ping6 binary anymore
+which ping6 > /dev/null 2>&1 && ping6=$(which ping6) || ping6=$(which ping)
+
 tests="
        pmtu_vti6_exception             vti6: PMTU exceptions
        pmtu_vti4_exception             vti4: PMTU exceptions
@@ -274,7 +277,7 @@ test_pmtu_vti6_exception() {
        mtu "${ns_b}" veth_b 4000
        mtu "${ns_a}" vti6_a 5000
        mtu "${ns_b}" vti6_b 5000
-       ${ns_a} ping6 -q -i 0.1 -w 2 -s 60000 ${vti6_b_addr} > /dev/null
+       ${ns_a} ${ping6} -q -i 0.1 -w 2 -s 60000 ${vti6_b_addr} > /dev/null
 
        # Check that exception was created
        if [ "$(route_get_dst_pmtu_from_exception "${ns_a}" ${vti6_b_addr})" = "" ]; then
@@ -334,7 +337,7 @@ test_pmtu_vti4_link_add_mtu() {
        fail=0
 
        min=68
-       max=$((65528 - 20))
+       max=$((65535 - 20))
        # Check invalid values first
        for v in $((min - 1)) $((max + 1)); do
                ${ns_a} ip link add vti4_a mtu ${v} type vti local ${veth4_a_addr} remote ${veth4_b_addr} key 10 2>/dev/null
index f03763d..30f9b54 100644 (file)
             "$TC actions flush action police"
         ]
     },
+    {
+        "id": "6aaf",
+        "name": "Add police actions with conform-exceed control pass/pipe [with numeric values]",
+        "category": [
+            "actions",
+            "police"
+        ],
+        "setup": [
+            [
+                "$TC actions flush action police",
+                0,
+                1,
+                255
+            ]
+        ],
+        "cmdUnderTest": "$TC actions add action police rate 3mbit burst 250k conform-exceed 0/3 index 1",
+        "expExitCode": "0",
+        "verifyCmd": "$TC actions get action police index 1",
+        "matchPattern": "action order [0-9]*:  police 0x1 rate 3Mbit burst 250Kb mtu 2Kb action pass/pipe",
+        "matchCount": "1",
+        "teardown": [
+            "$TC actions flush action police"
+        ]
+    },
+    {
+        "id": "29b1",
+        "name": "Add police actions with conform-exceed control <invalid>/drop",
+        "category": [
+            "actions",
+            "police"
+        ],
+        "setup": [
+            [
+                "$TC actions flush action police",
+                0,
+                1,
+                255
+            ]
+        ],
+        "cmdUnderTest": "$TC actions add action police rate 3mbit burst 250k conform-exceed 10/drop index 1",
+        "expExitCode": "255",
+        "verifyCmd": "$TC actions ls action police",
+        "matchPattern": "action order [0-9]*:  police 0x1 rate 3Mbit burst 250Kb mtu 2Kb action ",
+        "matchCount": "0",
+        "teardown": [
+            "$TC actions flush action police"
+        ]
+    },
     {
         "id": "c26f",
         "name": "Add police action with invalid peakrate value",
index 30cb0a0..37908a8 100644 (file)
@@ -159,12 +159,6 @@ static const char * const page_flag_names[] = {
 };
 
 
-static const char * const debugfs_known_mountpoints[] = {
-       "/sys/kernel/debug",
-       "/debug",
-       0,
-};
-
 /*
  * data structures
  */
index f82c2ea..334b16d 100644 (file)
@@ -30,8 +30,8 @@ struct slabinfo {
        int alias;
        int refs;
        int aliases, align, cache_dma, cpu_slabs, destroy_by_rcu;
-       int hwcache_align, object_size, objs_per_slab;
-       int sanity_checks, slab_size, store_user, trace;
+       unsigned int hwcache_align, object_size, objs_per_slab;
+       unsigned int sanity_checks, slab_size, store_user, trace;
        int order, poison, reclaim_account, red_zone;
        unsigned long partial, objects, slabs, objects_partial, objects_total;
        unsigned long alloc_fastpath, alloc_slowpath;