Merge branch 'sched-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel...
author	Linus Torvalds <torvalds@linux-foundation.org>
	Mon, 13 Aug 2018 18:25:07 +0000 (11:25 -0700)
committer	Linus Torvalds <torvalds@linux-foundation.org>
	Mon, 13 Aug 2018 18:25:07 +0000 (11:25 -0700)
Pull scheduler updates from Thomas Gleixner:

 - Cleanup and improvement of NUMA balancing

 - Refactoring and improvements to the PELT (Per Entity Load Tracking)
   code

 - Watchdog simplification and related cleanups

 - The usual pile of small incremental fixes and improvements

* 'sched-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (41 commits)
  watchdog: Reduce message verbosity
  stop_machine: Reflow cpu_stop_queue_two_works()
  sched/numa: Move task_numa_placement() closer to numa_migrate_preferred()
  sched/numa: Use group_weights to identify if migration degrades locality
  sched/numa: Update the scan period without holding the numa_group lock
  sched/numa: Remove numa_has_capacity()
  sched/numa: Modify migrate_swap() to accept additional parameters
  sched/numa: Remove unused task_capacity from 'struct numa_stats'
  sched/numa: Skip nodes that are at 'hoplimit'
  sched/debug: Reverse the order of printing faults
  sched/numa: Use task faults only if numa_group is not yet set up
  sched/numa: Set preferred_node based on best_cpu
  sched/numa: Simplify load_too_imbalanced()
  sched/numa: Evaluate move once per node
  sched/numa: Remove redundant field
  sched/debug: Show the sum wait time of a task group
  sched/fair: Remove #ifdefs from scale_rt_capacity()
  sched/core: Remove get_cpu() from sched_fork()
  sched/cpufreq: Clarify sugov_get_util()
  sched/sysctl: Remove unused sched_time_avg_ms sysctl
  ...

562 files changed:
Documentation/RCU/Design/Data-Structures/Data-Structures.html
Documentation/RCU/Design/Memory-Ordering/Tree-RCU-Memory-Ordering.html
Documentation/RCU/Design/Memory-Ordering/TreeRCU-gp-cleanup.svg
Documentation/RCU/Design/Memory-Ordering/TreeRCU-gp-init-1.svg
Documentation/RCU/Design/Memory-Ordering/TreeRCU-gp-init-3.svg
Documentation/RCU/Design/Memory-Ordering/TreeRCU-gp.svg
Documentation/RCU/Design/Memory-Ordering/TreeRCU-qs.svg
Documentation/RCU/stallwarn.txt
Documentation/RCU/whatisRCU.txt
Documentation/admin-guide/kernel-parameters.txt
Documentation/devicetree/bindings/interrupt-controller/ingenic,intc.txt
Documentation/devicetree/bindings/interrupt-controller/renesas,irqc.txt
Documentation/devicetree/bindings/usb/rockchip,dwc3.txt
Documentation/networking/dpaa2/overview.rst
MAINTAINERS
Makefile
arch/arc/Kconfig
arch/arc/include/asm/cache.h
arch/arc/include/asm/delay.h
arch/arc/mm/cache.c
arch/arc/mm/dma.c
arch/arc/plat-eznps/include/plat/ctop.h
arch/arc/plat-eznps/mtm.c
arch/arm/Kconfig
arch/arm/include/asm/efi.h
arch/arm/include/asm/irq.h
arch/arm/include/asm/mach/arch.h
arch/arm/kernel/entry-armv.S
arch/arm/kernel/entry-common.S
arch/arm/kernel/irq.c
arch/arm/kernel/process.c
arch/arm/kernel/setup.c
arch/arm/mach-rpc/ecard.c
arch/arm64/Kconfig
arch/arm64/crypto/aes-ce-ccm-core.S
arch/arm64/crypto/ghash-ce-core.S
arch/arm64/crypto/ghash-ce-glue.c
arch/arm64/include/asm/efi.h
arch/arm64/include/asm/irq.h
arch/arm64/include/asm/tlb.h
arch/arm64/kernel/cpufeature.c
arch/arm64/kernel/irq.c
arch/arm64/mm/hugetlbpage.c
arch/arm64/mm/init.c
arch/ia64/include/asm/tlb.h
arch/ia64/mm/init.c
arch/m68k/Kconfig
arch/m68k/apollo/config.c
arch/m68k/atari/config.c
arch/m68k/atari/time.c
arch/m68k/bvme6000/config.c
arch/m68k/configs/amiga_defconfig
arch/m68k/configs/apollo_defconfig
arch/m68k/configs/atari_defconfig
arch/m68k/configs/bvme6000_defconfig
arch/m68k/configs/hp300_defconfig
arch/m68k/configs/mac_defconfig
arch/m68k/configs/multi_defconfig
arch/m68k/configs/mvme147_defconfig
arch/m68k/configs/mvme16x_defconfig
arch/m68k/configs/q40_defconfig
arch/m68k/configs/sun3_defconfig
arch/m68k/configs/sun3x_defconfig
arch/m68k/include/asm/Kbuild
arch/m68k/include/asm/bitops.h
arch/m68k/include/asm/dma-mapping.h [deleted file]
arch/m68k/include/asm/io.h
arch/m68k/include/asm/io_mm.h
arch/m68k/include/asm/io_no.h
arch/m68k/include/asm/kmap.h
arch/m68k/include/asm/machdep.h
arch/m68k/include/asm/macintosh.h
arch/m68k/include/asm/page_no.h
arch/m68k/kernel/dma.c
arch/m68k/kernel/setup_mm.c
arch/m68k/kernel/setup_no.c
arch/m68k/mac/config.c
arch/m68k/mac/misc.c
arch/m68k/mm/init.c
arch/m68k/mm/mcfmmu.c
arch/m68k/mm/motorola.c
arch/m68k/mvme147/config.c
arch/m68k/mvme16x/config.c
arch/m68k/q40/config.c
arch/m68k/sun3/config.c
arch/mips/ath79/common.c
arch/mips/bcm47xx/setup.c
arch/mips/include/asm/mipsregs.h
arch/mips/pci/pci.c
arch/openrisc/Kconfig
arch/openrisc/include/asm/irq.h
arch/openrisc/kernel/irq.c
arch/parisc/Kconfig
arch/parisc/include/asm/barrier.h [new file with mode: 0644]
arch/parisc/kernel/entry.S
arch/parisc/kernel/pacache.S
arch/parisc/kernel/syscall.S
arch/powerpc/include/asm/mmu_context.h
arch/powerpc/kernel/pci-common.c
arch/powerpc/net/bpf_jit_comp64.c
arch/powerpc/platforms/powernv/pci-ioda.c
arch/powerpc/platforms/pseries/setup.c
arch/s390/Kconfig
arch/sparc/include/asm/Kbuild
arch/sparc/include/asm/msi.h [deleted file]
arch/sparc/kernel/time_64.c
arch/sparc/mm/srmmu.c
arch/x86/boot/compressed/Makefile
arch/x86/boot/compressed/eboot.c
arch/x86/boot/compressed/eboot.h
arch/x86/boot/compressed/pgtable_64.c
arch/x86/crypto/aegis128-aesni-glue.c
arch/x86/crypto/aegis128l-aesni-glue.c
arch/x86/crypto/aegis256-aesni-glue.c
arch/x86/crypto/morus1280-avx2-glue.c
arch/x86/crypto/morus1280-sse2-glue.c
arch/x86/crypto/morus640-sse2-glue.c
arch/x86/entry/entry_64.S
arch/x86/events/amd/ibs.c
arch/x86/events/intel/core.c
arch/x86/events/intel/ds.c
arch/x86/events/intel/uncore.h
arch/x86/events/intel/uncore_snbep.c
arch/x86/include/asm/qspinlock_paravirt.h
arch/x86/kernel/apic/apic.c
arch/x86/kernel/cpu/mcheck/mce.c
arch/x86/kvm/mmu.c
arch/x86/kvm/vmx.c
arch/x86/net/bpf_jit_comp32.c
arch/x86/platform/efi/efi_64.c
arch/x86/platform/efi/quirks.c
arch/x86/um/mem_32.c
block/bio.c
block/blk-core.c
block/blk-mq-tag.c
block/blk-mq.c
drivers/acpi/acpi_lpss.c
drivers/acpi/acpica/psloop.c
drivers/base/dd.c
drivers/block/nbd.c
drivers/block/zram/zram_drv.c
drivers/char/mem.c
drivers/char/random.c
drivers/clk/clk-aspeed.c
drivers/clk/clk.c
drivers/clk/meson/clk-audio-divider.c
drivers/clk/meson/gxbb.c
drivers/clk/mvebu/armada-37xx-periph.c
drivers/clk/qcom/gcc-msm8996.c
drivers/clk/qcom/mmcc-msm8996.c
drivers/cpufreq/intel_pstate.c
drivers/cpufreq/qcom-cpufreq-kryo.c
drivers/crypto/padlock-aes.c
drivers/firmware/efi/Kconfig
drivers/firmware/efi/cper.c
drivers/firmware/efi/efi.c
drivers/firmware/efi/esrt.c
drivers/firmware/efi/libstub/arm-stub.c
drivers/firmware/efi/libstub/efi-stub-helper.c
drivers/firmware/efi/libstub/efistub.h
drivers/firmware/efi/runtime-wrappers.c
drivers/gpio/gpio-uniphier.c
drivers/gpio/gpiolib-acpi.c
drivers/gpio/gpiolib-of.c
drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
drivers/gpu/drm/drm_atomic_helper.c
drivers/gpu/drm/drm_context.c
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/intel_ddi.c
drivers/gpu/drm/i915/intel_display.c
drivers/gpu/drm/i915/intel_drv.h
drivers/gpu/drm/imx/imx-ldb.c
drivers/gpu/drm/vc4/vc4_plane.c
drivers/gpu/ipu-v3/ipu-csi.c
drivers/i2c/busses/i2c-davinci.c
drivers/i2c/busses/i2c-imx.c
drivers/i2c/busses/i2c-rcar.c
drivers/i2c/busses/i2c-xlp9xx.c
drivers/i2c/i2c-core-base.c
drivers/i2c/i2c-mux.c
drivers/infiniband/core/uverbs_cmd.c
drivers/input/keyboard/hilkbd.c
drivers/input/mouse/elan_i2c_core.c
drivers/input/serio/i8042-x86ia64io.h
drivers/irqchip/Kconfig
drivers/irqchip/irq-gic-v3-its-fsl-mc-msi.c
drivers/irqchip/irq-gic-v3-its-pci-msi.c
drivers/irqchip/irq-gic-v3-its-platform-msi.c
drivers/irqchip/irq-gic-v3-its.c
drivers/irqchip/irq-gic-v3.c
drivers/irqchip/irq-ingenic.c
drivers/irqchip/irq-stm32-exti.c
drivers/media/platform/vsp1/vsp1_drm.c
drivers/media/rc/bpf-lirc.c
drivers/media/rc/rc-ir-raw.c
drivers/media/rc/rc-main.c
drivers/mmc/host/mxcmmc.c
drivers/net/bonding/bond_main.c
drivers/net/bonding/bond_options.c
drivers/net/can/m_can/m_can.c
drivers/net/can/mscan/mpc5xxx_can.c
drivers/net/can/peak_canfd/peak_pciefd_main.c
drivers/net/can/usb/ems_usb.c
drivers/net/can/xilinx_can.c
drivers/net/dsa/mv88e6xxx/chip.c
drivers/net/ethernet/3com/Kconfig
drivers/net/ethernet/8390/mac8390.c
drivers/net/ethernet/amazon/ena/ena_com.c
drivers/net/ethernet/amd/Kconfig
drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
drivers/net/ethernet/atheros/atl1c/atl1c_main.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
drivers/net/ethernet/cavium/thunder/thunder_bgx.c
drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
drivers/net/ethernet/cirrus/Kconfig
drivers/net/ethernet/cisco/enic/enic_main.c
drivers/net/ethernet/huawei/hinic/hinic_main.c
drivers/net/ethernet/huawei/hinic/hinic_tx.c
drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
drivers/net/ethernet/mellanox/mlx5/core/alloc.c
drivers/net/ethernet/mellanox/mlx5/core/en.h
drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
drivers/net/ethernet/mellanox/mlx5/core/wq.c
drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c
drivers/net/ethernet/netronome/nfp/flower/main.c
drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
drivers/net/ethernet/qlogic/qed/qed_l2.c
drivers/net/ethernet/qlogic/qed/qed_l2.h
drivers/net/ethernet/qlogic/qed/qed_mcp.c
drivers/net/ethernet/qlogic/qed/qed_sriov.c
drivers/net/ethernet/qlogic/qed/qed_vf.c
drivers/net/ethernet/qlogic/qed/qed_vf.h
drivers/net/ethernet/realtek/r8169.c
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
drivers/net/ethernet/ti/cpsw.c
drivers/net/ethernet/ti/cpsw_ale.c
drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c
drivers/net/netdevsim/devlink.c
drivers/net/phy/mdio-mux-bcm-iproc.c
drivers/net/phy/phy.c
drivers/net/usb/lan78xx.c
drivers/net/usb/qmi_wwan.c
drivers/net/virtio_net.c
drivers/net/vxlan.c
drivers/net/wan/lmc/lmc_main.c
drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
drivers/net/wireless/intel/iwlwifi/cfg/9000.c
drivers/net/wireless/intel/iwlwifi/iwl-config.h
drivers/net/wireless/intel/iwlwifi/pcie/drv.c
drivers/net/xen-netfront.c
drivers/nubus/bus.c
drivers/nvme/host/fabrics.c
drivers/nvme/host/fabrics.h
drivers/nvme/host/fc.c
drivers/nvme/host/rdma.c
drivers/nvme/target/configfs.c
drivers/nvme/target/core.c
drivers/nvme/target/fc.c
drivers/nvme/target/loop.c
drivers/pci/bus.c
drivers/pci/controller/pcie-mobiveil.c
drivers/pci/hotplug/acpiphp_glue.c
drivers/pci/pci.h
drivers/pci/pcie/err.c
drivers/pci/probe.c
drivers/pci/remove.c
drivers/phy/broadcom/phy-brcm-usb-init.c
drivers/phy/motorola/phy-mapphone-mdm6600.c
drivers/scsi/fcoe/fcoe_ctlr.c
drivers/scsi/libfc/fc_rport.c
drivers/scsi/libiscsi.c
drivers/scsi/mpt3sas/mpt3sas_base.c
drivers/scsi/qedi/qedi_main.c
drivers/scsi/qla2xxx/qla_attr.c
drivers/scsi/qla2xxx/qla_gbl.h
drivers/scsi/qla2xxx/qla_gs.c
drivers/scsi/qla2xxx/qla_init.c
drivers/scsi/qla2xxx/qla_inline.h
drivers/scsi/qla2xxx/qla_iocb.c
drivers/scsi/qla2xxx/qla_isr.c
drivers/scsi/qla2xxx/qla_mbx.c
drivers/scsi/qla2xxx/qla_mid.c
drivers/scsi/qla2xxx/qla_os.c
drivers/scsi/qla2xxx/qla_sup.c
drivers/scsi/scsi_error.c
drivers/scsi/sg.c
drivers/scsi/sr.c
drivers/scsi/vmw_pvscsi.c
drivers/staging/android/ashmem.c
drivers/staging/ks7010/ks_hostif.c
drivers/staging/media/omap4iss/iss_video.c
drivers/staging/rtl8188eu/Kconfig
drivers/staging/rtl8188eu/core/rtw_recv.c
drivers/staging/rtl8188eu/core/rtw_security.c
drivers/staging/speakup/speakup_soft.c
drivers/target/iscsi/cxgbit/cxgbit_target.c
drivers/usb/chipidea/Kconfig
drivers/usb/chipidea/Makefile
drivers/usb/chipidea/ci.h
drivers/usb/chipidea/ulpi.c
drivers/usb/class/cdc-acm.c
drivers/usb/core/hub.c
drivers/usb/dwc2/gadget.c
drivers/usb/dwc2/hcd.c
drivers/usb/dwc2/hcd_intr.c
drivers/usb/dwc3/ep0.c
drivers/usb/gadget/composite.c
drivers/usb/gadget/function/f_fs.c
drivers/usb/gadget/function/f_uac2.c
drivers/usb/gadget/function/u_audio.c
drivers/usb/gadget/udc/aspeed-vhub/ep0.c
drivers/usb/gadget/udc/aspeed-vhub/epn.c
drivers/usb/gadget/udc/aspeed-vhub/vhub.h
drivers/usb/gadget/udc/r8a66597-udc.c
drivers/usb/host/xhci.c
drivers/usb/phy/phy-fsl-usb.c
drivers/usb/typec/tcpm.c
drivers/vhost/vhost.c
drivers/video/fbdev/efifb.c
drivers/virtio/virtio_balloon.c
fs/block_dev.c
fs/cachefiles/bind.c
fs/cachefiles/namei.c
fs/cachefiles/rdwr.c
fs/dcache.c
fs/efivarfs/inode.c
fs/exec.c
fs/ext4/balloc.c
fs/ext4/ialloc.c
fs/ext4/inline.c
fs/ext4/inode.c
fs/ext4/mmp.c
fs/ext4/super.c
fs/fscache/cache.c
fs/fscache/cookie.c
fs/fscache/object.c
fs/fscache/operation.c
fs/hugetlbfs/inode.c
fs/iomap.c
fs/jfs/jfs_dinode.h
fs/jfs/jfs_incore.h
fs/jfs/super.c
fs/namespace.c
fs/nfs/nfs4proc.c
fs/squashfs/block.c
fs/squashfs/cache.c
fs/squashfs/file.c
fs/squashfs/file_cache.c
fs/squashfs/file_direct.c
fs/squashfs/fragment.c
fs/squashfs/squashfs.h
fs/squashfs/squashfs_fs.h
fs/squashfs/squashfs_fs_sb.h
fs/squashfs/super.c
fs/userfaultfd.c
fs/xfs/libxfs/xfs_alloc.c
fs/xfs/libxfs/xfs_inode_buf.c
include/linux/blk-mq.h
include/linux/bpfilter.h
include/linux/cpu.h
include/linux/delayacct.h
include/linux/efi.h
include/linux/eventfd.h
include/linux/irqchip/arm-gic-v3.h
include/linux/mlx5/driver.h
include/linux/mm.h
include/linux/pci.h
include/linux/perf_event.h
include/linux/rculist.h
include/linux/rcupdate.h
include/linux/rcutiny.h
include/linux/ring_buffer.h
include/linux/rtmutex.h
include/linux/srcu.h
include/linux/torture.h
include/net/af_vsock.h
include/net/cfg80211.h
include/net/ip6_fib.h
include/net/llc.h
include/net/netfilter/nf_tables.h
include/net/tcp.h
include/trace/events/rcu.h
include/uapi/linux/btf.h
include/uapi/linux/perf_event.h
init/main.c
ipc/sem.c
ipc/shm.c
kernel/auditsc.c
kernel/bpf/arraymap.c
kernel/bpf/btf.c
kernel/bpf/cpumap.c
kernel/bpf/devmap.c
kernel/bpf/sockmap.c
kernel/cpu.c
kernel/events/core.c
kernel/fork.c
kernel/irq/Kconfig
kernel/irq/irqdesc.c
kernel/irq/manage.c
kernel/irq/proc.c
kernel/kthread.c
kernel/locking/locktorture.c
kernel/locking/rtmutex.c
kernel/memremap.c
kernel/rcu/rcu.h
kernel/rcu/rcuperf.c
kernel/rcu/rcutorture.c
kernel/rcu/srcutree.c
kernel/rcu/tiny.c
kernel/rcu/tree.c
kernel/rcu/tree.h
kernel/rcu/tree_exp.h
kernel/rcu/tree_plugin.h
kernel/rcu/update.c
kernel/softirq.c
kernel/stop_machine.c
kernel/time/tick-sched.c
kernel/torture.c
kernel/trace/ring_buffer.c
kernel/trace/trace.c
kernel/trace/trace_events_trigger.c
kernel/trace/trace_kprobe.c
lib/Kconfig.kasan
lib/Kconfig.ubsan
lib/debugobjects.c
mm/hugetlb.c
mm/memcontrol.c
mm/memory.c
mm/mempolicy.c
mm/mmap.c
mm/nommu.c
mm/shmem.c
mm/zswap.c
net/caif/caif_dev.c
net/core/dev.c
net/core/filter.c
net/core/lwt_bpf.c
net/core/page_pool.c
net/core/rtnetlink.c
net/core/skbuff.c
net/core/sock.c
net/core/xdp.c
net/dccp/ccids/ccid2.c
net/dsa/slave.c
net/ipv4/fib_frontend.c
net/ipv4/igmp.c
net/ipv4/inet_fragment.c
net/ipv4/ip_fragment.c
net/ipv4/ip_output.c
net/ipv4/ip_sockglue.c
net/ipv4/tcp_bbr.c
net/ipv4/tcp_dctcp.c
net/ipv4/tcp_input.c
net/ipv4/tcp_output.c
net/ipv6/addrconf.c
net/ipv6/datagram.c
net/ipv6/esp6.c
net/ipv6/icmp.c
net/ipv6/ip6_output.c
net/ipv6/ip6_tunnel.c
net/ipv6/ip6_vti.c
net/ipv6/mcast.c
net/ipv6/route.c
net/ipv6/tcp_ipv6.c
net/l2tp/l2tp_ppp.c
net/llc/llc_core.c
net/mac80211/rx.c
net/mac80211/util.c
net/netfilter/nf_conntrack_proto_dccp.c
net/netfilter/nf_tables_api.c
net/netfilter/nft_immediate.c
net/netfilter/nft_lookup.c
net/netfilter/nft_set_hash.c
net/netfilter/nft_set_rbtree.c
net/netlink/af_netlink.c
net/openvswitch/meter.c
net/packet/af_packet.c
net/rds/ib_frmr.c
net/rds/ib_mr.h
net/rds/ib_rdma.c
net/rds/rdma.c
net/rds/rds.h
net/rds/send.c
net/rxrpc/ar-internal.h
net/rxrpc/call_accept.c
net/rxrpc/conn_event.c
net/rxrpc/net_ns.c
net/rxrpc/output.c
net/rxrpc/peer_event.c
net/rxrpc/peer_object.c
net/rxrpc/rxkad.c
net/smc/af_smc.c
net/smc/smc_cdc.c
net/socket.c
net/tipc/net.c
net/tls/tls_sw.c
net/vmw_vsock/af_vsock.c
net/vmw_vsock/vmci_transport.c
net/wireless/nl80211.c
net/wireless/reg.c
net/wireless/trace.h
net/xdp/xsk.c
net/xdp/xsk_queue.h
net/xfrm/xfrm_policy.c
net/xfrm/xfrm_user.c
samples/bpf/xdp_redirect_cpu_kern.c
samples/bpf/xdp_redirect_cpu_user.c
scripts/Makefile.ubsan
tools/arch/powerpc/include/uapi/asm/unistd.h
tools/arch/x86/include/asm/mcsafe_test.h [new file with mode: 0644]
tools/arch/x86/lib/memcpy_64.S
tools/bpf/bpftool/common.c
tools/bpf/bpftool/map.c
tools/include/uapi/linux/bpf.h
tools/include/uapi/linux/btf.h
tools/include/uapi/linux/perf_event.h
tools/lib/bpf/btf.c
tools/lib/bpf/btf.h
tools/lib/bpf/libbpf.c
tools/lib/bpf/libbpf.h
tools/perf/arch/x86/util/pmu.c
tools/perf/arch/x86/util/tsc.c
tools/perf/bench/Build
tools/perf/bench/mem-memcpy-x86-64-asm.S
tools/perf/bench/mem-memcpy-x86-64-lib.c [new file with mode: 0644]
tools/perf/perf.h
tools/perf/util/header.h
tools/perf/util/namespaces.h
tools/power/x86/turbostat/turbostat.8
tools/power/x86/turbostat/turbostat.c
tools/testing/selftests/bpf/Makefile
tools/testing/selftests/bpf/bpf_helpers.h
tools/testing/selftests/bpf/test_btf.c
tools/testing/selftests/bpf/test_btf_haskv.c
tools/testing/selftests/bpf/test_lwt_seg6local.sh
tools/testing/selftests/bpf/test_sockmap.c
tools/testing/selftests/bpf/test_verifier.c
tools/testing/selftests/ftrace/test.d/00basic/snapshot.tc [new file with mode: 0644]
tools/testing/selftests/net/tcp_mmap.c
tools/testing/selftests/rcutorture/bin/configinit.sh
tools/testing/selftests/rcutorture/bin/kvm-build.sh
tools/testing/selftests/rcutorture/bin/kvm-recheck-rcu.sh
tools/testing/selftests/rcutorture/bin/kvm-recheck.sh
tools/testing/selftests/rcutorture/bin/kvm-test-1-run.sh
tools/testing/selftests/rcutorture/bin/kvm.sh
tools/testing/selftests/rcutorture/bin/parse-console.sh
tools/testing/selftests/rcutorture/configs/rcu/TREE03.boot
tools/testing/selftests/rcutorture/configs/rcu/TREE08-T.boot [deleted file]
tools/testing/selftests/rcutorture/configs/rcu/ver_functions.sh
tools/usb/ffs-test.c
tools/virtio/asm/barrier.h
tools/virtio/linux/kernel.h

diff --git a/Documentation/RCU/Design/Data-Structures/Data-Structures.html b/Documentation/RCU/Design/Data-Structures/Data-Structures.html
index 6c06e10..f5120a0 100644
@@ -380,31 +380,26 @@ and therefore need no protection.
 as follows:
 
 <pre>
-  1   unsigned long gpnum;
-  2   unsigned long completed;
+  1   unsigned long gp_seq;
 </pre>
 
 <p>RCU grace periods are numbered, and
-the <tt>-&gt;gpnum</tt> field contains the number of the grace
-period that started most recently.
-The <tt>-&gt;completed</tt> field contains the number of the
-grace period that completed most recently.
-If the two fields are equal, the RCU grace period that most recently
-started has already completed, and therefore the corresponding
-flavor of RCU is idle.
-If <tt>-&gt;gpnum</tt> is one greater than <tt>-&gt;completed</tt>,
-then <tt>-&gt;gpnum</tt> gives the number of the current RCU
-grace period, which has not yet completed.
-Any other combination of values indicates that something is broken.
-These two fields are protected by the root <tt>rcu_node</tt>'s
+the <tt>-&gt;gp_seq</tt> field contains the current grace-period
+sequence number.
+The bottom two bits are the state of the current grace period,
+which can be zero for not yet started or one for in progress.
+In other words, if the bottom two bits of <tt>-&gt;gp_seq</tt> are
+zero, the corresponding flavor of RCU is idle.
+Any other value in the bottom two bits indicates that something is broken.
+This field is protected by the root <tt>rcu_node</tt> structure's
 <tt>-&gt;lock</tt> field.
 
-</p><p>There are <tt>-&gt;gpnum</tt> and <tt>-&gt;completed</tt> fields
+</p><p>There are <tt>-&gt;gp_seq</tt> fields
 in the <tt>rcu_node</tt> and <tt>rcu_data</tt> structures
 as well.
 The fields in the <tt>rcu_state</tt> structure represent the
-most current values, and those of the other structures are compared
-in order to detect the start of a new grace period in a distributed
+most current value, and those of the other structures are compared
+in order to detect the beginnings and ends of grace periods in a distributed
 fashion.
 The values flow from <tt>rcu_state</tt> to <tt>rcu_node</tt>
 (down the tree from the root to the leaves) to <tt>rcu_data</tt>.
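
For illustration, here is a minimal user-space sketch of the
bottom-two-bits encoding the new text describes; the names
seq_start()/seq_end() are illustrative echoes of the kernel's
rcu_seq_start()/rcu_seq_end(), not the kernel's actual source:

#include <assert.h>
#include <stdio.h>

#define SEQ_CTR_SHIFT   2
#define SEQ_STATE_MASK  ((1UL << SEQ_CTR_SHIFT) - 1)

/* Low two bits: 0 = idle, 1 = grace period in progress. */
static unsigned long seq_state(unsigned long s)
{
        return s & SEQ_STATE_MASK;
}

static void seq_start(unsigned long *sp)
{
        *sp += 1;                          /* idle -> in progress */
        assert(seq_state(*sp) == 1);
}

static void seq_end(unsigned long *sp)
{
        *sp = (*sp | SEQ_STATE_MASK) + 1;  /* round up: state back to 0 */
        assert(seq_state(*sp) == 0);
}

int main(void)
{
        unsigned long gp_seq = 0;

        seq_start(&gp_seq);     /* grace period begins */
        seq_end(&gp_seq);       /* grace period ends, counter advances */
        printf("gp_seq=%lu state=%lu completed=%lu\n",
               gp_seq, seq_state(gp_seq), gp_seq >> SEQ_CTR_SHIFT);
        return 0;
}

A single unsigned long thus carries both the count of grace periods and
the state of the current one, which is what lets one field replace the
old ->gpnum/->completed pair.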
@@ -512,27 +507,47 @@ than to be heisenbugged out of existence.
 as follows:
 
 <pre>
-  1   unsigned long gpnum;
-  2   unsigned long completed;
+  1   unsigned long gp_seq;
+  2   unsigned long gp_seq_needed;
 </pre>
 
-<p>These fields are the counterparts of the fields of the same name in
-the <tt>rcu_state</tt> structure.
-They each may lag up to one behind their <tt>rcu_state</tt>
-counterparts.
-If a given <tt>rcu_node</tt> structure's <tt>-&gt;gpnum</tt> and
-<tt>-&gt;complete</tt> fields are equal, then this <tt>rcu_node</tt>
+<p>The <tt>rcu_node</tt> structures' <tt>-&gt;gp_seq</tt> fields are
+the counterparts of the field of the same name in the <tt>rcu_state</tt>
+structure.
+They each may lag up to one step behind their <tt>rcu_state</tt>
+counterpart.
+If the bottom two bits of a given <tt>rcu_node</tt> structure's
+<tt>-&gt;gp_seq</tt> field are zero, then this <tt>rcu_node</tt>
 structure believes that RCU is idle.
-Otherwise, as with the <tt>rcu_state</tt> structure,
-the <tt>-&gt;gpnum</tt> field will be one greater than the
-<tt>-&gt;complete</tt> fields, with <tt>-&gt;gpnum</tt>
-indicating which grace period this <tt>rcu_node</tt> believes
-is still being waited for.
+</p><p>The <tt>-&gt;gp_seq</tt> field of each <tt>rcu_node</tt>
+structure is updated at the beginning and the end
+of each grace period.
+
+<p>The <tt>-&gt;gp_seq_needed</tt> fields record the
+furthest-in-the-future grace period request seen by the corresponding
+<tt>rcu_node</tt> structure.  The request is considered fulfilled when
+the value of the <tt>-&gt;gp_seq</tt> field equals or exceeds that of
+the <tt>-&gt;gp_seq_needed</tt> field.
 
-</p><p>The <tt>&gt;gpnum</tt> field of each <tt>rcu_node</tt>
-structure is updated at the beginning
-of each grace period, and the <tt>-&gt;completed</tt> fields are
-updated at the end of each grace period.
+<table>
+<tr><th>&nbsp;</th></tr>
+<tr><th align="left">Quick Quiz:</th></tr>
+<tr><td>
+       Suppose that this <tt>rcu_node</tt> structure doesn't see
+       a request for a very long time.
+       Won't wrapping of the <tt>-&gt;gp_seq</tt> field cause
+       problems?
+</td></tr>
+<tr><th align="left">Answer:</th></tr>
+<tr><td bgcolor="#ffffff"><font color="ffffff">
+       No, because if the <tt>-&gt;gp_seq_needed</tt> field lags behind the
+       <tt>-&gt;gp_seq</tt> field, the <tt>-&gt;gp_seq_needed</tt> field
+       will be updated at the end of the grace period.
+       Modulo-arithmetic comparisons therefore will always get the
+       correct answer, even with wrapping.
+</font></td></tr>
+<tr><td>&nbsp;</td></tr>
+</table>
 
 <h5>Quiescent-State Tracking</h5>
 
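The wrap-around Quick Quiz above depends on modulo-arithmetic
comparison.  A hedged sketch in the style of the kernel's
ULONG_CMP_GE() macro follows; gp_fulfilled() is an illustrative helper,
not a kernel function:

#include <limits.h>
#include <stdio.h>

/* True iff a is at or past b, treating the unsigned difference as a
 * signed distance, so the answer stays correct across counter wrap. */
#define ULONG_CMP_GE(a, b)      (ULONG_MAX / 2 >= (a) - (b))

/* A request recorded in ->gp_seq_needed is fulfilled once ->gp_seq
 * catches up with or passes it. */
static int gp_fulfilled(unsigned long gp_seq, unsigned long gp_seq_needed)
{
        return ULONG_CMP_GE(gp_seq, gp_seq_needed);
}

int main(void)
{
        unsigned long needed = ULONG_MAX - 4;   /* request just before wrap */

        printf("%d\n", gp_fulfilled(ULONG_MAX - 8, needed)); /* 0: not yet */
        printf("%d\n", gp_fulfilled(8, needed));  /* 1: wrapped past it */
        return 0;
}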
@@ -626,9 +641,8 @@ normal and expedited grace periods, respectively.
        </ol>
 
        <p><font color="ffffff">So the locking is absolutely required in
-       order to coordinate
-       clearing of the bits with the grace-period numbers in
-       <tt>-&gt;gpnum</tt> and <tt>-&gt;completed</tt>.
+       order to coordinate clearing of the bits with updating of the
+       grace-period sequence number in <tt>-&gt;gp_seq</tt>.
 </font></td></tr>
 <tr><td>&nbsp;</td></tr>
 </table>
@@ -1038,15 +1052,15 @@ out any <tt>rcu_data</tt> structure for which this flag is not set.
 as follows:
 
 <pre>
-  1   unsigned long completed;
-  2   unsigned long gpnum;
+  1   unsigned long gp_seq;
+  2   unsigned long gp_seq_needed;
   3   bool cpu_no_qs;
   4   bool core_needs_qs;
   5   bool gpwrap;
   6   unsigned long rcu_qs_ctr_snap;
 </pre>
 
-<p>The <tt>completed</tt> and <tt>gpnum</tt>
+<p>The <tt>-&gt;gp_seq</tt> and <tt>-&gt;gp_seq_needed</tt>
 fields are the counterparts of the fields of the same name
 in the <tt>rcu_state</tt> and <tt>rcu_node</tt> structures.
 They may each lag up to one behind their <tt>rcu_node</tt>
@@ -1054,15 +1068,9 @@ counterparts, but in <tt>CONFIG_NO_HZ_IDLE</tt> and
 <tt>CONFIG_NO_HZ_FULL</tt> kernels can lag
 arbitrarily far behind for CPUs in dyntick-idle mode (but these counters
 will catch up upon exit from dyntick-idle mode).
-If a given <tt>rcu_data</tt> structure's <tt>-&gt;gpnum</tt> and
-<tt>-&gt;complete</tt> fields are equal, then this <tt>rcu_data</tt>
+If the lower two bits of a given <tt>rcu_data</tt> structure's
+<tt>-&gt;gp_seq</tt> are zero, then this <tt>rcu_data</tt>
 structure believes that RCU is idle.
-Otherwise, as with the <tt>rcu_state</tt> and <tt>rcu_node</tt>
-structure,
-the <tt>-&gt;gpnum</tt> field will be one greater than the
-<tt>-&gt;complete</tt> fields, with <tt>-&gt;gpnum</tt>
-indicating which grace period this <tt>rcu_data</tt> believes
-is still being waited for.
 
 <table>
 <tr><th>&nbsp;</th></tr>
@@ -1070,13 +1078,13 @@ is still being waited for.
 <tr><td>
        All this replication of the grace period numbers can only cause
        massive confusion.
-       Why not just keep a global pair of counters and be done with it???
+       Why not just keep a global sequence number and be done with it???
 </td></tr>
 <tr><th align="left">Answer:</th></tr>
 <tr><td bgcolor="#ffffff"><font color="ffffff">
-       Because if there was only a single global pair of grace-period
+       Because if there were only a single global sequence of grace-period
        numbers, there would need to be a single global lock to allow
-       safely accessing and updating them.
+       safely accessing and updating it.
        And if we are not going to have a single global lock, we need
        to carefully manage the numbers on a per-node basis.
        Recall from the answer to a previous Quick Quiz that the consequences
@@ -1091,8 +1099,8 @@ CPU has not yet passed through a quiescent state,
 while the <tt>-&gt;core_needs_qs</tt> flag indicates that the
 RCU core needs a quiescent state from the corresponding CPU.
 The <tt>-&gt;gpwrap</tt> field indicates that the corresponding
-CPU has remained idle for so long that the <tt>completed</tt>
-and <tt>gpnum</tt> counters are in danger of overflow, which
+CPU has remained idle for so long that the
+<tt>gp_seq</tt> counter is in danger of overflow, which
 will cause the CPU to disregard the values of its counters on
 its next exit from idle.
 Finally, the <tt>rcu_qs_ctr_snap</tt> field is used to detect
@@ -1130,10 +1138,10 @@ The CPU advances the callbacks in its <tt>rcu_data</tt> structure
 whenever it notices that another RCU grace period has completed.
 The CPU detects the completion of an RCU grace period by noticing
 that the value of its <tt>rcu_data</tt> structure's
-<tt>-&gt;completed</tt> field differs from that of its leaf
+<tt>-&gt;gp_seq</tt> field differs from that of its leaf
 <tt>rcu_node</tt> structure.
 Recall that each <tt>rcu_node</tt> structure's
-<tt>-&gt;completed</tt> field is updated at the end of each
+<tt>-&gt;gp_seq</tt> field is updated at the beginning and the end of each
 grace period.
 
 <p>
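
A hedged sketch of the completion check just described: the CPU
compares the ->gp_seq snapshot in its rcu_data structure against the
leaf rcu_node value.  The fake_rdp structure and saw_gp_end() helper
below are illustrative; the kernel's real bookkeeping is done by its
rcu_seq_*() helpers and rcu_advance_cbs():

#include <stdio.h>

struct fake_rdp {
        unsigned long gp_seq;           /* this CPU's snapshot */
};

static int saw_gp_end(struct fake_rdp *rdp, unsigned long rnp_gp_seq)
{
        if ((rnp_gp_seq >> 2) == (rdp->gp_seq >> 2))
                return 0;               /* nothing completed since snapshot */
        rdp->gp_seq = rnp_gp_seq;       /* catch up to the leaf's value */
        return 1;                       /* time to advance callbacks */
}

int main(void)
{
        struct fake_rdp rdp = { .gp_seq = 4 };  /* grace period 1 done */

        printf("%d\n", saw_gp_end(&rdp, 4));    /* 0: leaf unchanged */
        printf("%d\n", saw_gp_end(&rdp, 8));    /* 1: grace period 2 ended */
        return 0;
}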
diff --git a/Documentation/RCU/Design/Memory-Ordering/Tree-RCU-Memory-Ordering.html b/Documentation/RCU/Design/Memory-Ordering/Tree-RCU-Memory-Ordering.html
index 8651b0b..a346ce0 100644
@@ -357,7 +357,7 @@ parts, starting in this section with the various phases of
 grace-period initialization.
 
 <p>The first ordering-related grace-period initialization action is to
-increment the <tt>rcu_state</tt> structure's <tt>-&gt;gpnum</tt>
+advance the <tt>rcu_state</tt> structure's <tt>-&gt;gp_seq</tt>
 grace-period-number counter, as shown below:
 
 </p><p><img src="TreeRCU-gp-init-1.svg" alt="TreeRCU-gp-init-1.svg" width="75%">
@@ -388,7 +388,7 @@ its last CPU and if the next <tt>rcu_node</tt> structure has no online CPUs).
 
 <p>The final <tt>rcu_gp_init()</tt> pass through the <tt>rcu_node</tt>
 tree traverses breadth-first, setting each <tt>rcu_node</tt> structure's
-<tt>-&gt;gpnum</tt> field to the newly incremented value from the
+<tt>-&gt;gp_seq</tt> field to the newly advanced value from the
 <tt>rcu_state</tt> structure, as shown in the following diagram.
 
 </p><p><img src="TreeRCU-gp-init-3.svg" alt="TreeRCU-gp-init-1.svg" width="75%">
@@ -398,9 +398,9 @@ tree traverses breadth-first, setting each <tt>rcu_node</tt> structure's
 to notice that a new grace period has started, as described in the next
 section.
 But because the grace-period kthread started the grace period at the
-root (with the increment of the <tt>rcu_state</tt> structure's
-<tt>-&gt;gpnum</tt> field) before setting each leaf <tt>rcu_node</tt>
-structure's <tt>-&gt;gpnum</tt> field, each CPU's observation of
+root (with the advancing of the <tt>rcu_state</tt> structure's
+<tt>-&gt;gp_seq</tt> field) before setting each leaf <tt>rcu_node</tt>
+structure's <tt>-&gt;gp_seq</tt> field, each CPU's observation of
 the start of the grace period will happen after the actual start
 of the grace period.
 
@@ -466,7 +466,7 @@ section that the grace period must wait on.
 <tr><td>
        But an RCU read-side critical section might have started
        after the beginning of the grace period
-       (the <tt>-&gt;gpnum++</tt> from earlier), so why should
+       (the advancing of <tt>-&gt;gp_seq</tt> from earlier), so why should
        the grace period wait on such a critical section?
 </td></tr>
 <tr><th align="left">Answer:</th></tr>
@@ -609,10 +609,8 @@ states outstanding from other CPUs.
 <h4><a name="Grace-Period Cleanup">Grace-Period Cleanup</a></h4>
 
 <p>Grace-period cleanup first scans the <tt>rcu_node</tt> tree
-breadth-first setting all the <tt>-&gt;completed</tt> fields equal
-to the number of the newly completed grace period, then it sets
-the <tt>rcu_state</tt> structure's <tt>-&gt;completed</tt> field,
-again to the number of the newly completed grace period.
+breadth-first advancing all the <tt>-&gt;gp_seq</tt> fields, then it
+advances the <tt>rcu_state</tt> structure's <tt>-&gt;gp_seq</tt> field.
 The ordering effects are shown below:
 
 </p><p><img src="TreeRCU-gp-cleanup.svg" alt="TreeRCU-gp-cleanup.svg" width="75%">
@@ -634,7 +632,7 @@ grace-period cleanup is complete, the next grace period can begin.
        CPU has reported its quiescent state, but it may be some
        milliseconds before RCU becomes aware of this.
        The latest reasonable candidate is once the <tt>rcu_state</tt>
-       structure's <tt>-&gt;completed</tt> field has been updated,
+       structure's <tt>-&gt;gp_seq</tt> field has been updated,
        but it is quite possible that some CPUs have already completed
        phase two of their updates by that time.
        In short, if you are going to work with RCU, you need to
@@ -647,7 +645,7 @@ grace-period cleanup is complete, the next grace period can begin.
 <h4><a name="Callback Invocation">Callback Invocation</a></h4>
 
 <p>Once a given CPU's leaf <tt>rcu_node</tt> structure's
-<tt>-&gt;completed</tt> field has been updated, that CPU can begin
+<tt>-&gt;gp_seq</tt> field has been updated, that CPU can begin
 invoking its RCU callbacks that were waiting for this grace period
 to end.
 These callbacks are identified by <tt>rcu_advance_cbs()</tt>,
diff --git a/Documentation/RCU/Design/Memory-Ordering/TreeRCU-gp-cleanup.svg b/Documentation/RCU/Design/Memory-Ordering/TreeRCU-gp-cleanup.svg
index 754f426..bf84fba 100644
      inkscape:window-height="1144"
      id="namedview208"
      showgrid="true"
-     inkscape:zoom="0.70710678"
-     inkscape:cx="617.89017"
-     inkscape:cy="542.52419"
-     inkscape:window-x="86"
-     inkscape:window-y="28"
+     inkscape:zoom="0.78716603"
+     inkscape:cx="513.06403"
+     inkscape:cy="623.1214"
+     inkscape:window-x="102"
+     inkscape:window-y="38"
      inkscape:window-maximized="0"
      inkscape:current-layer="g3188-3"
      fit-margin-top="5"
      id="g3188">
     <text
        xml:space="preserve"
-       x="3199.1516"
+       x="3145.9592"
        y="13255.592"
        font-style="normal"
        font-weight="bold"
        font-size="192"
        id="text202"
-       style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;font-family:Courier">-&gt;completed = -&gt;gpnum</text>
+       style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;font-family:Courier"><tspan
+         style="font-size:172.87567139px"
+         id="tspan3143">rcu_seq_end(&amp;rnp-&gt;gp_seq)</tspan></text>
     <g
        id="g3107"
        transform="translate(947.90548,11584.029)">
     </g>
     <text
        xml:space="preserve"
-       x="5324.5371"
-       y="15414.598"
+       x="5264.4731"
+       y="15428.84"
        font-style="normal"
        font-weight="bold"
        font-size="192"
-       id="text202-753"
-       style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;completed = -&gt;gpnum</text>
+       id="text202-36-7"
+       style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"><tspan
+         style="font-size:172.87567139px"
+         id="tspan3166-5">rcu_seq_end(&amp;rnp-&gt;gp_seq)</tspan></text>
   </g>
   <g
      style="fill:none;stroke-width:0.025in"
        sodipodi:linespacing="125%"><tspan
          style="font-size:159.57754517px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Liberation Sans;-inkscape-font-specification:Liberation Sans"
          id="tspan3104-6-5-6-0">Leaf</tspan></text>
-    <text
-       xml:space="preserve"
-       x="7479.5796"
-       y="17699.943"
-       font-style="normal"
-       font-weight="bold"
-       font-size="192"
-       id="text202-9"
-       style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;completed = -&gt;gpnum</text>
     <path
        sodipodi:nodetypes="cc"
        inkscape:connector-curvature="0"
        style="fill:none;stroke-width:0.025in"
        transform="translate(-737.93887,7732.6672)"
        id="g3188-3">
-      <text
-         xml:space="preserve"
-         x="3225.7478"
-         y="13175.802"
-         font-style="normal"
-         font-weight="bold"
-         font-size="192"
-         id="text202-60"
-         style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;font-family:Courier">rsp-&gt;completed =</text>
       <g
          id="g3107-62"
          transform="translate(947.90548,11584.029)">
          sodipodi:linespacing="125%"><tspan
            style="font-size:159.57754517px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Liberation Sans;-inkscape-font-specification:Liberation Sans"
            id="tspan3104-6-5-7">Root</tspan></text>
-      <text
-         xml:space="preserve"
-         x="3225.7478"
-         y="13390.038"
-         font-style="normal"
-         font-weight="bold"
-         font-size="192"
-         id="text202-60-3"
-         style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">       rnp-&gt;completed</text>
       <flowRoot
          xml:space="preserve"
          id="flowRoot3356"
              height="63.63961"
              x="332.34018"
              y="681.87292" /></flowRegion><flowPara
-           id="flowPara3362" /></flowRoot>    </g>
+           id="flowPara3362" /></flowRoot>      <text
+         xml:space="preserve"
+         x="3156.6121"
+         y="13317.754"
+         font-style="normal"
+         font-weight="bold"
+         font-size="192"
+         id="text202-36-6"
+         style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"><tspan
+           style="font-size:172.87567139px"
+           id="tspan3166-0">rcu_seq_end(&amp;rsp-&gt;gp_seq)</tspan></text>
+    </g>
     <g
        style="fill:none;stroke-width:0.025in"
        transform="translate(-858.40227,7769.0342)"
        id="path3414-8-3-6-6"
        inkscape:connector-curvature="0"
        sodipodi:nodetypes="cc" />
+    <text
+       xml:space="preserve"
+       x="7418.769"
+       y="17646.104"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       id="text202-36-70"
+       style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"><tspan
+         style="font-size:172.87567139px"
+         id="tspan3166-93">rcu_seq_end(&amp;rnp-&gt;gp_seq)</tspan></text>
   </g>
   <g
      transform="translate(-1642.5377,-11611.245)"
     </g>
     <text
        xml:space="preserve"
-       x="5327.3057"
+       x="5274.1133"
        y="15428.84"
        font-style="normal"
        font-weight="bold"
        font-size="192"
        id="text202-36"
-       style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;completed = -&gt;gpnum</text>
+       style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"><tspan
+         style="font-size:172.87567139px"
+         id="tspan3166">rcu_seq_end(&amp;rnp-&gt;gp_seq)</tspan></text>
   </g>
   <g
      transform="translate(-151.71746,-11647.612)"
          id="tspan3104-6-5-6-0-92">Leaf</tspan></text>
     <text
        xml:space="preserve"
-       x="7486.4907"
-       y="17670.119"
+       x="7408.5918"
+       y="17619.504"
        font-style="normal"
        font-weight="bold"
        font-size="192"
-       id="text202-6"
-       style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;completed = -&gt;gpnum</text>
+       id="text202-36-2"
+       style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"><tspan
+         style="font-size:172.87567139px"
+         id="tspan3166-9">rcu_seq_end(&amp;rnp-&gt;gp_seq)</tspan></text>
   </g>
   <g
      transform="translate(-6817.1997,-11647.612)"
          id="tspan3104-6-5-6-0-1">Leaf</tspan></text>
     <text
        xml:space="preserve"
-       x="7474.1382"
-       y="17688.926"
+       x="7416.8003"
+       y="17619.504"
        font-style="normal"
        font-weight="bold"
        font-size="192"
-       id="text202-5"
-       style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;completed = -&gt;gpnum</text>
+       id="text202-36-3"
+       style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"><tspan
+         style="font-size:172.87567139px"
+         id="tspan3166-56">rcu_seq_end(&amp;rnp-&gt;gp_seq)</tspan></text>
   </g>
   <path
      style="fill:none;stroke:#000000;stroke-width:13.29812908px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;marker-end:url(#Arrow2Lend)"
      id="path3414-8-3-6"
      inkscape:connector-curvature="0"
      sodipodi:nodetypes="cc" />
-  <text
-     xml:space="preserve"
-     x="7318.9653"
-     y="6031.6353"
-     font-style="normal"
-     font-weight="bold"
-     font-size="192"
-     id="text202-2"
-     style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;completed = -&gt;gpnum</text>
   <g
      style="fill:none;stroke-width:0.025in"
      id="g4504-3-9"
      id="path3134-9-0-3-5"
      d="m 6875.6003,15833.906 1595.7755,0"
      style="fill:none;stroke:#969696;stroke-width:53.19251633;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;marker-end:url(#Arrow1Send-36)" />
+  <text
+     xml:space="preserve"
+     x="7275.2612"
+     y="5971.8916"
+     font-style="normal"
+     font-weight="bold"
+     font-size="192"
+     id="text202-36-1"
+     style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"><tspan
+       style="font-size:172.87567139px"
+       id="tspan3166-2">rcu_seq_end(&amp;rnp-&gt;gp_seq)</tspan></text>
 </svg>
diff --git a/Documentation/RCU/Design/Memory-Ordering/TreeRCU-gp-init-1.svg b/Documentation/RCU/Design/Memory-Ordering/TreeRCU-gp-init-1.svg
index 0161262..8c20755 100644
      inkscape:window-height="1144"
      id="namedview208"
      showgrid="true"
-     inkscape:zoom="0.70710678"
-     inkscape:cx="617.89019"
-     inkscape:cy="636.57143"
-     inkscape:window-x="697"
+     inkscape:zoom="2.6330492"
+     inkscape:cx="524.82797"
+     inkscape:cy="519.31194"
+     inkscape:window-x="79"
      inkscape:window-y="28"
      inkscape:window-maximized="0"
-     inkscape:current-layer="svg2"
+     inkscape:current-layer="g3188"
      fit-margin-top="5"
      fit-margin-right="5"
      fit-margin-left="5"
      id="g3188">
     <text
        xml:space="preserve"
-       x="3305.5364"
+       x="3119.363"
        y="13255.592"
        font-style="normal"
        font-weight="bold"
        font-size="192"
        id="text202"
-       style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;font-family:Courier">rsp-&gt;gpnum++</text>
+       style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;font-family:Courier"><tspan
+         style="font-size:172.87567139px"
+         id="tspan3071">rcu_seq_start(rsp-&gt;gp_seq)</tspan></text>
     <g
        id="g3107"
        transform="translate(947.90548,11584.029)">
diff --git a/Documentation/RCU/Design/Memory-Ordering/TreeRCU-gp-init-3.svg b/Documentation/RCU/Design/Memory-Ordering/TreeRCU-gp-init-3.svg
index de6ecc5..d24d7d5 100644
@@ -19,7 +19,7 @@
    id="svg2"
    version="1.1"
    inkscape:version="0.48.4 r9939"
-   sodipodi:docname="TreeRCU-gp-init-2.svg">
+   sodipodi:docname="TreeRCU-gp-init-3.svg">
   <metadata
      id="metadata212">
     <rdf:RDF>
      inkscape:window-width="1087"
      inkscape:window-height="1144"
      id="namedview208"
-     showgrid="false"
-     inkscape:zoom="0.70710678"
+     showgrid="true"
+     inkscape:zoom="0.68224756"
      inkscape:cx="617.89019"
      inkscape:cy="625.84293"
-     inkscape:window-x="697"
+     inkscape:window-x="54"
      inkscape:window-y="28"
      inkscape:window-maximized="0"
-     inkscape:current-layer="svg2"
+     inkscape:current-layer="g3153"
      fit-margin-top="5"
      fit-margin-right="5"
      fit-margin-left="5"
-     fit-margin-bottom="5" />
+     fit-margin-bottom="5">
+    <inkscape:grid
+       type="xygrid"
+       id="grid3090" />
+  </sodipodi:namedview>
   <path
      sodipodi:nodetypes="cccccccccccccccccccccccc"
      inkscape:connector-curvature="0"
      id="g3188">
     <text
        xml:space="preserve"
-       x="3305.5364"
+       x="3145.9592"
        y="13255.592"
        font-style="normal"
        font-weight="bold"
        font-size="192"
        id="text202"
-       style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;font-family:Courier">-&gt;gpnum = rsp-&gt;gpnum</text>
+       style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;font-family:Courier">-&gt;gp_seq = rsp-&gt;gp_seq</text>
     <g
        id="g3107"
        transform="translate(947.90548,11584.029)">
     </g>
     <text
        xml:space="preserve"
-       x="5392.3345"
-       y="15407.104"
+       x="5253.6904"
+       y="15407.032"
        font-style="normal"
        font-weight="bold"
        font-size="192"
        id="text202-6"
-       style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;gpnum = rsp-&gt;gpnum</text>
+       style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;gp_seq = rsp-&gt;gp_seq</text>
   </g>
   <g
      style="fill:none;stroke-width:0.025in"
          id="tspan3104-6-5-6-0">Leaf</tspan></text>
     <text
        xml:space="preserve"
-       x="7536.4883"
-       y="17640.934"
+       x="7415.4365"
+       y="17670.572"
        font-style="normal"
        font-weight="bold"
        font-size="192"
        id="text202-9"
-       style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;gpnum = rsp-&gt;gpnum</text>
+       style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;gp_seq = rsp-&gt;gp_seq</text>
   </g>
   <g
      transform="translate(-1642.5375,-11610.962)"
     </g>
     <text
        xml:space="preserve"
-       x="5378.4146"
-       y="15436.927"
+       x="5258.0688"
+       y="15412.313"
        font-style="normal"
        font-weight="bold"
        font-size="192"
        id="text202-3"
-       style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;gpnum = rsp-&gt;gpnum</text>
+       style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;gp_seq = rsp-&gt;gp_seq</text>
   </g>
   <g
      transform="translate(-151.71726,-11647.329)"
          id="tspan3104-6-5-6-0-92">Leaf</tspan></text>
     <text
        xml:space="preserve"
-       x="7520.1294"
-       y="17673.639"
+       x="7405.2607"
+       y="17670.572"
        font-style="normal"
        font-weight="bold"
        font-size="192"
        id="text202-35"
-       style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;gpnum = rsp-&gt;gpnum</text>
+       style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;gp_seq = rsp-&gt;gp_seq</text>
   </g>
   <g
      transform="translate(-6817.1998,-11647.329)"
          id="tspan3104-6-5-6-0-1">Leaf</tspan></text>
     <text
        xml:space="preserve"
-       x="7521.4663"
-       y="17666.062"
+       x="7413.4688"
+       y="17670.566"
        font-style="normal"
        font-weight="bold"
        font-size="192"
        id="text202-75"
-       style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;gpnum = rsp-&gt;gpnum</text>
+       style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;gp_seq = rsp-&gt;gp_seq</text>
   </g>
   <path
      style="fill:none;stroke:#000000;stroke-width:13.29812908px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;marker-end:url(#Arrow2Lend)"
      sodipodi:nodetypes="cc" />
   <text
      xml:space="preserve"
-     x="7370.856"
-     y="5997.5972"
+     x="7271.9297"
+     y="6023.2412"
      font-style="normal"
      font-weight="bold"
      font-size="192"
      id="text202-62"
-     style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;gpnum = rsp-&gt;gpnum</text>
+     style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;gp_seq = rsp-&gt;gp_seq</text>
 </svg>
diff --git a/Documentation/RCU/Design/Memory-Ordering/TreeRCU-gp.svg b/Documentation/RCU/Design/Memory-Ordering/TreeRCU-gp.svg
index b13b7b0..acd73c7 100644
      inkscape:window-height="1144"
      id="namedview208"
      showgrid="true"
-     inkscape:zoom="0.6004608"
-     inkscape:cx="826.65969"
-     inkscape:cy="483.3047"
-     inkscape:window-x="66"
-     inkscape:window-y="28"
+     inkscape:zoom="0.81932583"
+     inkscape:cx="840.45848"
+     inkscape:cy="5052.4242"
+     inkscape:window-x="787"
+     inkscape:window-y="24"
      inkscape:window-maximized="0"
-     inkscape:current-layer="svg2"
+     inkscape:current-layer="g4"
      fit-margin-top="5"
      fit-margin-right="5"
      fit-margin-left="5"
        style="fill:none;stroke-width:0.025in"
        transform="translate(1749.0282,658.72243)"
        id="g3188">
-      <text
-         xml:space="preserve"
-         x="3305.5364"
-         y="13255.592"
-         font-style="normal"
-         font-weight="bold"
-         font-size="192"
-         id="text202-5"
-         style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;font-family:Courier">rsp-&gt;gpnum++</text>
       <g
          id="g3107-62"
          transform="translate(947.90548,11584.029)">
          sodipodi:linespacing="125%"><tspan
            style="font-size:159.57754517px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Liberation Sans;-inkscape-font-specification:Liberation Sans"
            id="tspan3104-6-5-7">Root</tspan></text>
+      <text
+         xml:space="preserve"
+         x="3137.9988"
+         y="13271.316"
+         font-style="normal"
+         font-weight="bold"
+         font-size="192"
+         id="text202-626"
+         style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"><tspan
+           style="font-size:172.87567139px"
+           id="tspan3071">rcu_seq_start(rsp-&gt;gp_seq)</tspan></text>
     </g>
     <rect
        ry="0"
        style="fill:none;stroke-width:0.025in"
        transform="translate(1739.0986,17188.625)"
        id="g3188-6">
-      <text
-         xml:space="preserve"
-         x="3305.5364"
-         y="13255.592"
-         font-style="normal"
-         font-weight="bold"
-         font-size="192"
-         id="text202-1"
-         style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;font-family:Courier">-&gt;gpnum = rsp-&gt;gpnum</text>
       <g
          id="g3107-5"
          transform="translate(947.90548,11584.029)">
          sodipodi:linespacing="125%"><tspan
            style="font-size:159.57754517px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Liberation Sans;-inkscape-font-specification:Liberation Sans"
            id="tspan3104-6-5-1">Root</tspan></text>
+      <text
+         xml:space="preserve"
+         x="3147.9268"
+         y="13240.524"
+         font-style="normal"
+         font-weight="bold"
+         font-size="192"
+         id="text202-1"
+         style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;gp_seq = rsp-&gt;gp_seq</text>
     </g>
     <g
        style="fill:none;stroke-width:0.025in"
       </g>
       <text
          xml:space="preserve"
-         x="5392.3345"
-         y="15407.104"
+         x="5263.1094"
+         y="15411.646"
          font-style="normal"
          font-weight="bold"
          font-size="192"
-         id="text202-6-7"
-         style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;gpnum = rsp-&gt;gpnum</text>
+         id="text202-92"
+         style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;gp_seq = rsp-&gt;gp_seq</text>
     </g>
     <g
        style="fill:none;stroke-width:0.025in"
            id="tspan3104-6-5-6-0-94">Leaf</tspan></text>
       <text
          xml:space="preserve"
-         x="7536.4883"
-         y="17640.934"
+         x="7417.4053"
+         y="17655.502"
          font-style="normal"
          font-weight="bold"
          font-size="192"
-         id="text202-9"
-         style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;gpnum = rsp-&gt;gpnum</text>
+         id="text202-759"
+         style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;gp_seq = rsp-&gt;gp_seq</text>
     </g>
     <g
        transform="translate(-2353.8462,17224.992)"
       </g>
       <text
          xml:space="preserve"
-         x="5378.4146"
-         y="15436.927"
+         x="5246.1548"
+         y="15411.648"
          font-style="normal"
          font-weight="bold"
          font-size="192"
-         id="text202-3"
-         style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;gpnum = rsp-&gt;gpnum</text>
+         id="text202-87"
+         style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;gp_seq = rsp-&gt;gp_seq</text>
     </g>
     <g
        transform="translate(-863.02613,17188.625)"
            id="tspan3104-6-5-6-0-92-6">Leaf</tspan></text>
       <text
          xml:space="preserve"
-         x="7520.1294"
-         y="17673.639"
+         x="7433.8257"
+         y="17682.098"
          font-style="normal"
          font-weight="bold"
          font-size="192"
-         id="text202-35"
-         style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;gpnum = rsp-&gt;gpnum</text>
+         id="text202-2"
+         style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;gp_seq = rsp-&gt;gp_seq</text>
     </g>
     <g
        transform="translate(-7528.5085,17188.625)"
            id="tspan3104-6-5-6-0-1-8">Leaf</tspan></text>
       <text
          xml:space="preserve"
-         x="7521.4663"
-         y="17666.062"
+         x="7415.4404"
+         y="17682.098"
          font-style="normal"
          font-weight="bold"
          font-size="192"
-         id="text202-75-1"
-         style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;gpnum = rsp-&gt;gpnum</text>
+         id="text202-0"
+         style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;gp_seq = rsp-&gt;gp_seq</text>
     </g>
     <path
        style="fill:none;stroke:#000000;stroke-width:13.29812813px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;marker-end:url(#Arrow2Lend)"
        id="path3414-8-3-6-4"
        inkscape:connector-curvature="0"
        sodipodi:nodetypes="cc" />
-    <text
-       xml:space="preserve"
-       x="6659.5469"
-       y="34833.551"
-       font-style="normal"
-       font-weight="bold"
-       font-size="192"
-       id="text202-62"
-       style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;gpnum = rsp-&gt;gpnum</text>
     <path
        sodipodi:nodetypes="ccc"
        inkscape:connector-curvature="0"
          font-weight="bold"
          font-size="192"
          id="text202-6-6-5"
-         style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">rdp-&gt;gpnum</text>
+         style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">rdp-&gt;gp_seq</text>
       <text
          xml:space="preserve"
          x="5035.4155"
        style="fill:none;stroke-width:0.025in"
        transform="translate(1874.038,53203.538)"
        id="g3188-7">
-      <text
-         xml:space="preserve"
-         x="3199.1516"
-         y="13255.592"
-         font-style="normal"
-         font-weight="bold"
-         font-size="192"
-         id="text202-82"
-         style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;font-family:Courier">-&gt;completed = -&gt;gpnum</text>
       <g
          id="g3107-53"
          transform="translate(947.90548,11584.029)">
          sodipodi:linespacing="125%"><tspan
            style="font-size:159.57754517px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Liberation Sans;-inkscape-font-specification:Liberation Sans"
            id="tspan3104-6-5-19">Root</tspan></text>
+      <text
+         xml:space="preserve"
+         x="3175.896"
+         y="13240.11"
+         font-style="normal"
+         font-weight="bold"
+         font-size="192"
+         id="text202-36-3"
+         style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"><tspan
+           style="font-size:172.87567139px"
+           id="tspan3166">rcu_seq_end(&amp;rnp-&gt;gp_seq)</tspan></text>
     </g>
     <rect
        ry="0"
       </g>
       <text
          xml:space="preserve"
-         x="5324.5371"
-         y="15414.598"
+         x="5264.4829"
+         y="15411.231"
          font-style="normal"
          font-weight="bold"
          font-size="192"
-         id="text202-753"
-         style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;completed = -&gt;gpnum</text>
+         id="text202-36-7"
+         style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"><tspan
+           style="font-size:172.87567139px"
+           id="tspan3166-5">rcu_seq_end(&amp;rnp-&gt;gp_seq)</tspan></text>
     </g>
     <g
        style="fill:none;stroke-width:0.025in"
        sodipodi:linespacing="125%"><tspan
          style="font-size:159.57754517px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Liberation Sans;-inkscape-font-specification:Liberation Sans"
          id="tspan3104-6-5-6-0-4">Leaf</tspan></text>
-    <text
-       xml:space="preserve"
-       x="10084.225"
-       y="70903.312"
-       font-style="normal"
-       font-weight="bold"
-       font-size="192"
-       id="text202-9-0"
-       style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;completed = -&gt;gpnum</text>
     <path
        sodipodi:nodetypes="ccc"
        inkscape:connector-curvature="0"
        id="path3134-9-0-3-9"
        d="m 6315.6122,72629.054 -20.9533,8108.684 1648.968,0"
        style="fill:none;stroke:#969696;stroke-width:53.19251251;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;marker-end:url(#Arrow1Send)" />
-    <text
-       xml:space="preserve"
-       x="5092.4683"
-       y="74111.672"
-       font-style="normal"
-       font-weight="bold"
-       font-size="192"
-       id="text202-60"
-       style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">rsp-&gt;completed =</text>
     <g
        style="fill:none;stroke-width:0.025in"
        id="g3107-62-6"
        sodipodi:linespacing="125%"><tspan
          style="font-size:159.57754517px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Liberation Sans;-inkscape-font-specification:Liberation Sans"
          id="tspan3104-6-5-7-7">Root</tspan></text>
-    <text
-       xml:space="preserve"
-       x="5092.4683"
-       y="74325.906"
-       font-style="normal"
-       font-weight="bold"
-       font-size="192"
-       id="text202-60-3"
-       style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">       rnp-&gt;completed</text>
     <g
        style="fill:none;stroke-width:0.025in"
        transform="translate(1746.2528,60972.572)"
       </g>
       <text
          xml:space="preserve"
-         x="5327.3057"
-         y="15428.84"
+         x="5274.1216"
+         y="15411.231"
          font-style="normal"
          font-weight="bold"
          font-size="192"
          id="text202-36"
-         style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;completed = -&gt;gpnum</text>
+         style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"><tspan
+           style="font-size:172.87567139px"
+           id="tspan3166-6">rcu_seq_end(&amp;rnp-&gt;gp_seq)</tspan></text>
     </g>
     <g
        transform="translate(-728.08545,53203.538)"
            id="tspan3104-6-5-6-0-92-5">Leaf</tspan></text>
       <text
          xml:space="preserve"
-         x="7486.4907"
-         y="17670.119"
+         x="7435.1987"
+         y="17708.281"
          font-style="normal"
          font-weight="bold"
          font-size="192"
-         id="text202-6-2"
-         style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;completed = -&gt;gpnum</text>
+         id="text202-36-9"
+         style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"><tspan
+           style="font-size:172.87567139px"
+           id="tspan3166-1">rcu_seq_end(&amp;rnp-&gt;gp_seq)</tspan></text>
     </g>
     <g
        transform="translate(-7393.5687,53203.538)"
            id="tspan3104-6-5-6-0-1-5">Leaf</tspan></text>
       <text
          xml:space="preserve"
-         x="7474.1382"
-         y="17688.926"
+         x="7416.8125"
+         y="17708.281"
          font-style="normal"
          font-weight="bold"
          font-size="192"
-         id="text202-5-1"
-         style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;completed = -&gt;gpnum</text>
+         id="text202-36-35"
+         style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"><tspan
+           style="font-size:172.87567139px"
+           id="tspan3166-62">rcu_seq_end(&amp;rnp-&gt;gp_seq)</tspan></text>
     </g>
     <path
        style="fill:none;stroke:#000000;stroke-width:13.29812813px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;marker-end:url(#Arrow2Lend)"
        id="path3414-8-3-6-67"
        inkscape:connector-curvature="0"
        sodipodi:nodetypes="cc" />
-    <text
-       xml:space="preserve"
-       x="6742.6001"
-       y="70882.617"
-       font-style="normal"
-       font-weight="bold"
-       font-size="192"
-       id="text202-2"
-       style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;completed = -&gt;gpnum</text>
     <g
        style="fill:none;stroke-width:0.025in"
        id="g4504-3-9-6"
        font-size="192"
        id="text202-7-9-6-6-7"
        style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">rcu_do_batch()</text>
+    <text
+       xml:space="preserve"
+       x="6698.9019"
+       y="70885.211"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       id="text202-36-2"
+       style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"><tspan
+         style="font-size:172.87567139px"
+         id="tspan3166-7">rcu_seq_end(&amp;rnp-&gt;gp_seq)</tspan></text>
+    <text
+       xml:space="preserve"
+       x="10023.457"
+       y="70885.234"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       id="text202-36-0"
+       style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"><tspan
+         style="font-size:172.87567139px"
+         id="tspan3166-9">rcu_seq_end(&amp;rnp-&gt;gp_seq)</tspan></text>
+    <text
+       xml:space="preserve"
+       x="5023.3389"
+       y="74209.773"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       id="text202-36-36"
+       style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"><tspan
+         style="font-size:172.87567139px"
+         id="tspan3166-0">rcu_seq_end(&amp;rsp-&gt;gp_seq)</tspan></text>
+    <text
+       xml:space="preserve"
+       x="6562.5884"
+       y="34870.727"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       id="text202-3"
+       style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;gp_seq = rsp-&gt;gp_seq</text>
   </g>
 </svg>
index de3992f..149bec2 100644 (file)
      inkscape:window-height="1144"
      id="namedview208"
      showgrid="true"
-     inkscape:zoom="0.70710678"
-     inkscape:cx="616.47598"
-     inkscape:cy="595.41964"
-     inkscape:window-x="813"
+     inkscape:zoom="0.96484375"
+     inkscape:cx="507.0191"
+     inkscape:cy="885.62207"
+     inkscape:window-x="47"
      inkscape:window-y="28"
      inkscape:window-maximized="0"
-     inkscape:current-layer="g4405"
+     inkscape:current-layer="g3115"
      fit-margin-top="5"
      fit-margin-right="5"
      fit-margin-left="5"
          font-weight="bold"
          font-size="192"
          id="text202-6-6"
-         style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">rdp-&gt;gpnum</text>
+         style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">rdp-&gt;gp_seq</text>
       <text
          xml:space="preserve"
          x="5035.4155"
index 4259f95..f99cf11 100644 (file)
@@ -172,7 +172,7 @@ it will print a message similar to the following:
        INFO: rcu_sched detected stalls on CPUs/tasks:
        2-...: (3 GPs behind) idle=06c/0/0 softirq=1453/1455 fqs=0
        16-...: (0 ticks this GP) idle=81c/0/0 softirq=764/764 fqs=0
-       (detected by 32, t=2603 jiffies, g=7073, c=7072, q=625)
+       (detected by 32, t=2603 jiffies, g=7075, q=625)
 
 This message indicates that CPU 32 detected that CPUs 2 and 16 were both
 causing stalls, and that the stall was affecting RCU-sched.  This message
@@ -215,11 +215,10 @@ CPU since the last time that this CPU noted the beginning of a grace
 period.
 
 The "detected by" line indicates which CPU detected the stall (in this
-case, CPU 32), how many jiffies have elapsed since the start of the
-grace period (in this case 2603), the number of the last grace period
-to start and to complete (7073 and 7072, respectively), and an estimate
-of the total number of RCU callbacks queued across all CPUs (625 in
-this case).
+case, CPU 32), how many jiffies have elapsed since the start of the grace
+period (in this case 2603), the grace-period sequence number (7075), and
+an estimate of the total number of RCU callbacks queued across all CPUs
+(625 in this case).
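
Note: a single "g" number now suffices because, as of this series, the
grace-period counter is a sequence number that packs a counter together
with a two-bit phase.  A minimal sketch of the accessors, assuming the
kernel/rcu/rcu.h definitions of this era:

	#define RCU_SEQ_CTR_SHIFT	2
	#define RCU_SEQ_STATE_MASK	((1 << RCU_SEQ_CTR_SHIFT) - 1)

	/* Grace periods completed: the upper bits of ->gp_seq. */
	static unsigned long rcu_seq_ctr(unsigned long s)
	{
		return s >> RCU_SEQ_CTR_SHIFT;
	}

	/* Phase within the current grace period: the low two bits. */
	static int rcu_seq_state(unsigned long s)
	{
		return s & RCU_SEQ_STATE_MASK;
	}

Hence a full grace period advances the sequence number by 4, which is why
the "g" values above do not match the old "g"/"c" pairs one-for-one.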
 
 In kernels with CONFIG_RCU_FAST_NO_HZ, more information is printed
 for each CPU:
@@ -266,15 +265,16 @@ If the relevant grace-period kthread has been unable to run prior to
 the stall warning, as was the case in the "All QSes seen" line above,
 the following additional line is printed:
 
-       kthread starved for 23807 jiffies! g7073 c7072 f0x0 RCU_GP_WAIT_FQS(3) ->state=0x1
+       kthread starved for 23807 jiffies! g7075 f0x0 RCU_GP_WAIT_FQS(3) ->state=0x1 ->cpu=5
 
 Starving the grace-period kthreads of CPU time can of course result
 in RCU CPU stall warnings even when all CPUs and tasks have passed
-through the required quiescent states.  The "g" and "c" numbers flag the
-number of the last grace period started and completed, respectively,
-the "f" precedes the ->gp_flags command to the grace-period kthread,
-the "RCU_GP_WAIT_FQS" indicates that the kthread is waiting for a short
-timeout, and the "state" precedes value of the task_struct ->state field.
+through the required quiescent states.  The "g" number shows the current
+grace-period sequence number, the "f" precedes the ->gp_flags command
+to the grace-period kthread, the "RCU_GP_WAIT_FQS" indicates that the
+kthread is waiting for a short timeout, the "state" precedes the value of the
+task_struct ->state field, and the "cpu" indicates that the grace-period
+kthread last ran on CPU 5.
 
 
 Multiple Warnings From One Stall
index 65eb856..c2a7fac 100644 (file)
@@ -588,6 +588,7 @@ It is extremely simple:
        void synchronize_rcu(void)
        {
                write_lock(&rcu_gp_mutex);
+               smp_mb__after_spinlock();
                write_unlock(&rcu_gp_mutex);
        }
 
@@ -609,12 +610,15 @@ don't forget about them when submitting patches making use of RCU!]
 
 The rcu_read_lock() and rcu_read_unlock() primitive read-acquire
 and release a global reader-writer lock.  The synchronize_rcu()
-primitive write-acquires this same lock, then immediately releases
-it.  This means that once synchronize_rcu() exits, all RCU read-side
-critical sections that were in progress before synchronize_rcu() was
-called are guaranteed to have completed -- there is no way that
-synchronize_rcu() would have been able to write-acquire the lock
-otherwise.
+primitive write-acquires this same lock, then releases it.  This means
+that once synchronize_rcu() exits, all RCU read-side critical sections
+that were in progress before synchronize_rcu() was called are guaranteed
+to have completed -- there is no way that synchronize_rcu() would have
+been able to write-acquire the lock otherwise.  The smp_mb__after_spinlock()
+promotes synchronize_rcu() to a full memory barrier in compliance with
+the "Memory-Barrier Guarantees" listed in:
+
+       Documentation/RCU/Design/Requirements/Requirements.html.
 
 It is possible to nest rcu_read_lock(), since reader-writer locks may
 be recursively acquired.  Note also that rcu_read_lock() is immune
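
Note: assembling the pieces of this analogy, the whole toy implementation
is just the following (a sketch only; the real in-kernel primitives share
no code with this):

	static DEFINE_RWLOCK(rcu_gp_mutex);

	void rcu_read_lock(void)
	{
		read_lock(&rcu_gp_mutex);
	}

	void rcu_read_unlock(void)
	{
		read_unlock(&rcu_gp_mutex);
	}

	void synchronize_rcu(void)
	{
		write_lock(&rcu_gp_mutex);
		smp_mb__after_spinlock();	/* full barrier, as above */
		write_unlock(&rcu_gp_mutex);
	}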
@@ -816,11 +820,13 @@ RCU list traversal:
        list_next_rcu
        list_for_each_entry_rcu
        list_for_each_entry_continue_rcu
+       list_for_each_entry_from_rcu
        hlist_first_rcu
        hlist_next_rcu
        hlist_pprev_rcu
        hlist_for_each_entry_rcu
        hlist_for_each_entry_rcu_bh
+       hlist_for_each_entry_from_rcu
        hlist_for_each_entry_continue_rcu
        hlist_for_each_entry_continue_rcu_bh
        hlist_nulls_first_rcu
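
Note: the two _from_rcu iterators added above resume a traversal from a
known element rather than from the list head.  A hypothetical use, with
illustrative struct and field names, where "start" was found earlier in
the same RCU read-side critical section:

	struct item {
		struct list_head link;
		int key;
	};

	static struct item *find_from(struct item *start,
				      struct list_head *head, int key)
	{
		struct item *pos = start;	/* caller holds rcu_read_lock() */

		list_for_each_entry_from_rcu(pos, head, link)
			if (pos->key == key)
				return pos;
		return NULL;
	}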
index 533ff5c..c370f5f 100644 (file)
                        Set time (s) after boot for CPU-hotplug testing.
 
        rcutorture.onoff_interval= [KNL]
-                       Set time (s) between CPU-hotplug operations, or
-                       zero to disable CPU-hotplug testing.
+                       Set time (jiffies) between CPU-hotplug operations,
+                       or zero to disable CPU-hotplug testing.
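
                       Note that the unit change means an old value such
                       as "3" now requests 3 jiffies rather than 3 seconds;
                       to keep roughly the old behavior on an HZ=250 kernel,
                       one would now boot with, for example:

                               rcutorture.onoff_interval=750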
 
        rcutorture.shuffle_interval= [KNL]
                        Set task-shuffle interval (s).  Shuffling tasks
index 5f89fb6..f97fd8a 100644 (file)
@@ -4,6 +4,7 @@ Required properties:
 
 - compatible : should be "ingenic,<socname>-intc". Valid strings are:
     ingenic,jz4740-intc
+    ingenic,jz4725b-intc
     ingenic,jz4770-intc
     ingenic,jz4775-intc
     ingenic,jz4780-intc
index 20f121d..697ca2f 100644 (file)
@@ -7,6 +7,7 @@ Required properties:
     - "renesas,irqc-r8a73a4" (R-Mobile APE6)
     - "renesas,irqc-r8a7743" (RZ/G1M)
     - "renesas,irqc-r8a7745" (RZ/G1E)
+    - "renesas,irqc-r8a77470" (RZ/G1C)
     - "renesas,irqc-r8a7790" (R-Car H2)
     - "renesas,irqc-r8a7791" (R-Car M2-W)
     - "renesas,irqc-r8a7792" (R-Car V2H)
@@ -16,6 +17,7 @@ Required properties:
     - "renesas,intc-ex-r8a7796" (R-Car M3-W)
     - "renesas,intc-ex-r8a77965" (R-Car M3-N)
     - "renesas,intc-ex-r8a77970" (R-Car V3M)
+    - "renesas,intc-ex-r8a77980" (R-Car V3H)
     - "renesas,intc-ex-r8a77995" (R-Car D3)
 - #interrupt-cells: has to be <2>: an interrupt index and flags, as defined in
   interrupts.txt in this directory
index 252a05c..c8c4b00 100644 (file)
@@ -16,7 +16,8 @@ A child node must exist to represent the core DWC3 IP block. The name of
 the node is not important. The content of the node is defined in dwc3.txt.
 
 Phy documentation is provided in the following places:
-Documentation/devicetree/bindings/phy/qcom-dwc3-usb-phy.txt
+Documentation/devicetree/bindings/phy/phy-rockchip-inno-usb2.txt - USB2.0 PHY
+Documentation/devicetree/bindings/phy/phy-rockchip-typec.txt     - Type-C PHY
 
 Example device nodes:
 
index 79fede4..d638b5a 100644 (file)
@@ -1,5 +1,6 @@
 .. include:: <isonum.txt>
 
+=========================================================
 DPAA2 (Data Path Acceleration Architecture Gen2) Overview
 =========================================================
 
index 0fe4228..d0d7290 100644 (file)
@@ -5444,6 +5444,7 @@ F:        drivers/iommu/exynos-iommu.c
 
 EZchip NPS platform support
 M:     Vineet Gupta <vgupta@synopsys.com>
+M:     Ofer Levi <oferle@mellanox.com>
 S:     Supported
 F:     arch/arc/plat-eznps
 F:     arch/arc/boot/dts/eznps.dts
@@ -5929,7 +5930,7 @@ F:        Documentation/dev-tools/gcov.rst
 
 GDB KERNEL DEBUGGING HELPER SCRIPTS
 M:     Jan Kiszka <jan.kiszka@siemens.com>
-M:     Kieran Bingham <kieran@bingham.xyz>
+M:     Kieran Bingham <kbingham@kernel.org>
 S:     Supported
 F:     scripts/gdb/
 
@@ -7095,6 +7096,7 @@ F:        include/uapi/linux/input.h
 F:     include/uapi/linux/input-event-codes.h
 F:     include/linux/input/
 F:     Documentation/devicetree/bindings/input/
+F:     Documentation/devicetree/bindings/serio/
 F:     Documentation/input/
 
 INPUT MULTITOUCH (MT) PROTOCOL
@@ -7984,7 +7986,7 @@ F:        lib/test_kmod.c
 F:     tools/testing/selftests/kmod/
 
 KPROBES
-M:     Ananth N Mavinakayanahalli <ananth@linux.vnet.ibm.com>
+M:     Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
 M:     Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 M:     "David S. Miller" <davem@davemloft.net>
 M:     Masami Hiramatsu <mhiramat@kernel.org>
@@ -12037,9 +12039,9 @@ T:      git git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu.git
 F:     Documentation/RCU/
 X:     Documentation/RCU/torture.txt
 F:     include/linux/rcu*
-X:     include/linux/srcu.h
+X:     include/linux/srcu*.h
 F:     kernel/rcu/
-X:     kernel/torture.c
+X:     kernel/rcu/srcu*.c
 
 REAL TIME CLOCK (RTC) SUBSYSTEM
 M:     Alessandro Zummo <a.zummo@towertech.it>
@@ -13076,8 +13078,8 @@ L:      linux-kernel@vger.kernel.org
 W:     http://www.rdrop.com/users/paulmck/RCU/
 S:     Supported
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu.git
-F:     include/linux/srcu.h
-F:     kernel/rcu/srcu.c
+F:     include/linux/srcu*.h
+F:     kernel/rcu/srcu*.c
 
 SERIAL LOW-POWER INTER-CHIP MEDIA BUS (SLIMbus)
 M:     Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
@@ -14436,6 +14438,7 @@ T:      git git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu.git
 F:     Documentation/RCU/torture.txt
 F:     kernel/torture.c
 F:     kernel/rcu/rcutorture.c
+F:     kernel/rcu/rcuperf.c
 F:     kernel/locking/locktorture.c
 
 TOSHIBA ACPI EXTRAS DRIVER
index 67d9d20..863f585 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
 VERSION = 4
 PATCHLEVEL = 18
 SUBLEVEL = 0
-EXTRAVERSION = -rc6
+EXTRAVERSION =
 NAME = Merciless Moray
 
 # *DOCUMENTATION*
index 9cf59fc..5151d81 100644 (file)
@@ -50,6 +50,9 @@ config ARC
        select HAVE_KERNEL_LZMA
        select ARCH_HAS_PTE_SPECIAL
 
+config ARCH_HAS_CACHE_LINE_SIZE
+       def_bool y
+
 config MIGHT_HAVE_PCI
        bool
 
index 8486f32..ff7d323 100644 (file)
@@ -48,7 +48,9 @@
 })
 
 /* Largest line length for either L1 or L2 is 128 bytes */
-#define ARCH_DMA_MINALIGN      128
+#define SMP_CACHE_BYTES                128
+#define cache_line_size()      SMP_CACHE_BYTES
+#define ARCH_DMA_MINALIGN      SMP_CACHE_BYTES
 
 extern void arc_cache_init(void);
 extern char *arc_cache_mumbojumbo(int cpu_id, char *buf, int len);
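
Note: pinning SMP_CACHE_BYTES at the worst-case line length matters
because both ____cacheline_aligned and kmalloc()'s minimum alignment (via
ARCH_DMA_MINALIGN) derive from these constants.  A hypothetical layout
this protects, with illustrative names:

	/* rx_buf is DMAed into; it must not share a cache line with
	 * CPU-written fields such as the lock.
	 */
	struct my_priv {
		spinlock_t lock;
		u8 rx_buf[128] ____cacheline_aligned;	/* 128 == SMP_CACHE_BYTES */
	};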
index d5da211..03d6bb0 100644 (file)
 #ifndef __ASM_ARC_UDELAY_H
 #define __ASM_ARC_UDELAY_H
 
+#include <asm-generic/types.h>
 #include <asm/param.h>         /* HZ */
 
+extern unsigned long loops_per_jiffy;
+
 static inline void __delay(unsigned long loops)
 {
        __asm__ __volatile__(
index 9dbe645..25c6319 100644 (file)
@@ -1038,7 +1038,7 @@ void flush_cache_mm(struct mm_struct *mm)
 void flush_cache_page(struct vm_area_struct *vma, unsigned long u_vaddr,
                      unsigned long pfn)
 {
-       unsigned int paddr = pfn << PAGE_SHIFT;
+       phys_addr_t paddr = pfn << PAGE_SHIFT;
 
        u_vaddr &= PAGE_MASK;
 
@@ -1058,8 +1058,9 @@ void flush_anon_page(struct vm_area_struct *vma, struct page *page,
                     unsigned long u_vaddr)
 {
        /* TBD: do we really need to clear the kernel mapping */
-       __flush_dcache_page(page_address(page), u_vaddr);
-       __flush_dcache_page(page_address(page), page_address(page));
+       __flush_dcache_page((phys_addr_t)page_address(page), u_vaddr);
+       __flush_dcache_page((phys_addr_t)page_address(page),
+                           (phys_addr_t)page_address(page));
 
 }
 
@@ -1246,6 +1247,16 @@ void __init arc_cache_init_master(void)
                }
        }
 
+       /*
+        * Check that SMP_CACHE_BYTES (and hence ARCH_DMA_MINALIGN) is larger
+        * than or equal to any cache line length.
+        */
+       BUILD_BUG_ON_MSG(L1_CACHE_BYTES > SMP_CACHE_BYTES,
+                        "SMP_CACHE_BYTES must be >= any cache line length");
+       if (is_isa_arcv2() && (l2_line_sz > SMP_CACHE_BYTES))
+               panic("L2 Cache line [%d] > kernel Config [%d]\n",
+                     l2_line_sz, SMP_CACHE_BYTES);
+
        /* Note that SLC disable not formally supported till HS 3.0 */
        if (is_isa_arcv2() && l2_line_sz && !slc_enable)
                arc_slc_disable();
index 8c10718..ec47e60 100644 (file)
@@ -129,14 +129,59 @@ int arch_dma_mmap(struct device *dev, struct vm_area_struct *vma,
        return ret;
 }
 
+/*
+ * Cache operations depending on function and direction argument, inspired by
+ * https://lkml.org/lkml/2018/5/18/979
+ * "dma_sync_*_for_cpu and direction=TO_DEVICE (was Re: [PATCH 02/20]
+ * dma-mapping: provide a generic dma-noncoherent implementation)"
+ *
+ *          |   map          ==  for_device     |   unmap     ==  for_cpu
+ *          |----------------------------------------------------------------
+ * TO_DEV   |   writeback        writeback      |   none          none
+ * FROM_DEV |   invalidate       invalidate     |   invalidate*   invalidate*
+ * BIDIR    |   writeback+inv    writeback+inv  |   invalidate    invalidate
+ *
+ *     [*] needed for CPU speculative prefetches
+ *
+ * NOTE: we don't check the validity of the direction argument; that is done
+ * in the upper-layer functions (in include/linux/dma-mapping.h).
+ */
+
 void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
                size_t size, enum dma_data_direction dir)
 {
-       dma_cache_wback(paddr, size);
+       switch (dir) {
+       case DMA_TO_DEVICE:
+               dma_cache_wback(paddr, size);
+               break;
+
+       case DMA_FROM_DEVICE:
+               dma_cache_inv(paddr, size);
+               break;
+
+       case DMA_BIDIRECTIONAL:
+               dma_cache_wback_inv(paddr, size);
+               break;
+
+       default:
+               break;
+       }
 }
 
 void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
                size_t size, enum dma_data_direction dir)
 {
-       dma_cache_inv(paddr, size);
+       switch (dir) {
+       case DMA_TO_DEVICE:
+               break;
+
+       /* FROM_DEVICE: invalidate needed only for speculative CPU prefetches */
+       case DMA_FROM_DEVICE:
+       case DMA_BIDIRECTIONAL:
+               dma_cache_inv(paddr, size);
+               break;
+
+       default:
+               break;
+       }
 }
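
Note: mapping the table above onto the streaming DMA API, a hypothetical
DMA_FROM_DEVICE round trip (dev and buf are illustrative) reaches these
hooks as follows:

	static void rx_one(struct device *dev, void *buf, size_t len)
	{
		dma_addr_t handle;

		/* map == for_device: dma_cache_inv() per the table */
		handle = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
		if (dma_mapping_error(dev, handle))
			return;

		/* ... device DMAs into buf ... */

		/* unmap == for_cpu: invalidate again, in case the CPU
		 * speculatively prefetched stale lines meanwhile
		 */
		dma_unmap_single(dev, handle, len, DMA_FROM_DEVICE);
	}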
index 0c7d110..4f6a167 100644 (file)
@@ -21,6 +21,7 @@
 #error "Incorrect ctop.h include"
 #endif
 
+#include <linux/types.h>
 #include <soc/nps/common.h>
 
 /* core auxiliary registers */
@@ -143,6 +144,15 @@ struct nps_host_reg_gim_p_int_dst {
 };
 
 /* AUX registers definition */
+struct nps_host_reg_aux_dpc {
+       union {
+               struct {
+                       u32 ien:1, men:1, hen:1, reserved:29;
+               };
+               u32 value;
+       };
+};
+
 struct nps_host_reg_aux_udmc {
        union {
                struct {
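
Note: like the neighboring aux-register structures, the union supports a
read-modify-write through ->value.  A sketch of setting one field; the
CTOP_AUX_DPC register name is an assumption here:

	struct nps_host_reg_aux_dpc dpc;

	dpc.value = read_aux_reg(CTOP_AUX_DPC);	/* assumed register define */
	dpc.men = 1;				/* touch just the one field */
	write_aux_reg(CTOP_AUX_DPC, dpc.value);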
index 2388de3..ed0077e 100644 (file)
@@ -15,6 +15,8 @@
  */
 
 #include <linux/smp.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
 #include <linux/io.h>
 #include <linux/log2.h>
 #include <asm/arcregs.h>
@@ -157,10 +159,10 @@ void mtm_enable_core(unsigned int cpu)
 /* Verify and set the value of the mtm hs counter */
 static int __init set_mtm_hs_ctr(char *ctr_str)
 {
-       long hs_ctr;
+       int hs_ctr;
        int ret;
 
-       ret = kstrtol(ctr_str, 0, &hs_ctr);
+       ret = kstrtoint(ctr_str, 0, &hs_ctr);
 
        if (ret || hs_ctr > MT_HS_CNT_MAX || hs_ctr < MT_HS_CNT_MIN) {
                pr_err("** Invalid @nps_mtm_hs_ctr [%d] needs to be [%d:%d] (incl)\n",
index 843edfd..d7a8128 100644 (file)
@@ -337,8 +337,8 @@ config ARCH_MULTIPLATFORM
        select TIMER_OF
        select COMMON_CLK
        select GENERIC_CLOCKEVENTS
+       select GENERIC_IRQ_MULTI_HANDLER
        select MIGHT_HAVE_PCI
-       select MULTI_IRQ_HANDLER
        select PCI_DOMAINS if PCI
        select SPARSE_IRQ
        select USE_OF
@@ -465,9 +465,9 @@ config ARCH_DOVE
        bool "Marvell Dove"
        select CPU_PJ4
        select GENERIC_CLOCKEVENTS
+       select GENERIC_IRQ_MULTI_HANDLER
        select GPIOLIB
        select MIGHT_HAVE_PCI
-       select MULTI_IRQ_HANDLER
        select MVEBU_MBUS
        select PINCTRL
        select PINCTRL_DOVE
@@ -512,8 +512,8 @@ config ARCH_LPC32XX
        select COMMON_CLK
        select CPU_ARM926T
        select GENERIC_CLOCKEVENTS
+       select GENERIC_IRQ_MULTI_HANDLER
        select GPIOLIB
-       select MULTI_IRQ_HANDLER
        select SPARSE_IRQ
        select USE_OF
        help
@@ -532,11 +532,11 @@ config ARCH_PXA
        select TIMER_OF
        select CPU_XSCALE if !CPU_XSC3
        select GENERIC_CLOCKEVENTS
+       select GENERIC_IRQ_MULTI_HANDLER
        select GPIO_PXA
        select GPIOLIB
        select HAVE_IDE
        select IRQ_DOMAIN
-       select MULTI_IRQ_HANDLER
        select PLAT_PXA
        select SPARSE_IRQ
        help
@@ -572,11 +572,11 @@ config ARCH_SA1100
        select CPU_FREQ
        select CPU_SA1100
        select GENERIC_CLOCKEVENTS
+       select GENERIC_IRQ_MULTI_HANDLER
        select GPIOLIB
        select HAVE_IDE
        select IRQ_DOMAIN
        select ISA
-       select MULTI_IRQ_HANDLER
        select NEED_MACH_MEMORY_H
        select SPARSE_IRQ
        help
@@ -590,10 +590,10 @@ config ARCH_S3C24XX
        select GENERIC_CLOCKEVENTS
        select GPIO_SAMSUNG
        select GPIOLIB
+       select GENERIC_IRQ_MULTI_HANDLER
        select HAVE_S3C2410_I2C if I2C
        select HAVE_S3C2410_WATCHDOG if WATCHDOG
        select HAVE_S3C_RTC if RTC_CLASS
-       select MULTI_IRQ_HANDLER
        select NEED_MACH_IO_H
        select SAMSUNG_ATAGS
        select USE_OF
@@ -627,10 +627,10 @@ config ARCH_OMAP1
        select CLKSRC_MMIO
        select GENERIC_CLOCKEVENTS
        select GENERIC_IRQ_CHIP
+       select GENERIC_IRQ_MULTI_HANDLER
        select GPIOLIB
        select HAVE_IDE
        select IRQ_DOMAIN
-       select MULTI_IRQ_HANDLER
        select NEED_MACH_IO_H if PCCARD
        select NEED_MACH_MEMORY_H
        select SPARSE_IRQ
@@ -921,11 +921,6 @@ config IWMMXT
          Enable support for iWMMXt context switching at run time if
          running on a CPU that supports it.
 
-config MULTI_IRQ_HANDLER
-       bool
-       help
-         Allow each machine to specify it's own IRQ handler at run time.
-
 if !MMU
 source "arch/arm/Kconfig-nommu"
 endif
index 17f1f1a..38badaa 100644 (file)
@@ -58,6 +58,9 @@ void efi_virtmap_unload(void);
 #define efi_call_runtime(f, ...)       sys_table_arg->runtime->f(__VA_ARGS__)
 #define efi_is_64bit()                 (false)
 
+#define efi_table_attr(table, attr, instance)                          \
+       ((table##_t *)instance)->attr
+
 #define efi_call_proto(protocol, f, instance, ...)                     \
        ((protocol##_t *)instance)->f(instance, ##__VA_ARGS__)
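
Note: efi_table_attr() mirrors efi_call_proto() below it: on ARM both
reduce to a plain cast-and-dereference, but the indirection lets the
shared EFI-stub source also build on architectures where the access is
not a simple dereference.  A hypothetical use:

	efi_boot_services_t *boottime;

	boottime = efi_table_attr(efi_system_table, boottime, sys_table_arg);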
 
index b6f3196..c883fcb 100644 (file)
@@ -31,11 +31,6 @@ extern void asm_do_IRQ(unsigned int, struct pt_regs *);
 void handle_IRQ(unsigned int, struct pt_regs *);
 void init_IRQ(void);
 
-#ifdef CONFIG_MULTI_IRQ_HANDLER
-extern void (*handle_arch_irq)(struct pt_regs *);
-extern void set_handle_irq(void (*handle_irq)(struct pt_regs *));
-#endif
-
 #ifdef CONFIG_SMP
 extern void arch_trigger_cpumask_backtrace(const cpumask_t *mask,
                                           bool exclude_self);
index 5c1ad11..bb88512 100644 (file)
@@ -59,7 +59,7 @@ struct machine_desc {
        void                    (*init_time)(void);
        void                    (*init_machine)(void);
        void                    (*init_late)(void);
-#ifdef CONFIG_MULTI_IRQ_HANDLER
+#ifdef CONFIG_GENERIC_IRQ_MULTI_HANDLER
        void                    (*handle_irq)(struct pt_regs *);
 #endif
        void                    (*restart)(enum reboot_mode, const char *);
index 179a9f6..e85a3af 100644 (file)
@@ -22,7 +22,7 @@
 #include <asm/glue-df.h>
 #include <asm/glue-pf.h>
 #include <asm/vfpmacros.h>
-#ifndef CONFIG_MULTI_IRQ_HANDLER
+#ifndef CONFIG_GENERIC_IRQ_MULTI_HANDLER
 #include <mach/entry-macro.S>
 #endif
 #include <asm/thread_notify.h>
@@ -39,7 +39,7 @@
  * Interrupt handling.
  */
        .macro  irq_handler
-#ifdef CONFIG_MULTI_IRQ_HANDLER
+#ifdef CONFIG_GENERIC_IRQ_MULTI_HANDLER
        ldr     r1, =handle_arch_irq
        mov     r0, sp
        badr    lr, 9997f
@@ -1226,9 +1226,3 @@ vector_addrexcptn:
        .globl  cr_alignment
 cr_alignment:
        .space  4
-
-#ifdef CONFIG_MULTI_IRQ_HANDLER
-       .globl  handle_arch_irq
-handle_arch_irq:
-       .space  4
-#endif
index 106a146..746565a 100644 (file)
@@ -48,6 +48,7 @@ saved_pc      .req    lr
  * from those features make this path too inefficient.
  */
 ret_fast_syscall:
+__ret_fast_syscall:
  UNWIND(.fnstart       )
  UNWIND(.cantunwind    )
        disable_irq_notrace                     @ disable interrupts
@@ -78,6 +79,7 @@ fast_work_pending:
  * call.
  */
 ret_fast_syscall:
+__ret_fast_syscall:
  UNWIND(.fnstart       )
  UNWIND(.cantunwind    )
        str     r0, [sp, #S_R0 + S_OFF]!        @ save returned r0
@@ -255,7 +257,7 @@ local_restart:
        tst     r10, #_TIF_SYSCALL_WORK         @ are we tracing syscalls?
        bne     __sys_trace
 
-       invoke_syscall tbl, scno, r10, ret_fast_syscall
+       invoke_syscall tbl, scno, r10, __ret_fast_syscall
 
        add     r1, sp, #S_OFF
 2:     cmp     scno, #(__ARM_NR_BASE - __NR_SYSCALL_BASE)
index ece04a4..9908dac 100644 (file)
@@ -102,16 +102,6 @@ void __init init_IRQ(void)
        uniphier_cache_init();
 }
 
-#ifdef CONFIG_MULTI_IRQ_HANDLER
-void __init set_handle_irq(void (*handle_irq)(struct pt_regs *))
-{
-       if (handle_arch_irq)
-               return;
-
-       handle_arch_irq = handle_irq;
-}
-#endif
-
 #ifdef CONFIG_SPARSE_IRQ
 int __init arch_probe_nr_irqs(void)
 {
index 225d1c5..d9c2991 100644 (file)
@@ -338,6 +338,7 @@ static struct vm_area_struct gate_vma = {
 
 static int __init gate_vma_init(void)
 {
+       vma_init(&gate_vma, NULL);
        gate_vma.vm_page_prot = PAGE_READONLY_EXEC;
        return 0;
 }
index 35ca494..4c249cb 100644 (file)
@@ -1145,7 +1145,7 @@ void __init setup_arch(char **cmdline_p)
 
        reserve_crashkernel();
 
-#ifdef CONFIG_MULTI_IRQ_HANDLER
+#ifdef CONFIG_GENERIC_IRQ_MULTI_HANDLER
        handle_arch_irq = mdesc->handle_irq;
 #endif
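
Note: with GENERIC_IRQ_MULTI_HANDLER, handle_arch_irq and
set_handle_irq() come from the generic IRQ core instead of per-arch
copies; machine descriptors keep working as above, and an irqchip driver
registers the same way.  A sketch with illustrative names and an assumed
pending-register offset:

	static struct irq_domain *my_intc_domain;
	static void __iomem *my_intc_base;

	static void __exception_irq_entry my_intc_handle_irq(struct pt_regs *regs)
	{
		u32 hwirq = readl_relaxed(my_intc_base + 0x10);

		handle_domain_irq(my_intc_domain, hwirq, regs);
	}

	static int __init my_intc_init(struct device_node *np,
				       struct device_node *parent)
	{
		/* ... ioremap registers, create my_intc_domain ... */
		set_handle_irq(my_intc_handle_irq);
		return 0;
	}
	IRQCHIP_DECLARE(my_intc, "vendor,my-intc", my_intc_init);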
 
index 39aef48..04b2f22 100644 (file)
@@ -212,7 +212,7 @@ static DEFINE_MUTEX(ecard_mutex);
  */
 static void ecard_init_pgtables(struct mm_struct *mm)
 {
-       struct vm_area_struct vma;
+       struct vm_area_struct vma = TLB_FLUSH_VMA(mm, VM_EXEC);
 
        /* We want to set up the page tables for the following mapping:
         *  Virtual     Physical
@@ -237,9 +237,6 @@ static void ecard_init_pgtables(struct mm_struct *mm)
 
        memcpy(dst_pgd, src_pgd, sizeof(pgd_t) * (EASI_SIZE / PGDIR_SIZE));
 
-       vma.vm_flags = VM_EXEC;
-       vma.vm_mm = mm;
-
        flush_tlb_range(&vma, IO_START, IO_START + IO_SIZE);
        flush_tlb_range(&vma, EASI_START, EASI_START + EASI_SIZE);
 }
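
Note: TLB_FLUSH_VMA() is introduced elsewhere in this series
(include/linux/mm.h) so that these on-stack vmas carry only the two
fields the flush paths actually read; per that patch it expands to
roughly:

	#define TLB_FLUSH_VMA(mm, flags) { .vm_mm = (mm), .vm_flags = (flags) }

The designated initializer leaves the rest of the structure zeroed, which
the old open-coded assignments did not guarantee.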
index 42c090c..3d10119 100644 (file)
@@ -74,6 +74,7 @@ config ARM64
        select GENERIC_CPU_AUTOPROBE
        select GENERIC_EARLY_IOREMAP
        select GENERIC_IDLE_POLL_SETUP
+       select GENERIC_IRQ_MULTI_HANDLER
        select GENERIC_IRQ_PROBE
        select GENERIC_IRQ_SHOW
        select GENERIC_IRQ_SHOW_LEVEL
@@ -264,9 +265,6 @@ config ARCH_SUPPORTS_UPROBES
 config ARCH_PROC_KCORE_TEXT
        def_bool y
 
-config MULTI_IRQ_HANDLER
-       def_bool y
-
 source "init/Kconfig"
 
 source "kernel/Kconfig.freezer"
index 88f5aef..e3a375c 100644 (file)
         *                           u32 *macp, u8 const rk[], u32 rounds);
         */
 ENTRY(ce_aes_ccm_auth_data)
-       frame_push      7
-
-       mov     x19, x0
-       mov     x20, x1
-       mov     x21, x2
-       mov     x22, x3
-       mov     x23, x4
-       mov     x24, x5
-
-       ldr     w25, [x22]                      /* leftover from prev round? */
+       ldr     w8, [x3]                        /* leftover from prev round? */
        ld1     {v0.16b}, [x0]                  /* load mac */
-       cbz     w25, 1f
-       sub     w25, w25, #16
+       cbz     w8, 1f
+       sub     w8, w8, #16
        eor     v1.16b, v1.16b, v1.16b
-0:     ldrb    w7, [x20], #1                   /* get 1 byte of input */
-       subs    w21, w21, #1
-       add     w25, w25, #1
+0:     ldrb    w7, [x1], #1                    /* get 1 byte of input */
+       subs    w2, w2, #1
+       add     w8, w8, #1
        ins     v1.b[0], w7
        ext     v1.16b, v1.16b, v1.16b, #1      /* rotate in the input bytes */
        beq     8f                              /* out of input? */
-       cbnz    w25, 0b
+       cbnz    w8, 0b
        eor     v0.16b, v0.16b, v1.16b
-1:     ld1     {v3.4s}, [x23]                  /* load first round key */
-       prfm    pldl1strm, [x20]
-       cmp     w24, #12                        /* which key size? */
-       add     x6, x23, #16
-       sub     w7, w24, #2                     /* modified # of rounds */
+1:     ld1     {v3.4s}, [x4]                   /* load first round key */
+       prfm    pldl1strm, [x1]
+       cmp     w5, #12                         /* which key size? */
+       add     x6, x4, #16
+       sub     w7, w5, #2                      /* modified # of rounds */
        bmi     2f
        bne     5f
        mov     v5.16b, v3.16b
@@ -64,43 +55,33 @@ ENTRY(ce_aes_ccm_auth_data)
        ld1     {v5.4s}, [x6], #16              /* load next round key */
        bpl     3b
        aese    v0.16b, v4.16b
-       subs    w21, w21, #16                   /* last data? */
+       subs    w2, w2, #16                     /* last data? */
        eor     v0.16b, v0.16b, v5.16b          /* final round */
        bmi     6f
-       ld1     {v1.16b}, [x20], #16            /* load next input block */
+       ld1     {v1.16b}, [x1], #16             /* load next input block */
        eor     v0.16b, v0.16b, v1.16b          /* xor with mac */
-       beq     6f
-
-       if_will_cond_yield_neon
-       st1     {v0.16b}, [x19]                 /* store mac */
-       do_cond_yield_neon
-       ld1     {v0.16b}, [x19]                 /* reload mac */
-       endif_yield_neon
-
-       b       1b
-6:     st1     {v0.16b}, [x19]                 /* store mac */
+       bne     1b
+6:     st1     {v0.16b}, [x0]                  /* store mac */
        beq     10f
-       adds    w21, w21, #16
+       adds    w2, w2, #16
        beq     10f
-       mov     w25, w21
-7:     ldrb    w7, [x20], #1
+       mov     w8, w2
+7:     ldrb    w7, [x1], #1
        umov    w6, v0.b[0]
        eor     w6, w6, w7
-       strb    w6, [x19], #1
-       subs    w21, w21, #1
+       strb    w6, [x0], #1
+       subs    w2, w2, #1
        beq     10f
        ext     v0.16b, v0.16b, v0.16b, #1      /* rotate out the mac bytes */
        b       7b
-8:     mov     w7, w25
-       add     w25, w25, #16
+8:     mov     w7, w8
+       add     w8, w8, #16
 9:     ext     v1.16b, v1.16b, v1.16b, #1
        adds    w7, w7, #1
        bne     9b
        eor     v0.16b, v0.16b, v1.16b
-       st1     {v0.16b}, [x19]
-10:    str     w25, [x22]
-
-       frame_pop
+       st1     {v0.16b}, [x0]
+10:    str     w8, [x3]
        ret
 ENDPROC(ce_aes_ccm_auth_data)
 
@@ -145,29 +126,19 @@ ENTRY(ce_aes_ccm_final)
 ENDPROC(ce_aes_ccm_final)
 
        .macro  aes_ccm_do_crypt,enc
-       frame_push      8
-
-       mov     x19, x0
-       mov     x20, x1
-       mov     x21, x2
-       mov     x22, x3
-       mov     x23, x4
-       mov     x24, x5
-       mov     x25, x6
-
-       ldr     x26, [x25, #8]                  /* load lower ctr */
-       ld1     {v0.16b}, [x24]                 /* load mac */
-CPU_LE(        rev     x26, x26                )       /* keep swabbed ctr in reg */
+       ldr     x8, [x6, #8]                    /* load lower ctr */
+       ld1     {v0.16b}, [x5]                  /* load mac */
+CPU_LE(        rev     x8, x8                  )       /* keep swabbed ctr in reg */
 0:     /* outer loop */
-       ld1     {v1.8b}, [x25]                  /* load upper ctr */
-       prfm    pldl1strm, [x20]
-       add     x26, x26, #1
-       rev     x9, x26
-       cmp     w23, #12                        /* which key size? */
-       sub     w7, w23, #2                     /* get modified # of rounds */
+       ld1     {v1.8b}, [x6]                   /* load upper ctr */
+       prfm    pldl1strm, [x1]
+       add     x8, x8, #1
+       rev     x9, x8
+       cmp     w4, #12                         /* which key size? */
+       sub     w7, w4, #2                      /* get modified # of rounds */
        ins     v1.d[1], x9                     /* no carry in lower ctr */
-       ld1     {v3.4s}, [x22]                  /* load first round key */
-       add     x10, x22, #16
+       ld1     {v3.4s}, [x3]                   /* load first round key */
+       add     x10, x3, #16
        bmi     1f
        bne     4f
        mov     v5.16b, v3.16b
@@ -194,9 +165,9 @@ CPU_LE(     rev     x26, x26                )       /* keep swabbed ctr in reg */
        bpl     2b
        aese    v0.16b, v4.16b
        aese    v1.16b, v4.16b
-       subs    w21, w21, #16
-       bmi     7f                              /* partial block? */
-       ld1     {v2.16b}, [x20], #16            /* load next input block */
+       subs    w2, w2, #16
+       bmi     6f                              /* partial block? */
+       ld1     {v2.16b}, [x1], #16             /* load next input block */
        .if     \enc == 1
        eor     v2.16b, v2.16b, v5.16b          /* final round enc+mac */
        eor     v1.16b, v1.16b, v2.16b          /* xor with crypted ctr */
@@ -205,29 +176,18 @@ CPU_LE(   rev     x26, x26                )       /* keep swabbed ctr in reg */
        eor     v1.16b, v2.16b, v5.16b          /* final round enc */
        .endif
        eor     v0.16b, v0.16b, v2.16b          /* xor mac with pt ^ rk[last] */
-       st1     {v1.16b}, [x19], #16            /* write output block */
-       beq     5f
-
-       if_will_cond_yield_neon
-       st1     {v0.16b}, [x24]                 /* store mac */
-       do_cond_yield_neon
-       ld1     {v0.16b}, [x24]                 /* reload mac */
-       endif_yield_neon
-
-       b       0b
-5:
-CPU_LE(        rev     x26, x26                        )
-       st1     {v0.16b}, [x24]                 /* store mac */
-       str     x26, [x25, #8]                  /* store lsb end of ctr (BE) */
-
-6:     frame_pop
-       ret
-
-7:     eor     v0.16b, v0.16b, v5.16b          /* final round mac */
+       st1     {v1.16b}, [x0], #16             /* write output block */
+       bne     0b
+CPU_LE(        rev     x8, x8                  )
+       st1     {v0.16b}, [x5]                  /* store mac */
+       str     x8, [x6, #8]                    /* store lsb end of ctr (BE) */
+5:     ret
+
+6:     eor     v0.16b, v0.16b, v5.16b          /* final round mac */
        eor     v1.16b, v1.16b, v5.16b          /* final round enc */
-       st1     {v0.16b}, [x24]                 /* store mac */
-       add     w21, w21, #16                   /* process partial tail block */
-8:     ldrb    w9, [x20], #1                   /* get 1 byte of input */
+       st1     {v0.16b}, [x5]                  /* store mac */
+       add     w2, w2, #16                     /* process partial tail block */
+7:     ldrb    w9, [x1], #1                    /* get 1 byte of input */
        umov    w6, v1.b[0]                     /* get top crypted ctr byte */
        umov    w7, v0.b[0]                     /* get top mac byte */
        .if     \enc == 1
@@ -237,13 +197,13 @@ CPU_LE(   rev     x26, x26                        )
        eor     w9, w9, w6
        eor     w7, w7, w9
        .endif
-       strb    w9, [x19], #1                   /* store out byte */
-       strb    w7, [x24], #1                   /* store mac byte */
-       subs    w21, w21, #1
-       beq     6b
+       strb    w9, [x0], #1                    /* store out byte */
+       strb    w7, [x5], #1                    /* store mac byte */
+       subs    w2, w2, #1
+       beq     5b
        ext     v0.16b, v0.16b, v0.16b, #1      /* shift out mac byte */
        ext     v1.16b, v1.16b, v1.16b, #1      /* shift out ctr byte */
-       b       8b
+       b       7b
        .endm
 
        /*
index dcffb9e..c723647 100644 (file)
@@ -322,55 +322,41 @@ ENDPROC(pmull_ghash_update_p8)
        .endm
 
        .macro          pmull_gcm_do_crypt, enc
-       frame_push      10
+       ld1             {SHASH.2d}, [x4]
+       ld1             {XL.2d}, [x1]
+       ldr             x8, [x5, #8]                    // load lower counter
 
-       mov             x19, x0
-       mov             x20, x1
-       mov             x21, x2
-       mov             x22, x3
-       mov             x23, x4
-       mov             x24, x5
-       mov             x25, x6
-       mov             x26, x7
-       .if             \enc == 1
-       ldr             x27, [sp, #96]                  // first stacked arg
-       .endif
-
-       ldr             x28, [x24, #8]                  // load lower counter
-CPU_LE(        rev             x28, x28        )
-
-0:     mov             x0, x25
-       load_round_keys w26, x0
-       ld1             {SHASH.2d}, [x23]
-       ld1             {XL.2d}, [x20]
+       load_round_keys w7, x6
 
        movi            MASK.16b, #0xe1
        ext             SHASH2.16b, SHASH.16b, SHASH.16b, #8
+CPU_LE(        rev             x8, x8          )
        shl             MASK.2d, MASK.2d, #57
        eor             SHASH2.16b, SHASH2.16b, SHASH.16b
 
        .if             \enc == 1
-       ld1             {KS.16b}, [x27]
+       ldr             x10, [sp]
+       ld1             {KS.16b}, [x10]
        .endif
 
-1:     ld1             {CTR.8b}, [x24]                 // load upper counter
-       ld1             {INP.16b}, [x22], #16
-       rev             x9, x28
-       add             x28, x28, #1
-       sub             w19, w19, #1
+0:     ld1             {CTR.8b}, [x5]                  // load upper counter
+       ld1             {INP.16b}, [x3], #16
+       rev             x9, x8
+       add             x8, x8, #1
+       sub             w0, w0, #1
        ins             CTR.d[1], x9                    // set lower counter
 
        .if             \enc == 1
        eor             INP.16b, INP.16b, KS.16b        // encrypt input
-       st1             {INP.16b}, [x21], #16
+       st1             {INP.16b}, [x2], #16
        .endif
 
        rev64           T1.16b, INP.16b
 
-       cmp             w26, #12
-       b.ge            4f                              // AES-192/256?
+       cmp             w7, #12
+       b.ge            2f                              // AES-192/256?
 
-2:     enc_round       CTR, v21
+1:     enc_round       CTR, v21
 
        ext             T2.16b, XL.16b, XL.16b, #8
        ext             IN1.16b, T1.16b, T1.16b, #8
@@ -425,39 +411,27 @@ CPU_LE(   rev             x28, x28        )
 
        .if             \enc == 0
        eor             INP.16b, INP.16b, KS.16b
-       st1             {INP.16b}, [x21], #16
+       st1             {INP.16b}, [x2], #16
        .endif
 
-       cbz             w19, 3f
+       cbnz            w0, 0b
 
-       if_will_cond_yield_neon
-       st1             {XL.2d}, [x20]
-       .if             \enc == 1
-       st1             {KS.16b}, [x27]
-       .endif
-       do_cond_yield_neon
-       b               0b
-       endif_yield_neon
+CPU_LE(        rev             x8, x8          )
+       st1             {XL.2d}, [x1]
+       str             x8, [x5, #8]                    // store lower counter
 
-       b               1b
-
-3:     st1             {XL.2d}, [x20]
        .if             \enc == 1
-       st1             {KS.16b}, [x27]
+       st1             {KS.16b}, [x10]
        .endif
 
-CPU_LE(        rev             x28, x28        )
-       str             x28, [x24, #8]                  // store lower counter
-
-       frame_pop
        ret
 
-4:     b.eq            5f                              // AES-192?
+2:     b.eq            3f                              // AES-192?
        enc_round       CTR, v17
        enc_round       CTR, v18
-5:     enc_round       CTR, v19
+3:     enc_round       CTR, v19
        enc_round       CTR, v20
-       b               2b
+       b               1b
        .endm
 
        /*
index 7cf0b1a..8a10f1d 100644 (file)
@@ -488,9 +488,13 @@ static int gcm_decrypt(struct aead_request *req)
                        err = skcipher_walk_done(&walk,
                                                 walk.nbytes % AES_BLOCK_SIZE);
                }
-               if (walk.nbytes)
-                       pmull_gcm_encrypt_block(iv, iv, NULL,
+               if (walk.nbytes) {
+                       kernel_neon_begin();
+                       pmull_gcm_encrypt_block(iv, iv, ctx->aes_key.key_enc,
                                                num_rounds(&ctx->aes_key));
+                       kernel_neon_end();
+               }
+
        } else {
                __aes_arm64_encrypt(ctx->aes_key.key_enc, tag, iv,
                                    num_rounds(&ctx->aes_key));
index 192d791..7ed3208 100644 (file)
@@ -87,6 +87,9 @@ static inline unsigned long efi_get_max_initrd_addr(unsigned long dram_base,
 #define efi_call_runtime(f, ...)       sys_table_arg->runtime->f(__VA_ARGS__)
 #define efi_is_64bit()                 (true)
 
+#define efi_table_attr(table, attr, instance)                          \
+       ((table##_t *)instance)->attr
+
 #define efi_call_proto(protocol, f, instance, ...)                     \
        ((protocol##_t *)instance)->f(instance, ##__VA_ARGS__)
 
index a0fee69..b2b0c64 100644 (file)
@@ -8,8 +8,6 @@
 
 struct pt_regs;
 
-extern void set_handle_irq(void (*handle_irq)(struct pt_regs *));
-
 static inline int nr_legacy_irqs(void)
 {
        return 0;
index ffdaea7..0ad1cf2 100644 (file)
@@ -37,7 +37,7 @@ static inline void __tlb_remove_table(void *_table)
 
 static inline void tlb_flush(struct mmu_gather *tlb)
 {
-       struct vm_area_struct vma = { .vm_mm = tlb->mm, };
+       struct vm_area_struct vma = TLB_FLUSH_VMA(tlb->mm, 0);
 
        /*
         * The ASID allocator will either invalidate the ASID or mark
index f24892a..c6d8074 100644 (file)
@@ -1351,9 +1351,9 @@ static void __update_cpu_capabilities(const struct arm64_cpu_capabilities *caps,
 
 static void update_cpu_capabilities(u16 scope_mask)
 {
-       __update_cpu_capabilities(arm64_features, scope_mask, "detected:");
        __update_cpu_capabilities(arm64_errata, scope_mask,
                                  "enabling workaround for");
+       __update_cpu_capabilities(arm64_features, scope_mask, "detected:");
 }
 
 static int __enable_cpu_capability(void *arg)
@@ -1408,8 +1408,8 @@ __enable_cpu_capabilities(const struct arm64_cpu_capabilities *caps,
 
 static void __init enable_cpu_capabilities(u16 scope_mask)
 {
-       __enable_cpu_capabilities(arm64_features, scope_mask);
        __enable_cpu_capabilities(arm64_errata, scope_mask);
+       __enable_cpu_capabilities(arm64_features, scope_mask);
 }
 
 /*
index 60e5fc6..780a12f 100644 (file)
@@ -42,16 +42,6 @@ int arch_show_interrupts(struct seq_file *p, int prec)
        return 0;
 }
 
-void (*handle_arch_irq)(struct pt_regs *) = NULL;
-
-void __init set_handle_irq(void (*handle_irq)(struct pt_regs *))
-{
-       if (handle_arch_irq)
-               return;
-
-       handle_arch_irq = handle_irq;
-}
-
 #ifdef CONFIG_VMAP_STACK
 static void init_irq_stacks(void)
 {
index ecc6818..192b3ba 100644 (file)
@@ -108,7 +108,6 @@ static pte_t get_clear_flush(struct mm_struct *mm,
                             unsigned long pgsize,
                             unsigned long ncontig)
 {
-       struct vm_area_struct vma = { .vm_mm = mm };
        pte_t orig_pte = huge_ptep_get(ptep);
        bool valid = pte_valid(orig_pte);
        unsigned long i, saddr = addr;
@@ -125,8 +124,10 @@ static pte_t get_clear_flush(struct mm_struct *mm,
                        orig_pte = pte_mkdirty(orig_pte);
        }
 
-       if (valid)
+       if (valid) {
+               struct vm_area_struct vma = TLB_FLUSH_VMA(mm, 0);
                flush_tlb_range(&vma, saddr, addr);
+       }
        return orig_pte;
 }
 
@@ -145,7 +146,7 @@ static void clear_flush(struct mm_struct *mm,
                             unsigned long pgsize,
                             unsigned long ncontig)
 {
-       struct vm_area_struct vma = { .vm_mm = mm };
+       struct vm_area_struct vma = TLB_FLUSH_VMA(mm, 0);
        unsigned long i, saddr = addr;
 
        for (i = 0; i < ncontig; i++, addr += pgsize, ptep++)
index 325cfb3..9abf8a1 100644 (file)
@@ -611,11 +611,13 @@ void __init mem_init(void)
        BUILD_BUG_ON(TASK_SIZE_32                       > TASK_SIZE_64);
 #endif
 
+#ifdef CONFIG_SPARSEMEM_VMEMMAP
        /*
         * Make sure we chose the upper bound of sizeof(struct page)
-        * correctly.
+        * correctly when sizing the VMEMMAP array.
         */
        BUILD_BUG_ON(sizeof(struct page) > (1 << STRUCT_PAGE_MAX_SHIFT));
+#endif
 
        if (PAGE_SIZE >= 16384 && get_num_physpages() <= 128) {
                extern int sysctl_overcommit_memory;
index 44f0ac0..516355a 100644 (file)
@@ -115,12 +115,11 @@ ia64_tlb_flush_mmu_tlbonly(struct mmu_gather *tlb, unsigned long start, unsigned
                flush_tlb_all();
        } else {
                /*
-                * XXX fix me: flush_tlb_range() should take an mm pointer instead of a
-                * vma pointer.
+                * flush_tlb_range() takes a vma instead of a mm pointer because
+                * some architectures want the vm_flags for ITLB/DTLB flush.
                 */
-               struct vm_area_struct vma;
+               struct vm_area_struct vma = TLB_FLUSH_VMA(tlb->mm, 0);
 
-               vma.vm_mm = tlb->mm;
                /* flush the address range from the tlb: */
                flush_tlb_range(&vma, start, end);
                /* now flush the virt. page-table area mapping the address range: */
index bdb14a3..3b85c3e 100644 (file)
@@ -116,6 +116,7 @@ ia64_init_addr_space (void)
         */
        vma = vm_area_alloc(current->mm);
        if (vma) {
+               vma_set_anonymous(vma);
                vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
                vma->vm_end = vma->vm_start + PAGE_SIZE;
                vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
@@ -133,6 +134,7 @@ ia64_init_addr_space (void)
        if (!(current->personality & MMAP_PAGE_ZERO)) {
                vma = vm_area_alloc(current->mm);
                if (vma) {
+                       vma_set_anonymous(vma);
                        vma->vm_end = PAGE_SIZE;
                        vma->vm_page_prot = __pgprot(pgprot_val(PAGE_READONLY) | _PAGE_MA_NAT);
                        vma->vm_flags = VM_READ | VM_MAYREAD | VM_IO |
@@ -273,7 +275,7 @@ static struct vm_area_struct gate_vma;
 
 static int __init gate_vma_init(void)
 {
-       gate_vma.vm_mm = NULL;
+       vma_init(&gate_vma, NULL);
        gate_vma.vm_start = FIXADDR_USER_START;
        gate_vma.vm_end = FIXADDR_USER_END;
        gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
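
Note: both helpers come from the mm side of this series.  The point is
that a vma must never look anonymous (NULL ->vm_ops) by accident, so
initialization installs dummy ops and anonymous vmas opt in explicitly.
Roughly, per include/linux/mm.h of this series:

	static inline void vma_init(struct vm_area_struct *vma,
				    struct mm_struct *mm)
	{
		static const struct vm_operations_struct dummy_vm_ops = {};

		vma->vm_mm = mm;
		vma->vm_ops = &dummy_vm_ops;	/* not anonymous by default */
		INIT_LIST_HEAD(&vma->anon_vma_chain);
	}

	static inline void vma_set_anonymous(struct vm_area_struct *vma)
	{
		vma->vm_ops = NULL;		/* vma_is_anonymous() == true */
	}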
index 785612b..b29f937 100644 (file)
@@ -2,6 +2,7 @@
 config M68K
        bool
        default y
+       select ARCH_HAS_SYNC_DMA_FOR_DEVICE if HAS_DMA
        select ARCH_MIGHT_HAVE_PC_PARPORT if ISA
        select ARCH_NO_COHERENT_DMA_MMAP if !MMU
        select HAVE_IDE
@@ -24,6 +25,10 @@ config M68K
        select MODULES_USE_ELF_RELA
        select OLD_SIGSUSPEND3
        select OLD_SIGACTION
+       select DMA_NONCOHERENT_OPS if HAS_DMA
+       select HAVE_MEMBLOCK
+       select ARCH_DISCARD_MEMBLOCK
+       select NO_BOOTMEM
 
 config CPU_BIG_ENDIAN
        def_bool y
index b2a6bc6..aef8d42 100644 (file)
@@ -31,7 +31,6 @@ extern void dn_sched_init(irq_handler_t handler);
 extern void dn_init_IRQ(void);
 extern u32 dn_gettimeoffset(void);
 extern int dn_dummy_hwclk(int, struct rtc_time *);
-extern int dn_dummy_set_clock_mmss(unsigned long);
 extern void dn_dummy_reset(void);
 #ifdef CONFIG_HEARTBEAT
 static void dn_heartbeat(int on);
@@ -156,7 +155,6 @@ void __init config_apollo(void)
        arch_gettimeoffset   = dn_gettimeoffset;
        mach_max_dma_address = 0xffffffff;
        mach_hwclk           = dn_dummy_hwclk; /* */
-       mach_set_clock_mmss  = dn_dummy_set_clock_mmss; /* */
        mach_reset           = dn_dummy_reset;  /* */
 #ifdef CONFIG_HEARTBEAT
        mach_heartbeat = dn_heartbeat;
@@ -240,12 +238,6 @@ int dn_dummy_hwclk(int op, struct rtc_time *t) {
 
 }
 
-int dn_dummy_set_clock_mmss(unsigned long nowtime)
-{
-       pr_info("set_clock_mmss\n");
-       return 0;
-}
-
 void dn_dummy_reset(void) {
 
   dn_serial_print("The end !\n");
index 565c6f0..bd96702 100644 (file)
@@ -81,9 +81,6 @@ extern void atari_sched_init(irq_handler_t);
 extern u32 atari_gettimeoffset(void);
 extern int atari_mste_hwclk (int, struct rtc_time *);
 extern int atari_tt_hwclk (int, struct rtc_time *);
-extern int atari_mste_set_clock_mmss (unsigned long);
-extern int atari_tt_set_clock_mmss (unsigned long);
-
 
 /* ++roman: This is a more elaborate test for an SCC chip, since the plain
  * Medusa board generates DTACK at the SCC's standard addresses, but a SCC
@@ -362,13 +359,11 @@ void __init config_atari(void)
                ATARIHW_SET(TT_CLK);
                pr_cont(" TT_CLK");
                mach_hwclk = atari_tt_hwclk;
-               mach_set_clock_mmss = atari_tt_set_clock_mmss;
        }
        if (hwreg_present(&mste_rtc.sec_ones)) {
                ATARIHW_SET(MSTE_CLK);
                pr_cont(" MSTE_CLK");
                mach_hwclk = atari_mste_hwclk;
-               mach_set_clock_mmss = atari_mste_set_clock_mmss;
        }
        if (!MACH_IS_MEDUSA && hwreg_present(&dma_wd.fdc_speed) &&
            hwreg_write(&dma_wd.fdc_speed, 0)) {
index c549b48..9cca642 100644 (file)
@@ -285,69 +285,6 @@ int atari_tt_hwclk( int op, struct rtc_time *t )
     return( 0 );
 }
 
-
-int atari_mste_set_clock_mmss (unsigned long nowtime)
-{
-    short real_seconds = nowtime % 60, real_minutes = (nowtime / 60) % 60;
-    struct MSTE_RTC val;
-    unsigned char rtc_minutes;
-
-    mste_read(&val);
-    rtc_minutes= val.min_ones + val.min_tens * 10;
-    if ((rtc_minutes < real_minutes
-         ? real_minutes - rtc_minutes
-         : rtc_minutes - real_minutes) < 30)
-    {
-        val.sec_ones = real_seconds % 10;
-        val.sec_tens = real_seconds / 10;
-        val.min_ones = real_minutes % 10;
-        val.min_tens = real_minutes / 10;
-        mste_write(&val);
-    }
-    else
-        return -1;
-    return 0;
-}
-
-int atari_tt_set_clock_mmss (unsigned long nowtime)
-{
-    int retval = 0;
-    short real_seconds = nowtime % 60, real_minutes = (nowtime / 60) % 60;
-    unsigned char save_control, save_freq_select, rtc_minutes;
-
-    save_control = RTC_READ (RTC_CONTROL); /* tell the clock it's being set */
-    RTC_WRITE (RTC_CONTROL, save_control | RTC_SET);
-
-    save_freq_select = RTC_READ (RTC_FREQ_SELECT); /* stop and reset prescaler */
-    RTC_WRITE (RTC_FREQ_SELECT, save_freq_select | RTC_DIV_RESET2);
-
-    rtc_minutes = RTC_READ (RTC_MINUTES);
-    if (!(save_control & RTC_DM_BINARY))
-       rtc_minutes = bcd2bin(rtc_minutes);
-
-    /* Since we're only adjusting minutes and seconds, don't interfere
-       with hour overflow.  This avoids messing with unknown time zones
-       but requires your RTC not to be off by more than 30 minutes.  */
-    if ((rtc_minutes < real_minutes
-         ? real_minutes - rtc_minutes
-         : rtc_minutes - real_minutes) < 30)
-        {
-            if (!(save_control & RTC_DM_BINARY))
-                {
-                   real_seconds = bin2bcd(real_seconds);
-                   real_minutes = bin2bcd(real_minutes);
-                }
-            RTC_WRITE (RTC_SECONDS, real_seconds);
-            RTC_WRITE (RTC_MINUTES, real_minutes);
-        }
-    else
-        retval = -1;
-
-    RTC_WRITE (RTC_FREQ_SELECT, save_freq_select);
-    RTC_WRITE (RTC_CONTROL, save_control);
-    return retval;
-}
-
 /*
  * Local variables:
  *  c-indent-level: 4
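
Both removed Atari helpers follow the same contract, spelled out in the deleted comment: only the minute and second registers are written, and the update is refused when the RTC disagrees with wall time by 30 minutes or more, so the hour field and unknown time zones are never disturbed. A minimal sketch of that shared guard (set_mmss_guarded is a made-up name, not from the kernel):

    static int set_mmss_guarded(unsigned long nowtime, unsigned int rtc_minutes)
    {
            unsigned int real_seconds = nowtime % 60;
            unsigned int real_minutes = (nowtime / 60) % 60;
            unsigned int delta = rtc_minutes < real_minutes ?
                                 real_minutes - rtc_minutes :
                                 rtc_minutes - real_minutes;

            if (delta >= 30)
                    return -1;      /* too far off: leave the hours alone */

            /* a real caller would write real_minutes/real_seconds to the RTC here */
            return 0;
    }
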
index 2cfff47..143ee9f 100644
@@ -41,7 +41,6 @@ static void bvme6000_get_model(char *model);
 extern void bvme6000_sched_init(irq_handler_t handler);
 extern u32 bvme6000_gettimeoffset(void);
 extern int bvme6000_hwclk (int, struct rtc_time *);
-extern int bvme6000_set_clock_mmss (unsigned long);
 extern void bvme6000_reset (void);
 void bvme6000_set_vectors (void);
 
@@ -113,7 +112,6 @@ void __init config_bvme6000(void)
     mach_init_IRQ        = bvme6000_init_IRQ;
     arch_gettimeoffset   = bvme6000_gettimeoffset;
     mach_hwclk           = bvme6000_hwclk;
-    mach_set_clock_mmss         = bvme6000_set_clock_mmss;
     mach_reset          = bvme6000_reset;
     mach_get_model       = bvme6000_get_model;
 
@@ -305,46 +303,3 @@ int bvme6000_hwclk(int op, struct rtc_time *t)
 
        return 0;
 }
-
-/*
- * Set the minutes and seconds from seconds value 'nowtime'.  Fail if
- * clock is out by > 30 minutes.  Logic lifted from atari code.
- * Algorithm is to wait for the 10ms register to change, and then to
- * wait a short while, and then set it.
- */
-
-int bvme6000_set_clock_mmss (unsigned long nowtime)
-{
-       int retval = 0;
-       short real_seconds = nowtime % 60, real_minutes = (nowtime / 60) % 60;
-       unsigned char rtc_minutes, rtc_tenms;
-       volatile RtcPtr_t rtc = (RtcPtr_t)BVME_RTC_BASE;
-       unsigned char msr = rtc->msr & 0xc0;
-       unsigned long flags;
-       volatile int i;
-
-       rtc->msr = 0;           /* Ensure clock accessible */
-       rtc_minutes = bcd2bin (rtc->bcd_min);
-
-       if ((rtc_minutes < real_minutes
-               ? real_minutes - rtc_minutes
-                       : rtc_minutes - real_minutes) < 30)
-       {
-               local_irq_save(flags);
-               rtc_tenms = rtc->bcd_tenms;
-               while (rtc_tenms == rtc->bcd_tenms)
-                       ;
-               for (i = 0; i < 1000; i++)
-                       ;
-               rtc->bcd_min = bin2bcd(real_minutes);
-               rtc->bcd_sec = bin2bcd(real_seconds);
-               local_irq_restore(flags);
-       }
-       else
-               retval = -1;
-
-       rtc->msr = msr;
-
-       return retval;
-}
-
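
A second detail shared by these removed helpers is that the RTC registers hold BCD digits, hence the bcd2bin()/bin2bcd() conversions from <linux/bcd.h> around each register access (the MSTE variant instead keeps the ones and tens digits in separate registers). A quick worked example of the encoding:

    #include <linux/bcd.h>

    static void bcd_example(void)   /* illustrative only */
    {
            u8 raw = bin2bcd(59);              /* 59 decimal -> 0x59: one digit per nibble */
            unsigned int mins = bcd2bin(0x27); /* 0x27 -> 27 decimal */

            (void)raw;
            (void)mins;
    }
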
index a874e54..1d5483f 100644
@@ -52,6 +52,7 @@ CONFIG_UNIX_DIAG=m
 CONFIG_TLS=m
 CONFIG_XFRM_MIGRATE=y
 CONFIG_NET_KEY=y
+CONFIG_XDP_SOCKETS=y
 CONFIG_INET=y
 CONFIG_IP_PNP=y
 CONFIG_IP_PNP_DHCP=y
@@ -98,18 +99,14 @@ CONFIG_NF_CONNTRACK_SANE=m
 CONFIG_NF_CONNTRACK_SIP=m
 CONFIG_NF_CONNTRACK_TFTP=m
 CONFIG_NF_TABLES=m
+CONFIG_NF_TABLES_SET=m
 CONFIG_NF_TABLES_INET=y
 CONFIG_NF_TABLES_NETDEV=y
-CONFIG_NFT_EXTHDR=m
-CONFIG_NFT_META=m
-CONFIG_NFT_RT=m
 CONFIG_NFT_NUMGEN=m
 CONFIG_NFT_CT=m
 CONFIG_NFT_FLOW_OFFLOAD=m
-CONFIG_NFT_SET_RBTREE=m
-CONFIG_NFT_SET_HASH=m
-CONFIG_NFT_SET_BITMAP=m
 CONFIG_NFT_COUNTER=m
+CONFIG_NFT_CONNLIMIT=m
 CONFIG_NFT_LOG=m
 CONFIG_NFT_LIMIT=m
 CONFIG_NFT_MASQ=m
@@ -122,6 +119,7 @@ CONFIG_NFT_REJECT=m
 CONFIG_NFT_COMPAT=m
 CONFIG_NFT_HASH=m
 CONFIG_NFT_FIB_INET=m
+CONFIG_NFT_SOCKET=m
 CONFIG_NFT_DUP_NETDEV=m
 CONFIG_NFT_FWD_NETDEV=m
 CONFIG_NFT_FIB_NETDEV=m
@@ -200,7 +198,6 @@ CONFIG_IP_SET_HASH_NETPORT=m
 CONFIG_IP_SET_HASH_NETIFACE=m
 CONFIG_IP_SET_LIST_SET=m
 CONFIG_NF_CONNTRACK_IPV4=m
-CONFIG_NF_SOCKET_IPV4=m
 CONFIG_NFT_CHAIN_ROUTE_IPV4=m
 CONFIG_NFT_DUP_IPV4=m
 CONFIG_NFT_FIB_IPV4=m
@@ -231,7 +228,6 @@ CONFIG_IP_NF_ARPTABLES=m
 CONFIG_IP_NF_ARPFILTER=m
 CONFIG_IP_NF_ARP_MANGLE=m
 CONFIG_NF_CONNTRACK_IPV6=m
-CONFIG_NF_SOCKET_IPV6=m
 CONFIG_NFT_CHAIN_ROUTE_IPV6=m
 CONFIG_NFT_CHAIN_NAT_IPV6=m
 CONFIG_NFT_MASQ_IPV6=m
@@ -260,7 +256,6 @@ CONFIG_IP6_NF_NAT=m
 CONFIG_IP6_NF_TARGET_MASQUERADE=m
 CONFIG_IP6_NF_TARGET_NPT=m
 CONFIG_NF_TABLES_BRIDGE=y
-CONFIG_NFT_BRIDGE_META=m
 CONFIG_NFT_BRIDGE_REJECT=m
 CONFIG_NF_LOG_BRIDGE=m
 CONFIG_BRIDGE_NF_EBTABLES=m
@@ -301,6 +296,7 @@ CONFIG_6LOWPAN_GHC_EXT_HDR_FRAG=m
 CONFIG_6LOWPAN_GHC_EXT_HDR_ROUTE=m
 CONFIG_DNS_RESOLVER=y
 CONFIG_BATMAN_ADV=m
+# CONFIG_BATMAN_ADV_BATMAN_V is not set
 CONFIG_BATMAN_ADV_DAT=y
 CONFIG_BATMAN_ADV_NC=y
 CONFIG_BATMAN_ADV_MCAST=y
@@ -356,6 +352,7 @@ CONFIG_A2091_SCSI=y
 CONFIG_GVP11_SCSI=y
 CONFIG_SCSI_A4000T=y
 CONFIG_SCSI_ZORRO7XX=y
+CONFIG_SCSI_ZORRO_ESP=y
 CONFIG_MD=y
 CONFIG_MD_LINEAR=m
 CONFIG_BLK_DEV_DM=m
@@ -363,6 +360,7 @@ CONFIG_DM_UNSTRIPED=m
 CONFIG_DM_CRYPT=m
 CONFIG_DM_SNAPSHOT=m
 CONFIG_DM_THIN_PROVISIONING=m
+CONFIG_DM_WRITECACHE=m
 CONFIG_DM_ERA=m
 CONFIG_DM_MIRROR=m
 CONFIG_DM_RAID=m
@@ -402,8 +400,8 @@ CONFIG_A2065=y
 CONFIG_ARIADNE=y
 # CONFIG_NET_VENDOR_AQUANTIA is not set
 # CONFIG_NET_VENDOR_ARC is not set
-# CONFIG_NET_CADENCE is not set
 # CONFIG_NET_VENDOR_BROADCOM is not set
+# CONFIG_NET_CADENCE is not set
 # CONFIG_NET_VENDOR_CIRRUS is not set
 # CONFIG_NET_VENDOR_CORTINA is not set
 # CONFIG_NET_VENDOR_EZCHIP is not set
@@ -412,8 +410,10 @@ CONFIG_ARIADNE=y
 # CONFIG_NET_VENDOR_INTEL is not set
 # CONFIG_NET_VENDOR_MARVELL is not set
 # CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_MICROSEMI is not set
 # CONFIG_NET_VENDOR_NETRONOME is not set
 # CONFIG_NET_VENDOR_NI is not set
+CONFIG_XSURF100=y
 CONFIG_HYDRA=y
 CONFIG_APNE=y
 CONFIG_ZORRO8390=y
@@ -426,9 +426,9 @@ CONFIG_ZORRO8390=y
 # CONFIG_NET_VENDOR_SMSC is not set
 # CONFIG_NET_VENDOR_SOCIONEXT is not set
 # CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_SYNOPSYS is not set
 # CONFIG_NET_VENDOR_VIA is not set
 # CONFIG_NET_VENDOR_WIZNET is not set
-# CONFIG_NET_VENDOR_SYNOPSYS is not set
 CONFIG_PPP=m
 CONFIG_PPP_BSDCOMP=m
 CONFIG_PPP_DEFLATE=m
@@ -478,6 +478,7 @@ CONFIG_HIDRAW=y
 CONFIG_UHID=m
 # CONFIG_HID_GENERIC is not set
 # CONFIG_HID_ITE is not set
+# CONFIG_HID_REDRAGON is not set
 # CONFIG_USB_SUPPORT is not set
 CONFIG_RTC_CLASS=y
 # CONFIG_RTC_NVMEM is not set
@@ -499,7 +500,7 @@ CONFIG_FS_ENCRYPTION=m
 CONFIG_FANOTIFY=y
 CONFIG_QUOTA_NETLINK_INTERFACE=y
 # CONFIG_PRINT_QUOTA_WARNING is not set
-CONFIG_AUTOFS4_FS=m
+CONFIG_AUTOFS_FS=m
 CONFIG_FUSE_FS=m
 CONFIG_CUSE=m
 CONFIG_OVERLAY_FS=m
@@ -600,6 +601,7 @@ CONFIG_TEST_KSTRTOX=m
 CONFIG_TEST_PRINTF=m
 CONFIG_TEST_BITMAP=m
 CONFIG_TEST_UUID=m
+CONFIG_TEST_OVERFLOW=m
 CONFIG_TEST_RHASHTABLE=m
 CONFIG_TEST_HASH=m
 CONFIG_TEST_USER_COPY=m
@@ -622,6 +624,11 @@ CONFIG_CRYPTO_CRYPTD=m
 CONFIG_CRYPTO_MCRYPTD=m
 CONFIG_CRYPTO_TEST=m
 CONFIG_CRYPTO_CHACHA20POLY1305=m
+CONFIG_CRYPTO_AEGIS128=m
+CONFIG_CRYPTO_AEGIS128L=m
+CONFIG_CRYPTO_AEGIS256=m
+CONFIG_CRYPTO_MORUS640=m
+CONFIG_CRYPTO_MORUS1280=m
 CONFIG_CRYPTO_CFB=m
 CONFIG_CRYPTO_LRW=m
 CONFIG_CRYPTO_PCBC=m
@@ -657,6 +664,7 @@ CONFIG_CRYPTO_LZO=m
 CONFIG_CRYPTO_842=m
 CONFIG_CRYPTO_LZ4=m
 CONFIG_CRYPTO_LZ4HC=m
+CONFIG_CRYPTO_ZSTD=m
 CONFIG_CRYPTO_ANSI_CPRNG=m
 CONFIG_CRYPTO_DRBG_HASH=y
 CONFIG_CRYPTO_DRBG_CTR=y
index 8ce39e2..52a0af1 100644
@@ -50,6 +50,7 @@ CONFIG_UNIX_DIAG=m
 CONFIG_TLS=m
 CONFIG_XFRM_MIGRATE=y
 CONFIG_NET_KEY=y
+CONFIG_XDP_SOCKETS=y
 CONFIG_INET=y
 CONFIG_IP_PNP=y
 CONFIG_IP_PNP_DHCP=y
@@ -96,18 +97,14 @@ CONFIG_NF_CONNTRACK_SANE=m
 CONFIG_NF_CONNTRACK_SIP=m
 CONFIG_NF_CONNTRACK_TFTP=m
 CONFIG_NF_TABLES=m
+CONFIG_NF_TABLES_SET=m
 CONFIG_NF_TABLES_INET=y
 CONFIG_NF_TABLES_NETDEV=y
-CONFIG_NFT_EXTHDR=m
-CONFIG_NFT_META=m
-CONFIG_NFT_RT=m
 CONFIG_NFT_NUMGEN=m
 CONFIG_NFT_CT=m
 CONFIG_NFT_FLOW_OFFLOAD=m
-CONFIG_NFT_SET_RBTREE=m
-CONFIG_NFT_SET_HASH=m
-CONFIG_NFT_SET_BITMAP=m
 CONFIG_NFT_COUNTER=m
+CONFIG_NFT_CONNLIMIT=m
 CONFIG_NFT_LOG=m
 CONFIG_NFT_LIMIT=m
 CONFIG_NFT_MASQ=m
@@ -120,6 +117,7 @@ CONFIG_NFT_REJECT=m
 CONFIG_NFT_COMPAT=m
 CONFIG_NFT_HASH=m
 CONFIG_NFT_FIB_INET=m
+CONFIG_NFT_SOCKET=m
 CONFIG_NFT_DUP_NETDEV=m
 CONFIG_NFT_FWD_NETDEV=m
 CONFIG_NFT_FIB_NETDEV=m
@@ -198,7 +196,6 @@ CONFIG_IP_SET_HASH_NETPORT=m
 CONFIG_IP_SET_HASH_NETIFACE=m
 CONFIG_IP_SET_LIST_SET=m
 CONFIG_NF_CONNTRACK_IPV4=m
-CONFIG_NF_SOCKET_IPV4=m
 CONFIG_NFT_CHAIN_ROUTE_IPV4=m
 CONFIG_NFT_DUP_IPV4=m
 CONFIG_NFT_FIB_IPV4=m
@@ -229,7 +226,6 @@ CONFIG_IP_NF_ARPTABLES=m
 CONFIG_IP_NF_ARPFILTER=m
 CONFIG_IP_NF_ARP_MANGLE=m
 CONFIG_NF_CONNTRACK_IPV6=m
-CONFIG_NF_SOCKET_IPV6=m
 CONFIG_NFT_CHAIN_ROUTE_IPV6=m
 CONFIG_NFT_CHAIN_NAT_IPV6=m
 CONFIG_NFT_MASQ_IPV6=m
@@ -258,7 +254,6 @@ CONFIG_IP6_NF_NAT=m
 CONFIG_IP6_NF_TARGET_MASQUERADE=m
 CONFIG_IP6_NF_TARGET_NPT=m
 CONFIG_NF_TABLES_BRIDGE=y
-CONFIG_NFT_BRIDGE_META=m
 CONFIG_NFT_BRIDGE_REJECT=m
 CONFIG_NF_LOG_BRIDGE=m
 CONFIG_BRIDGE_NF_EBTABLES=m
@@ -299,6 +294,7 @@ CONFIG_6LOWPAN_GHC_EXT_HDR_FRAG=m
 CONFIG_6LOWPAN_GHC_EXT_HDR_ROUTE=m
 CONFIG_DNS_RESOLVER=y
 CONFIG_BATMAN_ADV=m
+# CONFIG_BATMAN_ADV_BATMAN_V is not set
 CONFIG_BATMAN_ADV_DAT=y
 CONFIG_BATMAN_ADV_NC=y
 CONFIG_BATMAN_ADV_MCAST=y
@@ -345,6 +341,7 @@ CONFIG_DM_UNSTRIPED=m
 CONFIG_DM_CRYPT=m
 CONFIG_DM_SNAPSHOT=m
 CONFIG_DM_THIN_PROVISIONING=m
+CONFIG_DM_WRITECACHE=m
 CONFIG_DM_ERA=m
 CONFIG_DM_MIRROR=m
 CONFIG_DM_RAID=m
@@ -381,14 +378,15 @@ CONFIG_VETH=m
 # CONFIG_NET_VENDOR_AMAZON is not set
 # CONFIG_NET_VENDOR_AQUANTIA is not set
 # CONFIG_NET_VENDOR_ARC is not set
-# CONFIG_NET_CADENCE is not set
 # CONFIG_NET_VENDOR_BROADCOM is not set
+# CONFIG_NET_CADENCE is not set
 # CONFIG_NET_VENDOR_CORTINA is not set
 # CONFIG_NET_VENDOR_EZCHIP is not set
 # CONFIG_NET_VENDOR_HUAWEI is not set
 # CONFIG_NET_VENDOR_INTEL is not set
 # CONFIG_NET_VENDOR_MARVELL is not set
 # CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_MICROSEMI is not set
 # CONFIG_NET_VENDOR_NATSEMI is not set
 # CONFIG_NET_VENDOR_NETRONOME is not set
 # CONFIG_NET_VENDOR_NI is not set
@@ -400,9 +398,9 @@ CONFIG_VETH=m
 # CONFIG_NET_VENDOR_SOLARFLARE is not set
 # CONFIG_NET_VENDOR_SOCIONEXT is not set
 # CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_SYNOPSYS is not set
 # CONFIG_NET_VENDOR_VIA is not set
 # CONFIG_NET_VENDOR_WIZNET is not set
-# CONFIG_NET_VENDOR_SYNOPSYS is not set
 CONFIG_PPP=m
 CONFIG_PPP_BSDCOMP=m
 CONFIG_PPP_DEFLATE=m
@@ -440,6 +438,7 @@ CONFIG_HIDRAW=y
 CONFIG_UHID=m
 # CONFIG_HID_GENERIC is not set
 # CONFIG_HID_ITE is not set
+# CONFIG_HID_REDRAGON is not set
 # CONFIG_USB_SUPPORT is not set
 CONFIG_RTC_CLASS=y
 # CONFIG_RTC_NVMEM is not set
@@ -458,7 +457,7 @@ CONFIG_FS_ENCRYPTION=m
 CONFIG_FANOTIFY=y
 CONFIG_QUOTA_NETLINK_INTERFACE=y
 # CONFIG_PRINT_QUOTA_WARNING is not set
-CONFIG_AUTOFS4_FS=m
+CONFIG_AUTOFS_FS=m
 CONFIG_FUSE_FS=m
 CONFIG_CUSE=m
 CONFIG_OVERLAY_FS=m
@@ -559,6 +558,7 @@ CONFIG_TEST_KSTRTOX=m
 CONFIG_TEST_PRINTF=m
 CONFIG_TEST_BITMAP=m
 CONFIG_TEST_UUID=m
+CONFIG_TEST_OVERFLOW=m
 CONFIG_TEST_RHASHTABLE=m
 CONFIG_TEST_HASH=m
 CONFIG_TEST_USER_COPY=m
@@ -581,6 +581,11 @@ CONFIG_CRYPTO_CRYPTD=m
 CONFIG_CRYPTO_MCRYPTD=m
 CONFIG_CRYPTO_TEST=m
 CONFIG_CRYPTO_CHACHA20POLY1305=m
+CONFIG_CRYPTO_AEGIS128=m
+CONFIG_CRYPTO_AEGIS128L=m
+CONFIG_CRYPTO_AEGIS256=m
+CONFIG_CRYPTO_MORUS640=m
+CONFIG_CRYPTO_MORUS1280=m
 CONFIG_CRYPTO_CFB=m
 CONFIG_CRYPTO_LRW=m
 CONFIG_CRYPTO_PCBC=m
@@ -616,6 +621,7 @@ CONFIG_CRYPTO_LZO=m
 CONFIG_CRYPTO_842=m
 CONFIG_CRYPTO_LZ4=m
 CONFIG_CRYPTO_LZ4HC=m
+CONFIG_CRYPTO_ZSTD=m
 CONFIG_CRYPTO_ANSI_CPRNG=m
 CONFIG_CRYPTO_DRBG_HASH=y
 CONFIG_CRYPTO_DRBG_CTR=y
index 346c4e7..b3103e5 100644
@@ -50,6 +50,7 @@ CONFIG_UNIX_DIAG=m
 CONFIG_TLS=m
 CONFIG_XFRM_MIGRATE=y
 CONFIG_NET_KEY=y
+CONFIG_XDP_SOCKETS=y
 CONFIG_INET=y
 CONFIG_IP_PNP=y
 CONFIG_IP_PNP_DHCP=y
@@ -96,18 +97,14 @@ CONFIG_NF_CONNTRACK_SANE=m
 CONFIG_NF_CONNTRACK_SIP=m
 CONFIG_NF_CONNTRACK_TFTP=m
 CONFIG_NF_TABLES=m
+CONFIG_NF_TABLES_SET=m
 CONFIG_NF_TABLES_INET=y
 CONFIG_NF_TABLES_NETDEV=y
-CONFIG_NFT_EXTHDR=m
-CONFIG_NFT_META=m
-CONFIG_NFT_RT=m
 CONFIG_NFT_NUMGEN=m
 CONFIG_NFT_CT=m
 CONFIG_NFT_FLOW_OFFLOAD=m
-CONFIG_NFT_SET_RBTREE=m
-CONFIG_NFT_SET_HASH=m
-CONFIG_NFT_SET_BITMAP=m
 CONFIG_NFT_COUNTER=m
+CONFIG_NFT_CONNLIMIT=m
 CONFIG_NFT_LOG=m
 CONFIG_NFT_LIMIT=m
 CONFIG_NFT_MASQ=m
@@ -120,6 +117,7 @@ CONFIG_NFT_REJECT=m
 CONFIG_NFT_COMPAT=m
 CONFIG_NFT_HASH=m
 CONFIG_NFT_FIB_INET=m
+CONFIG_NFT_SOCKET=m
 CONFIG_NFT_DUP_NETDEV=m
 CONFIG_NFT_FWD_NETDEV=m
 CONFIG_NFT_FIB_NETDEV=m
@@ -198,7 +196,6 @@ CONFIG_IP_SET_HASH_NETPORT=m
 CONFIG_IP_SET_HASH_NETIFACE=m
 CONFIG_IP_SET_LIST_SET=m
 CONFIG_NF_CONNTRACK_IPV4=m
-CONFIG_NF_SOCKET_IPV4=m
 CONFIG_NFT_CHAIN_ROUTE_IPV4=m
 CONFIG_NFT_DUP_IPV4=m
 CONFIG_NFT_FIB_IPV4=m
@@ -229,7 +226,6 @@ CONFIG_IP_NF_ARPTABLES=m
 CONFIG_IP_NF_ARPFILTER=m
 CONFIG_IP_NF_ARP_MANGLE=m
 CONFIG_NF_CONNTRACK_IPV6=m
-CONFIG_NF_SOCKET_IPV6=m
 CONFIG_NFT_CHAIN_ROUTE_IPV6=m
 CONFIG_NFT_CHAIN_NAT_IPV6=m
 CONFIG_NFT_MASQ_IPV6=m
@@ -258,7 +254,6 @@ CONFIG_IP6_NF_NAT=m
 CONFIG_IP6_NF_TARGET_MASQUERADE=m
 CONFIG_IP6_NF_TARGET_NPT=m
 CONFIG_NF_TABLES_BRIDGE=y
-CONFIG_NFT_BRIDGE_META=m
 CONFIG_NFT_BRIDGE_REJECT=m
 CONFIG_NF_LOG_BRIDGE=m
 CONFIG_BRIDGE_NF_EBTABLES=m
@@ -299,6 +294,7 @@ CONFIG_6LOWPAN_GHC_EXT_HDR_FRAG=m
 CONFIG_6LOWPAN_GHC_EXT_HDR_ROUTE=m
 CONFIG_DNS_RESOLVER=y
 CONFIG_BATMAN_ADV=m
+# CONFIG_BATMAN_ADV_BATMAN_V is not set
 CONFIG_BATMAN_ADV_DAT=y
 CONFIG_BATMAN_ADV_NC=y
 CONFIG_BATMAN_ADV_MCAST=y
@@ -354,6 +350,7 @@ CONFIG_DM_UNSTRIPED=m
 CONFIG_DM_CRYPT=m
 CONFIG_DM_SNAPSHOT=m
 CONFIG_DM_THIN_PROVISIONING=m
+CONFIG_DM_WRITECACHE=m
 CONFIG_DM_ERA=m
 CONFIG_DM_MIRROR=m
 CONFIG_DM_RAID=m
@@ -391,14 +388,15 @@ CONFIG_VETH=m
 CONFIG_ATARILANCE=y
 # CONFIG_NET_VENDOR_AQUANTIA is not set
 # CONFIG_NET_VENDOR_ARC is not set
-# CONFIG_NET_CADENCE is not set
 # CONFIG_NET_VENDOR_BROADCOM is not set
+# CONFIG_NET_CADENCE is not set
 # CONFIG_NET_VENDOR_CORTINA is not set
 # CONFIG_NET_VENDOR_EZCHIP is not set
 # CONFIG_NET_VENDOR_HUAWEI is not set
 # CONFIG_NET_VENDOR_INTEL is not set
 # CONFIG_NET_VENDOR_MARVELL is not set
 # CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_MICROSEMI is not set
 # CONFIG_NET_VENDOR_NETRONOME is not set
 # CONFIG_NET_VENDOR_NI is not set
 CONFIG_NE2000=y
@@ -411,9 +409,9 @@ CONFIG_NE2000=y
 CONFIG_SMC91X=y
 # CONFIG_NET_VENDOR_SOCIONEXT is not set
 # CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_SYNOPSYS is not set
 # CONFIG_NET_VENDOR_VIA is not set
 # CONFIG_NET_VENDOR_WIZNET is not set
-# CONFIG_NET_VENDOR_SYNOPSYS is not set
 CONFIG_PPP=m
 CONFIG_PPP_BSDCOMP=m
 CONFIG_PPP_DEFLATE=m
@@ -480,7 +478,7 @@ CONFIG_FS_ENCRYPTION=m
 CONFIG_FANOTIFY=y
 CONFIG_QUOTA_NETLINK_INTERFACE=y
 # CONFIG_PRINT_QUOTA_WARNING is not set
-CONFIG_AUTOFS4_FS=m
+CONFIG_AUTOFS_FS=m
 CONFIG_FUSE_FS=m
 CONFIG_CUSE=m
 CONFIG_OVERLAY_FS=m
@@ -581,6 +579,7 @@ CONFIG_TEST_KSTRTOX=m
 CONFIG_TEST_PRINTF=m
 CONFIG_TEST_BITMAP=m
 CONFIG_TEST_UUID=m
+CONFIG_TEST_OVERFLOW=m
 CONFIG_TEST_RHASHTABLE=m
 CONFIG_TEST_HASH=m
 CONFIG_TEST_USER_COPY=m
@@ -603,6 +602,11 @@ CONFIG_CRYPTO_CRYPTD=m
 CONFIG_CRYPTO_MCRYPTD=m
 CONFIG_CRYPTO_TEST=m
 CONFIG_CRYPTO_CHACHA20POLY1305=m
+CONFIG_CRYPTO_AEGIS128=m
+CONFIG_CRYPTO_AEGIS128L=m
+CONFIG_CRYPTO_AEGIS256=m
+CONFIG_CRYPTO_MORUS640=m
+CONFIG_CRYPTO_MORUS1280=m
 CONFIG_CRYPTO_CFB=m
 CONFIG_CRYPTO_LRW=m
 CONFIG_CRYPTO_PCBC=m
@@ -638,6 +642,7 @@ CONFIG_CRYPTO_LZO=m
 CONFIG_CRYPTO_842=m
 CONFIG_CRYPTO_LZ4=m
 CONFIG_CRYPTO_LZ4HC=m
+CONFIG_CRYPTO_ZSTD=m
 CONFIG_CRYPTO_ANSI_CPRNG=m
 CONFIG_CRYPTO_DRBG_HASH=y
 CONFIG_CRYPTO_DRBG_CTR=y
index fca9c7a..fb7d651 100644
@@ -48,6 +48,7 @@ CONFIG_UNIX_DIAG=m
 CONFIG_TLS=m
 CONFIG_XFRM_MIGRATE=y
 CONFIG_NET_KEY=y
+CONFIG_XDP_SOCKETS=y
 CONFIG_INET=y
 CONFIG_IP_PNP=y
 CONFIG_IP_PNP_DHCP=y
@@ -94,18 +95,14 @@ CONFIG_NF_CONNTRACK_SANE=m
 CONFIG_NF_CONNTRACK_SIP=m
 CONFIG_NF_CONNTRACK_TFTP=m
 CONFIG_NF_TABLES=m
+CONFIG_NF_TABLES_SET=m
 CONFIG_NF_TABLES_INET=y
 CONFIG_NF_TABLES_NETDEV=y
-CONFIG_NFT_EXTHDR=m
-CONFIG_NFT_META=m
-CONFIG_NFT_RT=m
 CONFIG_NFT_NUMGEN=m
 CONFIG_NFT_CT=m
 CONFIG_NFT_FLOW_OFFLOAD=m
-CONFIG_NFT_SET_RBTREE=m
-CONFIG_NFT_SET_HASH=m
-CONFIG_NFT_SET_BITMAP=m
 CONFIG_NFT_COUNTER=m
+CONFIG_NFT_CONNLIMIT=m
 CONFIG_NFT_LOG=m
 CONFIG_NFT_LIMIT=m
 CONFIG_NFT_MASQ=m
@@ -118,6 +115,7 @@ CONFIG_NFT_REJECT=m
 CONFIG_NFT_COMPAT=m
 CONFIG_NFT_HASH=m
 CONFIG_NFT_FIB_INET=m
+CONFIG_NFT_SOCKET=m
 CONFIG_NFT_DUP_NETDEV=m
 CONFIG_NFT_FWD_NETDEV=m
 CONFIG_NFT_FIB_NETDEV=m
@@ -196,7 +194,6 @@ CONFIG_IP_SET_HASH_NETPORT=m
 CONFIG_IP_SET_HASH_NETIFACE=m
 CONFIG_IP_SET_LIST_SET=m
 CONFIG_NF_CONNTRACK_IPV4=m
-CONFIG_NF_SOCKET_IPV4=m
 CONFIG_NFT_CHAIN_ROUTE_IPV4=m
 CONFIG_NFT_DUP_IPV4=m
 CONFIG_NFT_FIB_IPV4=m
@@ -227,7 +224,6 @@ CONFIG_IP_NF_ARPTABLES=m
 CONFIG_IP_NF_ARPFILTER=m
 CONFIG_IP_NF_ARP_MANGLE=m
 CONFIG_NF_CONNTRACK_IPV6=m
-CONFIG_NF_SOCKET_IPV6=m
 CONFIG_NFT_CHAIN_ROUTE_IPV6=m
 CONFIG_NFT_CHAIN_NAT_IPV6=m
 CONFIG_NFT_MASQ_IPV6=m
@@ -256,7 +252,6 @@ CONFIG_IP6_NF_NAT=m
 CONFIG_IP6_NF_TARGET_MASQUERADE=m
 CONFIG_IP6_NF_TARGET_NPT=m
 CONFIG_NF_TABLES_BRIDGE=y
-CONFIG_NFT_BRIDGE_META=m
 CONFIG_NFT_BRIDGE_REJECT=m
 CONFIG_NF_LOG_BRIDGE=m
 CONFIG_BRIDGE_NF_EBTABLES=m
@@ -297,6 +292,7 @@ CONFIG_6LOWPAN_GHC_EXT_HDR_FRAG=m
 CONFIG_6LOWPAN_GHC_EXT_HDR_ROUTE=m
 CONFIG_DNS_RESOLVER=y
 CONFIG_BATMAN_ADV=m
+# CONFIG_BATMAN_ADV_BATMAN_V is not set
 CONFIG_BATMAN_ADV_DAT=y
 CONFIG_BATMAN_ADV_NC=y
 CONFIG_BATMAN_ADV_MCAST=y
@@ -344,6 +340,7 @@ CONFIG_DM_UNSTRIPED=m
 CONFIG_DM_CRYPT=m
 CONFIG_DM_SNAPSHOT=m
 CONFIG_DM_THIN_PROVISIONING=m
+CONFIG_DM_WRITECACHE=m
 CONFIG_DM_ERA=m
 CONFIG_DM_MIRROR=m
 CONFIG_DM_RAID=m
@@ -380,14 +377,15 @@ CONFIG_VETH=m
 # CONFIG_NET_VENDOR_AMAZON is not set
 # CONFIG_NET_VENDOR_AQUANTIA is not set
 # CONFIG_NET_VENDOR_ARC is not set
-# CONFIG_NET_CADENCE is not set
 # CONFIG_NET_VENDOR_BROADCOM is not set
+# CONFIG_NET_CADENCE is not set
 # CONFIG_NET_VENDOR_CORTINA is not set
 # CONFIG_NET_VENDOR_EZCHIP is not set
 # CONFIG_NET_VENDOR_HUAWEI is not set
 CONFIG_BVME6000_NET=y
 # CONFIG_NET_VENDOR_MARVELL is not set
 # CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_MICROSEMI is not set
 # CONFIG_NET_VENDOR_NATSEMI is not set
 # CONFIG_NET_VENDOR_NETRONOME is not set
 # CONFIG_NET_VENDOR_NI is not set
@@ -399,9 +397,9 @@ CONFIG_BVME6000_NET=y
 # CONFIG_NET_VENDOR_SOLARFLARE is not set
 # CONFIG_NET_VENDOR_SOCIONEXT is not set
 # CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_SYNOPSYS is not set
 # CONFIG_NET_VENDOR_VIA is not set
 # CONFIG_NET_VENDOR_WIZNET is not set
-# CONFIG_NET_VENDOR_SYNOPSYS is not set
 CONFIG_PPP=m
 CONFIG_PPP_BSDCOMP=m
 CONFIG_PPP_DEFLATE=m
@@ -433,6 +431,7 @@ CONFIG_HIDRAW=y
 CONFIG_UHID=m
 # CONFIG_HID_GENERIC is not set
 # CONFIG_HID_ITE is not set
+# CONFIG_HID_REDRAGON is not set
 # CONFIG_USB_SUPPORT is not set
 CONFIG_RTC_CLASS=y
 # CONFIG_RTC_NVMEM is not set
@@ -450,7 +449,7 @@ CONFIG_FS_ENCRYPTION=m
 CONFIG_FANOTIFY=y
 CONFIG_QUOTA_NETLINK_INTERFACE=y
 # CONFIG_PRINT_QUOTA_WARNING is not set
-CONFIG_AUTOFS4_FS=m
+CONFIG_AUTOFS_FS=m
 CONFIG_FUSE_FS=m
 CONFIG_CUSE=m
 CONFIG_OVERLAY_FS=m
@@ -551,6 +550,7 @@ CONFIG_TEST_KSTRTOX=m
 CONFIG_TEST_PRINTF=m
 CONFIG_TEST_BITMAP=m
 CONFIG_TEST_UUID=m
+CONFIG_TEST_OVERFLOW=m
 CONFIG_TEST_RHASHTABLE=m
 CONFIG_TEST_HASH=m
 CONFIG_TEST_USER_COPY=m
@@ -573,6 +573,11 @@ CONFIG_CRYPTO_CRYPTD=m
 CONFIG_CRYPTO_MCRYPTD=m
 CONFIG_CRYPTO_TEST=m
 CONFIG_CRYPTO_CHACHA20POLY1305=m
+CONFIG_CRYPTO_AEGIS128=m
+CONFIG_CRYPTO_AEGIS128L=m
+CONFIG_CRYPTO_AEGIS256=m
+CONFIG_CRYPTO_MORUS640=m
+CONFIG_CRYPTO_MORUS1280=m
 CONFIG_CRYPTO_CFB=m
 CONFIG_CRYPTO_LRW=m
 CONFIG_CRYPTO_PCBC=m
@@ -608,6 +613,7 @@ CONFIG_CRYPTO_LZO=m
 CONFIG_CRYPTO_842=m
 CONFIG_CRYPTO_LZ4=m
 CONFIG_CRYPTO_LZ4HC=m
+CONFIG_CRYPTO_ZSTD=m
 CONFIG_CRYPTO_ANSI_CPRNG=m
 CONFIG_CRYPTO_DRBG_HASH=y
 CONFIG_CRYPTO_DRBG_CTR=y
index f9eab17..6b37f55 100644
@@ -50,6 +50,7 @@ CONFIG_UNIX_DIAG=m
 CONFIG_TLS=m
 CONFIG_XFRM_MIGRATE=y
 CONFIG_NET_KEY=y
+CONFIG_XDP_SOCKETS=y
 CONFIG_INET=y
 CONFIG_IP_PNP=y
 CONFIG_IP_PNP_DHCP=y
@@ -96,18 +97,14 @@ CONFIG_NF_CONNTRACK_SANE=m
 CONFIG_NF_CONNTRACK_SIP=m
 CONFIG_NF_CONNTRACK_TFTP=m
 CONFIG_NF_TABLES=m
+CONFIG_NF_TABLES_SET=m
 CONFIG_NF_TABLES_INET=y
 CONFIG_NF_TABLES_NETDEV=y
-CONFIG_NFT_EXTHDR=m
-CONFIG_NFT_META=m
-CONFIG_NFT_RT=m
 CONFIG_NFT_NUMGEN=m
 CONFIG_NFT_CT=m
 CONFIG_NFT_FLOW_OFFLOAD=m
-CONFIG_NFT_SET_RBTREE=m
-CONFIG_NFT_SET_HASH=m
-CONFIG_NFT_SET_BITMAP=m
 CONFIG_NFT_COUNTER=m
+CONFIG_NFT_CONNLIMIT=m
 CONFIG_NFT_LOG=m
 CONFIG_NFT_LIMIT=m
 CONFIG_NFT_MASQ=m
@@ -120,6 +117,7 @@ CONFIG_NFT_REJECT=m
 CONFIG_NFT_COMPAT=m
 CONFIG_NFT_HASH=m
 CONFIG_NFT_FIB_INET=m
+CONFIG_NFT_SOCKET=m
 CONFIG_NFT_DUP_NETDEV=m
 CONFIG_NFT_FWD_NETDEV=m
 CONFIG_NFT_FIB_NETDEV=m
@@ -198,7 +196,6 @@ CONFIG_IP_SET_HASH_NETPORT=m
 CONFIG_IP_SET_HASH_NETIFACE=m
 CONFIG_IP_SET_LIST_SET=m
 CONFIG_NF_CONNTRACK_IPV4=m
-CONFIG_NF_SOCKET_IPV4=m
 CONFIG_NFT_CHAIN_ROUTE_IPV4=m
 CONFIG_NFT_DUP_IPV4=m
 CONFIG_NFT_FIB_IPV4=m
@@ -229,7 +226,6 @@ CONFIG_IP_NF_ARPTABLES=m
 CONFIG_IP_NF_ARPFILTER=m
 CONFIG_IP_NF_ARP_MANGLE=m
 CONFIG_NF_CONNTRACK_IPV6=m
-CONFIG_NF_SOCKET_IPV6=m
 CONFIG_NFT_CHAIN_ROUTE_IPV6=m
 CONFIG_NFT_CHAIN_NAT_IPV6=m
 CONFIG_NFT_MASQ_IPV6=m
@@ -258,7 +254,6 @@ CONFIG_IP6_NF_NAT=m
 CONFIG_IP6_NF_TARGET_MASQUERADE=m
 CONFIG_IP6_NF_TARGET_NPT=m
 CONFIG_NF_TABLES_BRIDGE=y
-CONFIG_NFT_BRIDGE_META=m
 CONFIG_NFT_BRIDGE_REJECT=m
 CONFIG_NF_LOG_BRIDGE=m
 CONFIG_BRIDGE_NF_EBTABLES=m
@@ -299,6 +294,7 @@ CONFIG_6LOWPAN_GHC_EXT_HDR_FRAG=m
 CONFIG_6LOWPAN_GHC_EXT_HDR_ROUTE=m
 CONFIG_DNS_RESOLVER=y
 CONFIG_BATMAN_ADV=m
+# CONFIG_BATMAN_ADV_BATMAN_V is not set
 CONFIG_BATMAN_ADV_DAT=y
 CONFIG_BATMAN_ADV_NC=y
 CONFIG_BATMAN_ADV_MCAST=y
@@ -345,6 +341,7 @@ CONFIG_DM_UNSTRIPED=m
 CONFIG_DM_CRYPT=m
 CONFIG_DM_SNAPSHOT=m
 CONFIG_DM_THIN_PROVISIONING=m
+CONFIG_DM_WRITECACHE=m
 CONFIG_DM_ERA=m
 CONFIG_DM_MIRROR=m
 CONFIG_DM_RAID=m
@@ -382,14 +379,15 @@ CONFIG_VETH=m
 CONFIG_HPLANCE=y
 # CONFIG_NET_VENDOR_AQUANTIA is not set
 # CONFIG_NET_VENDOR_ARC is not set
-# CONFIG_NET_CADENCE is not set
 # CONFIG_NET_VENDOR_BROADCOM is not set
+# CONFIG_NET_CADENCE is not set
 # CONFIG_NET_VENDOR_CORTINA is not set
 # CONFIG_NET_VENDOR_EZCHIP is not set
 # CONFIG_NET_VENDOR_HUAWEI is not set
 # CONFIG_NET_VENDOR_INTEL is not set
 # CONFIG_NET_VENDOR_MARVELL is not set
 # CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_MICROSEMI is not set
 # CONFIG_NET_VENDOR_NATSEMI is not set
 # CONFIG_NET_VENDOR_NETRONOME is not set
 # CONFIG_NET_VENDOR_NI is not set
@@ -401,9 +399,9 @@ CONFIG_HPLANCE=y
 # CONFIG_NET_VENDOR_SOLARFLARE is not set
 # CONFIG_NET_VENDOR_SOCIONEXT is not set
 # CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_SYNOPSYS is not set
 # CONFIG_NET_VENDOR_VIA is not set
 # CONFIG_NET_VENDOR_WIZNET is not set
-# CONFIG_NET_VENDOR_SYNOPSYS is not set
 CONFIG_PPP=m
 CONFIG_PPP_BSDCOMP=m
 CONFIG_PPP_DEFLATE=m
@@ -443,6 +441,7 @@ CONFIG_HIDRAW=y
 CONFIG_UHID=m
 # CONFIG_HID_GENERIC is not set
 # CONFIG_HID_ITE is not set
+# CONFIG_HID_REDRAGON is not set
 # CONFIG_USB_SUPPORT is not set
 CONFIG_RTC_CLASS=y
 # CONFIG_RTC_NVMEM is not set
@@ -460,7 +459,7 @@ CONFIG_FS_ENCRYPTION=m
 CONFIG_FANOTIFY=y
 CONFIG_QUOTA_NETLINK_INTERFACE=y
 # CONFIG_PRINT_QUOTA_WARNING is not set
-CONFIG_AUTOFS4_FS=m
+CONFIG_AUTOFS_FS=m
 CONFIG_FUSE_FS=m
 CONFIG_CUSE=m
 CONFIG_OVERLAY_FS=m
@@ -561,6 +560,7 @@ CONFIG_TEST_KSTRTOX=m
 CONFIG_TEST_PRINTF=m
 CONFIG_TEST_BITMAP=m
 CONFIG_TEST_UUID=m
+CONFIG_TEST_OVERFLOW=m
 CONFIG_TEST_RHASHTABLE=m
 CONFIG_TEST_HASH=m
 CONFIG_TEST_USER_COPY=m
@@ -583,6 +583,11 @@ CONFIG_CRYPTO_CRYPTD=m
 CONFIG_CRYPTO_MCRYPTD=m
 CONFIG_CRYPTO_TEST=m
 CONFIG_CRYPTO_CHACHA20POLY1305=m
+CONFIG_CRYPTO_AEGIS128=m
+CONFIG_CRYPTO_AEGIS128L=m
+CONFIG_CRYPTO_AEGIS256=m
+CONFIG_CRYPTO_MORUS640=m
+CONFIG_CRYPTO_MORUS1280=m
 CONFIG_CRYPTO_CFB=m
 CONFIG_CRYPTO_LRW=m
 CONFIG_CRYPTO_PCBC=m
@@ -618,6 +623,7 @@ CONFIG_CRYPTO_LZO=m
 CONFIG_CRYPTO_842=m
 CONFIG_CRYPTO_LZ4=m
 CONFIG_CRYPTO_LZ4HC=m
+CONFIG_CRYPTO_ZSTD=m
 CONFIG_CRYPTO_ANSI_CPRNG=m
 CONFIG_CRYPTO_DRBG_HASH=y
 CONFIG_CRYPTO_DRBG_CTR=y
index b52e597..930cc29 100644
@@ -49,6 +49,7 @@ CONFIG_UNIX_DIAG=m
 CONFIG_TLS=m
 CONFIG_XFRM_MIGRATE=y
 CONFIG_NET_KEY=y
+CONFIG_XDP_SOCKETS=y
 CONFIG_INET=y
 CONFIG_IP_PNP=y
 CONFIG_IP_PNP_DHCP=y
@@ -95,18 +96,14 @@ CONFIG_NF_CONNTRACK_SANE=m
 CONFIG_NF_CONNTRACK_SIP=m
 CONFIG_NF_CONNTRACK_TFTP=m
 CONFIG_NF_TABLES=m
+CONFIG_NF_TABLES_SET=m
 CONFIG_NF_TABLES_INET=y
 CONFIG_NF_TABLES_NETDEV=y
-CONFIG_NFT_EXTHDR=m
-CONFIG_NFT_META=m
-CONFIG_NFT_RT=m
 CONFIG_NFT_NUMGEN=m
 CONFIG_NFT_CT=m
 CONFIG_NFT_FLOW_OFFLOAD=m
-CONFIG_NFT_SET_RBTREE=m
-CONFIG_NFT_SET_HASH=m
-CONFIG_NFT_SET_BITMAP=m
 CONFIG_NFT_COUNTER=m
+CONFIG_NFT_CONNLIMIT=m
 CONFIG_NFT_LOG=m
 CONFIG_NFT_LIMIT=m
 CONFIG_NFT_MASQ=m
@@ -119,6 +116,7 @@ CONFIG_NFT_REJECT=m
 CONFIG_NFT_COMPAT=m
 CONFIG_NFT_HASH=m
 CONFIG_NFT_FIB_INET=m
+CONFIG_NFT_SOCKET=m
 CONFIG_NFT_DUP_NETDEV=m
 CONFIG_NFT_FWD_NETDEV=m
 CONFIG_NFT_FIB_NETDEV=m
@@ -197,7 +195,6 @@ CONFIG_IP_SET_HASH_NETPORT=m
 CONFIG_IP_SET_HASH_NETIFACE=m
 CONFIG_IP_SET_LIST_SET=m
 CONFIG_NF_CONNTRACK_IPV4=m
-CONFIG_NF_SOCKET_IPV4=m
 CONFIG_NFT_CHAIN_ROUTE_IPV4=m
 CONFIG_NFT_DUP_IPV4=m
 CONFIG_NFT_FIB_IPV4=m
@@ -228,7 +225,6 @@ CONFIG_IP_NF_ARPTABLES=m
 CONFIG_IP_NF_ARPFILTER=m
 CONFIG_IP_NF_ARP_MANGLE=m
 CONFIG_NF_CONNTRACK_IPV6=m
-CONFIG_NF_SOCKET_IPV6=m
 CONFIG_NFT_CHAIN_ROUTE_IPV6=m
 CONFIG_NFT_CHAIN_NAT_IPV6=m
 CONFIG_NFT_MASQ_IPV6=m
@@ -257,7 +253,6 @@ CONFIG_IP6_NF_NAT=m
 CONFIG_IP6_NF_TARGET_MASQUERADE=m
 CONFIG_IP6_NF_TARGET_NPT=m
 CONFIG_NF_TABLES_BRIDGE=y
-CONFIG_NFT_BRIDGE_META=m
 CONFIG_NFT_BRIDGE_REJECT=m
 CONFIG_NF_LOG_BRIDGE=m
 CONFIG_BRIDGE_NF_EBTABLES=m
@@ -301,6 +296,7 @@ CONFIG_6LOWPAN_GHC_EXT_HDR_FRAG=m
 CONFIG_6LOWPAN_GHC_EXT_HDR_ROUTE=m
 CONFIG_DNS_RESOLVER=y
 CONFIG_BATMAN_ADV=m
+# CONFIG_BATMAN_ADV_BATMAN_V is not set
 CONFIG_BATMAN_ADV_DAT=y
 CONFIG_BATMAN_ADV_NC=y
 CONFIG_BATMAN_ADV_MCAST=y
@@ -354,6 +350,7 @@ CONFIG_DM_UNSTRIPED=m
 CONFIG_DM_CRYPT=m
 CONFIG_DM_SNAPSHOT=m
 CONFIG_DM_THIN_PROVISIONING=m
+CONFIG_DM_WRITECACHE=m
 CONFIG_DM_ERA=m
 CONFIG_DM_MIRROR=m
 CONFIG_DM_RAID=m
@@ -398,8 +395,8 @@ CONFIG_VETH=m
 CONFIG_MACMACE=y
 # CONFIG_NET_VENDOR_AQUANTIA is not set
 # CONFIG_NET_VENDOR_ARC is not set
-# CONFIG_NET_CADENCE is not set
 # CONFIG_NET_VENDOR_BROADCOM is not set
+# CONFIG_NET_CADENCE is not set
 CONFIG_MAC89x0=y
 # CONFIG_NET_VENDOR_CORTINA is not set
 # CONFIG_NET_VENDOR_EZCHIP is not set
@@ -407,6 +404,7 @@ CONFIG_MAC89x0=y
 # CONFIG_NET_VENDOR_INTEL is not set
 # CONFIG_NET_VENDOR_MARVELL is not set
 # CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_MICROSEMI is not set
 CONFIG_MACSONIC=y
 # CONFIG_NET_VENDOR_NETRONOME is not set
 # CONFIG_NET_VENDOR_NI is not set
@@ -420,9 +418,9 @@ CONFIG_MAC8390=y
 # CONFIG_NET_VENDOR_SMSC is not set
 # CONFIG_NET_VENDOR_SOCIONEXT is not set
 # CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_SYNOPSYS is not set
 # CONFIG_NET_VENDOR_VIA is not set
 # CONFIG_NET_VENDOR_WIZNET is not set
-# CONFIG_NET_VENDOR_SYNOPSYS is not set
 CONFIG_PPP=m
 CONFIG_PPP_BSDCOMP=m
 CONFIG_PPP_DEFLATE=m
@@ -465,6 +463,7 @@ CONFIG_HIDRAW=y
 CONFIG_UHID=m
 # CONFIG_HID_GENERIC is not set
 # CONFIG_HID_ITE is not set
+# CONFIG_HID_REDRAGON is not set
 # CONFIG_USB_SUPPORT is not set
 CONFIG_RTC_CLASS=y
 # CONFIG_RTC_NVMEM is not set
@@ -482,7 +481,7 @@ CONFIG_FS_ENCRYPTION=m
 CONFIG_FANOTIFY=y
 CONFIG_QUOTA_NETLINK_INTERFACE=y
 # CONFIG_PRINT_QUOTA_WARNING is not set
-CONFIG_AUTOFS4_FS=m
+CONFIG_AUTOFS_FS=m
 CONFIG_FUSE_FS=m
 CONFIG_CUSE=m
 CONFIG_OVERLAY_FS=m
@@ -583,6 +582,7 @@ CONFIG_TEST_KSTRTOX=m
 CONFIG_TEST_PRINTF=m
 CONFIG_TEST_BITMAP=m
 CONFIG_TEST_UUID=m
+CONFIG_TEST_OVERFLOW=m
 CONFIG_TEST_RHASHTABLE=m
 CONFIG_TEST_HASH=m
 CONFIG_TEST_USER_COPY=m
@@ -605,6 +605,11 @@ CONFIG_CRYPTO_CRYPTD=m
 CONFIG_CRYPTO_MCRYPTD=m
 CONFIG_CRYPTO_TEST=m
 CONFIG_CRYPTO_CHACHA20POLY1305=m
+CONFIG_CRYPTO_AEGIS128=m
+CONFIG_CRYPTO_AEGIS128L=m
+CONFIG_CRYPTO_AEGIS256=m
+CONFIG_CRYPTO_MORUS640=m
+CONFIG_CRYPTO_MORUS1280=m
 CONFIG_CRYPTO_CFB=m
 CONFIG_CRYPTO_LRW=m
 CONFIG_CRYPTO_PCBC=m
@@ -640,6 +645,7 @@ CONFIG_CRYPTO_LZO=m
 CONFIG_CRYPTO_842=m
 CONFIG_CRYPTO_LZ4=m
 CONFIG_CRYPTO_LZ4HC=m
+CONFIG_CRYPTO_ZSTD=m
 CONFIG_CRYPTO_ANSI_CPRNG=m
 CONFIG_CRYPTO_DRBG_HASH=y
 CONFIG_CRYPTO_DRBG_CTR=y
index 2a84eee..e7dd253 100644
@@ -59,6 +59,7 @@ CONFIG_UNIX_DIAG=m
 CONFIG_TLS=m
 CONFIG_XFRM_MIGRATE=y
 CONFIG_NET_KEY=y
+CONFIG_XDP_SOCKETS=y
 CONFIG_INET=y
 CONFIG_IP_PNP=y
 CONFIG_IP_PNP_DHCP=y
@@ -105,18 +106,14 @@ CONFIG_NF_CONNTRACK_SANE=m
 CONFIG_NF_CONNTRACK_SIP=m
 CONFIG_NF_CONNTRACK_TFTP=m
 CONFIG_NF_TABLES=m
+CONFIG_NF_TABLES_SET=m
 CONFIG_NF_TABLES_INET=y
 CONFIG_NF_TABLES_NETDEV=y
-CONFIG_NFT_EXTHDR=m
-CONFIG_NFT_META=m
-CONFIG_NFT_RT=m
 CONFIG_NFT_NUMGEN=m
 CONFIG_NFT_CT=m
 CONFIG_NFT_FLOW_OFFLOAD=m
-CONFIG_NFT_SET_RBTREE=m
-CONFIG_NFT_SET_HASH=m
-CONFIG_NFT_SET_BITMAP=m
 CONFIG_NFT_COUNTER=m
+CONFIG_NFT_CONNLIMIT=m
 CONFIG_NFT_LOG=m
 CONFIG_NFT_LIMIT=m
 CONFIG_NFT_MASQ=m
@@ -129,6 +126,7 @@ CONFIG_NFT_REJECT=m
 CONFIG_NFT_COMPAT=m
 CONFIG_NFT_HASH=m
 CONFIG_NFT_FIB_INET=m
+CONFIG_NFT_SOCKET=m
 CONFIG_NFT_DUP_NETDEV=m
 CONFIG_NFT_FWD_NETDEV=m
 CONFIG_NFT_FIB_NETDEV=m
@@ -207,7 +205,6 @@ CONFIG_IP_SET_HASH_NETPORT=m
 CONFIG_IP_SET_HASH_NETIFACE=m
 CONFIG_IP_SET_LIST_SET=m
 CONFIG_NF_CONNTRACK_IPV4=m
-CONFIG_NF_SOCKET_IPV4=m
 CONFIG_NFT_CHAIN_ROUTE_IPV4=m
 CONFIG_NFT_DUP_IPV4=m
 CONFIG_NFT_FIB_IPV4=m
@@ -238,7 +235,6 @@ CONFIG_IP_NF_ARPTABLES=m
 CONFIG_IP_NF_ARPFILTER=m
 CONFIG_IP_NF_ARP_MANGLE=m
 CONFIG_NF_CONNTRACK_IPV6=m
-CONFIG_NF_SOCKET_IPV6=m
 CONFIG_NFT_CHAIN_ROUTE_IPV6=m
 CONFIG_NFT_CHAIN_NAT_IPV6=m
 CONFIG_NFT_MASQ_IPV6=m
@@ -267,7 +263,6 @@ CONFIG_IP6_NF_NAT=m
 CONFIG_IP6_NF_TARGET_MASQUERADE=m
 CONFIG_IP6_NF_TARGET_NPT=m
 CONFIG_NF_TABLES_BRIDGE=y
-CONFIG_NFT_BRIDGE_META=m
 CONFIG_NFT_BRIDGE_REJECT=m
 CONFIG_NF_LOG_BRIDGE=m
 CONFIG_BRIDGE_NF_EBTABLES=m
@@ -311,6 +306,7 @@ CONFIG_6LOWPAN_GHC_EXT_HDR_FRAG=m
 CONFIG_6LOWPAN_GHC_EXT_HDR_ROUTE=m
 CONFIG_DNS_RESOLVER=y
 CONFIG_BATMAN_ADV=m
+# CONFIG_BATMAN_ADV_BATMAN_V is not set
 CONFIG_BATMAN_ADV_DAT=y
 CONFIG_BATMAN_ADV_NC=y
 CONFIG_BATMAN_ADV_MCAST=y
@@ -373,6 +369,7 @@ CONFIG_A2091_SCSI=y
 CONFIG_GVP11_SCSI=y
 CONFIG_SCSI_A4000T=y
 CONFIG_SCSI_ZORRO7XX=y
+CONFIG_SCSI_ZORRO_ESP=y
 CONFIG_ATARI_SCSI=y
 CONFIG_MAC_SCSI=y
 CONFIG_SCSI_MAC_ESP=y
@@ -387,6 +384,7 @@ CONFIG_DM_UNSTRIPED=m
 CONFIG_DM_CRYPT=m
 CONFIG_DM_SNAPSHOT=m
 CONFIG_DM_THIN_PROVISIONING=m
+CONFIG_DM_WRITECACHE=m
 CONFIG_DM_ERA=m
 CONFIG_DM_MIRROR=m
 CONFIG_DM_RAID=m
@@ -438,8 +436,8 @@ CONFIG_SUN3LANCE=y
 CONFIG_MACMACE=y
 # CONFIG_NET_VENDOR_AQUANTIA is not set
 # CONFIG_NET_VENDOR_ARC is not set
-# CONFIG_NET_CADENCE is not set
 # CONFIG_NET_VENDOR_BROADCOM is not set
+# CONFIG_NET_CADENCE is not set
 CONFIG_MAC89x0=y
 # CONFIG_NET_VENDOR_CORTINA is not set
 # CONFIG_NET_VENDOR_EZCHIP is not set
@@ -449,9 +447,11 @@ CONFIG_BVME6000_NET=y
 CONFIG_MVME16x_NET=y
 # CONFIG_NET_VENDOR_MARVELL is not set
 # CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_MICROSEMI is not set
 CONFIG_MACSONIC=y
 # CONFIG_NET_VENDOR_NETRONOME is not set
 # CONFIG_NET_VENDOR_NI is not set
+CONFIG_XSURF100=y
 CONFIG_HYDRA=y
 CONFIG_MAC8390=y
 CONFIG_NE2000=y
@@ -466,9 +466,9 @@ CONFIG_ZORRO8390=y
 CONFIG_SMC91X=y
 # CONFIG_NET_VENDOR_SOCIONEXT is not set
 # CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_SYNOPSYS is not set
 # CONFIG_NET_VENDOR_VIA is not set
 # CONFIG_NET_VENDOR_WIZNET is not set
-# CONFIG_NET_VENDOR_SYNOPSYS is not set
 CONFIG_PLIP=m
 CONFIG_PPP=m
 CONFIG_PPP_BSDCOMP=m
@@ -533,6 +533,7 @@ CONFIG_HIDRAW=y
 CONFIG_UHID=m
 # CONFIG_HID_GENERIC is not set
 # CONFIG_HID_ITE is not set
+# CONFIG_HID_REDRAGON is not set
 # CONFIG_USB_SUPPORT is not set
 CONFIG_RTC_CLASS=y
 # CONFIG_RTC_NVMEM is not set
@@ -562,7 +563,7 @@ CONFIG_FS_ENCRYPTION=m
 CONFIG_FANOTIFY=y
 CONFIG_QUOTA_NETLINK_INTERFACE=y
 # CONFIG_PRINT_QUOTA_WARNING is not set
-CONFIG_AUTOFS4_FS=m
+CONFIG_AUTOFS_FS=m
 CONFIG_FUSE_FS=m
 CONFIG_CUSE=m
 CONFIG_OVERLAY_FS=m
@@ -663,6 +664,7 @@ CONFIG_TEST_KSTRTOX=m
 CONFIG_TEST_PRINTF=m
 CONFIG_TEST_BITMAP=m
 CONFIG_TEST_UUID=m
+CONFIG_TEST_OVERFLOW=m
 CONFIG_TEST_RHASHTABLE=m
 CONFIG_TEST_HASH=m
 CONFIG_TEST_USER_COPY=m
@@ -685,6 +687,11 @@ CONFIG_CRYPTO_CRYPTD=m
 CONFIG_CRYPTO_MCRYPTD=m
 CONFIG_CRYPTO_TEST=m
 CONFIG_CRYPTO_CHACHA20POLY1305=m
+CONFIG_CRYPTO_AEGIS128=m
+CONFIG_CRYPTO_AEGIS128L=m
+CONFIG_CRYPTO_AEGIS256=m
+CONFIG_CRYPTO_MORUS640=m
+CONFIG_CRYPTO_MORUS1280=m
 CONFIG_CRYPTO_CFB=m
 CONFIG_CRYPTO_LRW=m
 CONFIG_CRYPTO_PCBC=m
@@ -720,6 +727,7 @@ CONFIG_CRYPTO_LZO=m
 CONFIG_CRYPTO_842=m
 CONFIG_CRYPTO_LZ4=m
 CONFIG_CRYPTO_LZ4HC=m
+CONFIG_CRYPTO_ZSTD=m
 CONFIG_CRYPTO_ANSI_CPRNG=m
 CONFIG_CRYPTO_DRBG_HASH=y
 CONFIG_CRYPTO_DRBG_CTR=y
index 476e699..b383327 100644
@@ -47,6 +47,7 @@ CONFIG_UNIX_DIAG=m
 CONFIG_TLS=m
 CONFIG_XFRM_MIGRATE=y
 CONFIG_NET_KEY=y
+CONFIG_XDP_SOCKETS=y
 CONFIG_INET=y
 CONFIG_IP_PNP=y
 CONFIG_IP_PNP_DHCP=y
@@ -93,18 +94,14 @@ CONFIG_NF_CONNTRACK_SANE=m
 CONFIG_NF_CONNTRACK_SIP=m
 CONFIG_NF_CONNTRACK_TFTP=m
 CONFIG_NF_TABLES=m
+CONFIG_NF_TABLES_SET=m
 CONFIG_NF_TABLES_INET=y
 CONFIG_NF_TABLES_NETDEV=y
-CONFIG_NFT_EXTHDR=m
-CONFIG_NFT_META=m
-CONFIG_NFT_RT=m
 CONFIG_NFT_NUMGEN=m
 CONFIG_NFT_CT=m
 CONFIG_NFT_FLOW_OFFLOAD=m
-CONFIG_NFT_SET_RBTREE=m
-CONFIG_NFT_SET_HASH=m
-CONFIG_NFT_SET_BITMAP=m
 CONFIG_NFT_COUNTER=m
+CONFIG_NFT_CONNLIMIT=m
 CONFIG_NFT_LOG=m
 CONFIG_NFT_LIMIT=m
 CONFIG_NFT_MASQ=m
@@ -117,6 +114,7 @@ CONFIG_NFT_REJECT=m
 CONFIG_NFT_COMPAT=m
 CONFIG_NFT_HASH=m
 CONFIG_NFT_FIB_INET=m
+CONFIG_NFT_SOCKET=m
 CONFIG_NFT_DUP_NETDEV=m
 CONFIG_NFT_FWD_NETDEV=m
 CONFIG_NFT_FIB_NETDEV=m
@@ -195,7 +193,6 @@ CONFIG_IP_SET_HASH_NETPORT=m
 CONFIG_IP_SET_HASH_NETIFACE=m
 CONFIG_IP_SET_LIST_SET=m
 CONFIG_NF_CONNTRACK_IPV4=m
-CONFIG_NF_SOCKET_IPV4=m
 CONFIG_NFT_CHAIN_ROUTE_IPV4=m
 CONFIG_NFT_DUP_IPV4=m
 CONFIG_NFT_FIB_IPV4=m
@@ -226,7 +223,6 @@ CONFIG_IP_NF_ARPTABLES=m
 CONFIG_IP_NF_ARPFILTER=m
 CONFIG_IP_NF_ARP_MANGLE=m
 CONFIG_NF_CONNTRACK_IPV6=m
-CONFIG_NF_SOCKET_IPV6=m
 CONFIG_NFT_CHAIN_ROUTE_IPV6=m
 CONFIG_NFT_CHAIN_NAT_IPV6=m
 CONFIG_NFT_MASQ_IPV6=m
@@ -255,7 +251,6 @@ CONFIG_IP6_NF_NAT=m
 CONFIG_IP6_NF_TARGET_MASQUERADE=m
 CONFIG_IP6_NF_TARGET_NPT=m
 CONFIG_NF_TABLES_BRIDGE=y
-CONFIG_NFT_BRIDGE_META=m
 CONFIG_NFT_BRIDGE_REJECT=m
 CONFIG_NF_LOG_BRIDGE=m
 CONFIG_BRIDGE_NF_EBTABLES=m
@@ -296,6 +291,7 @@ CONFIG_6LOWPAN_GHC_EXT_HDR_FRAG=m
 CONFIG_6LOWPAN_GHC_EXT_HDR_ROUTE=m
 CONFIG_DNS_RESOLVER=y
 CONFIG_BATMAN_ADV=m
+# CONFIG_BATMAN_ADV_BATMAN_V is not set
 CONFIG_BATMAN_ADV_DAT=y
 CONFIG_BATMAN_ADV_NC=y
 CONFIG_BATMAN_ADV_MCAST=y
@@ -343,6 +339,7 @@ CONFIG_DM_UNSTRIPED=m
 CONFIG_DM_CRYPT=m
 CONFIG_DM_SNAPSHOT=m
 CONFIG_DM_THIN_PROVISIONING=m
+CONFIG_DM_WRITECACHE=m
 CONFIG_DM_ERA=m
 CONFIG_DM_MIRROR=m
 CONFIG_DM_RAID=m
@@ -380,14 +377,15 @@ CONFIG_VETH=m
 CONFIG_MVME147_NET=y
 # CONFIG_NET_VENDOR_AQUANTIA is not set
 # CONFIG_NET_VENDOR_ARC is not set
-# CONFIG_NET_CADENCE is not set
 # CONFIG_NET_VENDOR_BROADCOM is not set
+# CONFIG_NET_CADENCE is not set
 # CONFIG_NET_VENDOR_CORTINA is not set
 # CONFIG_NET_VENDOR_EZCHIP is not set
 # CONFIG_NET_VENDOR_HUAWEI is not set
 # CONFIG_NET_VENDOR_INTEL is not set
 # CONFIG_NET_VENDOR_MARVELL is not set
 # CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_MICROSEMI is not set
 # CONFIG_NET_VENDOR_NATSEMI is not set
 # CONFIG_NET_VENDOR_NETRONOME is not set
 # CONFIG_NET_VENDOR_NI is not set
@@ -399,9 +397,9 @@ CONFIG_MVME147_NET=y
 # CONFIG_NET_VENDOR_SOLARFLARE is not set
 # CONFIG_NET_VENDOR_SOCIONEXT is not set
 # CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_SYNOPSYS is not set
 # CONFIG_NET_VENDOR_VIA is not set
 # CONFIG_NET_VENDOR_WIZNET is not set
-# CONFIG_NET_VENDOR_SYNOPSYS is not set
 CONFIG_PPP=m
 CONFIG_PPP_BSDCOMP=m
 CONFIG_PPP_DEFLATE=m
@@ -433,6 +431,7 @@ CONFIG_HIDRAW=y
 CONFIG_UHID=m
 # CONFIG_HID_GENERIC is not set
 # CONFIG_HID_ITE is not set
+# CONFIG_HID_REDRAGON is not set
 # CONFIG_USB_SUPPORT is not set
 CONFIG_RTC_CLASS=y
 # CONFIG_RTC_NVMEM is not set
@@ -450,7 +449,7 @@ CONFIG_FS_ENCRYPTION=m
 CONFIG_FANOTIFY=y
 CONFIG_QUOTA_NETLINK_INTERFACE=y
 # CONFIG_PRINT_QUOTA_WARNING is not set
-CONFIG_AUTOFS4_FS=m
+CONFIG_AUTOFS_FS=m
 CONFIG_FUSE_FS=m
 CONFIG_CUSE=m
 CONFIG_OVERLAY_FS=m
@@ -551,6 +550,7 @@ CONFIG_TEST_KSTRTOX=m
 CONFIG_TEST_PRINTF=m
 CONFIG_TEST_BITMAP=m
 CONFIG_TEST_UUID=m
+CONFIG_TEST_OVERFLOW=m
 CONFIG_TEST_RHASHTABLE=m
 CONFIG_TEST_HASH=m
 CONFIG_TEST_USER_COPY=m
@@ -573,6 +573,11 @@ CONFIG_CRYPTO_CRYPTD=m
 CONFIG_CRYPTO_MCRYPTD=m
 CONFIG_CRYPTO_TEST=m
 CONFIG_CRYPTO_CHACHA20POLY1305=m
+CONFIG_CRYPTO_AEGIS128=m
+CONFIG_CRYPTO_AEGIS128L=m
+CONFIG_CRYPTO_AEGIS256=m
+CONFIG_CRYPTO_MORUS640=m
+CONFIG_CRYPTO_MORUS1280=m
 CONFIG_CRYPTO_CFB=m
 CONFIG_CRYPTO_LRW=m
 CONFIG_CRYPTO_PCBC=m
@@ -608,6 +613,7 @@ CONFIG_CRYPTO_LZO=m
 CONFIG_CRYPTO_842=m
 CONFIG_CRYPTO_LZ4=m
 CONFIG_CRYPTO_LZ4HC=m
+CONFIG_CRYPTO_ZSTD=m
 CONFIG_CRYPTO_ANSI_CPRNG=m
 CONFIG_CRYPTO_DRBG_HASH=y
 CONFIG_CRYPTO_DRBG_CTR=y
index 1477cda..9783d3d 100644
@@ -48,6 +48,7 @@ CONFIG_UNIX_DIAG=m
 CONFIG_TLS=m
 CONFIG_XFRM_MIGRATE=y
 CONFIG_NET_KEY=y
+CONFIG_XDP_SOCKETS=y
 CONFIG_INET=y
 CONFIG_IP_PNP=y
 CONFIG_IP_PNP_DHCP=y
@@ -94,18 +95,14 @@ CONFIG_NF_CONNTRACK_SANE=m
 CONFIG_NF_CONNTRACK_SIP=m
 CONFIG_NF_CONNTRACK_TFTP=m
 CONFIG_NF_TABLES=m
+CONFIG_NF_TABLES_SET=m
 CONFIG_NF_TABLES_INET=y
 CONFIG_NF_TABLES_NETDEV=y
-CONFIG_NFT_EXTHDR=m
-CONFIG_NFT_META=m
-CONFIG_NFT_RT=m
 CONFIG_NFT_NUMGEN=m
 CONFIG_NFT_CT=m
 CONFIG_NFT_FLOW_OFFLOAD=m
-CONFIG_NFT_SET_RBTREE=m
-CONFIG_NFT_SET_HASH=m
-CONFIG_NFT_SET_BITMAP=m
 CONFIG_NFT_COUNTER=m
+CONFIG_NFT_CONNLIMIT=m
 CONFIG_NFT_LOG=m
 CONFIG_NFT_LIMIT=m
 CONFIG_NFT_MASQ=m
@@ -118,6 +115,7 @@ CONFIG_NFT_REJECT=m
 CONFIG_NFT_COMPAT=m
 CONFIG_NFT_HASH=m
 CONFIG_NFT_FIB_INET=m
+CONFIG_NFT_SOCKET=m
 CONFIG_NFT_DUP_NETDEV=m
 CONFIG_NFT_FWD_NETDEV=m
 CONFIG_NFT_FIB_NETDEV=m
@@ -196,7 +194,6 @@ CONFIG_IP_SET_HASH_NETPORT=m
 CONFIG_IP_SET_HASH_NETIFACE=m
 CONFIG_IP_SET_LIST_SET=m
 CONFIG_NF_CONNTRACK_IPV4=m
-CONFIG_NF_SOCKET_IPV4=m
 CONFIG_NFT_CHAIN_ROUTE_IPV4=m
 CONFIG_NFT_DUP_IPV4=m
 CONFIG_NFT_FIB_IPV4=m
@@ -227,7 +224,6 @@ CONFIG_IP_NF_ARPTABLES=m
 CONFIG_IP_NF_ARPFILTER=m
 CONFIG_IP_NF_ARP_MANGLE=m
 CONFIG_NF_CONNTRACK_IPV6=m
-CONFIG_NF_SOCKET_IPV6=m
 CONFIG_NFT_CHAIN_ROUTE_IPV6=m
 CONFIG_NFT_CHAIN_NAT_IPV6=m
 CONFIG_NFT_MASQ_IPV6=m
@@ -256,7 +252,6 @@ CONFIG_IP6_NF_NAT=m
 CONFIG_IP6_NF_TARGET_MASQUERADE=m
 CONFIG_IP6_NF_TARGET_NPT=m
 CONFIG_NF_TABLES_BRIDGE=y
-CONFIG_NFT_BRIDGE_META=m
 CONFIG_NFT_BRIDGE_REJECT=m
 CONFIG_NF_LOG_BRIDGE=m
 CONFIG_BRIDGE_NF_EBTABLES=m
@@ -297,6 +292,7 @@ CONFIG_6LOWPAN_GHC_EXT_HDR_FRAG=m
 CONFIG_6LOWPAN_GHC_EXT_HDR_ROUTE=m
 CONFIG_DNS_RESOLVER=y
 CONFIG_BATMAN_ADV=m
+# CONFIG_BATMAN_ADV_BATMAN_V is not set
 CONFIG_BATMAN_ADV_DAT=y
 CONFIG_BATMAN_ADV_NC=y
 CONFIG_BATMAN_ADV_MCAST=y
@@ -344,6 +340,7 @@ CONFIG_DM_UNSTRIPED=m
 CONFIG_DM_CRYPT=m
 CONFIG_DM_SNAPSHOT=m
 CONFIG_DM_THIN_PROVISIONING=m
+CONFIG_DM_WRITECACHE=m
 CONFIG_DM_ERA=m
 CONFIG_DM_MIRROR=m
 CONFIG_DM_RAID=m
@@ -380,14 +377,15 @@ CONFIG_VETH=m
 # CONFIG_NET_VENDOR_AMAZON is not set
 # CONFIG_NET_VENDOR_AQUANTIA is not set
 # CONFIG_NET_VENDOR_ARC is not set
-# CONFIG_NET_CADENCE is not set
 # CONFIG_NET_VENDOR_BROADCOM is not set
+# CONFIG_NET_CADENCE is not set
 # CONFIG_NET_VENDOR_CORTINA is not set
 # CONFIG_NET_VENDOR_EZCHIP is not set
 # CONFIG_NET_VENDOR_HUAWEI is not set
 CONFIG_MVME16x_NET=y
 # CONFIG_NET_VENDOR_MARVELL is not set
 # CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_MICROSEMI is not set
 # CONFIG_NET_VENDOR_NATSEMI is not set
 # CONFIG_NET_VENDOR_NETRONOME is not set
 # CONFIG_NET_VENDOR_NI is not set
@@ -399,9 +397,9 @@ CONFIG_MVME16x_NET=y
 # CONFIG_NET_VENDOR_SOLARFLARE is not set
 # CONFIG_NET_VENDOR_SOCIONEXT is not set
 # CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_SYNOPSYS is not set
 # CONFIG_NET_VENDOR_VIA is not set
 # CONFIG_NET_VENDOR_WIZNET is not set
-# CONFIG_NET_VENDOR_SYNOPSYS is not set
 CONFIG_PPP=m
 CONFIG_PPP_BSDCOMP=m
 CONFIG_PPP_DEFLATE=m
@@ -433,6 +431,7 @@ CONFIG_HIDRAW=y
 CONFIG_UHID=m
 # CONFIG_HID_GENERIC is not set
 # CONFIG_HID_ITE is not set
+# CONFIG_HID_REDRAGON is not set
 # CONFIG_USB_SUPPORT is not set
 CONFIG_RTC_CLASS=y
 # CONFIG_RTC_NVMEM is not set
@@ -450,7 +449,7 @@ CONFIG_FS_ENCRYPTION=m
 CONFIG_FANOTIFY=y
 CONFIG_QUOTA_NETLINK_INTERFACE=y
 # CONFIG_PRINT_QUOTA_WARNING is not set
-CONFIG_AUTOFS4_FS=m
+CONFIG_AUTOFS_FS=m
 CONFIG_FUSE_FS=m
 CONFIG_CUSE=m
 CONFIG_OVERLAY_FS=m
@@ -551,6 +550,7 @@ CONFIG_TEST_KSTRTOX=m
 CONFIG_TEST_PRINTF=m
 CONFIG_TEST_BITMAP=m
 CONFIG_TEST_UUID=m
+CONFIG_TEST_OVERFLOW=m
 CONFIG_TEST_RHASHTABLE=m
 CONFIG_TEST_HASH=m
 CONFIG_TEST_USER_COPY=m
@@ -573,6 +573,11 @@ CONFIG_CRYPTO_CRYPTD=m
 CONFIG_CRYPTO_MCRYPTD=m
 CONFIG_CRYPTO_TEST=m
 CONFIG_CRYPTO_CHACHA20POLY1305=m
+CONFIG_CRYPTO_AEGIS128=m
+CONFIG_CRYPTO_AEGIS128L=m
+CONFIG_CRYPTO_AEGIS256=m
+CONFIG_CRYPTO_MORUS640=m
+CONFIG_CRYPTO_MORUS1280=m
 CONFIG_CRYPTO_CFB=m
 CONFIG_CRYPTO_LRW=m
 CONFIG_CRYPTO_PCBC=m
@@ -608,6 +613,7 @@ CONFIG_CRYPTO_LZO=m
 CONFIG_CRYPTO_842=m
 CONFIG_CRYPTO_LZ4=m
 CONFIG_CRYPTO_LZ4HC=m
+CONFIG_CRYPTO_ZSTD=m
 CONFIG_CRYPTO_ANSI_CPRNG=m
 CONFIG_CRYPTO_DRBG_HASH=y
 CONFIG_CRYPTO_DRBG_CTR=y
index b3a543d..a35d10e 100644
@@ -48,6 +48,7 @@ CONFIG_UNIX_DIAG=m
 CONFIG_TLS=m
 CONFIG_XFRM_MIGRATE=y
 CONFIG_NET_KEY=y
+CONFIG_XDP_SOCKETS=y
 CONFIG_INET=y
 CONFIG_IP_PNP=y
 CONFIG_IP_PNP_DHCP=y
@@ -94,18 +95,14 @@ CONFIG_NF_CONNTRACK_SANE=m
 CONFIG_NF_CONNTRACK_SIP=m
 CONFIG_NF_CONNTRACK_TFTP=m
 CONFIG_NF_TABLES=m
+CONFIG_NF_TABLES_SET=m
 CONFIG_NF_TABLES_INET=y
 CONFIG_NF_TABLES_NETDEV=y
-CONFIG_NFT_EXTHDR=m
-CONFIG_NFT_META=m
-CONFIG_NFT_RT=m
 CONFIG_NFT_NUMGEN=m
 CONFIG_NFT_CT=m
 CONFIG_NFT_FLOW_OFFLOAD=m
-CONFIG_NFT_SET_RBTREE=m
-CONFIG_NFT_SET_HASH=m
-CONFIG_NFT_SET_BITMAP=m
 CONFIG_NFT_COUNTER=m
+CONFIG_NFT_CONNLIMIT=m
 CONFIG_NFT_LOG=m
 CONFIG_NFT_LIMIT=m
 CONFIG_NFT_MASQ=m
@@ -118,6 +115,7 @@ CONFIG_NFT_REJECT=m
 CONFIG_NFT_COMPAT=m
 CONFIG_NFT_HASH=m
 CONFIG_NFT_FIB_INET=m
+CONFIG_NFT_SOCKET=m
 CONFIG_NFT_DUP_NETDEV=m
 CONFIG_NFT_FWD_NETDEV=m
 CONFIG_NFT_FIB_NETDEV=m
@@ -196,7 +194,6 @@ CONFIG_IP_SET_HASH_NETPORT=m
 CONFIG_IP_SET_HASH_NETIFACE=m
 CONFIG_IP_SET_LIST_SET=m
 CONFIG_NF_CONNTRACK_IPV4=m
-CONFIG_NF_SOCKET_IPV4=m
 CONFIG_NFT_CHAIN_ROUTE_IPV4=m
 CONFIG_NFT_DUP_IPV4=m
 CONFIG_NFT_FIB_IPV4=m
@@ -227,7 +224,6 @@ CONFIG_IP_NF_ARPTABLES=m
 CONFIG_IP_NF_ARPFILTER=m
 CONFIG_IP_NF_ARP_MANGLE=m
 CONFIG_NF_CONNTRACK_IPV6=m
-CONFIG_NF_SOCKET_IPV6=m
 CONFIG_NFT_CHAIN_ROUTE_IPV6=m
 CONFIG_NFT_CHAIN_NAT_IPV6=m
 CONFIG_NFT_MASQ_IPV6=m
@@ -256,7 +252,6 @@ CONFIG_IP6_NF_NAT=m
 CONFIG_IP6_NF_TARGET_MASQUERADE=m
 CONFIG_IP6_NF_TARGET_NPT=m
 CONFIG_NF_TABLES_BRIDGE=y
-CONFIG_NFT_BRIDGE_META=m
 CONFIG_NFT_BRIDGE_REJECT=m
 CONFIG_NF_LOG_BRIDGE=m
 CONFIG_BRIDGE_NF_EBTABLES=m
@@ -297,6 +292,7 @@ CONFIG_6LOWPAN_GHC_EXT_HDR_FRAG=m
 CONFIG_6LOWPAN_GHC_EXT_HDR_ROUTE=m
 CONFIG_DNS_RESOLVER=y
 CONFIG_BATMAN_ADV=m
+# CONFIG_BATMAN_ADV_BATMAN_V is not set
 CONFIG_BATMAN_ADV_DAT=y
 CONFIG_BATMAN_ADV_NC=y
 CONFIG_BATMAN_ADV_MCAST=y
@@ -350,6 +346,7 @@ CONFIG_DM_UNSTRIPED=m
 CONFIG_DM_CRYPT=m
 CONFIG_DM_SNAPSHOT=m
 CONFIG_DM_THIN_PROVISIONING=m
+CONFIG_DM_WRITECACHE=m
 CONFIG_DM_ERA=m
 CONFIG_DM_MIRROR=m
 CONFIG_DM_RAID=m
@@ -388,8 +385,8 @@ CONFIG_VETH=m
 # CONFIG_NET_VENDOR_AMD is not set
 # CONFIG_NET_VENDOR_AQUANTIA is not set
 # CONFIG_NET_VENDOR_ARC is not set
-# CONFIG_NET_CADENCE is not set
 # CONFIG_NET_VENDOR_BROADCOM is not set
+# CONFIG_NET_CADENCE is not set
 # CONFIG_NET_VENDOR_CIRRUS is not set
 # CONFIG_NET_VENDOR_CORTINA is not set
 # CONFIG_NET_VENDOR_EZCHIP is not set
@@ -398,6 +395,7 @@ CONFIG_VETH=m
 # CONFIG_NET_VENDOR_INTEL is not set
 # CONFIG_NET_VENDOR_MARVELL is not set
 # CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_MICROSEMI is not set
 # CONFIG_NET_VENDOR_NETRONOME is not set
 # CONFIG_NET_VENDOR_NI is not set
 CONFIG_NE2000=y
@@ -410,9 +408,9 @@ CONFIG_NE2000=y
 # CONFIG_NET_VENDOR_SMSC is not set
 # CONFIG_NET_VENDOR_SOCIONEXT is not set
 # CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_SYNOPSYS is not set
 # CONFIG_NET_VENDOR_VIA is not set
 # CONFIG_NET_VENDOR_WIZNET is not set
-# CONFIG_NET_VENDOR_SYNOPSYS is not set
 CONFIG_PLIP=m
 CONFIG_PPP=m
 CONFIG_PPP_BSDCOMP=m
@@ -455,6 +453,7 @@ CONFIG_HIDRAW=y
 CONFIG_UHID=m
 # CONFIG_HID_GENERIC is not set
 # CONFIG_HID_ITE is not set
+# CONFIG_HID_REDRAGON is not set
 # CONFIG_USB_SUPPORT is not set
 CONFIG_RTC_CLASS=y
 # CONFIG_RTC_NVMEM is not set
@@ -473,7 +472,7 @@ CONFIG_FS_ENCRYPTION=m
 CONFIG_FANOTIFY=y
 CONFIG_QUOTA_NETLINK_INTERFACE=y
 # CONFIG_PRINT_QUOTA_WARNING is not set
-CONFIG_AUTOFS4_FS=m
+CONFIG_AUTOFS_FS=m
 CONFIG_FUSE_FS=m
 CONFIG_CUSE=m
 CONFIG_OVERLAY_FS=m
@@ -574,6 +573,7 @@ CONFIG_TEST_KSTRTOX=m
 CONFIG_TEST_PRINTF=m
 CONFIG_TEST_BITMAP=m
 CONFIG_TEST_UUID=m
+CONFIG_TEST_OVERFLOW=m
 CONFIG_TEST_RHASHTABLE=m
 CONFIG_TEST_HASH=m
 CONFIG_TEST_USER_COPY=m
@@ -596,6 +596,11 @@ CONFIG_CRYPTO_CRYPTD=m
 CONFIG_CRYPTO_MCRYPTD=m
 CONFIG_CRYPTO_TEST=m
 CONFIG_CRYPTO_CHACHA20POLY1305=m
+CONFIG_CRYPTO_AEGIS128=m
+CONFIG_CRYPTO_AEGIS128L=m
+CONFIG_CRYPTO_AEGIS256=m
+CONFIG_CRYPTO_MORUS640=m
+CONFIG_CRYPTO_MORUS1280=m
 CONFIG_CRYPTO_CFB=m
 CONFIG_CRYPTO_LRW=m
 CONFIG_CRYPTO_PCBC=m
@@ -631,6 +636,7 @@ CONFIG_CRYPTO_LZO=m
 CONFIG_CRYPTO_842=m
 CONFIG_CRYPTO_LZ4=m
 CONFIG_CRYPTO_LZ4HC=m
+CONFIG_CRYPTO_ZSTD=m
 CONFIG_CRYPTO_ANSI_CPRNG=m
 CONFIG_CRYPTO_DRBG_HASH=y
 CONFIG_CRYPTO_DRBG_CTR=y
index d543ed5..573bf92 100644
@@ -45,6 +45,7 @@ CONFIG_UNIX_DIAG=m
 CONFIG_TLS=m
 CONFIG_XFRM_MIGRATE=y
 CONFIG_NET_KEY=y
+CONFIG_XDP_SOCKETS=y
 CONFIG_INET=y
 CONFIG_IP_PNP=y
 CONFIG_IP_PNP_DHCP=y
@@ -91,18 +92,14 @@ CONFIG_NF_CONNTRACK_SANE=m
 CONFIG_NF_CONNTRACK_SIP=m
 CONFIG_NF_CONNTRACK_TFTP=m
 CONFIG_NF_TABLES=m
+CONFIG_NF_TABLES_SET=m
 CONFIG_NF_TABLES_INET=y
 CONFIG_NF_TABLES_NETDEV=y
-CONFIG_NFT_EXTHDR=m
-CONFIG_NFT_META=m
-CONFIG_NFT_RT=m
 CONFIG_NFT_NUMGEN=m
 CONFIG_NFT_CT=m
 CONFIG_NFT_FLOW_OFFLOAD=m
-CONFIG_NFT_SET_RBTREE=m
-CONFIG_NFT_SET_HASH=m
-CONFIG_NFT_SET_BITMAP=m
 CONFIG_NFT_COUNTER=m
+CONFIG_NFT_CONNLIMIT=m
 CONFIG_NFT_LOG=m
 CONFIG_NFT_LIMIT=m
 CONFIG_NFT_MASQ=m
@@ -115,6 +112,7 @@ CONFIG_NFT_REJECT=m
 CONFIG_NFT_COMPAT=m
 CONFIG_NFT_HASH=m
 CONFIG_NFT_FIB_INET=m
+CONFIG_NFT_SOCKET=m
 CONFIG_NFT_DUP_NETDEV=m
 CONFIG_NFT_FWD_NETDEV=m
 CONFIG_NFT_FIB_NETDEV=m
@@ -193,7 +191,6 @@ CONFIG_IP_SET_HASH_NETPORT=m
 CONFIG_IP_SET_HASH_NETIFACE=m
 CONFIG_IP_SET_LIST_SET=m
 CONFIG_NF_CONNTRACK_IPV4=m
-CONFIG_NF_SOCKET_IPV4=m
 CONFIG_NFT_CHAIN_ROUTE_IPV4=m
 CONFIG_NFT_DUP_IPV4=m
 CONFIG_NFT_FIB_IPV4=m
@@ -224,7 +221,6 @@ CONFIG_IP_NF_ARPTABLES=m
 CONFIG_IP_NF_ARPFILTER=m
 CONFIG_IP_NF_ARP_MANGLE=m
 CONFIG_NF_CONNTRACK_IPV6=m
-CONFIG_NF_SOCKET_IPV6=m
 CONFIG_NFT_CHAIN_ROUTE_IPV6=m
 CONFIG_NFT_CHAIN_NAT_IPV6=m
 CONFIG_NFT_MASQ_IPV6=m
@@ -253,7 +249,6 @@ CONFIG_IP6_NF_NAT=m
 CONFIG_IP6_NF_TARGET_MASQUERADE=m
 CONFIG_IP6_NF_TARGET_NPT=m
 CONFIG_NF_TABLES_BRIDGE=y
-CONFIG_NFT_BRIDGE_META=m
 CONFIG_NFT_BRIDGE_REJECT=m
 CONFIG_NF_LOG_BRIDGE=m
 CONFIG_BRIDGE_NF_EBTABLES=m
@@ -294,6 +289,7 @@ CONFIG_6LOWPAN_GHC_EXT_HDR_FRAG=m
 CONFIG_6LOWPAN_GHC_EXT_HDR_ROUTE=m
 CONFIG_DNS_RESOLVER=y
 CONFIG_BATMAN_ADV=m
+# CONFIG_BATMAN_ADV_BATMAN_V is not set
 CONFIG_BATMAN_ADV_DAT=y
 CONFIG_BATMAN_ADV_NC=y
 CONFIG_BATMAN_ADV_MCAST=y
@@ -341,6 +337,7 @@ CONFIG_DM_UNSTRIPED=m
 CONFIG_DM_CRYPT=m
 CONFIG_DM_SNAPSHOT=m
 CONFIG_DM_THIN_PROVISIONING=m
+CONFIG_DM_WRITECACHE=m
 CONFIG_DM_ERA=m
 CONFIG_DM_MIRROR=m
 CONFIG_DM_RAID=m
@@ -385,6 +382,7 @@ CONFIG_SUN3LANCE=y
 CONFIG_SUN3_82586=y
 # CONFIG_NET_VENDOR_MARVELL is not set
 # CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_MICROSEMI is not set
 # CONFIG_NET_VENDOR_NATSEMI is not set
 # CONFIG_NET_VENDOR_NETRONOME is not set
 # CONFIG_NET_VENDOR_NI is not set
@@ -397,9 +395,9 @@ CONFIG_SUN3_82586=y
 # CONFIG_NET_VENDOR_SOCIONEXT is not set
 # CONFIG_NET_VENDOR_STMICRO is not set
 # CONFIG_NET_VENDOR_SUN is not set
+# CONFIG_NET_VENDOR_SYNOPSYS is not set
 # CONFIG_NET_VENDOR_VIA is not set
 # CONFIG_NET_VENDOR_WIZNET is not set
-# CONFIG_NET_VENDOR_SYNOPSYS is not set
 CONFIG_PPP=m
 CONFIG_PPP_BSDCOMP=m
 CONFIG_PPP_DEFLATE=m
@@ -435,6 +433,7 @@ CONFIG_HIDRAW=y
 CONFIG_UHID=m
 # CONFIG_HID_GENERIC is not set
 # CONFIG_HID_ITE is not set
+# CONFIG_HID_REDRAGON is not set
 # CONFIG_USB_SUPPORT is not set
 CONFIG_RTC_CLASS=y
 # CONFIG_RTC_NVMEM is not set
@@ -452,7 +451,7 @@ CONFIG_FS_ENCRYPTION=m
 CONFIG_FANOTIFY=y
 CONFIG_QUOTA_NETLINK_INTERFACE=y
 # CONFIG_PRINT_QUOTA_WARNING is not set
-CONFIG_AUTOFS4_FS=m
+CONFIG_AUTOFS_FS=m
 CONFIG_FUSE_FS=m
 CONFIG_CUSE=m
 CONFIG_OVERLAY_FS=m
@@ -553,6 +552,7 @@ CONFIG_TEST_KSTRTOX=m
 CONFIG_TEST_PRINTF=m
 CONFIG_TEST_BITMAP=m
 CONFIG_TEST_UUID=m
+CONFIG_TEST_OVERFLOW=m
 CONFIG_TEST_RHASHTABLE=m
 CONFIG_TEST_HASH=m
 CONFIG_TEST_USER_COPY=m
@@ -574,6 +574,11 @@ CONFIG_CRYPTO_CRYPTD=m
 CONFIG_CRYPTO_MCRYPTD=m
 CONFIG_CRYPTO_TEST=m
 CONFIG_CRYPTO_CHACHA20POLY1305=m
+CONFIG_CRYPTO_AEGIS128=m
+CONFIG_CRYPTO_AEGIS128L=m
+CONFIG_CRYPTO_AEGIS256=m
+CONFIG_CRYPTO_MORUS640=m
+CONFIG_CRYPTO_MORUS1280=m
 CONFIG_CRYPTO_CFB=m
 CONFIG_CRYPTO_LRW=m
 CONFIG_CRYPTO_PCBC=m
@@ -609,6 +614,7 @@ CONFIG_CRYPTO_LZO=m
 CONFIG_CRYPTO_842=m
 CONFIG_CRYPTO_LZ4=m
 CONFIG_CRYPTO_LZ4HC=m
+CONFIG_CRYPTO_ZSTD=m
 CONFIG_CRYPTO_ANSI_CPRNG=m
 CONFIG_CRYPTO_DRBG_HASH=y
 CONFIG_CRYPTO_DRBG_CTR=y
index a67e542..efb27a7 100644
@@ -45,6 +45,7 @@ CONFIG_UNIX_DIAG=m
 CONFIG_TLS=m
 CONFIG_XFRM_MIGRATE=y
 CONFIG_NET_KEY=y
+CONFIG_XDP_SOCKETS=y
 CONFIG_INET=y
 CONFIG_IP_PNP=y
 CONFIG_IP_PNP_DHCP=y
@@ -91,18 +92,14 @@ CONFIG_NF_CONNTRACK_SANE=m
 CONFIG_NF_CONNTRACK_SIP=m
 CONFIG_NF_CONNTRACK_TFTP=m
 CONFIG_NF_TABLES=m
+CONFIG_NF_TABLES_SET=m
 CONFIG_NF_TABLES_INET=y
 CONFIG_NF_TABLES_NETDEV=y
-CONFIG_NFT_EXTHDR=m
-CONFIG_NFT_META=m
-CONFIG_NFT_RT=m
 CONFIG_NFT_NUMGEN=m
 CONFIG_NFT_CT=m
 CONFIG_NFT_FLOW_OFFLOAD=m
-CONFIG_NFT_SET_RBTREE=m
-CONFIG_NFT_SET_HASH=m
-CONFIG_NFT_SET_BITMAP=m
 CONFIG_NFT_COUNTER=m
+CONFIG_NFT_CONNLIMIT=m
 CONFIG_NFT_LOG=m
 CONFIG_NFT_LIMIT=m
 CONFIG_NFT_MASQ=m
@@ -115,6 +112,7 @@ CONFIG_NFT_REJECT=m
 CONFIG_NFT_COMPAT=m
 CONFIG_NFT_HASH=m
 CONFIG_NFT_FIB_INET=m
+CONFIG_NFT_SOCKET=m
 CONFIG_NFT_DUP_NETDEV=m
 CONFIG_NFT_FWD_NETDEV=m
 CONFIG_NFT_FIB_NETDEV=m
@@ -193,7 +191,6 @@ CONFIG_IP_SET_HASH_NETPORT=m
 CONFIG_IP_SET_HASH_NETIFACE=m
 CONFIG_IP_SET_LIST_SET=m
 CONFIG_NF_CONNTRACK_IPV4=m
-CONFIG_NF_SOCKET_IPV4=m
 CONFIG_NFT_CHAIN_ROUTE_IPV4=m
 CONFIG_NFT_DUP_IPV4=m
 CONFIG_NFT_FIB_IPV4=m
@@ -224,7 +221,6 @@ CONFIG_IP_NF_ARPTABLES=m
 CONFIG_IP_NF_ARPFILTER=m
 CONFIG_IP_NF_ARP_MANGLE=m
 CONFIG_NF_CONNTRACK_IPV6=m
-CONFIG_NF_SOCKET_IPV6=m
 CONFIG_NFT_CHAIN_ROUTE_IPV6=m
 CONFIG_NFT_CHAIN_NAT_IPV6=m
 CONFIG_NFT_MASQ_IPV6=m
@@ -253,7 +249,6 @@ CONFIG_IP6_NF_NAT=m
 CONFIG_IP6_NF_TARGET_MASQUERADE=m
 CONFIG_IP6_NF_TARGET_NPT=m
 CONFIG_NF_TABLES_BRIDGE=y
-CONFIG_NFT_BRIDGE_META=m
 CONFIG_NFT_BRIDGE_REJECT=m
 CONFIG_NF_LOG_BRIDGE=m
 CONFIG_BRIDGE_NF_EBTABLES=m
@@ -294,6 +289,7 @@ CONFIG_6LOWPAN_GHC_EXT_HDR_FRAG=m
 CONFIG_6LOWPAN_GHC_EXT_HDR_ROUTE=m
 CONFIG_DNS_RESOLVER=y
 CONFIG_BATMAN_ADV=m
+# CONFIG_BATMAN_ADV_BATMAN_V is not set
 CONFIG_BATMAN_ADV_DAT=y
 CONFIG_BATMAN_ADV_NC=y
 CONFIG_BATMAN_ADV_MCAST=y
@@ -341,6 +337,7 @@ CONFIG_DM_UNSTRIPED=m
 CONFIG_DM_CRYPT=m
 CONFIG_DM_SNAPSHOT=m
 CONFIG_DM_THIN_PROVISIONING=m
+CONFIG_DM_WRITECACHE=m
 CONFIG_DM_ERA=m
 CONFIG_DM_MIRROR=m
 CONFIG_DM_RAID=m
@@ -378,14 +375,15 @@ CONFIG_VETH=m
 CONFIG_SUN3LANCE=y
 # CONFIG_NET_VENDOR_AQUANTIA is not set
 # CONFIG_NET_VENDOR_ARC is not set
-# CONFIG_NET_CADENCE is not set
 # CONFIG_NET_VENDOR_BROADCOM is not set
+# CONFIG_NET_CADENCE is not set
 # CONFIG_NET_VENDOR_CORTINA is not set
 # CONFIG_NET_VENDOR_EZCHIP is not set
 # CONFIG_NET_VENDOR_HUAWEI is not set
 # CONFIG_NET_VENDOR_INTEL is not set
 # CONFIG_NET_VENDOR_MARVELL is not set
 # CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_MICROSEMI is not set
 # CONFIG_NET_VENDOR_NATSEMI is not set
 # CONFIG_NET_VENDOR_NETRONOME is not set
 # CONFIG_NET_VENDOR_NI is not set
@@ -397,9 +395,9 @@ CONFIG_SUN3LANCE=y
 # CONFIG_NET_VENDOR_SOLARFLARE is not set
 # CONFIG_NET_VENDOR_SOCIONEXT is not set
 # CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_SYNOPSYS is not set
 # CONFIG_NET_VENDOR_VIA is not set
 # CONFIG_NET_VENDOR_WIZNET is not set
-# CONFIG_NET_VENDOR_SYNOPSYS is not set
 CONFIG_PPP=m
 CONFIG_PPP_BSDCOMP=m
 CONFIG_PPP_DEFLATE=m
@@ -435,6 +433,7 @@ CONFIG_HIDRAW=y
 CONFIG_UHID=m
 # CONFIG_HID_GENERIC is not set
 # CONFIG_HID_ITE is not set
+# CONFIG_HID_REDRAGON is not set
 # CONFIG_USB_SUPPORT is not set
 CONFIG_RTC_CLASS=y
 # CONFIG_RTC_NVMEM is not set
@@ -452,7 +451,7 @@ CONFIG_FS_ENCRYPTION=m
 CONFIG_FANOTIFY=y
 CONFIG_QUOTA_NETLINK_INTERFACE=y
 # CONFIG_PRINT_QUOTA_WARNING is not set
-CONFIG_AUTOFS4_FS=m
+CONFIG_AUTOFS_FS=m
 CONFIG_FUSE_FS=m
 CONFIG_CUSE=m
 CONFIG_OVERLAY_FS=m
@@ -553,6 +552,7 @@ CONFIG_TEST_KSTRTOX=m
 CONFIG_TEST_PRINTF=m
 CONFIG_TEST_BITMAP=m
 CONFIG_TEST_UUID=m
+CONFIG_TEST_OVERFLOW=m
 CONFIG_TEST_RHASHTABLE=m
 CONFIG_TEST_HASH=m
 CONFIG_TEST_USER_COPY=m
@@ -575,6 +575,11 @@ CONFIG_CRYPTO_CRYPTD=m
 CONFIG_CRYPTO_MCRYPTD=m
 CONFIG_CRYPTO_TEST=m
 CONFIG_CRYPTO_CHACHA20POLY1305=m
+CONFIG_CRYPTO_AEGIS128=m
+CONFIG_CRYPTO_AEGIS128L=m
+CONFIG_CRYPTO_AEGIS256=m
+CONFIG_CRYPTO_MORUS640=m
+CONFIG_CRYPTO_MORUS1280=m
 CONFIG_CRYPTO_CFB=m
 CONFIG_CRYPTO_LRW=m
 CONFIG_CRYPTO_PCBC=m
@@ -610,6 +615,7 @@ CONFIG_CRYPTO_LZO=m
 CONFIG_CRYPTO_842=m
 CONFIG_CRYPTO_LZ4=m
 CONFIG_CRYPTO_LZ4HC=m
+CONFIG_CRYPTO_ZSTD=m
 CONFIG_CRYPTO_ANSI_CPRNG=m
 CONFIG_CRYPTO_DRBG_HASH=y
 CONFIG_CRYPTO_DRBG_CTR=y
index 4d8d68c..a4b8d33 100644
@@ -1,6 +1,7 @@
 generic-y += barrier.h
 generic-y += compat.h
 generic-y += device.h
+generic-y += dma-mapping.h
 generic-y += emergency-restart.h
 generic-y += exec.h
 generic-y += extable.h
index 93b47b1..54009ea 100644
@@ -454,7 +454,7 @@ static inline unsigned long ffz(unsigned long word)
  */
 #if (defined(__mcfisaaplus__) || defined(__mcfisac__)) && \
        !defined(CONFIG_M68000) && !defined(CONFIG_MCPU32)
-static inline int __ffs(int x)
+static inline unsigned long __ffs(unsigned long x)
 {
        __asm__ __volatile__ ("bitrev %0; ff1 %0"
                : "=d" (x)
@@ -493,7 +493,11 @@ static inline int ffs(int x)
                : "dm" (x & -x));
        return 32 - cnt;
 }
-#define __ffs(x) (ffs(x) - 1)
+
+static inline unsigned long __ffs(unsigned long x)
+{
+       return ffs(x) - 1;
+}
 
 /*
  *     fls: find last bit set.
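
Both hunks above fix __ffs() to take and return unsigned long, the prototype the generic bitmap helpers (find_next_bit(), for_each_set_bit() and friends) assume when scanning unsigned long words. The two conventions are easy to confuse: ffs() is 1-based and defined as 0 for a zero argument, while __ffs() is 0-based and undefined for zero. For example:

    static void ffs_example(void)   /* demo only */
    {
            int one_based = ffs(0x08);              /* 4: counts bits from 1; ffs(0) == 0 */
            unsigned long zero_based = __ffs(0x08); /* 3: counts bits from 0; input must be nonzero */

            (void)one_based;
            (void)zero_based;
    }
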
diff --git a/arch/m68k/include/asm/dma-mapping.h b/arch/m68k/include/asm/dma-mapping.h
deleted file mode 100644
index e3722ed..0000000
+++ /dev/null
@@ -1,12 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _M68K_DMA_MAPPING_H
-#define _M68K_DMA_MAPPING_H
-
-extern const struct dma_map_ops m68k_dma_ops;
-
-static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
-{
-        return &m68k_dma_ops;
-}
-
-#endif  /* _M68K_DMA_MAPPING_H */
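
The arch header can be deleted because the Kbuild hunk above adds generic-y += dma-mapping.h and the m68k Kconfig now selects DMA_NONCOHERENT_OPS, so the asm-generic wrapper resolves the ops instead of a per-arch m68k_dma_ops. Around this release the generic header read roughly as follows (paraphrased from include/asm-generic/dma-mapping.h; check the tree for the exact text):

    static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
    {
    #ifdef CONFIG_DMA_NONCOHERENT_OPS
            return &dma_noncoherent_ops;    /* common ops for noncoherent architectures */
    #else
            return &dma_direct_ops;
    #endif
    }
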
index ca2849a..aabe642 100644
@@ -1,6 +1,13 @@
 /* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _M68K_IO_H
+#define _M68K_IO_H
+
 #if defined(__uClinux__) || defined(CONFIG_COLDFIRE)
 #include <asm/io_no.h>
 #else
 #include <asm/io_mm.h>
 #endif
+
+#include <asm-generic/io.h>
+
+#endif /* _M68K_IO_H */
index fe485f4..782b78f 100644
  *    isa_readX(),isa_writeX()  are for ISA memory
  */
 
-#ifndef _IO_H
-#define _IO_H
+#ifndef _M68K_IO_MM_H
+#define _M68K_IO_MM_H
 
 #ifdef __KERNEL__
 
-#define ARCH_HAS_IOREMAP_WT
-
 #include <linux/compiler.h>
 #include <asm/raw_io.h>
 #include <asm/virtconvert.h>
@@ -369,40 +367,6 @@ static inline void isa_delay(void)
 #define writew(val, addr)      out_le16((addr), (val))
 #endif /* CONFIG_ATARI_ROM_ISA */
 
-#if !defined(CONFIG_ISA) && !defined(CONFIG_ATARI_ROM_ISA)
-/*
- * We need to define dummy functions for GENERIC_IOMAP support.
- */
-#define inb(port)          0xff
-#define inb_p(port)        0xff
-#define outb(val,port)     ((void)0)
-#define outb_p(val,port)   ((void)0)
-#define inw(port)          0xffff
-#define inw_p(port)        0xffff
-#define outw(val,port)     ((void)0)
-#define outw_p(val,port)   ((void)0)
-#define inl(port)          0xffffffffUL
-#define inl_p(port)        0xffffffffUL
-#define outl(val,port)     ((void)0)
-#define outl_p(val,port)   ((void)0)
-
-#define insb(port,buf,nr)  ((void)0)
-#define outsb(port,buf,nr) ((void)0)
-#define insw(port,buf,nr)  ((void)0)
-#define outsw(port,buf,nr) ((void)0)
-#define insl(port,buf,nr)  ((void)0)
-#define outsl(port,buf,nr) ((void)0)
-
-/*
- * These should be valid on any ioremap()ed region
- */
-#define readb(addr)      in_8(addr)
-#define writeb(val,addr) out_8((addr),(val))
-#define readw(addr)      in_le16(addr)
-#define writew(val,addr) out_le16((addr),(val))
-
-#endif /* !CONFIG_ISA && !CONFIG_ATARI_ROM_ISA */
-
 #define readl(addr)      in_le32(addr)
 #define writel(val,addr) out_le32((addr),(val))
 
@@ -444,4 +408,4 @@ static inline void isa_delay(void)
 #define writew_relaxed(b, addr)        writew(b, addr)
 #define writel_relaxed(b, addr)        writel(b, addr)
 
-#endif /* _IO_H */
+#endif /* _M68K_IO_MM_H */
index 83a0a6d..0498192 100644 (file)
@@ -131,19 +131,7 @@ static inline void writel(u32 value, volatile void __iomem *addr)
 #define PCI_SPACE_LIMIT        PCI_IO_MASK
 #endif /* CONFIG_PCI */
 
-/*
- * These are defined in kmap.h as static inline functions. To maintain
- * previous behavior we put these define guards here so io_mm.h doesn't
- * see them.
- */
-#ifdef CONFIG_MMU
-#define memset_io memset_io
-#define memcpy_fromio memcpy_fromio
-#define memcpy_toio memcpy_toio
-#endif
-
 #include <asm/kmap.h>
 #include <asm/virtconvert.h>
-#include <asm-generic/io.h>
 
 #endif /* _M68KNOMMU_IO_H */
index 84b8333..aac7f04 100644 (file)
@@ -4,6 +4,8 @@
 
 #ifdef CONFIG_MMU
 
+#define ARCH_HAS_IOREMAP_WT
+
 /* Values for nocacheflag and cmode */
 #define IOMAP_FULL_CACHING             0
 #define IOMAP_NOCACHE_SER              1
@@ -16,6 +18,7 @@
  */
 extern void __iomem *__ioremap(unsigned long physaddr, unsigned long size,
                               int cacheflag);
+#define iounmap iounmap
 extern void iounmap(void __iomem *addr);
 extern void __iounmap(void *addr, unsigned long size);
 
@@ -33,31 +36,35 @@ static inline void __iomem *ioremap_nocache(unsigned long physaddr,
 }
 
 #define ioremap_uc ioremap_nocache
+#define ioremap_wt ioremap_wt
 static inline void __iomem *ioremap_wt(unsigned long physaddr,
                                       unsigned long size)
 {
        return __ioremap(physaddr, size, IOMAP_WRITETHROUGH);
 }
 
-#define ioremap_fillcache ioremap_fullcache
+#define ioremap_fullcache ioremap_fullcache
 static inline void __iomem *ioremap_fullcache(unsigned long physaddr,
                                              unsigned long size)
 {
        return __ioremap(physaddr, size, IOMAP_FULL_CACHING);
 }
 
+#define memset_io memset_io
 static inline void memset_io(volatile void __iomem *addr, unsigned char val,
                             int count)
 {
        __builtin_memset((void __force *) addr, val, count);
 }
 
+#define memcpy_fromio memcpy_fromio
 static inline void memcpy_fromio(void *dst, const volatile void __iomem *src,
                                 int count)
 {
        __builtin_memcpy(dst, (void __force *) src, count);
 }
 
+#define memcpy_toio memcpy_toio
 static inline void memcpy_toio(volatile void __iomem *dst, const void *src,
                               int count)
 {
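
The repeated "#define memset_io memset_io" lines above follow the asm-generic opt-out convention: <asm-generic/io.h> only supplies a fallback when no macro of the same name exists, so an architecture defines the macro to announce its own implementation. A standalone sketch of the trick (illustrative names, not the actual kernel headers):

    #include <stdio.h>

    /* "arch" side: provide an override and announce it via the macro. */
    static void hello(void) { printf("arch hello\n"); }
    #define hello hello

    /* "generic" side: supply a default only when no override exists. */
    #ifndef hello
    static void hello(void) { printf("generic hello\n"); }
    #endif

    int main(void)
    {
            hello();        /* prints "arch hello" */
            return 0;
    }
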
index 1605da4..49bd326 100644 (file)
@@ -22,7 +22,6 @@ extern int (*mach_hwclk)(int, struct rtc_time*);
 extern unsigned int (*mach_get_ss)(void);
 extern int (*mach_get_rtc_pll)(struct rtc_pll_info *);
 extern int (*mach_set_rtc_pll)(struct rtc_pll_info *);
-extern int (*mach_set_clock_mmss)(unsigned long);
 extern void (*mach_reset)( void );
 extern void (*mach_halt)( void );
 extern void (*mach_power_off)( void );
index 9b840c0..08cee11 100644 (file)
@@ -57,7 +57,6 @@ struct mac_model
 #define MAC_SCSI_IIFX          5
 #define MAC_SCSI_DUO           6
 #define MAC_SCSI_LC            7
-#define MAC_SCSI_LATE          8
 
 #define MAC_IDE_NONE           0
 #define MAC_IDE_QUADRA         1
index e644c4d..6bbe520 100644 (file)
@@ -18,7 +18,7 @@ extern unsigned long memory_end;
 #define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
 
 #define __pa(vaddr)            ((unsigned long)(vaddr))
-#define __va(paddr)            ((void *)(paddr))
+#define __va(paddr)            ((void *)((unsigned long)(paddr)))
 
 #define virt_to_pfn(kaddr)     (__pa(kaddr) >> PAGE_SHIFT)
 #define pfn_to_virt(pfn)       __va((pfn) << PAGE_SHIFT)
index 463572c..e99993c 100644 (file)
@@ -6,7 +6,7 @@
 
 #undef DEBUG
 
-#include <linux/dma-mapping.h>
+#include <linux/dma-noncoherent.h>
 #include <linux/device.h>
 #include <linux/kernel.h>
 #include <linux/platform_device.h>
@@ -19,7 +19,7 @@
 
 #if defined(CONFIG_MMU) && !defined(CONFIG_COLDFIRE)
 
-static void *m68k_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
+void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
                gfp_t flag, unsigned long attrs)
 {
        struct page *page, **map;
@@ -62,7 +62,7 @@ static void *m68k_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
        return addr;
 }
 
-static void m68k_dma_free(struct device *dev, size_t size, void *addr,
+void arch_dma_free(struct device *dev, size_t size, void *addr,
                dma_addr_t handle, unsigned long attrs)
 {
        pr_debug("dma_free_coherent: %p, %x\n", addr, handle);
@@ -73,8 +73,8 @@ static void m68k_dma_free(struct device *dev, size_t size, void *addr,
 
 #include <asm/cacheflush.h>
 
-static void *m68k_dma_alloc(struct device *dev, size_t size,
-               dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
+void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
+               gfp_t gfp, unsigned long attrs)
 {
        void *ret;
 
@@ -89,7 +89,7 @@ static void *m68k_dma_alloc(struct device *dev, size_t size,
        return ret;
 }
 
-static void m68k_dma_free(struct device *dev, size_t size, void *vaddr,
+void arch_dma_free(struct device *dev, size_t size, void *vaddr,
                dma_addr_t dma_handle, unsigned long attrs)
 {
        free_pages((unsigned long)vaddr, get_order(size));
@@ -97,8 +97,8 @@ static void m68k_dma_free(struct device *dev, size_t size, void *vaddr,
 
 #endif /* CONFIG_MMU && !CONFIG_COLDFIRE */
 
-static void m68k_dma_sync_single_for_device(struct device *dev,
-               dma_addr_t handle, size_t size, enum dma_data_direction dir)
+void arch_sync_dma_for_device(struct device *dev, phys_addr_t handle,
+               size_t size, enum dma_data_direction dir)
 {
        switch (dir) {
        case DMA_BIDIRECTIONAL:
@@ -115,58 +115,6 @@ static void m68k_dma_sync_single_for_device(struct device *dev,
        }
 }
 
-static void m68k_dma_sync_sg_for_device(struct device *dev,
-               struct scatterlist *sglist, int nents, enum dma_data_direction dir)
-{
-       int i;
-       struct scatterlist *sg;
-
-       for_each_sg(sglist, sg, nents, i) {
-               dma_sync_single_for_device(dev, sg->dma_address, sg->length,
-                                          dir);
-       }
-}
-
-static dma_addr_t m68k_dma_map_page(struct device *dev, struct page *page,
-               unsigned long offset, size_t size, enum dma_data_direction dir,
-               unsigned long attrs)
-{
-       dma_addr_t handle = page_to_phys(page) + offset;
-
-       if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
-               dma_sync_single_for_device(dev, handle, size, dir);
-
-       return handle;
-}
-
-static int m68k_dma_map_sg(struct device *dev, struct scatterlist *sglist,
-               int nents, enum dma_data_direction dir, unsigned long attrs)
-{
-       int i;
-       struct scatterlist *sg;
-
-       for_each_sg(sglist, sg, nents, i) {
-               sg->dma_address = sg_phys(sg);
-
-               if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
-                       continue;
-
-               dma_sync_single_for_device(dev, sg->dma_address, sg->length,
-                                          dir);
-       }
-       return nents;
-}
-
-const struct dma_map_ops m68k_dma_ops = {
-       .alloc                  = m68k_dma_alloc,
-       .free                   = m68k_dma_free,
-       .map_page               = m68k_dma_map_page,
-       .map_sg                 = m68k_dma_map_sg,
-       .sync_single_for_device = m68k_dma_sync_single_for_device,
-       .sync_sg_for_device     = m68k_dma_sync_sg_for_device,
-};
-EXPORT_SYMBOL(m68k_dma_ops);
-
 void arch_setup_pdev_archdata(struct platform_device *pdev)
 {
        if (pdev->dev.coherent_dma_mask == DMA_MASK_NONE &&
index f35e3eb..5d3596c 100644 (file)
@@ -21,6 +21,7 @@
 #include <linux/string.h>
 #include <linux/init.h>
 #include <linux/bootmem.h>
+#include <linux/memblock.h>
 #include <linux/proc_fs.h>
 #include <linux/seq_file.h>
 #include <linux/module.h>
@@ -88,7 +89,6 @@ void (*mach_get_hardware_list) (struct seq_file *m);
 /* machine dependent timer functions */
 int (*mach_hwclk) (int, struct rtc_time*);
 EXPORT_SYMBOL(mach_hwclk);
-int (*mach_set_clock_mmss) (unsigned long);
 unsigned int (*mach_get_ss)(void);
 int (*mach_get_rtc_pll)(struct rtc_pll_info *);
 int (*mach_set_rtc_pll)(struct rtc_pll_info *);
@@ -165,6 +165,8 @@ static void __init m68k_parse_bootinfo(const struct bi_record *record)
                                        be32_to_cpu(m->addr);
                                m68k_memory[m68k_num_memory].size =
                                        be32_to_cpu(m->size);
+                               memblock_add(m68k_memory[m68k_num_memory].addr,
+                                            m68k_memory[m68k_num_memory].size);
                                m68k_num_memory++;
                        } else
                                pr_warn("%s: too many memory chunks\n",
@@ -224,10 +226,6 @@ static void __init m68k_parse_bootinfo(const struct bi_record *record)
 
 void __init setup_arch(char **cmdline_p)
 {
-#ifndef CONFIG_SUN3
-       int i;
-#endif
-
        /* The bootinfo is located right after the kernel */
        if (!CPU_IS_COLDFIRE)
                m68k_parse_bootinfo((const struct bi_record *)_end);
@@ -356,14 +354,9 @@ void __init setup_arch(char **cmdline_p)
 #endif
 
 #ifndef CONFIG_SUN3
-       for (i = 1; i < m68k_num_memory; i++)
-               free_bootmem_node(NODE_DATA(i), m68k_memory[i].addr,
-                                 m68k_memory[i].size);
 #ifdef CONFIG_BLK_DEV_INITRD
        if (m68k_ramdisk.size) {
-               reserve_bootmem_node(__virt_to_node(phys_to_virt(m68k_ramdisk.addr)),
-                                    m68k_ramdisk.addr, m68k_ramdisk.size,
-                                    BOOTMEM_DEFAULT);
+               memblock_reserve(m68k_ramdisk.addr, m68k_ramdisk.size);
                initrd_start = (unsigned long)phys_to_virt(m68k_ramdisk.addr);
                initrd_end = initrd_start + m68k_ramdisk.size;
                pr_info("initrd: %08lx - %08lx\n", initrd_start, initrd_end);
index a98af10..cfd5475 100644 (file)
@@ -28,6 +28,7 @@
 #include <linux/errno.h>
 #include <linux/string.h>
 #include <linux/bootmem.h>
+#include <linux/memblock.h>
 #include <linux/seq_file.h>
 #include <linux/init.h>
 #include <linux/initrd.h>
@@ -51,7 +52,6 @@ char __initdata command_line[COMMAND_LINE_SIZE];
 
 /* machine dependent timer functions */
 void (*mach_sched_init)(irq_handler_t handler) __initdata = NULL;
-int (*mach_set_clock_mmss)(unsigned long);
 int (*mach_hwclk) (int, struct rtc_time*);
 
 /* machine dependent reboot functions */
@@ -86,8 +86,6 @@ void (*mach_power_off)(void);
 
 void __init setup_arch(char **cmdline_p)
 {
-       int bootmap_size;
-
        memory_start = PAGE_ALIGN(_ramstart);
        memory_end = _ramend;
 
@@ -142,6 +140,8 @@ void __init setup_arch(char **cmdline_p)
        pr_debug("MEMORY -> ROMFS=0x%p-0x%06lx MEM=0x%06lx-0x%06lx\n ",
                 __bss_stop, memory_start, memory_start, memory_end);
 
+       memblock_add(memory_start, memory_end - memory_start);
+
        /* Keep a copy of command line */
        *cmdline_p = &command_line[0];
        memcpy(boot_command_line, command_line, COMMAND_LINE_SIZE);
@@ -158,23 +158,10 @@ void __init setup_arch(char **cmdline_p)
        min_low_pfn = PFN_DOWN(memory_start);
        max_pfn = max_low_pfn = PFN_DOWN(memory_end);
 
-       bootmap_size = init_bootmem_node(
-                       NODE_DATA(0),
-                       min_low_pfn,            /* map goes here */
-                       PFN_DOWN(PAGE_OFFSET),
-                       max_pfn);
-       /*
-        * Free the usable memory, we have to make sure we do not free
-        * the bootmem bitmap so we then reserve it after freeing it :-)
-        */
-       free_bootmem(memory_start, memory_end - memory_start);
-       reserve_bootmem(memory_start, bootmap_size, BOOTMEM_DEFAULT);
-
 #if defined(CONFIG_UBOOT) && defined(CONFIG_BLK_DEV_INITRD)
        if ((initrd_start > 0) && (initrd_start < initrd_end) &&
                        (initrd_end < memory_end))
-               reserve_bootmem(initrd_start, initrd_end - initrd_start,
-                                BOOTMEM_DEFAULT);
+               memblock_reserve(initrd_start, initrd_end - initrd_start);
 #endif /* if defined(CONFIG_BLK_DEV_INITRD) */
 
        /*
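
The bootmem-to-memblock conversions in these m68k hunks all follow the same two-step pattern: register each physical RAM range with memblock_add(), then carve out everything already in use with memblock_reserve(). A hedged sketch of the calling pattern (the memblock calls are the real API; the addresses and function name are invented):

    #include <linux/memblock.h>

    static void __init example_mem_init(void)
    {
            /* Make the RAM range known to the allocator... */
            memblock_add(0x00000000, 64 * 1024 * 1024);     /* 64 MiB */

            /* ...then reserve what is already occupied, e.g. the kernel
             * image or an initrd, so early allocations avoid it. */
            memblock_reserve(0x00100000, 8 * 1024 * 1024);
    }
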
index e522307..b02d725 100644 (file)
@@ -57,7 +57,6 @@ static unsigned long mac_orig_videoaddr;
 /* Mac specific timer functions */
 extern u32 mac_gettimeoffset(void);
 extern int mac_hwclk(int, struct rtc_time *);
-extern int mac_set_clock_mmss(unsigned long);
 extern void iop_preinit(void);
 extern void iop_init(void);
 extern void via_init(void);
@@ -158,7 +157,6 @@ void __init config_mac(void)
        mach_get_model = mac_get_model;
        arch_gettimeoffset = mac_gettimeoffset;
        mach_hwclk = mac_hwclk;
-       mach_set_clock_mmss = mac_set_clock_mmss;
        mach_reset = mac_reset;
        mach_halt = mac_poweroff;
        mach_power_off = mac_poweroff;
@@ -709,7 +707,7 @@ static struct mac_model mac_data_table[] = {
                .name           = "PowerBook 520",
                .adb_type       = MAC_ADB_PB2,
                .via_type       = MAC_VIA_QUADRA,
-               .scsi_type      = MAC_SCSI_LATE,
+               .scsi_type      = MAC_SCSI_OLD,
                .scc_type       = MAC_SCC_QUADRA,
                .ether_type     = MAC_ETHER_SONIC,
                .floppy_type    = MAC_FLOPPY_SWIM_ADDR2,
@@ -943,18 +941,6 @@ static const struct resource mac_scsi_old_rsrc[] __initconst = {
        },
 };
 
-static const struct resource mac_scsi_late_rsrc[] __initconst = {
-       {
-               .flags = IORESOURCE_IRQ,
-               .start = IRQ_MAC_SCSI,
-               .end   = IRQ_MAC_SCSI,
-       }, {
-               .flags = IORESOURCE_MEM,
-               .start = 0x50010000,
-               .end   = 0x50011FFF,
-       },
-};
-
 static const struct resource mac_scsi_ccl_rsrc[] __initconst = {
        {
                .flags = IORESOURCE_IRQ,
@@ -1064,11 +1050,6 @@ int __init mac_platform_init(void)
                platform_device_register_simple("mac_scsi", 0,
                        mac_scsi_old_rsrc, ARRAY_SIZE(mac_scsi_old_rsrc));
                break;
-       case MAC_SCSI_LATE:
-               /* XXX PDMA support for PowerBook 500 series needs testing */
-               platform_device_register_simple("mac_scsi", 0,
-                       mac_scsi_late_rsrc, ARRAY_SIZE(mac_scsi_late_rsrc));
-               break;
        case MAC_SCSI_LC:
                /* Addresses from Mac LC data in Designing Cards & Drivers 3ed.
                 * Also from the Developer Notes for Classic II, LC III,
index c680543..19e9d8e 100644 (file)
 
 #include <asm/machdep.h>
 
-/* Offset between Unix time (1970-based) and Mac time (1904-based) */
+/*
+ * Offset between Unix time (1970-based) and Mac time (1904-based). Cuda and PMU
+ * times wrap in 2040. If we need to handle later times, the read_time functions
+ * need to be changed to interpret wrapped times as post-2040.
+ */
 
 #define RTC_OFFSET 2082844800
 
 static void (*rom_reset)(void);
 
 #ifdef CONFIG_ADB_CUDA
-static long cuda_read_time(void)
+static time64_t cuda_read_time(void)
 {
        struct adb_request req;
-       long time;
+       time64_t time;
 
        if (cuda_request(&req, NULL, 2, CUDA_PACKET, CUDA_GET_TIME) < 0)
                return 0;
        while (!req.complete)
                cuda_poll();
 
-       time = (req.reply[3] << 24) | (req.reply[4] << 16) |
-              (req.reply[5] << 8) | req.reply[6];
+       time = (u32)((req.reply[3] << 24) | (req.reply[4] << 16) |
+                    (req.reply[5] << 8) | req.reply[6]);
+
        return time - RTC_OFFSET;
 }
 
-static void cuda_write_time(long data)
+static void cuda_write_time(time64_t time)
 {
        struct adb_request req;
+       u32 data = lower_32_bits(time + RTC_OFFSET);
 
-       data += RTC_OFFSET;
        if (cuda_request(&req, NULL, 6, CUDA_PACKET, CUDA_SET_TIME,
                         (data >> 24) & 0xFF, (data >> 16) & 0xFF,
                         (data >> 8) & 0xFF, data & 0xFF) < 0)
@@ -86,26 +91,27 @@ static void cuda_write_pram(int offset, __u8 data)
 #endif /* CONFIG_ADB_CUDA */
 
 #ifdef CONFIG_ADB_PMU68K
-static long pmu_read_time(void)
+static time64_t pmu_read_time(void)
 {
        struct adb_request req;
-       long time;
+       time64_t time;
 
        if (pmu_request(&req, NULL, 1, PMU_READ_RTC) < 0)
                return 0;
        while (!req.complete)
                pmu_poll();
 
-       time = (req.reply[1] << 24) | (req.reply[2] << 16) |
-              (req.reply[3] << 8) | req.reply[4];
+       time = (u32)((req.reply[1] << 24) | (req.reply[2] << 16) |
+                    (req.reply[3] << 8) | req.reply[4]);
+
        return time - RTC_OFFSET;
 }
 
-static void pmu_write_time(long data)
+static void pmu_write_time(time64_t time)
 {
        struct adb_request req;
+       u32 data = lower_32_bits(time + RTC_OFFSET);
 
-       data += RTC_OFFSET;
        if (pmu_request(&req, NULL, 5, PMU_SET_RTC,
                        (data >> 24) & 0xFF, (data >> 16) & 0xFF,
                        (data >> 8) & 0xFF, data & 0xFF) < 0)
@@ -245,11 +251,11 @@ static void via_write_pram(int offset, __u8 data)
  * is basically any machine with Mac II-style ADB.
  */
 
-static long via_read_time(void)
+static time64_t via_read_time(void)
 {
        union {
                __u8 cdata[4];
-               long idata;
+               __u32 idata;
        } result, last_result;
        int count = 1;
 
@@ -270,7 +276,7 @@ static long via_read_time(void)
                via_pram_command(0x8D, &result.cdata[0]);
 
                if (result.idata == last_result.idata)
-                       return result.idata - RTC_OFFSET;
+                       return (time64_t)result.idata - RTC_OFFSET;
 
                if (++count > 10)
                        break;
@@ -278,8 +284,8 @@ static long via_read_time(void)
                last_result.idata = result.idata;
        }
 
-       pr_err("via_read_time: failed to read a stable value; got 0x%08lx then 0x%08lx\n",
-              last_result.idata, result.idata);
+       pr_err("%s: failed to read a stable value; got 0x%08x then 0x%08x\n",
+              __func__, last_result.idata, result.idata);
 
        return 0;
 }
@@ -291,11 +297,11 @@ static long via_read_time(void)
  * is basically any machine with Mac II-style ADB.
  */
 
-static void via_write_time(long time)
+static void via_write_time(time64_t time)
 {
        union {
                __u8 cdata[4];
-               long idata;
+               __u32 idata;
        } data;
        __u8 temp;
 
@@ -304,7 +310,7 @@ static void via_write_time(long time)
        temp = 0x55;
        via_pram_command(0x35, &temp);
 
-       data.idata = time + RTC_OFFSET;
+       data.idata = lower_32_bits(time + RTC_OFFSET);
        via_pram_command(0x01, &data.cdata[3]);
        via_pram_command(0x05, &data.cdata[2]);
        via_pram_command(0x09, &data.cdata[1]);
@@ -585,12 +591,15 @@ void mac_reset(void)
  * This function translates seconds since 1970 into a proper date.
  *
  * Algorithm cribbed from glibc2.1, __offtime().
+ *
+ * This is roughly the same as rtc_time64_to_tm(), which we should probably
+ * use here, but it's only available when CONFIG_RTC_LIB is enabled.
  */
 #define SECS_PER_MINUTE (60)
 #define SECS_PER_HOUR  (SECS_PER_MINUTE * 60)
 #define SECS_PER_DAY   (SECS_PER_HOUR * 24)
 
-static void unmktime(unsigned long time, long offset,
+static void unmktime(time64_t time, long offset,
                     int *yearp, int *monp, int *dayp,
                     int *hourp, int *minp, int *secp)
 {
@@ -602,11 +611,10 @@ static void unmktime(unsigned long time, long offset,
                /* Leap years.  */
                { 0, 31, 60, 91, 121, 152, 182, 213, 244, 274, 305, 335, 366 }
        };
-       long int days, rem, y, wday, yday;
+       int days, rem, y, wday, yday;
        const unsigned short int *ip;
 
-       days = time / SECS_PER_DAY;
-       rem = time % SECS_PER_DAY;
+       days = div_u64_rem(time, SECS_PER_DAY, &rem);
        rem += offset;
        while (rem < 0) {
                rem += SECS_PER_DAY;
@@ -657,7 +665,7 @@ static void unmktime(unsigned long time, long offset,
 
 int mac_hwclk(int op, struct rtc_time *t)
 {
-       unsigned long now;
+       time64_t now;
 
        if (!op) { /* read */
                switch (macintosh_config->adb_type) {
@@ -693,8 +701,8 @@ int mac_hwclk(int op, struct rtc_time *t)
                         __func__, t->tm_year + 1900, t->tm_mon + 1, t->tm_mday,
                         t->tm_hour, t->tm_min, t->tm_sec);
 
-               now = mktime(t->tm_year + 1900, t->tm_mon + 1, t->tm_mday,
-                            t->tm_hour, t->tm_min, t->tm_sec);
+               now = mktime64(t->tm_year + 1900, t->tm_mon + 1, t->tm_mday,
+                              t->tm_hour, t->tm_min, t->tm_sec);
 
                switch (macintosh_config->adb_type) {
                case MAC_ADB_IOP:
@@ -719,19 +727,3 @@ int mac_hwclk(int op, struct rtc_time *t)
        }
        return 0;
 }
-
-/*
- * Set minutes/seconds in the hardware clock
- */
-
-int mac_set_clock_mmss (unsigned long nowtime)
-{
-       struct rtc_time now;
-
-       mac_hwclk(0, &now);
-       now.tm_sec = nowtime % 60;
-       now.tm_min = (nowtime / 60) % 60;
-       mac_hwclk(1, &now);
-
-       return 0;
-}
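
On the RTC_OFFSET arithmetic above: the Mac epoch is 1904-01-01, so the offset to the Unix epoch is 66 years * 365 days + 17 leap days = 24107 days = 2082844800 seconds, and the 32-bit counter wraps 2^32 seconds (about 136 years) after 1904, i.e. in early 2040. A small userspace check (illustrative values):

    #include <stdio.h>
    #include <stdint.h>

    #define RTC_OFFSET 2082844800U  /* 1904-01-01 .. 1970-01-01 in seconds */

    int main(void)
    {
            uint32_t mac_time = 3600921600U;        /* raw 32-bit RTC value */
            int64_t unix_time = (int64_t)mac_time - RTC_OFFSET;

            printf("seconds since 1970: %lld\n", (long long)unix_time);
            /* 2^32 seconds / ~31556952 s per year ~= 136.1 years */
            printf("wraps %.1f years after 1904\n",
                   4294967296.0 / 31556952.0);
            return 0;
    }
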
index 8827b7f..38e2b27 100644 (file)
@@ -71,7 +71,6 @@ void __init m68k_setup_node(int node)
                pg_data_table[i] = pg_data_map + node;
        }
 #endif
-       pg_data_map[node].bdata = bootmem_node_data + node;
        node_set_online(node);
 }
 
index 2925d79..70dde04 100644 (file)
@@ -14,6 +14,7 @@
 #include <linux/init.h>
 #include <linux/string.h>
 #include <linux/bootmem.h>
+#include <linux/memblock.h>
 
 #include <asm/setup.h>
 #include <asm/page.h>
@@ -153,31 +154,31 @@ int cf_tlb_miss(struct pt_regs *regs, int write, int dtlb, int extension_word)
 
 void __init cf_bootmem_alloc(void)
 {
-       unsigned long start_pfn;
        unsigned long memstart;
 
        /* _rambase and _ramend will be naturally page aligned */
        m68k_memory[0].addr = _rambase;
        m68k_memory[0].size = _ramend - _rambase;
 
+       memblock_add(m68k_memory[0].addr, m68k_memory[0].size);
+
        /* compute total pages in system */
        num_pages = PFN_DOWN(_ramend - _rambase);
 
        /* page numbers */
        memstart = PAGE_ALIGN(_ramstart);
        min_low_pfn = PFN_DOWN(_rambase);
-       start_pfn = PFN_DOWN(memstart);
        max_pfn = max_low_pfn = PFN_DOWN(_ramend);
        high_memory = (void *)_ramend;
 
+       /* Reserve kernel text/data/bss */
+       memblock_reserve(_rambase, memstart - _rambase);
+
        m68k_virt_to_node_shift = fls(_ramend - 1) - 6;
        module_fixup(NULL, __start_fixup, __stop_fixup);
 
-       /* setup bootmem data */
+       /* setup node data */
        m68k_setup_node(0);
-       memstart += init_bootmem_node(NODE_DATA(0), start_pfn,
-               min_low_pfn, max_low_pfn);
-       free_bootmem_node(NODE_DATA(0), memstart, _ramend - memstart);
 }
 
 /*
index e490ecc..4e17ecb 100644 (file)
@@ -19,6 +19,7 @@
 #include <linux/types.h>
 #include <linux/init.h>
 #include <linux/bootmem.h>
+#include <linux/memblock.h>
 #include <linux/gfp.h>
 
 #include <asm/setup.h>
@@ -208,7 +209,7 @@ void __init paging_init(void)
 {
        unsigned long zones_size[MAX_NR_ZONES] = { 0, };
        unsigned long min_addr, max_addr;
-       unsigned long addr, size, end;
+       unsigned long addr;
        int i;
 
 #ifdef DEBUG
@@ -253,34 +254,20 @@ void __init paging_init(void)
        min_low_pfn = availmem >> PAGE_SHIFT;
        max_pfn = max_low_pfn = max_addr >> PAGE_SHIFT;
 
-       for (i = 0; i < m68k_num_memory; i++) {
-               addr = m68k_memory[i].addr;
-               end = addr + m68k_memory[i].size;
-               m68k_setup_node(i);
-               availmem = PAGE_ALIGN(availmem);
-               availmem += init_bootmem_node(NODE_DATA(i),
-                                             availmem >> PAGE_SHIFT,
-                                             addr >> PAGE_SHIFT,
-                                             end >> PAGE_SHIFT);
-       }
+       /* Reserve kernel text/data/bss and the memory allocated in head.S */
+       memblock_reserve(m68k_memory[0].addr, availmem - m68k_memory[0].addr);
 
        /*
         * Map the physical memory available into the kernel virtual
-        * address space. First initialize the bootmem allocator with
-        * the memory we already mapped, so map_node() has something
-        * to allocate.
+        * address space. Make sure memblock will not try to allocate
+        * pages beyond the memory we already mapped in head.S
         */
-       addr = m68k_memory[0].addr;
-       size = m68k_memory[0].size;
-       free_bootmem_node(NODE_DATA(0), availmem,
-                         min(m68k_init_mapped_size, size) - (availmem - addr));
-       map_node(0);
-       if (size > m68k_init_mapped_size)
-               free_bootmem_node(NODE_DATA(0), addr + m68k_init_mapped_size,
-                                 size - m68k_init_mapped_size);
-
-       for (i = 1; i < m68k_num_memory; i++)
+       memblock_set_bottom_up(true);
+
+       for (i = 0; i < m68k_num_memory; i++) {
+               m68k_setup_node(i);
                map_node(i);
+       }
 
        flush_tlb_all();
 
index f8a710f..adea549 100644 (file)
@@ -40,7 +40,6 @@ static void mvme147_get_model(char *model);
 extern void mvme147_sched_init(irq_handler_t handler);
 extern u32 mvme147_gettimeoffset(void);
 extern int mvme147_hwclk (int, struct rtc_time *);
-extern int mvme147_set_clock_mmss (unsigned long);
 extern void mvme147_reset (void);
 
 
@@ -92,7 +91,6 @@ void __init config_mvme147(void)
        mach_init_IRQ           = mvme147_init_IRQ;
        arch_gettimeoffset      = mvme147_gettimeoffset;
        mach_hwclk              = mvme147_hwclk;
-       mach_set_clock_mmss     = mvme147_set_clock_mmss;
        mach_reset              = mvme147_reset;
        mach_get_model          = mvme147_get_model;
 
@@ -164,8 +162,3 @@ int mvme147_hwclk(int op, struct rtc_time *t)
        }
        return 0;
 }
-
-int mvme147_set_clock_mmss (unsigned long nowtime)
-{
-       return 0;
-}
index 4ffd9ef..6ee36a5 100644 (file)
@@ -46,7 +46,6 @@ static void mvme16x_get_model(char *model);
 extern void mvme16x_sched_init(irq_handler_t handler);
 extern u32 mvme16x_gettimeoffset(void);
 extern int mvme16x_hwclk (int, struct rtc_time *);
-extern int mvme16x_set_clock_mmss (unsigned long);
 extern void mvme16x_reset (void);
 
 int bcd2int (unsigned char b);
@@ -280,7 +279,6 @@ void __init config_mvme16x(void)
     mach_init_IRQ        = mvme16x_init_IRQ;
     arch_gettimeoffset   = mvme16x_gettimeoffset;
     mach_hwclk           = mvme16x_hwclk;
-    mach_set_clock_mmss         = mvme16x_set_clock_mmss;
     mach_reset          = mvme16x_reset;
     mach_get_model       = mvme16x_get_model;
     mach_get_hardware_list = mvme16x_get_hardware_list;
@@ -411,9 +409,3 @@ int mvme16x_hwclk(int op, struct rtc_time *t)
        }
        return 0;
 }
-
-int mvme16x_set_clock_mmss (unsigned long nowtime)
-{
-       return 0;
-}
-
index 71c0867..96810d9 100644 (file)
@@ -43,7 +43,6 @@ extern void q40_sched_init(irq_handler_t handler);
 static u32 q40_gettimeoffset(void);
 static int q40_hwclk(int, struct rtc_time *);
 static unsigned int q40_get_ss(void);
-static int q40_set_clock_mmss(unsigned long);
 static int q40_get_rtc_pll(struct rtc_pll_info *pll);
 static int q40_set_rtc_pll(struct rtc_pll_info *pll);
 
@@ -175,7 +174,6 @@ void __init config_q40(void)
        mach_get_ss = q40_get_ss;
        mach_get_rtc_pll = q40_get_rtc_pll;
        mach_set_rtc_pll = q40_set_rtc_pll;
-       mach_set_clock_mmss = q40_set_clock_mmss;
 
        mach_reset = q40_reset;
        mach_get_model = q40_get_model;
@@ -267,34 +265,6 @@ static unsigned int q40_get_ss(void)
        return bcd2bin(Q40_RTC_SECS);
 }
 
-/*
- * Set the minutes and seconds from seconds value 'nowtime'.  Fail if
- * clock is out by > 30 minutes.  Logic lifted from atari code.
- */
-
-static int q40_set_clock_mmss(unsigned long nowtime)
-{
-       int retval = 0;
-       short real_seconds = nowtime % 60, real_minutes = (nowtime / 60) % 60;
-
-       int rtc_minutes;
-
-       rtc_minutes = bcd2bin(Q40_RTC_MINS);
-
-       if ((rtc_minutes < real_minutes ?
-            real_minutes - rtc_minutes :
-            rtc_minutes - real_minutes) < 30) {
-               Q40_RTC_CTRL |= Q40_RTC_WRITE;
-               Q40_RTC_MINS = bin2bcd(real_minutes);
-               Q40_RTC_SECS = bin2bcd(real_seconds);
-               Q40_RTC_CTRL &= ~(Q40_RTC_WRITE);
-       } else
-               retval = -1;
-
-       return retval;
-}
-
-
 /* get and set PLL calibration of RTC clock */
 #define Q40_RTC_PLL_MASK ((1<<5)-1)
 #define Q40_RTC_PLL_SIGN (1<<5)
index 1d28d38..79a2bb8 100644 (file)
@@ -123,10 +123,6 @@ static void __init sun3_bootmem_alloc(unsigned long memory_start,
        availmem = memory_start;
 
        m68k_setup_node(0);
-       availmem += init_bootmem(start_page, num_pages);
-       availmem = (availmem + (PAGE_SIZE-1)) & PAGE_MASK;
-
-       free_bootmem(__pa(availmem), memory_end - (availmem));
 }
 
 
index 10a405d..c782b10 100644 (file)
@@ -58,7 +58,7 @@ EXPORT_SYMBOL_GPL(ath79_ddr_ctrl_init);
 
 void ath79_ddr_wb_flush(u32 reg)
 {
-       void __iomem *flush_reg = ath79_ddr_wb_flush_base + reg;
+       void __iomem *flush_reg = ath79_ddr_wb_flush_base + (reg * 4);
 
        /* Flush the DDR write buffer. */
        __raw_writel(0x1, flush_reg);
index 8c9cbf1..6054d49 100644 (file)
@@ -212,12 +212,6 @@ static int __init bcm47xx_cpu_fixes(void)
                 */
                if (bcm47xx_bus.bcma.bus.chipinfo.id == BCMA_CHIP_ID_BCM4706)
                        cpu_wait = NULL;
-
-               /*
-                * BCM47XX Erratum "R10: PCIe Transactions Periodically Fail"
-                * Enable ExternalSync for sync instruction to take effect
-                */
-               set_c0_config7(MIPS_CONF7_ES);
                break;
 #endif
        }
index 0bc2708..ae461d9 100644 (file)
 #define MIPS_CONF7_WII         (_ULCAST_(1) << 31)
 
 #define MIPS_CONF7_RPS         (_ULCAST_(1) << 2)
-/* ExternalSync */
-#define MIPS_CONF7_ES          (_ULCAST_(1) << 8)
 
 #define MIPS_CONF7_IAR         (_ULCAST_(1) << 10)
 #define MIPS_CONF7_AR          (_ULCAST_(1) << 16)
@@ -2767,7 +2765,6 @@ __BUILD_SET_C0(status)
 __BUILD_SET_C0(cause)
 __BUILD_SET_C0(config)
 __BUILD_SET_C0(config5)
-__BUILD_SET_C0(config7)
 __BUILD_SET_C0(intcontrol)
 __BUILD_SET_C0(intctl)
 __BUILD_SET_C0(srsmap)
index 9632436..c2e94cf 100644 (file)
@@ -54,5 +54,5 @@ void pci_resource_to_user(const struct pci_dev *dev, int bar,
        phys_addr_t size = resource_size(rsrc);
 
        *start = fixup_bigphys_addr(rsrc->start, size);
-       *end = rsrc->start + size;
+       *end = rsrc->start + size - 1;
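
The one-character fix above is the classic inclusive-end off-by-one: a resource spanning size bytes from start covers [start, start + size - 1], so the reported end must subtract one. For example:

    #include <stdio.h>

    int main(void)
    {
            unsigned long start = 0x1000, size = 0x100;

            /* last byte of [0x1000, 0x1100) is 0x10ff, not 0x1100 */
            printf("end = %#lx\n", start + size - 1);
            return 0;
    }
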
 }
index 9ecad05..dfb6a79 100644 (file)
@@ -27,7 +27,6 @@ config OPENRISC
        select GENERIC_STRNLEN_USER
        select GENERIC_SMP_IDLE_THREAD
        select MODULES_USE_ELF_RELA
-       select MULTI_IRQ_HANDLER
        select HAVE_DEBUG_STACKOVERFLOW
        select OR1K_PIC
        select CPU_NO_EFFICIENT_FFS if !OPENRISC_HAVE_INST_FF1
@@ -36,6 +35,7 @@ config OPENRISC
        select ARCH_USE_QUEUED_RWLOCKS
        select OMPIC if SMP
        select ARCH_WANT_FRAME_POINTERS
+       select GENERIC_IRQ_MULTI_HANDLER
 
 config CPU_BIG_ENDIAN
        def_bool y
@@ -69,9 +69,6 @@ config STACKTRACE_SUPPORT
 config LOCKDEP_SUPPORT
        def_bool  y
 
-config MULTI_IRQ_HANDLER
-       def_bool y
-
 source "init/Kconfig"
 
 source "kernel/Kconfig.freezer"
index d9eee0a..eb612b1 100644 (file)
@@ -24,6 +24,4 @@
 
 #define NO_IRQ         (-1)
 
-extern void set_handle_irq(void (*handle_irq)(struct pt_regs *));
-
 #endif /* __ASM_OPENRISC_IRQ_H__ */
index 35e478a..5f9445e 100644 (file)
@@ -41,13 +41,6 @@ void __init init_IRQ(void)
        irqchip_init();
 }
 
-static void (*handle_arch_irq)(struct pt_regs *);
-
-void __init set_handle_irq(void (*handle_irq)(struct pt_regs *))
-{
-       handle_arch_irq = handle_irq;
-}
-
 void __irq_entry do_IRQ(struct pt_regs *regs)
 {
        handle_arch_irq(regs);
index 17526be..e7705dd 100644 (file)
@@ -11,7 +11,6 @@ config PARISC
        select ARCH_HAS_ELF_RANDOMIZE
        select ARCH_HAS_STRICT_KERNEL_RWX
        select ARCH_HAS_UBSAN_SANITIZE_ALL
-       select ARCH_WANTS_UBSAN_NO_NULL
        select ARCH_SUPPORTS_MEMORY_FAILURE
        select RTC_CLASS
        select RTC_DRV_GENERIC
@@ -195,7 +194,7 @@ config PREFETCH
 
 config MLONGCALLS
        bool "Enable the -mlong-calls compiler option for big kernels"
-       def_bool y if (!MODULES)
+       default y
        depends on PA8X00
        help
          If you configure the kernel to include many drivers built-in instead
diff --git a/arch/parisc/include/asm/barrier.h b/arch/parisc/include/asm/barrier.h
new file mode 100644 (file)
index 0000000..dbaaca8
--- /dev/null
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_BARRIER_H
+#define __ASM_BARRIER_H
+
+#ifndef __ASSEMBLY__
+
+/* The synchronize caches instruction executes as a nop on systems in
+   which all memory references are performed in order. */
+#define synchronize_caches() __asm__ __volatile__ ("sync" : : : "memory")
+
+#if defined(CONFIG_SMP)
+#define mb()           do { synchronize_caches(); } while (0)
+#define rmb()          mb()
+#define wmb()          mb()
+#define dma_rmb()      mb()
+#define dma_wmb()      mb()
+#else
+#define mb()           barrier()
+#define rmb()          barrier()
+#define wmb()          barrier()
+#define dma_rmb()      barrier()
+#define dma_wmb()      barrier()
+#endif
+
+#define __smp_mb()     mb()
+#define __smp_rmb()    mb()
+#define __smp_wmb()    mb()
+
+#include <asm-generic/barrier.h>
+
+#endif /* !__ASSEMBLY__ */
+#endif /* __ASM_BARRIER_H */
index e95207c..1b4732e 100644 (file)
        /* Release pa_tlb_lock lock without reloading lock address. */
        .macro          tlb_unlock0     spc,tmp
 #ifdef CONFIG_SMP
+       or,COND(=)      %r0,\spc,%r0
+       sync
        or,COND(=)      %r0,\spc,%r0
        stw             \spc,0(\tmp)
 #endif
index 22e6374..97451e6 100644 (file)
@@ -353,6 +353,7 @@ ENDPROC_CFI(flush_data_cache_local)
        .macro  tlb_unlock      la,flags,tmp
 #ifdef CONFIG_SMP
        ldi             1,\tmp
+       sync
        stw             \tmp,0(\la)
        mtsm            \flags
 #endif
index e775f80..4886a6d 100644 (file)
@@ -633,6 +633,7 @@ cas_action:
        sub,<>  %r28, %r25, %r0
 2:     stw,ma  %r24, 0(%r26)
        /* Free lock */
+       sync
        stw,ma  %r20, 0(%sr2,%r20)
 #if ENABLE_LWS_DEBUG
        /* Clear thread register indicator */
@@ -647,6 +648,7 @@ cas_action:
 3:             
        /* Error occurred on load or store */
        /* Free lock */
+       sync
        stw     %r20, 0(%sr2,%r20)
 #if ENABLE_LWS_DEBUG
        stw     %r0, 4(%sr2,%r20)
@@ -848,6 +850,7 @@ cas2_action:
 
 cas2_end:
        /* Free lock */
+       sync
        stw,ma  %r20, 0(%sr2,%r20)
        /* Enable interrupts */
        ssm     PSW_SM_I, %r0
@@ -858,6 +861,7 @@ cas2_end:
 22:
        /* Error occurred on load or store */
        /* Free lock */
+       sync
        stw     %r20, 0(%sr2,%r20)
        ssm     PSW_SM_I, %r0
        ldo     1(%r0),%r28
index 79d570c..b2f89b6 100644 (file)
@@ -143,24 +143,33 @@ static inline void mm_context_remove_copro(struct mm_struct *mm)
 {
        int c;
 
-       c = atomic_dec_if_positive(&mm->context.copros);
-
-       /* Detect imbalance between add and remove */
-       WARN_ON(c < 0);
-
        /*
-        * Need to broadcast a global flush of the full mm before
-        * decrementing active_cpus count, as the next TLBI may be
-        * local and the nMMU and/or PSL need to be cleaned up.
-        * Should be rare enough so that it's acceptable.
+        * When removing the last copro, we need to broadcast a global
+        * flush of the full mm, as the next TLBI may be local and the
+        * nMMU and/or PSL need to be cleaned up.
+        *
+        * Both the 'copros' and 'active_cpus' counts are looked at in
+        * flush_all_mm() to determine the scope (local/global) of the
+        * TLBIs, so we need to flush first before decrementing
+        * 'copros'. If this API is used by several callers for the
+        * same context, it can lead to over-flushing. It's hopefully
+        * not common enough to be a problem.
         *
         * Skip on hash, as we don't know how to do the proper flush
         * for the time being. Invalidations will remain global if
-        * used on hash.
+        * used on hash. Note that we can't drop 'copros' either, as
+        * it could make some invalidations local with no flush
+        * in-between.
         */
-       if (c == 0 && radix_enabled()) {
+       if (radix_enabled()) {
                flush_all_mm(mm);
-               dec_mm_active_cpus(mm);
+
+               c = atomic_dec_if_positive(&mm->context.copros);
+               /* Detect imbalance between add and remove */
+               WARN_ON(c < 0);
+
+               if (c == 0)
+                       dec_mm_active_cpus(mm);
        }
 }
 #else
index fe9733f..471aac3 100644 (file)
@@ -42,6 +42,8 @@
 #include <asm/ppc-pci.h>
 #include <asm/eeh.h>
 
+#include "../../../drivers/pci/pci.h"
+
 /* hose_spinlock protects accesses to the phb_bitmap. */
 static DEFINE_SPINLOCK(hose_spinlock);
 LIST_HEAD(hose_list);
@@ -1014,7 +1016,7 @@ void pcibios_setup_bus_devices(struct pci_bus *bus)
                /* Cardbus can call us to add new devices to a bus, so ignore
                 * those who are already fully discovered
                 */
-               if (dev->is_added)
+               if (pci_dev_is_added(dev))
                        continue;
 
                pcibios_setup_device(dev);
index 380cbf9..c0a9bcd 100644 (file)
@@ -286,6 +286,7 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image,
                u64 imm64;
                u8 *func;
                u32 true_cond;
+               u32 tmp_idx;
 
                /*
                 * addrs[] maps a BPF bytecode address into a real offset from
@@ -637,11 +638,7 @@ emit_clear:
                case BPF_STX | BPF_XADD | BPF_W:
                        /* Get EA into TMP_REG_1 */
                        PPC_ADDI(b2p[TMP_REG_1], dst_reg, off);
-                       /* error if EA is not word-aligned */
-                       PPC_ANDI(b2p[TMP_REG_2], b2p[TMP_REG_1], 0x03);
-                       PPC_BCC_SHORT(COND_EQ, (ctx->idx * 4) + 12);
-                       PPC_LI(b2p[BPF_REG_0], 0);
-                       PPC_JMP(exit_addr);
+                       tmp_idx = ctx->idx * 4;
                        /* load value from memory into TMP_REG_2 */
                        PPC_BPF_LWARX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1], 0);
                        /* add value from src_reg into this */
@@ -649,32 +646,16 @@ emit_clear:
                        /* store result back */
                        PPC_BPF_STWCX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1]);
                        /* we're done if this succeeded */
-                       PPC_BCC_SHORT(COND_EQ, (ctx->idx * 4) + (7*4));
-                       /* otherwise, let's try once more */
-                       PPC_BPF_LWARX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1], 0);
-                       PPC_ADD(b2p[TMP_REG_2], b2p[TMP_REG_2], src_reg);
-                       PPC_BPF_STWCX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1]);
-                       /* exit if the store was not successful */
-                       PPC_LI(b2p[BPF_REG_0], 0);
-                       PPC_BCC(COND_NE, exit_addr);
+                       PPC_BCC_SHORT(COND_NE, tmp_idx);
                        break;
                /* *(u64 *)(dst + off) += src */
                case BPF_STX | BPF_XADD | BPF_DW:
                        PPC_ADDI(b2p[TMP_REG_1], dst_reg, off);
-                       /* error if EA is not doubleword-aligned */
-                       PPC_ANDI(b2p[TMP_REG_2], b2p[TMP_REG_1], 0x07);
-                       PPC_BCC_SHORT(COND_EQ, (ctx->idx * 4) + (3*4));
-                       PPC_LI(b2p[BPF_REG_0], 0);
-                       PPC_JMP(exit_addr);
-                       PPC_BPF_LDARX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1], 0);
-                       PPC_ADD(b2p[TMP_REG_2], b2p[TMP_REG_2], src_reg);
-                       PPC_BPF_STDCX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1]);
-                       PPC_BCC_SHORT(COND_EQ, (ctx->idx * 4) + (7*4));
+                       tmp_idx = ctx->idx * 4;
                        PPC_BPF_LDARX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1], 0);
                        PPC_ADD(b2p[TMP_REG_2], b2p[TMP_REG_2], src_reg);
                        PPC_BPF_STDCX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1]);
-                       PPC_LI(b2p[BPF_REG_0], 0);
-                       PPC_BCC(COND_NE, exit_addr);
+                       PPC_BCC_SHORT(COND_NE, tmp_idx);
                        break;
 
                /*
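
The rewritten BPF_XADD sequences above drop the separate alignment error path and simply branch back to tmp_idx whenever the store-conditional fails, retrying until the larx/stcx. pair succeeds. A userspace analog of that retry loop using GCC atomic builtins (illustrative only, not the JIT output):

    #include <stdint.h>
    #include <stdio.h>

    static void xadd32(uint32_t *addr, uint32_t val)
    {
            uint32_t old = __atomic_load_n(addr, __ATOMIC_RELAXED);

            /* A failed CAS loops back, like the JIT's BCC COND_NE to
             * tmp_idx after an unsuccessful STWCX. */
            while (!__atomic_compare_exchange_n(addr, &old, old + val,
                                                0, __ATOMIC_RELAXED,
                                                __ATOMIC_RELAXED))
                    ;
    }

    int main(void)
    {
            uint32_t v = 40;
            xadd32(&v, 2);
            printf("%u\n", v);      /* 42 */
            return 0;
    }
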
index 5bd0eb6..70b2e1e 100644 (file)
@@ -46,6 +46,7 @@
 
 #include "powernv.h"
 #include "pci.h"
+#include "../../../../drivers/pci/pci.h"
 
 #define PNV_IODA1_M64_NUM      16      /* Number of M64 BARs   */
 #define PNV_IODA1_M64_SEGS     8       /* Segments per M64 BAR */
@@ -3138,7 +3139,7 @@ static void pnv_pci_ioda_fixup_iov_resources(struct pci_dev *pdev)
        struct pci_dn *pdn;
        int mul, total_vfs;
 
-       if (!pdev->is_physfn || pdev->is_added)
+       if (!pdev->is_physfn || pci_dev_is_added(pdev))
                return;
 
        pdn = pci_get_pdn(pdev);
index 139f0af..8a4868a 100644 (file)
@@ -71,6 +71,7 @@
 #include <asm/security_features.h>
 
 #include "pseries.h"
+#include "../../../../drivers/pci/pci.h"
 
 int CMO_PrPSP = -1;
 int CMO_SecPSP = -1;
@@ -664,7 +665,7 @@ static void pseries_pci_fixup_iov_resources(struct pci_dev *pdev)
        const int *indexes;
        struct device_node *dn = pci_device_to_OF_node(pdev);
 
-       if (!pdev->is_physfn || pdev->is_added)
+       if (!pdev->is_physfn || pci_dev_is_added(pdev))
                return;
        /* Firmware must support open sriov, otherwise don't configure */
        indexes = of_get_property(dn, "ibm,open-sriov-vf-bar-info", NULL);
index e44bb2b..4fe5b2a 100644 (file)
@@ -106,7 +106,6 @@ config S390
        select ARCH_USE_BUILTIN_BSWAP
        select ARCH_USE_CMPXCHG_LOCKREF
        select ARCH_WANTS_DYNAMIC_TASK_STRUCT
-       select ARCH_WANTS_UBSAN_NO_NULL
        select ARCH_WANT_IPC_PARSE_VERSION
        select BUILDTIME_EXTABLE_SORT
        select CLONE_BACKWARDS2
@@ -140,7 +139,7 @@ config S390
        select HAVE_FUNCTION_GRAPH_TRACER
        select HAVE_FUNCTION_TRACER
        select HAVE_FUTEX_CMPXCHG if FUTEX
-       select HAVE_GCC_PLUGINS
+       select HAVE_GCC_PLUGINS if BROKEN
        select HAVE_KERNEL_BZIP2
        select HAVE_KERNEL_GZIP
        select HAVE_KERNEL_LZ4
index ac67828..410b263 100644 (file)
@@ -13,6 +13,7 @@ generic-y += local64.h
 generic-y += mcs_spinlock.h
 generic-y += mm-arch-hooks.h
 generic-y += module.h
+generic-y += msi.h
 generic-y += preempt.h
 generic-y += rwsem.h
 generic-y += serial.h
diff --git a/arch/sparc/include/asm/msi.h b/arch/sparc/include/asm/msi.h
deleted file mode 100644 (file)
index 3c17c10..0000000
+++ /dev/null
@@ -1,32 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * msi.h:  Defines specific to the MBus - Sbus - Interface.
- *
- * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
- * Copyright (C) 1996 Eddie C. Dost   (ecd@skynet.be)
- */
-
-#ifndef _SPARC_MSI_H
-#define _SPARC_MSI_H
-
-/*
- * Locations of MSI Registers.
- */
-#define MSI_MBUS_ARBEN 0xe0001008      /* MBus Arbiter Enable register */
-
-/*
- * Useful bits in the MSI Registers.
- */
-#define MSI_ASYNC_MODE  0x80000000     /* Operate the MSI asynchronously */
-
-
-static inline void msi_set_sync(void)
-{
-       __asm__ __volatile__ ("lda [%0] %1, %%g3\n\t"
-                             "andn %%g3, %2, %%g3\n\t"
-                             "sta %%g3, [%0] %1\n\t" : :
-                             "r" (MSI_MBUS_ARBEN),
-                             "i" (ASI_M_CTL), "r" (MSI_ASYNC_MODE) : "g3");
-}
-
-#endif /* !(_SPARC_MSI_H) */
index 2ef8cfa..f0eba72 100644 (file)
@@ -814,7 +814,7 @@ static void __init get_tick_patch(void)
        }
 }
 
-static void init_tick_ops(struct sparc64_tick_ops *ops)
+static void __init init_tick_ops(struct sparc64_tick_ops *ops)
 {
        unsigned long freq, quotient, tick;
 
index 1d70c3f..be9cb00 100644 (file)
@@ -37,7 +37,6 @@
 #include <asm/mbus.h>
 #include <asm/page.h>
 #include <asm/asi.h>
-#include <asm/msi.h>
 #include <asm/smp.h>
 #include <asm/io.h>
 
@@ -116,6 +115,25 @@ static inline void srmmu_ctxd_set(ctxd_t *ctxp, pgd_t *pgdp)
        set_pte((pte_t *)ctxp, pte);
 }
 
+/*
+ * Locations of MSI Registers.
+ */
+#define MSI_MBUS_ARBEN 0xe0001008      /* MBus Arbiter Enable register */
+
+/*
+ * Useful bits in the MSI Registers.
+ */
+#define MSI_ASYNC_MODE  0x80000000     /* Operate the MSI asynchronously */
+
+static void msi_set_sync(void)
+{
+       __asm__ __volatile__ ("lda [%0] %1, %%g3\n\t"
+                             "andn %%g3, %2, %%g3\n\t"
+                             "sta %%g3, [%0] %1\n\t" : :
+                             "r" (MSI_MBUS_ARBEN),
+                             "i" (ASI_M_CTL), "r" (MSI_ASYNC_MODE) : "g3");
+}
+
 void pmd_set(pmd_t *pmdp, pte_t *ptep)
 {
        unsigned long ptp;      /* Physical address, shifted right by 4 */
index fa42f89..169c2fe 100644 (file)
@@ -106,9 +106,13 @@ define cmd_check_data_rel
        done
 endef
 
+# We need to run two commands under "if_changed", so merge them into a
+# single invocation.
+quiet_cmd_check-and-link-vmlinux = LD      $@
+      cmd_check-and-link-vmlinux = $(cmd_check_data_rel); $(cmd_ld)
+
 $(obj)/vmlinux: $(vmlinux-objs-y) FORCE
-       $(call if_changed,check_data_rel)
-       $(call if_changed,ld)
+       $(call if_changed,check-and-link-vmlinux)
 
 OBJCOPYFLAGS_vmlinux.bin :=  -R .comment -S
 $(obj)/vmlinux.bin: vmlinux FORCE
index e98522e..1458b17 100644 (file)
@@ -34,74 +34,13 @@ static void setup_boot_services##bits(struct efi_config *c)         \
                                                                        \
        table = (typeof(table))sys_table;                               \
                                                                        \
-       c->runtime_services = table->runtime;                           \
-       c->boot_services = table->boottime;                             \
-       c->text_output = table->con_out;                                \
+       c->runtime_services     = table->runtime;                       \
+       c->boot_services        = table->boottime;                      \
+       c->text_output          = table->con_out;                       \
 }
 BOOT_SERVICES(32);
 BOOT_SERVICES(64);
 
-static inline efi_status_t __open_volume32(void *__image, void **__fh)
-{
-       efi_file_io_interface_t *io;
-       efi_loaded_image_32_t *image = __image;
-       efi_file_handle_32_t *fh;
-       efi_guid_t fs_proto = EFI_FILE_SYSTEM_GUID;
-       efi_status_t status;
-       void *handle = (void *)(unsigned long)image->device_handle;
-       unsigned long func;
-
-       status = efi_call_early(handle_protocol, handle,
-                               &fs_proto, (void **)&io);
-       if (status != EFI_SUCCESS) {
-               efi_printk(sys_table, "Failed to handle fs_proto\n");
-               return status;
-       }
-
-       func = (unsigned long)io->open_volume;
-       status = efi_early->call(func, io, &fh);
-       if (status != EFI_SUCCESS)
-               efi_printk(sys_table, "Failed to open volume\n");
-
-       *__fh = fh;
-       return status;
-}
-
-static inline efi_status_t __open_volume64(void *__image, void **__fh)
-{
-       efi_file_io_interface_t *io;
-       efi_loaded_image_64_t *image = __image;
-       efi_file_handle_64_t *fh;
-       efi_guid_t fs_proto = EFI_FILE_SYSTEM_GUID;
-       efi_status_t status;
-       void *handle = (void *)(unsigned long)image->device_handle;
-       unsigned long func;
-
-       status = efi_call_early(handle_protocol, handle,
-                               &fs_proto, (void **)&io);
-       if (status != EFI_SUCCESS) {
-               efi_printk(sys_table, "Failed to handle fs_proto\n");
-               return status;
-       }
-
-       func = (unsigned long)io->open_volume;
-       status = efi_early->call(func, io, &fh);
-       if (status != EFI_SUCCESS)
-               efi_printk(sys_table, "Failed to open volume\n");
-
-       *__fh = fh;
-       return status;
-}
-
-efi_status_t
-efi_open_volume(efi_system_table_t *sys_table, void *__image, void **__fh)
-{
-       if (efi_early->is64)
-               return __open_volume64(__image, __fh);
-
-       return __open_volume32(__image, __fh);
-}
-
 void efi_char16_printk(efi_system_table_t *table, efi_char16_t *str)
 {
        efi_call_proto(efi_simple_text_output_protocol, output_string,
@@ -109,7 +48,7 @@ void efi_char16_printk(efi_system_table_t *table, efi_char16_t *str)
 }
 
 static efi_status_t
-__setup_efi_pci(efi_pci_io_protocol_t *pci, struct pci_setup_rom **__rom)
+preserve_pci_rom_image(efi_pci_io_protocol_t *pci, struct pci_setup_rom **__rom)
 {
        struct pci_setup_rom *rom = NULL;
        efi_status_t status;
@@ -134,16 +73,16 @@ __setup_efi_pci(efi_pci_io_protocol_t *pci, struct pci_setup_rom **__rom)
 
        status = efi_call_early(allocate_pool, EFI_LOADER_DATA, size, &rom);
        if (status != EFI_SUCCESS) {
-               efi_printk(sys_table, "Failed to alloc mem for rom\n");
+               efi_printk(sys_table, "Failed to allocate memory for 'rom'\n");
                return status;
        }
 
        memset(rom, 0, sizeof(*rom));
 
-       rom->data.type = SETUP_PCI;
-       rom->data.len = size - sizeof(struct setup_data);
-       rom->data.next = 0;
-       rom->pcilen = pci->romsize;
+       rom->data.type  = SETUP_PCI;
+       rom->data.len   = size - sizeof(struct setup_data);
+       rom->data.next  = 0;
+       rom->pcilen     = pci->romsize;
        *__rom = rom;
 
        status = efi_call_proto(efi_pci_io_protocol, pci.read, pci,
@@ -179,96 +118,6 @@ free_struct:
        return status;
 }
 
-static void
-setup_efi_pci32(struct boot_params *params, void **pci_handle,
-               unsigned long size)
-{
-       efi_pci_io_protocol_t *pci = NULL;
-       efi_guid_t pci_proto = EFI_PCI_IO_PROTOCOL_GUID;
-       u32 *handles = (u32 *)(unsigned long)pci_handle;
-       efi_status_t status;
-       unsigned long nr_pci;
-       struct setup_data *data;
-       int i;
-
-       data = (struct setup_data *)(unsigned long)params->hdr.setup_data;
-
-       while (data && data->next)
-               data = (struct setup_data *)(unsigned long)data->next;
-
-       nr_pci = size / sizeof(u32);
-       for (i = 0; i < nr_pci; i++) {
-               struct pci_setup_rom *rom = NULL;
-               u32 h = handles[i];
-
-               status = efi_call_early(handle_protocol, h,
-                                       &pci_proto, (void **)&pci);
-
-               if (status != EFI_SUCCESS)
-                       continue;
-
-               if (!pci)
-                       continue;
-
-               status = __setup_efi_pci(pci, &rom);
-               if (status != EFI_SUCCESS)
-                       continue;
-
-               if (data)
-                       data->next = (unsigned long)rom;
-               else
-                       params->hdr.setup_data = (unsigned long)rom;
-
-               data = (struct setup_data *)rom;
-
-       }
-}
-
-static void
-setup_efi_pci64(struct boot_params *params, void **pci_handle,
-               unsigned long size)
-{
-       efi_pci_io_protocol_t *pci = NULL;
-       efi_guid_t pci_proto = EFI_PCI_IO_PROTOCOL_GUID;
-       u64 *handles = (u64 *)(unsigned long)pci_handle;
-       efi_status_t status;
-       unsigned long nr_pci;
-       struct setup_data *data;
-       int i;
-
-       data = (struct setup_data *)(unsigned long)params->hdr.setup_data;
-
-       while (data && data->next)
-               data = (struct setup_data *)(unsigned long)data->next;
-
-       nr_pci = size / sizeof(u64);
-       for (i = 0; i < nr_pci; i++) {
-               struct pci_setup_rom *rom = NULL;
-               u64 h = handles[i];
-
-               status = efi_call_early(handle_protocol, h,
-                                       &pci_proto, (void **)&pci);
-
-               if (status != EFI_SUCCESS)
-                       continue;
-
-               if (!pci)
-                       continue;
-
-               status = __setup_efi_pci(pci, &rom);
-               if (status != EFI_SUCCESS)
-                       continue;
-
-               if (data)
-                       data->next = (unsigned long)rom;
-               else
-                       params->hdr.setup_data = (unsigned long)rom;
-
-               data = (struct setup_data *)rom;
-
-       }
-}
-
 /*
  * There's no way to return an informative status from this function,
  * because any analysis (and printing of error messages) needs to be
@@ -284,6 +133,9 @@ static void setup_efi_pci(struct boot_params *params)
        void **pci_handle = NULL;
        efi_guid_t pci_proto = EFI_PCI_IO_PROTOCOL_GUID;
        unsigned long size = 0;
+       unsigned long nr_pci;
+       struct setup_data *data;
+       int i;
 
        status = efi_call_early(locate_handle,
                                EFI_LOCATE_BY_PROTOCOL,
@@ -295,7 +147,7 @@ static void setup_efi_pci(struct boot_params *params)
                                        size, (void **)&pci_handle);
 
                if (status != EFI_SUCCESS) {
-                       efi_printk(sys_table, "Failed to alloc mem for pci_handle\n");
+                       efi_printk(sys_table, "Failed to allocate memory for 'pci_handle'\n");
                        return;
                }
 
@@ -307,10 +159,34 @@ static void setup_efi_pci(struct boot_params *params)
        if (status != EFI_SUCCESS)
                goto free_handle;
 
-       if (efi_early->is64)
-               setup_efi_pci64(params, pci_handle, size);
-       else
-               setup_efi_pci32(params, pci_handle, size);
+       data = (struct setup_data *)(unsigned long)params->hdr.setup_data;
+
+       while (data && data->next)
+               data = (struct setup_data *)(unsigned long)data->next;
+
+       nr_pci = size / (efi_is_64bit() ? sizeof(u64) : sizeof(u32));
+       for (i = 0; i < nr_pci; i++) {
+               efi_pci_io_protocol_t *pci = NULL;
+               struct pci_setup_rom *rom;
+
+               status = efi_call_early(handle_protocol,
+                                       efi_is_64bit() ? ((u64 *)pci_handle)[i]
+                                                      : ((u32 *)pci_handle)[i],
+                                       &pci_proto, (void **)&pci);
+               if (status != EFI_SUCCESS || !pci)
+                       continue;
+
+               status = preserve_pci_rom_image(pci, &rom);
+               if (status != EFI_SUCCESS)
+                       continue;
+
+               if (data)
+                       data->next = (unsigned long)rom;
+               else
+                       params->hdr.setup_data = (unsigned long)rom;
+
+               data = (struct setup_data *)rom;
+       }
 
 free_handle:
        efi_call_early(free_pool, pci_handle);
@@ -341,8 +217,7 @@ static void retrieve_apple_device_properties(struct boot_params *boot_params)
                status = efi_call_early(allocate_pool, EFI_LOADER_DATA,
                                        size + sizeof(struct setup_data), &new);
                if (status != EFI_SUCCESS) {
-                       efi_printk(sys_table,
-                                       "Failed to alloc mem for properties\n");
+                       efi_printk(sys_table, "Failed to allocate memory for 'properties'\n");
                        return;
                }
 
@@ -358,9 +233,9 @@ static void retrieve_apple_device_properties(struct boot_params *boot_params)
        new->next = 0;
 
        data = (struct setup_data *)(unsigned long)boot_params->hdr.setup_data;
-       if (!data)
+       if (!data) {
                boot_params->hdr.setup_data = (unsigned long)new;
-       else {
+       } else {
                while (data->next)
                        data = (struct setup_data *)(unsigned long)data->next;
                data->next = (unsigned long)new;
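
This append is the same setup_data idiom used for the PCI ROM images and the
e820 extension above: boot_params carries a singly linked list of physical
addresses, so every producer walks to the tail before hooking in its node. A
self-contained sketch of the pattern (helper name hypothetical):

        static void setup_data_append(struct boot_params *bp, struct setup_data *sd)
        {
                struct setup_data *d;

                sd->next = 0;
                d = (struct setup_data *)(unsigned long)bp->hdr.setup_data;
                if (!d) {
                        bp->hdr.setup_data = (unsigned long)sd;
                        return;
                }
                while (d->next)
                        d = (struct setup_data *)(unsigned long)d->next;
                d->next = (unsigned long)sd;
        }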
@@ -380,81 +255,55 @@ static void setup_quirks(struct boot_params *boot_params)
        }
 }
 
+/*
+ * See if we have Universal Graphics Adapter (UGA) protocol
+ */
 static efi_status_t
-setup_uga32(void **uga_handle, unsigned long size, u32 *width, u32 *height)
+setup_uga(struct screen_info *si, efi_guid_t *uga_proto, unsigned long size)
 {
-       struct efi_uga_draw_protocol *uga = NULL, *first_uga;
-       efi_guid_t uga_proto = EFI_UGA_PROTOCOL_GUID;
+       efi_status_t status;
+       u32 width, height;
+       void **uga_handle = NULL;
+       efi_uga_draw_protocol_t *uga = NULL, *first_uga;
        unsigned long nr_ugas;
-       u32 *handles = (u32 *)uga_handle;
-       efi_status_t status = EFI_INVALID_PARAMETER;
        int i;
 
-       first_uga = NULL;
-       nr_ugas = size / sizeof(u32);
-       for (i = 0; i < nr_ugas; i++) {
-               efi_guid_t pciio_proto = EFI_PCI_IO_PROTOCOL_GUID;
-               u32 w, h, depth, refresh;
-               void *pciio;
-               u32 handle = handles[i];
-
-               status = efi_call_early(handle_protocol, handle,
-                                       &uga_proto, (void **)&uga);
-               if (status != EFI_SUCCESS)
-                       continue;
-
-               efi_call_early(handle_protocol, handle, &pciio_proto, &pciio);
-
-               status = efi_early->call((unsigned long)uga->get_mode, uga,
-                                        &w, &h, &depth, &refresh);
-               if (status == EFI_SUCCESS && (!first_uga || pciio)) {
-                       *width = w;
-                       *height = h;
-
-                       /*
-                        * Once we've found a UGA supporting PCIIO,
-                        * don't bother looking any further.
-                        */
-                       if (pciio)
-                               break;
-
-                       first_uga = uga;
-               }
-       }
+       status = efi_call_early(allocate_pool, EFI_LOADER_DATA,
+                               size, (void **)&uga_handle);
+       if (status != EFI_SUCCESS)
+               return status;
 
-       return status;
-}
+       status = efi_call_early(locate_handle,
+                               EFI_LOCATE_BY_PROTOCOL,
+                               uga_proto, NULL, &size, uga_handle);
+       if (status != EFI_SUCCESS)
+               goto free_handle;
 
-static efi_status_t
-setup_uga64(void **uga_handle, unsigned long size, u32 *width, u32 *height)
-{
-       struct efi_uga_draw_protocol *uga = NULL, *first_uga;
-       efi_guid_t uga_proto = EFI_UGA_PROTOCOL_GUID;
-       unsigned long nr_ugas;
-       u64 *handles = (u64 *)uga_handle;
-       efi_status_t status = EFI_INVALID_PARAMETER;
-       int i;
+       height = 0;
+       width = 0;
 
        first_uga = NULL;
-       nr_ugas = size / sizeof(u64);
+       nr_ugas = size / (efi_is_64bit() ? sizeof(u64) : sizeof(u32));
        for (i = 0; i < nr_ugas; i++) {
                efi_guid_t pciio_proto = EFI_PCI_IO_PROTOCOL_GUID;
                u32 w, h, depth, refresh;
                void *pciio;
-               u64 handle = handles[i];
+               unsigned long handle = efi_is_64bit() ? ((u64 *)uga_handle)[i]
+                                                     : ((u32 *)uga_handle)[i];
 
                status = efi_call_early(handle_protocol, handle,
-                                       &uga_proto, (void **)&uga);
+                                       uga_proto, (void **)&uga);
                if (status != EFI_SUCCESS)
                        continue;
 
+               pciio = NULL;
                efi_call_early(handle_protocol, handle, &pciio_proto, &pciio);
 
-               status = efi_early->call((unsigned long)uga->get_mode, uga,
-                                        &w, &h, &depth, &refresh);
+               status = efi_call_proto(efi_uga_draw_protocol, get_mode, uga,
+                                       &w, &h, &depth, &refresh);
                if (status == EFI_SUCCESS && (!first_uga || pciio)) {
-                       *width = w;
-                       *height = h;
+                       width = w;
+                       height = h;
 
                        /*
                         * Once we've found a UGA supporting PCIIO,
@@ -467,59 +316,28 @@ setup_uga64(void **uga_handle, unsigned long size, u32 *width, u32 *height)
                }
        }
 
-       return status;
-}
-
-/*
- * See if we have Universal Graphics Adapter (UGA) protocol
- */
-static efi_status_t setup_uga(struct screen_info *si, efi_guid_t *uga_proto,
-                             unsigned long size)
-{
-       efi_status_t status;
-       u32 width, height;
-       void **uga_handle = NULL;
-
-       status = efi_call_early(allocate_pool, EFI_LOADER_DATA,
-                               size, (void **)&uga_handle);
-       if (status != EFI_SUCCESS)
-               return status;
-
-       status = efi_call_early(locate_handle,
-                               EFI_LOCATE_BY_PROTOCOL,
-                               uga_proto, NULL, &size, uga_handle);
-       if (status != EFI_SUCCESS)
-               goto free_handle;
-
-       height = 0;
-       width = 0;
-
-       if (efi_early->is64)
-               status = setup_uga64(uga_handle, size, &width, &height);
-       else
-               status = setup_uga32(uga_handle, size, &width, &height);
-
        if (!width && !height)
                goto free_handle;
 
        /* EFI framebuffer */
-       si->orig_video_isVGA = VIDEO_TYPE_EFI;
+       si->orig_video_isVGA    = VIDEO_TYPE_EFI;
 
-       si->lfb_depth = 32;
-       si->lfb_width = width;
-       si->lfb_height = height;
+       si->lfb_depth           = 32;
+       si->lfb_width           = width;
+       si->lfb_height          = height;
 
-       si->red_size = 8;
-       si->red_pos = 16;
-       si->green_size = 8;
-       si->green_pos = 8;
-       si->blue_size = 8;
-       si->blue_pos = 0;
-       si->rsvd_size = 8;
-       si->rsvd_pos = 24;
+       si->red_size            = 8;
+       si->red_pos             = 16;
+       si->green_size          = 8;
+       si->green_pos           = 8;
+       si->blue_size           = 8;
+       si->blue_pos            = 0;
+       si->rsvd_size           = 8;
+       si->rsvd_pos            = 24;
 
 free_handle:
        efi_call_early(free_pool, uga_handle);
+
        return status;
 }
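
The screen_info fields programmed above describe 32-bit XRGB8888: blue in
bits 7:0, green in 15:8, red in 23:16, and the reserved byte on top. For
illustration only, a pixel in that layout would be composed as (helper
hypothetical, not in the tree):

        static u32 efifb_pixel(u8 r, u8 g, u8 b)
        {
                return ((u32)r << 16) | ((u32)g << 8) | b;      /* bits 31:24 unused */
        }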
 
@@ -586,7 +404,7 @@ struct boot_params *make_boot_params(struct efi_config *c)
        if (sys_table->hdr.signature != EFI_SYSTEM_TABLE_SIGNATURE)
                return NULL;
 
-       if (efi_early->is64)
+       if (efi_is_64bit())
                setup_boot_services64(efi_early);
        else
                setup_boot_services32(efi_early);
@@ -601,7 +419,7 @@ struct boot_params *make_boot_params(struct efi_config *c)
        status = efi_low_alloc(sys_table, 0x4000, 1,
                               (unsigned long *)&boot_params);
        if (status != EFI_SUCCESS) {
-               efi_printk(sys_table, "Failed to alloc lowmem for boot params\n");
+               efi_printk(sys_table, "Failed to allocate lowmem for boot params\n");
                return NULL;
        }
 
@@ -617,9 +435,9 @@ struct boot_params *make_boot_params(struct efi_config *c)
         * Fill out some of the header fields ourselves because the
         * EFI firmware loader doesn't load the first sector.
         */
-       hdr->root_flags = 1;
-       hdr->vid_mode = 0xffff;
-       hdr->boot_flag = 0xAA55;
+       hdr->root_flags = 1;
+       hdr->vid_mode   = 0xffff;
+       hdr->boot_flag  = 0xAA55;
 
        hdr->type_of_loader = 0x21;
 
@@ -627,6 +445,7 @@ struct boot_params *make_boot_params(struct efi_config *c)
        cmdline_ptr = efi_convert_cmdline(sys_table, image, &options_size);
        if (!cmdline_ptr)
                goto fail;
+
        hdr->cmd_line_ptr = (unsigned long)cmdline_ptr;
        /* Fill in upper bits of command line address, NOP on 32 bit  */
        boot_params->ext_cmd_line_ptr = (u64)(unsigned long)cmdline_ptr >> 32;
@@ -663,10 +482,12 @@ struct boot_params *make_boot_params(struct efi_config *c)
        boot_params->ext_ramdisk_size  = (u64)ramdisk_size >> 32;
 
        return boot_params;
+
 fail2:
        efi_free(sys_table, options_size, hdr->cmd_line_ptr);
 fail:
        efi_free(sys_table, 0x4000, (unsigned long)boot_params);
+
        return NULL;
 }
 
@@ -678,7 +499,7 @@ static void add_e820ext(struct boot_params *params,
        unsigned long size;
 
        e820ext->type = SETUP_E820_EXT;
-       e820ext->len = nr_entries * sizeof(struct boot_e820_entry);
+       e820ext->len  = nr_entries * sizeof(struct boot_e820_entry);
        e820ext->next = 0;
 
        data = (struct setup_data *)(unsigned long)params->hdr.setup_data;
@@ -692,8 +513,8 @@ static void add_e820ext(struct boot_params *params,
                params->hdr.setup_data = (unsigned long)e820ext;
 }
 
-static efi_status_t setup_e820(struct boot_params *params,
-                              struct setup_data *e820ext, u32 e820ext_size)
+static efi_status_t
+setup_e820(struct boot_params *params, struct setup_data *e820ext, u32 e820ext_size)
 {
        struct boot_e820_entry *entry = params->e820_table;
        struct efi_info *efi = &params->efi_info;
@@ -814,11 +635,10 @@ static efi_status_t alloc_e820ext(u32 nr_desc, struct setup_data **e820ext,
 }
 
 struct exit_boot_struct {
-       struct boot_params *boot_params;
-       struct efi_info *efi;
-       struct setup_data *e820ext;
-       __u32 e820ext_size;
-       bool is64;
+       struct boot_params      *boot_params;
+       struct efi_info         *efi;
+       struct setup_data       *e820ext;
+       __u32                   e820ext_size;
 };
 
 static efi_status_t exit_boot_func(efi_system_table_t *sys_table_arg,
@@ -845,25 +665,25 @@ static efi_status_t exit_boot_func(efi_system_table_t *sys_table_arg,
                first = false;
        }
 
-       signature = p->is64 ? EFI64_LOADER_SIGNATURE : EFI32_LOADER_SIGNATURE;
+       signature = efi_is_64bit() ? EFI64_LOADER_SIGNATURE
+                                  : EFI32_LOADER_SIGNATURE;
        memcpy(&p->efi->efi_loader_signature, signature, sizeof(__u32));
 
-       p->efi->efi_systab = (unsigned long)sys_table_arg;
-       p->efi->efi_memdesc_size = *map->desc_size;
-       p->efi->efi_memdesc_version = *map->desc_ver;
-       p->efi->efi_memmap = (unsigned long)*map->map;
-       p->efi->efi_memmap_size = *map->map_size;
+       p->efi->efi_systab              = (unsigned long)sys_table_arg;
+       p->efi->efi_memdesc_size        = *map->desc_size;
+       p->efi->efi_memdesc_version     = *map->desc_ver;
+       p->efi->efi_memmap              = (unsigned long)*map->map;
+       p->efi->efi_memmap_size         = *map->map_size;
 
 #ifdef CONFIG_X86_64
-       p->efi->efi_systab_hi = (unsigned long)sys_table_arg >> 32;
-       p->efi->efi_memmap_hi = (unsigned long)*map->map >> 32;
+       p->efi->efi_systab_hi           = (unsigned long)sys_table_arg >> 32;
+       p->efi->efi_memmap_hi           = (unsigned long)*map->map >> 32;
 #endif
 
        return EFI_SUCCESS;
 }
 
-static efi_status_t exit_boot(struct boot_params *boot_params,
-                             void *handle, bool is64)
+static efi_status_t exit_boot(struct boot_params *boot_params, void *handle)
 {
        unsigned long map_sz, key, desc_size, buff_size;
        efi_memory_desc_t *mem_map;
@@ -874,17 +694,16 @@ static efi_status_t exit_boot(struct boot_params *boot_params,
        struct efi_boot_memmap map;
        struct exit_boot_struct priv;
 
-       map.map =               &mem_map;
-       map.map_size =          &map_sz;
-       map.desc_size =         &desc_size;
-       map.desc_ver =          &desc_version;
-       map.key_ptr =           &key;
-       map.buff_size =         &buff_size;
-       priv.boot_params =      boot_params;
-       priv.efi =              &boot_params->efi_info;
-       priv.e820ext =          NULL;
-       priv.e820ext_size =     0;
-       priv.is64 =             is64;
+       map.map                 = &mem_map;
+       map.map_size            = &map_sz;
+       map.desc_size           = &desc_size;
+       map.desc_ver            = &desc_version;
+       map.key_ptr             = &key;
+       map.buff_size           = &buff_size;
+       priv.boot_params        = boot_params;
+       priv.efi                = &boot_params->efi_info;
+       priv.e820ext            = NULL;
+       priv.e820ext_size       = 0;
 
        /* Might as well exit boot services now */
        status = efi_exit_boot_services(sys_table, handle, &map, &priv,
@@ -892,10 +711,11 @@ static efi_status_t exit_boot(struct boot_params *boot_params,
        if (status != EFI_SUCCESS)
                return status;
 
-       e820ext = priv.e820ext;
-       e820ext_size = priv.e820ext_size;
+       e820ext                 = priv.e820ext;
+       e820ext_size            = priv.e820ext_size;
+
        /* Historic? */
-       boot_params->alt_mem_k = 32 * 1024;
+       boot_params->alt_mem_k  = 32 * 1024;
 
        status = setup_e820(boot_params, e820ext, e820ext_size);
        if (status != EFI_SUCCESS)
@@ -908,8 +728,8 @@ static efi_status_t exit_boot(struct boot_params *boot_params,
  * On success we return a pointer to a boot_params structure, and NULL
  * on failure.
  */
-struct boot_params *efi_main(struct efi_config *c,
-                            struct boot_params *boot_params)
+struct boot_params *
+efi_main(struct efi_config *c, struct boot_params *boot_params)
 {
        struct desc_ptr *gdt = NULL;
        efi_loaded_image_t *image;
@@ -918,13 +738,11 @@ struct boot_params *efi_main(struct efi_config *c,
        struct desc_struct *desc;
        void *handle;
        efi_system_table_t *_table;
-       bool is64;
 
        efi_early = c;
 
        _table = (efi_system_table_t *)(unsigned long)efi_early->table;
        handle = (void *)(unsigned long)efi_early->image_handle;
-       is64 = efi_early->is64;
 
        sys_table = _table;
 
@@ -932,7 +750,7 @@ struct boot_params *efi_main(struct efi_config *c,
        if (sys_table->hdr.signature != EFI_SYSTEM_TABLE_SIGNATURE)
                goto fail;
 
-       if (is64)
+       if (efi_is_64bit())
                setup_boot_services64(efi_early);
        else
                setup_boot_services32(efi_early);
@@ -957,7 +775,7 @@ struct boot_params *efi_main(struct efi_config *c,
        status = efi_call_early(allocate_pool, EFI_LOADER_DATA,
                                sizeof(*gdt), (void **)&gdt);
        if (status != EFI_SUCCESS) {
-               efi_printk(sys_table, "Failed to alloc mem for gdt structure\n");
+               efi_printk(sys_table, "Failed to allocate memory for 'gdt' structure\n");
                goto fail;
        }
 
@@ -965,7 +783,7 @@ struct boot_params *efi_main(struct efi_config *c,
        status = efi_low_alloc(sys_table, gdt->size, 8,
                           (unsigned long *)&gdt->address);
        if (status != EFI_SUCCESS) {
-               efi_printk(sys_table, "Failed to alloc mem for gdt\n");
+               efi_printk(sys_table, "Failed to allocate memory for 'gdt'\n");
                goto fail;
        }
 
@@ -988,7 +806,7 @@ struct boot_params *efi_main(struct efi_config *c,
                hdr->code32_start = bzimage_addr;
        }
 
-       status = exit_boot(boot_params, handle, is64);
+       status = exit_boot(boot_params, handle);
        if (status != EFI_SUCCESS) {
                efi_printk(sys_table, "exit_boot() failed!\n");
                goto fail;
@@ -1002,19 +820,20 @@ struct boot_params *efi_main(struct efi_config *c,
 
        if (IS_ENABLED(CONFIG_X86_64)) {
                /* __KERNEL32_CS */
-               desc->limit0 = 0xffff;
-               desc->base0 = 0x0000;
-               desc->base1 = 0x0000;
-               desc->type = SEG_TYPE_CODE | SEG_TYPE_EXEC_READ;
-               desc->s = DESC_TYPE_CODE_DATA;
-               desc->dpl = 0;
-               desc->p = 1;
-               desc->limit1 = 0xf;
-               desc->avl = 0;
-               desc->l = 0;
-               desc->d = SEG_OP_SIZE_32BIT;
-               desc->g = SEG_GRANULARITY_4KB;
-               desc->base2 = 0x00;
+               desc->limit0    = 0xffff;
+               desc->base0     = 0x0000;
+               desc->base1     = 0x0000;
+               desc->type      = SEG_TYPE_CODE | SEG_TYPE_EXEC_READ;
+               desc->s         = DESC_TYPE_CODE_DATA;
+               desc->dpl       = 0;
+               desc->p         = 1;
+               desc->limit1    = 0xf;
+               desc->avl       = 0;
+               desc->l         = 0;
+               desc->d         = SEG_OP_SIZE_32BIT;
+               desc->g         = SEG_GRANULARITY_4KB;
+               desc->base2     = 0x00;
+
                desc++;
        } else {
                /* Second entry is unused on 32-bit */
@@ -1022,15 +841,16 @@ struct boot_params *efi_main(struct efi_config *c,
        }
 
        /* __KERNEL_CS */
-       desc->limit0 = 0xffff;
-       desc->base0 = 0x0000;
-       desc->base1 = 0x0000;
-       desc->type = SEG_TYPE_CODE | SEG_TYPE_EXEC_READ;
-       desc->s = DESC_TYPE_CODE_DATA;
-       desc->dpl = 0;
-       desc->p = 1;
-       desc->limit1 = 0xf;
-       desc->avl = 0;
+       desc->limit0    = 0xffff;
+       desc->base0     = 0x0000;
+       desc->base1     = 0x0000;
+       desc->type      = SEG_TYPE_CODE | SEG_TYPE_EXEC_READ;
+       desc->s         = DESC_TYPE_CODE_DATA;
+       desc->dpl       = 0;
+       desc->p         = 1;
+       desc->limit1    = 0xf;
+       desc->avl       = 0;
+
        if (IS_ENABLED(CONFIG_X86_64)) {
                desc->l = 1;
                desc->d = 0;
@@ -1038,41 +858,41 @@ struct boot_params *efi_main(struct efi_config *c,
                desc->l = 0;
                desc->d = SEG_OP_SIZE_32BIT;
        }
-       desc->g = SEG_GRANULARITY_4KB;
-       desc->base2 = 0x00;
+       desc->g         = SEG_GRANULARITY_4KB;
+       desc->base2     = 0x00;
        desc++;
 
        /* __KERNEL_DS */
-       desc->limit0 = 0xffff;
-       desc->base0 = 0x0000;
-       desc->base1 = 0x0000;
-       desc->type = SEG_TYPE_DATA | SEG_TYPE_READ_WRITE;
-       desc->s = DESC_TYPE_CODE_DATA;
-       desc->dpl = 0;
-       desc->p = 1;
-       desc->limit1 = 0xf;
-       desc->avl = 0;
-       desc->l = 0;
-       desc->d = SEG_OP_SIZE_32BIT;
-       desc->g = SEG_GRANULARITY_4KB;
-       desc->base2 = 0x00;
+       desc->limit0    = 0xffff;
+       desc->base0     = 0x0000;
+       desc->base1     = 0x0000;
+       desc->type      = SEG_TYPE_DATA | SEG_TYPE_READ_WRITE;
+       desc->s         = DESC_TYPE_CODE_DATA;
+       desc->dpl       = 0;
+       desc->p         = 1;
+       desc->limit1    = 0xf;
+       desc->avl       = 0;
+       desc->l         = 0;
+       desc->d         = SEG_OP_SIZE_32BIT;
+       desc->g         = SEG_GRANULARITY_4KB;
+       desc->base2     = 0x00;
        desc++;
 
        if (IS_ENABLED(CONFIG_X86_64)) {
                /* Task segment value */
-               desc->limit0 = 0x0000;
-               desc->base0 = 0x0000;
-               desc->base1 = 0x0000;
-               desc->type = SEG_TYPE_TSS;
-               desc->s = 0;
-               desc->dpl = 0;
-               desc->p = 1;
-               desc->limit1 = 0x0;
-               desc->avl = 0;
-               desc->l = 0;
-               desc->d = 0;
-               desc->g = SEG_GRANULARITY_4KB;
-               desc->base2 = 0x00;
+               desc->limit0    = 0x0000;
+               desc->base0     = 0x0000;
+               desc->base1     = 0x0000;
+               desc->type      = SEG_TYPE_TSS;
+               desc->s         = 0;
+               desc->dpl       = 0;
+               desc->p         = 1;
+               desc->limit1    = 0x0;
+               desc->avl       = 0;
+               desc->l         = 0;
+               desc->d         = 0;
+               desc->g         = SEG_GRANULARITY_4KB;
+               desc->base2     = 0x00;
                desc++;
        }
 
@@ -1082,5 +902,6 @@ struct boot_params *efi_main(struct efi_config *c,
        return boot_params;
 fail:
        efi_printk(sys_table, "efi_main() failed!\n");
+
        return NULL;
 }
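
A quick arithmetic check of the flat segments built above: limit0 = 0xffff
and limit1 = 0xf together form the 20-bit limit 0xfffff, and
SEG_GRANULARITY_4KB scales it in 4 KiB units, so each code/data segment spans
the full 4 GiB:

        u64 limit_bytes = ((u64)0xfffff + 1) << 12;     /* 0x100000000 == 4 GiB */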
index e799dc5..8297387 100644 (file)
 
 #define DESC_TYPE_CODE_DATA    (1 << 0)
 
-struct efi_uga_draw_protocol_32 {
+typedef struct {
        u32 get_mode;
        u32 set_mode;
        u32 blt;
-};
+} efi_uga_draw_protocol_32_t;
 
-struct efi_uga_draw_protocol_64 {
+typedef struct {
        u64 get_mode;
        u64 set_mode;
        u64 blt;
-};
+} efi_uga_draw_protocol_64_t;
 
-struct efi_uga_draw_protocol {
+typedef struct {
        void *get_mode;
        void *set_mode;
        void *blt;
-};
+} efi_uga_draw_protocol_t;
 
 #endif /* BOOT_COMPRESSED_EBOOT_H */
index 8c51075..9e21573 100644 (file)
@@ -1,3 +1,4 @@
+#include <asm/e820/types.h>
 #include <asm/processor.h>
 #include "pgtable.h"
 #include "../string.h"
@@ -34,10 +35,62 @@ unsigned long *trampoline_32bit __section(.data);
 extern struct boot_params *boot_params;
 int cmdline_find_option_bool(const char *option);
 
+static unsigned long find_trampoline_placement(void)
+{
+       unsigned long bios_start, ebda_start;
+       unsigned long trampoline_start;
+       struct boot_e820_entry *entry;
+       int i;
+
+       /*
+        * Find a suitable spot for the trampoline.
+        * This code is based on reserve_bios_regions().
+        */
+
+       ebda_start = *(unsigned short *)0x40e << 4;
+       bios_start = *(unsigned short *)0x413 << 10;
+
+       if (bios_start < BIOS_START_MIN || bios_start > BIOS_START_MAX)
+               bios_start = BIOS_START_MAX;
+
+       if (ebda_start > BIOS_START_MIN && ebda_start < bios_start)
+               bios_start = ebda_start;
+
+       bios_start = round_down(bios_start, PAGE_SIZE);
+
+       /* Find the first usable memory region under bios_start. */
+       for (i = boot_params->e820_entries - 1; i >= 0; i--) {
+               entry = &boot_params->e820_table[i];
+
+               /* Skip all entries above bios_start. */
+               if (bios_start <= entry->addr)
+                       continue;
+
+               /* Skip non-RAM entries. */
+               if (entry->type != E820_TYPE_RAM)
+                       continue;
+
+               /* Adjust bios_start to the end of the entry if needed. */
+               if (bios_start > entry->addr + entry->size)
+                       bios_start = entry->addr + entry->size;
+
+               /* Keep bios_start page-aligned. */
+               bios_start = round_down(bios_start, PAGE_SIZE);
+
+               /* Skip the entry if it's too small. */
+               if (bios_start - TRAMPOLINE_32BIT_SIZE < entry->addr)
+                       continue;
+
+               break;
+       }
+
+       /* Place the trampoline just below the end of low memory */
+       return bios_start - TRAMPOLINE_32BIT_SIZE;
+}
+
 struct paging_config paging_prepare(void *rmode)
 {
        struct paging_config paging_config = {};
-       unsigned long bios_start, ebda_start;
 
        /* Initialize boot_params. Required for cmdline_find_option_bool(). */
        boot_params = rmode;
@@ -61,23 +114,7 @@ struct paging_config paging_prepare(void *rmode)
                paging_config.l5_required = 1;
        }
 
-       /*
-        * Find a suitable spot for the trampoline.
-        * This code is based on reserve_bios_regions().
-        */
-
-       ebda_start = *(unsigned short *)0x40e << 4;
-       bios_start = *(unsigned short *)0x413 << 10;
-
-       if (bios_start < BIOS_START_MIN || bios_start > BIOS_START_MAX)
-               bios_start = BIOS_START_MAX;
-
-       if (ebda_start > BIOS_START_MIN && ebda_start < bios_start)
-               bios_start = ebda_start;
-
-       /* Place the trampoline just below the end of low memory, aligned to 4k */
-       paging_config.trampoline_start = bios_start - TRAMPOLINE_32BIT_SIZE;
-       paging_config.trampoline_start = round_down(paging_config.trampoline_start, PAGE_SIZE);
+       paging_config.trampoline_start = find_trampoline_placement();
 
        trampoline_32bit = (unsigned long *)paging_config.trampoline_start;
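
The two magic loads in find_trampoline_placement() read the BIOS Data Area:
the word at 0x40e holds the EBDA *segment* (hence the << 4 to get a linear
address) and the word at 0x413 holds base memory in KiB (hence the << 10 to
get bytes). Sketched with the unit conversions spelled out:

        unsigned long ebda_start = (unsigned long)*(unsigned short *)0x40e << 4;  /* segment -> bytes */
        unsigned long bios_start = (unsigned long)*(unsigned short *)0x413 << 10; /* KiB -> bytes */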
 
index 5de7c0d..acd11b3 100644 (file)
@@ -375,16 +375,12 @@ static struct aead_alg crypto_aegis128_aesni_alg[] = {
        }
 };
 
-static const struct x86_cpu_id aesni_cpu_id[] = {
-       X86_FEATURE_MATCH(X86_FEATURE_AES),
-       X86_FEATURE_MATCH(X86_FEATURE_XMM2),
-       {}
-};
-MODULE_DEVICE_TABLE(x86cpu, aesni_cpu_id);
-
 static int __init crypto_aegis128_aesni_module_init(void)
 {
-       if (!x86_match_cpu(aesni_cpu_id))
+       if (!boot_cpu_has(X86_FEATURE_XMM2) ||
+           !boot_cpu_has(X86_FEATURE_AES) ||
+           !boot_cpu_has(X86_FEATURE_OSXSAVE) ||
+           !cpu_has_xfeatures(XFEATURE_MASK_SSE, NULL))
                return -ENODEV;
 
        return crypto_register_aeads(crypto_aegis128_aesni_alg,
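
The dropped x86_match_cpu() table had OR semantics -- the first matching
entry wins -- so AES *or* SSE2 alone was enough to load the module, and the
XSAVE-managed SSE state was never verified. The replacement is an explicit
AND over every requirement; the same gate recurs in the aegis128l, aegis256
and morus hunks below. As a standalone sketch (helper name hypothetical):

        static bool aegis_aesni_usable(void)
        {
                return boot_cpu_has(X86_FEATURE_XMM2) &&
                       boot_cpu_has(X86_FEATURE_AES) &&
                       boot_cpu_has(X86_FEATURE_OSXSAVE) &&
                       cpu_has_xfeatures(XFEATURE_MASK_SSE, NULL);
        }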
index 876e486..2071c3d 100644 (file)
@@ -375,16 +375,12 @@ static struct aead_alg crypto_aegis128l_aesni_alg[] = {
        }
 };
 
-static const struct x86_cpu_id aesni_cpu_id[] = {
-       X86_FEATURE_MATCH(X86_FEATURE_AES),
-       X86_FEATURE_MATCH(X86_FEATURE_XMM2),
-       {}
-};
-MODULE_DEVICE_TABLE(x86cpu, aesni_cpu_id);
-
 static int __init crypto_aegis128l_aesni_module_init(void)
 {
-       if (!x86_match_cpu(aesni_cpu_id))
+       if (!boot_cpu_has(X86_FEATURE_XMM2) ||
+           !boot_cpu_has(X86_FEATURE_AES) ||
+           !boot_cpu_has(X86_FEATURE_OSXSAVE) ||
+           !cpu_has_xfeatures(XFEATURE_MASK_SSE, NULL))
                return -ENODEV;
 
        return crypto_register_aeads(crypto_aegis128l_aesni_alg,
index 2b5dd3a..b5f2a8f 100644 (file)
@@ -375,16 +375,12 @@ static struct aead_alg crypto_aegis256_aesni_alg[] = {
        }
 };
 
-static const struct x86_cpu_id aesni_cpu_id[] = {
-       X86_FEATURE_MATCH(X86_FEATURE_AES),
-       X86_FEATURE_MATCH(X86_FEATURE_XMM2),
-       {}
-};
-MODULE_DEVICE_TABLE(x86cpu, aesni_cpu_id);
-
 static int __init crypto_aegis256_aesni_module_init(void)
 {
-       if (!x86_match_cpu(aesni_cpu_id))
+       if (!boot_cpu_has(X86_FEATURE_XMM2) ||
+           !boot_cpu_has(X86_FEATURE_AES) ||
+           !boot_cpu_has(X86_FEATURE_OSXSAVE) ||
+           !cpu_has_xfeatures(XFEATURE_MASK_SSE, NULL))
                return -ENODEV;
 
        return crypto_register_aeads(crypto_aegis256_aesni_alg,
index f111f36..6634907 100644 (file)
@@ -37,15 +37,11 @@ asmlinkage void crypto_morus1280_avx2_final(void *state, void *tag_xor,
 
 MORUS1280_DECLARE_ALGS(avx2, "morus1280-avx2", 400);
 
-static const struct x86_cpu_id avx2_cpu_id[] = {
-    X86_FEATURE_MATCH(X86_FEATURE_AVX2),
-    {}
-};
-MODULE_DEVICE_TABLE(x86cpu, avx2_cpu_id);
-
 static int __init crypto_morus1280_avx2_module_init(void)
 {
-       if (!x86_match_cpu(avx2_cpu_id))
+       if (!boot_cpu_has(X86_FEATURE_AVX2) ||
+           !boot_cpu_has(X86_FEATURE_OSXSAVE) ||
+           !cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM, NULL))
                return -ENODEV;
 
        return crypto_register_aeads(crypto_morus1280_avx2_algs,
index 839270a..95cf857 100644 (file)
@@ -37,15 +37,11 @@ asmlinkage void crypto_morus1280_sse2_final(void *state, void *tag_xor,
 
 MORUS1280_DECLARE_ALGS(sse2, "morus1280-sse2", 350);
 
-static const struct x86_cpu_id sse2_cpu_id[] = {
-    X86_FEATURE_MATCH(X86_FEATURE_XMM2),
-    {}
-};
-MODULE_DEVICE_TABLE(x86cpu, sse2_cpu_id);
-
 static int __init crypto_morus1280_sse2_module_init(void)
 {
-       if (!x86_match_cpu(sse2_cpu_id))
+       if (!boot_cpu_has(X86_FEATURE_XMM2) ||
+           !boot_cpu_has(X86_FEATURE_OSXSAVE) ||
+           !cpu_has_xfeatures(XFEATURE_MASK_SSE, NULL))
                return -ENODEV;
 
        return crypto_register_aeads(crypto_morus1280_sse2_algs,
index 26b47e2..615fb7b 100644 (file)
@@ -37,15 +37,11 @@ asmlinkage void crypto_morus640_sse2_final(void *state, void *tag_xor,
 
 MORUS640_DECLARE_ALGS(sse2, "morus640-sse2", 400);
 
-static const struct x86_cpu_id sse2_cpu_id[] = {
-    X86_FEATURE_MATCH(X86_FEATURE_XMM2),
-    {}
-};
-MODULE_DEVICE_TABLE(x86cpu, sse2_cpu_id);
-
 static int __init crypto_morus640_sse2_module_init(void)
 {
-       if (!x86_match_cpu(sse2_cpu_id))
+       if (!boot_cpu_has(X86_FEATURE_XMM2) ||
+           !boot_cpu_has(X86_FEATURE_OSXSAVE) ||
+           !cpu_has_xfeatures(XFEATURE_MASK_SSE, NULL))
                return -ENODEV;
 
        return crypto_register_aeads(crypto_morus640_sse2_algs,
index 73a522d..8ae7ffd 100644 (file)
@@ -981,7 +981,7 @@ ENTRY(\sym)
 
        call    \do_sym
 
-       jmp     error_exit                      /* %ebx: no swapgs flag */
+       jmp     error_exit
        .endif
 END(\sym)
 .endm
@@ -1222,7 +1222,6 @@ END(paranoid_exit)
 
 /*
  * Save all registers in pt_regs, and switch GS if needed.
- * Return: EBX=0: came from user mode; EBX=1: otherwise
  */
 ENTRY(error_entry)
        UNWIND_HINT_FUNC
@@ -1269,7 +1268,6 @@ ENTRY(error_entry)
         * for these here too.
         */
 .Lerror_kernelspace:
-       incl    %ebx
        leaq    native_irq_return_iret(%rip), %rcx
        cmpq    %rcx, RIP+8(%rsp)
        je      .Lerror_bad_iret
@@ -1303,28 +1301,20 @@ ENTRY(error_entry)
 
        /*
         * Pretend that the exception came from user mode: set up pt_regs
-        * as if we faulted immediately after IRET and clear EBX so that
-        * error_exit knows that we will be returning to user mode.
+        * as if we faulted immediately after IRET.
         */
        mov     %rsp, %rdi
        call    fixup_bad_iret
        mov     %rax, %rsp
-       decl    %ebx
        jmp     .Lerror_entry_from_usermode_after_swapgs
 END(error_entry)
 
-
-/*
- * On entry, EBX is a "return to kernel mode" flag:
- *   1: already in kernel mode, don't need SWAPGS
- *   0: user gsbase is loaded, we need SWAPGS and standard preparation for return to usermode
- */
 ENTRY(error_exit)
        UNWIND_HINT_REGS
        DISABLE_INTERRUPTS(CLBR_ANY)
        TRACE_IRQS_OFF
-       testl   %ebx, %ebx
-       jnz     retint_kernel
+       testb   $3, CS(%rsp)
+       jz      retint_kernel
        jmp     retint_user
 END(error_exit)
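
The replacement test works because the low two bits of the saved CS selector
encode the privilege level of the interrupted context: "testb $3, CS(%rsp);
jz retint_kernel" takes the kernel return path iff CPL == 0, which is exactly
what the hand-maintained EBX flag used to track. In C terms (illustrative
only):

        static inline bool came_from_user_mode(struct pt_regs *regs)
        {
                return (regs->cs & 3) != 0;     /* CPL lives in selector bits 1:0 */
        }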
 
index 4b98101..d50bb4d 100644 (file)
@@ -579,7 +579,7 @@ static int perf_ibs_handle_irq(struct perf_ibs *perf_ibs, struct pt_regs *iregs)
 {
        struct cpu_perf_ibs *pcpu = this_cpu_ptr(perf_ibs->pcpu);
        struct perf_event *event = pcpu->event;
-       struct hw_perf_event *hwc = &event->hw;
+       struct hw_perf_event *hwc;
        struct perf_sample_data data;
        struct perf_raw_record raw;
        struct pt_regs regs;
@@ -602,6 +602,10 @@ fail:
                return 0;
        }
 
+       if (WARN_ON_ONCE(!event))
+               goto fail;
+
+       hwc = &event->hw;
        msr = hwc->config_base;
        buf = ibs_data.regs;
        rdmsrl(msr, *buf);
index 707b2a9..86f0c15 100644 (file)
@@ -2997,6 +2997,9 @@ static int intel_pmu_hw_config(struct perf_event *event)
                }
                if (x86_pmu.pebs_aliases)
                        x86_pmu.pebs_aliases(event);
+
+               if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN)
+                       event->attr.sample_type |= __PERF_SAMPLE_CALLCHAIN_EARLY;
        }
 
        if (needs_branch_stack(event)) {
index 8cf03f1..8dbba77 100644 (file)
@@ -1185,17 +1185,21 @@ static void setup_pebs_sample_data(struct perf_event *event,
                data->data_src.val = val;
        }
 
+       /*
+        * We must however always use iregs for the unwinder to stay sane; the
+        * record BP,SP,IP can point into thin air when the record is from a
+        * previous PMI context or an (I)RET happened between the record and
+        * PMI.
+        */
+       if (sample_type & PERF_SAMPLE_CALLCHAIN)
+               data->callchain = perf_callchain(event, iregs);
+
        /*
         * We use the interrupt regs as a base because the PEBS record does not
         * contain a full regs set, specifically it seems to lack segment
         * descriptors, which get used by things like user_mode().
         *
         * In the simple case fix up only the IP for PERF_SAMPLE_IP.
-        *
-        * We must however always use BP,SP from iregs for the unwinder to stay
-        * sane; the record BP,SP can point into thin air when the record is
-        * from a previous PMI context or an (I)RET happend between the record
-        * and PMI.
         */
        *regs = *iregs;
 
@@ -1214,15 +1218,8 @@ static void setup_pebs_sample_data(struct perf_event *event,
                regs->si = pebs->si;
                regs->di = pebs->di;
 
-               /*
-                * Per the above; only set BP,SP if we don't need callchains.
-                *
-                * XXX: does this make sense?
-                */
-               if (!(sample_type & PERF_SAMPLE_CALLCHAIN)) {
-                       regs->bp = pebs->bp;
-                       regs->sp = pebs->sp;
-               }
+               regs->bp = pebs->bp;
+               regs->sp = pebs->sp;
 
 #ifndef CONFIG_X86_32
                regs->r8 = pebs->r8;
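
The ordering here is the point of the change: the callchain is captured from
the interrupt regs before the local regs copy is fixed up from the PEBS
record, so the unwinder never sees a possibly stale BP/SP pair, and BP/SP can
be exported from the record again unconditionally. Sketched:

        /* order matters: unwind from iregs first, then report PEBS regs */
        if (sample_type & PERF_SAMPLE_CALLCHAIN)
                data->callchain = perf_callchain(event, iregs);
        *regs = *iregs;
        regs->bp = pebs->bp;    /* safe now: the unwinder already ran */
        regs->sp = pebs->sp;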
index c9e1e0b..e17ab88 100644 (file)
@@ -28,7 +28,7 @@
 #define UNCORE_PCI_DEV_TYPE(data)      ((data >> 8) & 0xff)
 #define UNCORE_PCI_DEV_IDX(data)       (data & 0xff)
 #define UNCORE_EXTRA_PCI_DEV           0xff
-#define UNCORE_EXTRA_PCI_DEV_MAX       3
+#define UNCORE_EXTRA_PCI_DEV_MAX       4
 
 #define UNCORE_EVENT_CONSTRAINT(c, n) EVENT_CONSTRAINT(c, n, 0xff)
 
index 87dc026..51d7c11 100644 (file)
@@ -1029,6 +1029,7 @@ void snbep_uncore_cpu_init(void)
 enum {
        SNBEP_PCI_QPI_PORT0_FILTER,
        SNBEP_PCI_QPI_PORT1_FILTER,
+       BDX_PCI_QPI_PORT2_FILTER,
        HSWEP_PCI_PCU_3,
 };
 
@@ -3286,15 +3287,18 @@ static const struct pci_device_id bdx_uncore_pci_ids[] = {
        },
        { /* QPI Port 0 filter  */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f86),
-               .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV, 0),
+               .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
+                                                  SNBEP_PCI_QPI_PORT0_FILTER),
        },
        { /* QPI Port 1 filter  */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f96),
-               .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV, 1),
+               .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
+                                                  SNBEP_PCI_QPI_PORT1_FILTER),
        },
        { /* QPI Port 2 filter  */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f46),
-               .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV, 2),
+               .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
+                                                  BDX_PCI_QPI_PORT2_FILTER),
        },
        { /* PCU.3 (for Capability registers) */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fc0),
index 9ef5ee0..159622e 100644 (file)
@@ -43,7 +43,7 @@ asm    (".pushsection .text;"
        "push  %rdx;"
        "mov   $0x1,%eax;"
        "xor   %edx,%edx;"
-       "lock cmpxchg %dl,(%rdi);"
+       LOCK_PREFIX "cmpxchg %dl,(%rdi);"
        "cmp   $0x1,%al;"
        "jne   .slowpath;"
        "pop   %rdx;"
index 2aabd4c..adbda58 100644 (file)
@@ -573,6 +573,9 @@ static u32 skx_deadline_rev(void)
        case 0x04: return 0x02000014;
        }
 
+       if (boot_cpu_data.x86_stepping > 4)
+               return 0;
+
        return ~0U;
 }
 
index 8c50754..4b76728 100644 (file)
@@ -123,8 +123,8 @@ void mce_setup(struct mce *m)
 {
        memset(m, 0, sizeof(struct mce));
        m->cpu = m->extcpu = smp_processor_id();
-       /* We hope get_seconds stays lockless */
-       m->time = get_seconds();
+       /* need the internal __ version to avoid deadlocks */
+       m->time = __ktime_get_real_seconds();
        m->cpuvendor = boot_cpu_data.x86_vendor;
        m->cpuid = cpuid_eax(1);
        m->socketid = cpu_data(m->extcpu).phys_proc_id;
@@ -1104,6 +1104,101 @@ static void mce_unmap_kpfn(unsigned long pfn)
 }
 #endif
 
+
+/*
+ * Cases where we avoid rendezvous handler timeout:
+ * 1) If this CPU is offline.
+ *
+ * 2) If crashing_cpu was set, e.g. we're entering kdump and we need to
+ *  skip those CPUs which remain looping in the 1st kernel - see
+ *  crash_nmi_callback().
+ *
+ * Note: there still is a small window between kexec-ing and the new,
+ * kdump kernel establishing a new #MC handler where a broadcasted MCE
+ * might not get handled properly.
+ */
+static bool __mc_check_crashing_cpu(int cpu)
+{
+       if (cpu_is_offline(cpu) ||
+           (crashing_cpu != -1 && crashing_cpu != cpu)) {
+               u64 mcgstatus;
+
+               mcgstatus = mce_rdmsrl(MSR_IA32_MCG_STATUS);
+               if (mcgstatus & MCG_STATUS_RIPV) {
+                       mce_wrmsrl(MSR_IA32_MCG_STATUS, 0);
+                       return true;
+               }
+       }
+       return false;
+}
+
+static void __mc_scan_banks(struct mce *m, struct mce *final,
+                           unsigned long *toclear, unsigned long *valid_banks,
+                           int no_way_out, int *worst)
+{
+       struct mca_config *cfg = &mca_cfg;
+       int severity, i;
+
+       for (i = 0; i < cfg->banks; i++) {
+               __clear_bit(i, toclear);
+               if (!test_bit(i, valid_banks))
+                       continue;
+
+               if (!mce_banks[i].ctl)
+                       continue;
+
+               m->misc = 0;
+               m->addr = 0;
+               m->bank = i;
+
+               m->status = mce_rdmsrl(msr_ops.status(i));
+               if (!(m->status & MCI_STATUS_VAL))
+                       continue;
+
+               /*
+                * Corrected or non-signaled errors are handled by
+                * machine_check_poll(). Leave them alone, unless this panics.
+                */
+               if (!(m->status & (cfg->ser ? MCI_STATUS_S : MCI_STATUS_UC)) &&
+                       !no_way_out)
+                       continue;
+
+               /* Set taint even when machine check was not enabled. */
+               add_taint(TAINT_MACHINE_CHECK, LOCKDEP_NOW_UNRELIABLE);
+
+               severity = mce_severity(m, cfg->tolerant, NULL, true);
+
+               /*
+                * When machine check was for corrected/deferred handler don't
+                * touch, unless we're panicking.
+                */
+               if ((severity == MCE_KEEP_SEVERITY ||
+                    severity == MCE_UCNA_SEVERITY) && !no_way_out)
+                       continue;
+
+               __set_bit(i, toclear);
+
+               /* Machine check event was not enabled. Clear, but ignore. */
+               if (severity == MCE_NO_SEVERITY)
+                       continue;
+
+               mce_read_aux(m, i);
+
+               /* assuming valid severity level != 0 */
+               m->severity = severity;
+
+               mce_log(m);
+
+               if (severity > *worst) {
+                       *final = *m;
+                       *worst = severity;
+               }
+       }
+
+       /* mce_clear_state will clear *final, save locally for use later */
+       *m = *final;
+}
+
 /*
  * The actual machine check handler. This only handles real
  * exceptions when something got corrupted coming in through int 18.
@@ -1118,68 +1213,45 @@ static void mce_unmap_kpfn(unsigned long pfn)
  */
 void do_machine_check(struct pt_regs *regs, long error_code)
 {
+       DECLARE_BITMAP(valid_banks, MAX_NR_BANKS);
+       DECLARE_BITMAP(toclear, MAX_NR_BANKS);
        struct mca_config *cfg = &mca_cfg;
+       int cpu = smp_processor_id();
+       char *msg = "Unknown";
        struct mce m, *final;
-       int i;
        int worst = 0;
-       int severity;
 
        /*
         * Establish sequential order between the CPUs entering the machine
         * check handler.
         */
        int order = -1;
+
        /*
         * If no_way_out gets set, there is no safe way to recover from this
         * MCE.  If mca_cfg.tolerant is cranked up, we'll try anyway.
         */
        int no_way_out = 0;
+
        /*
         * If kill_it gets set, there might be a way to recover from this
         * error.
         */
        int kill_it = 0;
-       DECLARE_BITMAP(toclear, MAX_NR_BANKS);
-       DECLARE_BITMAP(valid_banks, MAX_NR_BANKS);
-       char *msg = "Unknown";
 
        /*
         * MCEs are always local on AMD. Same is determined by MCG_STATUS_LMCES
         * on Intel.
         */
        int lmce = 1;
-       int cpu = smp_processor_id();
 
-       /*
-        * Cases where we avoid rendezvous handler timeout:
-        * 1) If this CPU is offline.
-        *
-        * 2) If crashing_cpu was set, e.g. we're entering kdump and we need to
-        *  skip those CPUs which remain looping in the 1st kernel - see
-        *  crash_nmi_callback().
-        *
-        * Note: there still is a small window between kexec-ing and the new,
-        * kdump kernel establishing a new #MC handler where a broadcasted MCE
-        * might not get handled properly.
-        */
-       if (cpu_is_offline(cpu) ||
-           (crashing_cpu != -1 && crashing_cpu != cpu)) {
-               u64 mcgstatus;
-
-               mcgstatus = mce_rdmsrl(MSR_IA32_MCG_STATUS);
-               if (mcgstatus & MCG_STATUS_RIPV) {
-                       mce_wrmsrl(MSR_IA32_MCG_STATUS, 0);
-                       return;
-               }
-       }
+       if (__mc_check_crashing_cpu(cpu))
+               return;
 
        ist_enter(regs);
 
        this_cpu_inc(mce_exception_count);
 
-       if (!cfg->banks)
-               goto out;
-
        mce_gather_info(&m, regs);
        m.tsc = rdtsc();
 
@@ -1220,67 +1292,7 @@ void do_machine_check(struct pt_regs *regs, long error_code)
                order = mce_start(&no_way_out);
        }
 
-       for (i = 0; i < cfg->banks; i++) {
-               __clear_bit(i, toclear);
-               if (!test_bit(i, valid_banks))
-                       continue;
-               if (!mce_banks[i].ctl)
-                       continue;
-
-               m.misc = 0;
-               m.addr = 0;
-               m.bank = i;
-
-               m.status = mce_rdmsrl(msr_ops.status(i));
-               if ((m.status & MCI_STATUS_VAL) == 0)
-                       continue;
-
-               /*
-                * Non uncorrected or non signaled errors are handled by
-                * machine_check_poll. Leave them alone, unless this panics.
-                */
-               if (!(m.status & (cfg->ser ? MCI_STATUS_S : MCI_STATUS_UC)) &&
-                       !no_way_out)
-                       continue;
-
-               /*
-                * Set taint even when machine check was not enabled.
-                */
-               add_taint(TAINT_MACHINE_CHECK, LOCKDEP_NOW_UNRELIABLE);
-
-               severity = mce_severity(&m, cfg->tolerant, NULL, true);
-
-               /*
-                * When machine check was for corrected/deferred handler don't
-                * touch, unless we're panicing.
-                */
-               if ((severity == MCE_KEEP_SEVERITY ||
-                    severity == MCE_UCNA_SEVERITY) && !no_way_out)
-                       continue;
-               __set_bit(i, toclear);
-               if (severity == MCE_NO_SEVERITY) {
-                       /*
-                        * Machine check event was not enabled. Clear, but
-                        * ignore.
-                        */
-                       continue;
-               }
-
-               mce_read_aux(&m, i);
-
-               /* assuming valid severity level != 0 */
-               m.severity = severity;
-
-               mce_log(&m);
-
-               if (severity > worst) {
-                       *final = m;
-                       worst = severity;
-               }
-       }
-
-       /* mce_clear_state will clear *final, save locally for use later */
-       m = *final;
+       __mc_scan_banks(&m, final, toclear, valid_banks, no_way_out, &worst);
 
        if (!no_way_out)
                mce_clear_state(toclear);
@@ -1319,7 +1331,7 @@ void do_machine_check(struct pt_regs *regs, long error_code)
        if (worst > 0)
                mce_report_event(regs);
        mce_wrmsrl(MSR_IA32_MCG_STATUS, 0);
-out:
+
        sync_core();
 
        if (worst != MCE_AR_SEVERITY && !kill_it)
index d594690..6b8f115 100644 (file)
@@ -890,7 +890,7 @@ static int mmu_topup_memory_cache_page(struct kvm_mmu_memory_cache *cache,
        if (cache->nobjs >= min)
                return 0;
        while (cache->nobjs < ARRAY_SIZE(cache->objects)) {
-               page = (void *)__get_free_page(GFP_KERNEL);
+               page = (void *)__get_free_page(GFP_KERNEL_ACCOUNT);
                if (!page)
                        return -ENOMEM;
                cache->objects[cache->nobjs++] = page;
index e30da9a..5d8e317 100644 (file)
@@ -7893,6 +7893,8 @@ static int enter_vmx_operation(struct kvm_vcpu *vcpu)
                     HRTIMER_MODE_REL_PINNED);
        vmx->nested.preemption_timer.function = vmx_preemption_timer_fn;
 
+       vmx->nested.vpid02 = allocate_vpid();
+
        vmx->nested.vmxon = true;
        return 0;
 
@@ -8480,21 +8482,20 @@ static int handle_vmptrld(struct kvm_vcpu *vcpu)
 /* Emulate the VMPTRST instruction */
 static int handle_vmptrst(struct kvm_vcpu *vcpu)
 {
-       unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
-       u32 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
-       gva_t vmcs_gva;
+       unsigned long exit_qual = vmcs_readl(EXIT_QUALIFICATION);
+       u32 instr_info = vmcs_read32(VMX_INSTRUCTION_INFO);
+       gpa_t current_vmptr = to_vmx(vcpu)->nested.current_vmptr;
        struct x86_exception e;
+       gva_t gva;
 
        if (!nested_vmx_check_permission(vcpu))
                return 1;
 
-       if (get_vmx_mem_address(vcpu, exit_qualification,
-                       vmx_instruction_info, true, &vmcs_gva))
+       if (get_vmx_mem_address(vcpu, exit_qual, instr_info, true, &gva))
                return 1;
        /* *_system ok, nested_vmx_check_permission has verified cpl=0 */
-       if (kvm_write_guest_virt_system(vcpu, vmcs_gva,
-                                       (void *)&to_vmx(vcpu)->nested.current_vmptr,
-                                       sizeof(u64), &e)) {
+       if (kvm_write_guest_virt_system(vcpu, gva, (void *)&current_vmptr,
+                                       sizeof(gpa_t), &e)) {
                kvm_inject_page_fault(vcpu, &e);
                return 1;
        }
@@ -10370,11 +10371,9 @@ static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
                        goto free_vmcs;
        }
 
-       if (nested) {
+       if (nested)
                nested_vmx_setup_ctls_msrs(&vmx->nested.msrs,
                                           kvm_vcpu_apicv_active(&vmx->vcpu));
-               vmx->nested.vpid02 = allocate_vpid();
-       }
 
        vmx->nested.posted_intr_nv = -1;
        vmx->nested.current_vmptr = -1ull;
@@ -10391,7 +10390,6 @@ static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
        return &vmx->vcpu;
 
 free_vmcs:
-       free_vpid(vmx->nested.vpid02);
        free_loaded_vmcs(vmx->loaded_vmcs);
 free_msrs:
        kfree(vmx->guest_msrs);
index 5579987..8f6cc71 100644 (file)
@@ -1441,8 +1441,8 @@ static void emit_prologue(u8 **pprog, u32 stack_depth)
 
        /* sub esp,STACK_SIZE */
        EMIT2_off32(0x81, 0xEC, STACK_SIZE);
-       /* sub ebp,SCRATCH_SIZE+4+12*/
-       EMIT3(0x83, add_1reg(0xE8, IA32_EBP), SCRATCH_SIZE + 16);
+       /* sub ebp,SCRATCH_SIZE+12*/
+       EMIT3(0x83, add_1reg(0xE8, IA32_EBP), SCRATCH_SIZE + 12);
        /* xor ebx,ebx */
        EMIT2(0x31, add_2reg(0xC0, IA32_EBX, IA32_EBX));
 
@@ -1475,8 +1475,8 @@ static void emit_epilogue(u8 **pprog, u32 stack_depth)
        /* mov edx,dword ptr [ebp+off]*/
        EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EDX), STACK_VAR(r0[1]));
 
-       /* add ebp,SCRATCH_SIZE+4+12*/
-       EMIT3(0x83, add_1reg(0xC0, IA32_EBP), SCRATCH_SIZE + 16);
+       /* add ebp,SCRATCH_SIZE+12*/
+       EMIT3(0x83, add_1reg(0xC0, IA32_EBP), SCRATCH_SIZE + 12);
 
        /* mov ebx,dword ptr [ebp-12]*/
        EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EBX), -12);
index 77873ce..ee5d08f 100644 (file)
@@ -417,7 +417,7 @@ static void __init __map_region(efi_memory_desc_t *md, u64 va)
        if (!(md->attribute & EFI_MEMORY_WB))
                flags |= _PAGE_PCD;
 
-       if (sev_active())
+       if (sev_active() && md->type != EFI_MEMORY_MAPPED_IO)
                flags |= _PAGE_ENC;
 
        pfn = md->phys_addr >> PAGE_SHIFT;
@@ -636,6 +636,8 @@ void efi_switch_mm(struct mm_struct *mm)
 #ifdef CONFIG_EFI_MIXED
 extern efi_status_t efi64_thunk(u32, ...);
 
+static DEFINE_SPINLOCK(efi_runtime_lock);
+
 #define runtime_service32(func)                                                 \
 ({                                                                      \
        u32 table = (u32)(unsigned long)efi.systab;                      \
@@ -657,17 +659,14 @@ extern efi_status_t efi64_thunk(u32, ...);
 #define efi_thunk(f, ...)                                              \
 ({                                                                     \
        efi_status_t __s;                                               \
-       unsigned long __flags;                                          \
        u32 __func;                                                     \
                                                                        \
-       local_irq_save(__flags);                                        \
        arch_efi_call_virt_setup();                                     \
                                                                        \
        __func = runtime_service32(f);                                  \
        __s = efi64_thunk(__func, __VA_ARGS__);                         \
                                                                        \
        arch_efi_call_virt_teardown();                                  \
-       local_irq_restore(__flags);                                     \
                                                                        \
        __s;                                                            \
 })
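
With the IRQ save/restore removed from the macro, serialization becomes the
callers' job: every efi_thunk() invocation in the hunks below is wrapped in
efi_runtime_lock, whose irqsave variant also disables interrupts. The
resulting contract, sketched:

        unsigned long flags;

        spin_lock_irqsave(&efi_runtime_lock, flags);    /* serializes + disables IRQs */
        status = efi_thunk(get_time, phys_tm, phys_tc);
        spin_unlock_irqrestore(&efi_runtime_lock, flags);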
@@ -702,14 +701,17 @@ static efi_status_t efi_thunk_get_time(efi_time_t *tm, efi_time_cap_t *tc)
 {
        efi_status_t status;
        u32 phys_tm, phys_tc;
+       unsigned long flags;
 
        spin_lock(&rtc_lock);
+       spin_lock_irqsave(&efi_runtime_lock, flags);
 
        phys_tm = virt_to_phys_or_null(tm);
        phys_tc = virt_to_phys_or_null(tc);
 
        status = efi_thunk(get_time, phys_tm, phys_tc);
 
+       spin_unlock_irqrestore(&efi_runtime_lock, flags);
        spin_unlock(&rtc_lock);
 
        return status;
@@ -719,13 +721,16 @@ static efi_status_t efi_thunk_set_time(efi_time_t *tm)
 {
        efi_status_t status;
        u32 phys_tm;
+       unsigned long flags;
 
        spin_lock(&rtc_lock);
+       spin_lock_irqsave(&efi_runtime_lock, flags);
 
        phys_tm = virt_to_phys_or_null(tm);
 
        status = efi_thunk(set_time, phys_tm);
 
+       spin_unlock_irqrestore(&efi_runtime_lock, flags);
        spin_unlock(&rtc_lock);
 
        return status;
@@ -737,8 +742,10 @@ efi_thunk_get_wakeup_time(efi_bool_t *enabled, efi_bool_t *pending,
 {
        efi_status_t status;
        u32 phys_enabled, phys_pending, phys_tm;
+       unsigned long flags;
 
        spin_lock(&rtc_lock);
+       spin_lock_irqsave(&efi_runtime_lock, flags);
 
        phys_enabled = virt_to_phys_or_null(enabled);
        phys_pending = virt_to_phys_or_null(pending);
@@ -747,6 +754,7 @@ efi_thunk_get_wakeup_time(efi_bool_t *enabled, efi_bool_t *pending,
        status = efi_thunk(get_wakeup_time, phys_enabled,
                             phys_pending, phys_tm);
 
+       spin_unlock_irqrestore(&efi_runtime_lock, flags);
        spin_unlock(&rtc_lock);
 
        return status;
@@ -757,13 +765,16 @@ efi_thunk_set_wakeup_time(efi_bool_t enabled, efi_time_t *tm)
 {
        efi_status_t status;
        u32 phys_tm;
+       unsigned long flags;
 
        spin_lock(&rtc_lock);
+       spin_lock_irqsave(&efi_runtime_lock, flags);
 
        phys_tm = virt_to_phys_or_null(tm);
 
        status = efi_thunk(set_wakeup_time, enabled, phys_tm);
 
+       spin_unlock_irqrestore(&efi_runtime_lock, flags);
        spin_unlock(&rtc_lock);
 
        return status;
@@ -781,6 +792,9 @@ efi_thunk_get_variable(efi_char16_t *name, efi_guid_t *vendor,
        efi_status_t status;
        u32 phys_name, phys_vendor, phys_attr;
        u32 phys_data_size, phys_data;
+       unsigned long flags;
+
+       spin_lock_irqsave(&efi_runtime_lock, flags);
 
        phys_data_size = virt_to_phys_or_null(data_size);
        phys_vendor = virt_to_phys_or_null(vendor);
@@ -791,6 +805,8 @@ efi_thunk_get_variable(efi_char16_t *name, efi_guid_t *vendor,
        status = efi_thunk(get_variable, phys_name, phys_vendor,
                           phys_attr, phys_data_size, phys_data);
 
+       spin_unlock_irqrestore(&efi_runtime_lock, flags);
+
        return status;
 }
 
@@ -800,6 +816,34 @@ efi_thunk_set_variable(efi_char16_t *name, efi_guid_t *vendor,
 {
        u32 phys_name, phys_vendor, phys_data;
        efi_status_t status;
+       unsigned long flags;
+
+       spin_lock_irqsave(&efi_runtime_lock, flags);
+
+       phys_name = virt_to_phys_or_null_size(name, efi_name_size(name));
+       phys_vendor = virt_to_phys_or_null(vendor);
+       phys_data = virt_to_phys_or_null_size(data, data_size);
+
+       /* If data_size is > sizeof(u32) we've got problems */
+       status = efi_thunk(set_variable, phys_name, phys_vendor,
+                          attr, data_size, phys_data);
+
+       spin_unlock_irqrestore(&efi_runtime_lock, flags);
+
+       return status;
+}
+
+static efi_status_t
+efi_thunk_set_variable_nonblocking(efi_char16_t *name, efi_guid_t *vendor,
+                                  u32 attr, unsigned long data_size,
+                                  void *data)
+{
+       u32 phys_name, phys_vendor, phys_data;
+       efi_status_t status;
+       unsigned long flags;
+
+       if (!spin_trylock_irqsave(&efi_runtime_lock, flags))
+               return EFI_NOT_READY;
 
        phys_name = virt_to_phys_or_null_size(name, efi_name_size(name));
        phys_vendor = virt_to_phys_or_null(vendor);
@@ -809,6 +853,8 @@ efi_thunk_set_variable(efi_char16_t *name, efi_guid_t *vendor,
        status = efi_thunk(set_variable, phys_name, phys_vendor,
                           attr, data_size, phys_data);
 
+       spin_unlock_irqrestore(&efi_runtime_lock, flags);
+
        return status;
 }
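
The _nonblocking variants exist for callers that cannot afford to spin on
efi_runtime_lock (e.g. variable writes from atomic context): they back off
immediately instead of waiting. The distinguishing pattern, sketched:

        if (!spin_trylock_irqsave(&efi_runtime_lock, flags))
                return EFI_NOT_READY;   /* caller is expected to retry later */
        /* ... marshal arguments and call efi_thunk() as in the blocking path ... */
        spin_unlock_irqrestore(&efi_runtime_lock, flags);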
 
@@ -819,6 +865,9 @@ efi_thunk_get_next_variable(unsigned long *name_size,
 {
        efi_status_t status;
        u32 phys_name_size, phys_name, phys_vendor;
+       unsigned long flags;
+
+       spin_lock_irqsave(&efi_runtime_lock, flags);
 
        phys_name_size = virt_to_phys_or_null(name_size);
        phys_vendor = virt_to_phys_or_null(vendor);
@@ -827,6 +876,8 @@ efi_thunk_get_next_variable(unsigned long *name_size,
        status = efi_thunk(get_next_variable, phys_name_size,
                           phys_name, phys_vendor);
 
+       spin_unlock_irqrestore(&efi_runtime_lock, flags);
+
        return status;
 }
 
@@ -835,10 +886,15 @@ efi_thunk_get_next_high_mono_count(u32 *count)
 {
        efi_status_t status;
        u32 phys_count;
+       unsigned long flags;
+
+       spin_lock_irqsave(&efi_runtime_lock, flags);
 
        phys_count = virt_to_phys_or_null(count);
        status = efi_thunk(get_next_high_mono_count, phys_count);
 
+       spin_unlock_irqrestore(&efi_runtime_lock, flags);
+
        return status;
 }
 
@@ -847,10 +903,15 @@ efi_thunk_reset_system(int reset_type, efi_status_t status,
                       unsigned long data_size, efi_char16_t *data)
 {
        u32 phys_data;
+       unsigned long flags;
+
+       spin_lock_irqsave(&efi_runtime_lock, flags);
 
        phys_data = virt_to_phys_or_null_size(data, data_size);
 
        efi_thunk(reset_system, reset_type, status, data_size, phys_data);
+
+       spin_unlock_irqrestore(&efi_runtime_lock, flags);
 }
 
 static efi_status_t
@@ -872,10 +933,40 @@ efi_thunk_query_variable_info(u32 attr, u64 *storage_space,
 {
        efi_status_t status;
        u32 phys_storage, phys_remaining, phys_max;
+       unsigned long flags;
+
+       if (efi.runtime_version < EFI_2_00_SYSTEM_TABLE_REVISION)
+               return EFI_UNSUPPORTED;
+
+       spin_lock_irqsave(&efi_runtime_lock, flags);
+
+       phys_storage = virt_to_phys_or_null(storage_space);
+       phys_remaining = virt_to_phys_or_null(remaining_space);
+       phys_max = virt_to_phys_or_null(max_variable_size);
+
+       status = efi_thunk(query_variable_info, attr, phys_storage,
+                          phys_remaining, phys_max);
+
+       spin_unlock_irqrestore(&efi_runtime_lock, flags);
+
+       return status;
+}
+
+static efi_status_t
+efi_thunk_query_variable_info_nonblocking(u32 attr, u64 *storage_space,
+                                         u64 *remaining_space,
+                                         u64 *max_variable_size)
+{
+       efi_status_t status;
+       u32 phys_storage, phys_remaining, phys_max;
+       unsigned long flags;
 
        if (efi.runtime_version < EFI_2_00_SYSTEM_TABLE_REVISION)
                return EFI_UNSUPPORTED;
 
+       if (!spin_trylock_irqsave(&efi_runtime_lock, flags))
+               return EFI_NOT_READY;
+
        phys_storage = virt_to_phys_or_null(storage_space);
        phys_remaining = virt_to_phys_or_null(remaining_space);
        phys_max = virt_to_phys_or_null(max_variable_size);
@@ -883,6 +974,8 @@ efi_thunk_query_variable_info(u32 attr, u64 *storage_space,
        status = efi_thunk(query_variable_info, attr, phys_storage,
                           phys_remaining, phys_max);
 
+       spin_unlock_irqrestore(&efi_runtime_lock, flags);
+
        return status;
 }
 
@@ -908,9 +1001,11 @@ void efi_thunk_runtime_setup(void)
        efi.get_variable = efi_thunk_get_variable;
        efi.get_next_variable = efi_thunk_get_next_variable;
        efi.set_variable = efi_thunk_set_variable;
+       efi.set_variable_nonblocking = efi_thunk_set_variable_nonblocking;
        efi.get_next_high_mono_count = efi_thunk_get_next_high_mono_count;
        efi.reset_system = efi_thunk_reset_system;
        efi.query_variable_info = efi_thunk_query_variable_info;
+       efi.query_variable_info_nonblocking = efi_thunk_query_variable_info_nonblocking;
        efi.update_capsule = efi_thunk_update_capsule;
        efi.query_capsule_caps = efi_thunk_query_capsule_caps;
 }
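
Taken together, the hunks above serialize every mixed-mode EFI thunk behind
efi_runtime_lock and add *_nonblocking variants built on
spin_trylock_irqsave(), which is what lets efi_delete_dummy_variable()
further down use a non-waiting call. A minimal sketch of the two call
shapes, with hypothetical names (example_lock, and do_call() standing in
for efi_thunk(...)):

        #include <linux/spinlock.h>
        #include <linux/efi.h>

        extern efi_status_t do_call(void);      /* placeholder for efi_thunk(...) */
        static DEFINE_SPINLOCK(example_lock);

        static efi_status_t example_call(void)
        {
                unsigned long flags;
                efi_status_t status;

                spin_lock_irqsave(&example_lock, flags);        /* always waits */
                status = do_call();
                spin_unlock_irqrestore(&example_lock, flags);
                return status;
        }

        static efi_status_t example_call_nonblocking(void)
        {
                unsigned long flags;
                efi_status_t status;

                if (!spin_trylock_irqsave(&example_lock, flags))
                        return EFI_NOT_READY;   /* lock busy: caller may retry */
                status = do_call();
                spin_unlock_irqrestore(&example_lock, flags);
                return status;
        }

Returning EFI_NOT_READY on contention pushes the retry decision to the
caller instead of spinning with interrupts disabled.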
index 36c1f8b..844d31c 100644 (file)
@@ -105,12 +105,11 @@ early_param("efi_no_storage_paranoia", setup_storage_paranoia);
 */
 void efi_delete_dummy_variable(void)
 {
-       efi.set_variable((efi_char16_t *)efi_dummy_name,
-                        &EFI_DUMMY_GUID,
-                        EFI_VARIABLE_NON_VOLATILE |
-                        EFI_VARIABLE_BOOTSERVICE_ACCESS |
-                        EFI_VARIABLE_RUNTIME_ACCESS,
-                        0, NULL);
+       efi.set_variable_nonblocking((efi_char16_t *)efi_dummy_name,
+                                    &EFI_DUMMY_GUID,
+                                    EFI_VARIABLE_NON_VOLATILE |
+                                    EFI_VARIABLE_BOOTSERVICE_ACCESS |
+                                    EFI_VARIABLE_RUNTIME_ACCESS, 0, NULL);
 }
 
 /*
@@ -249,7 +248,8 @@ void __init efi_arch_mem_reserve(phys_addr_t addr, u64 size)
        int num_entries;
        void *new;
 
-       if (efi_mem_desc_lookup(addr, &md)) {
+       if (efi_mem_desc_lookup(addr, &md) ||
+           md.type != EFI_BOOT_SERVICES_DATA) {
                pr_err("Failed to lookup EFI memory descriptor for %pa\n", &addr);
                return;
        }
index 744afdc..56c44d8 100644 (file)
@@ -16,7 +16,7 @@ static int __init gate_vma_init(void)
        if (!FIXADDR_USER_START)
                return 0;
 
-       gate_vma.vm_mm = NULL;
+       vma_init(&gate_vma, NULL);
        gate_vma.vm_start = FIXADDR_USER_START;
        gate_vma.vm_end = FIXADDR_USER_END;
        gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
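
vma_init() replaces the open-coded field assignment so a freshly set-up VMA
never carries stale vm_ops or an uninitialized anon_vma_chain; the
mmap_zero() hunk later in this series relies on the matching
vma_set_anonymous() helper. A rough, hedged sketch of what these helpers
look like (the exact bodies in <linux/mm.h> may differ):

        static inline void vma_init(struct vm_area_struct *vma, struct mm_struct *mm)
        {
                static const struct vm_operations_struct dummy_vm_ops = {};

                vma->vm_mm = mm;
                vma->vm_ops = &dummy_vm_ops;    /* never-NULL placeholder */
                INIT_LIST_HEAD(&vma->anon_vma_chain);
        }

        static inline void vma_set_anonymous(struct vm_area_struct *vma)
        {
                vma->vm_ops = NULL;             /* NULL vm_ops marks an anonymous VMA */
        }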
index 67eff5e..047c5dc 100644 (file)
@@ -903,25 +903,27 @@ int bio_add_page(struct bio *bio, struct page *page,
 EXPORT_SYMBOL(bio_add_page);
 
 /**
- * bio_iov_iter_get_pages - pin user or kernel pages and add them to a bio
+ * __bio_iov_iter_get_pages - pin user or kernel pages and add them to a bio
  * @bio: bio to add pages to
  * @iter: iov iterator describing the region to be mapped
  *
- * Pins as many pages from *iter and appends them to @bio's bvec array. The
+ * Pins pages from *iter and appends them to @bio's bvec array. The
  * pages will have to be released using put_page() when done.
+ * For multi-segment *iter, this function only adds pages from
+ * the next non-empty segment of the iov iterator.
  */
-int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
+static int __bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
 {
-       unsigned short nr_pages = bio->bi_max_vecs - bio->bi_vcnt;
+       unsigned short nr_pages = bio->bi_max_vecs - bio->bi_vcnt, idx;
        struct bio_vec *bv = bio->bi_io_vec + bio->bi_vcnt;
        struct page **pages = (struct page **)bv;
-       size_t offset, diff;
+       size_t offset;
        ssize_t size;
 
        size = iov_iter_get_pages(iter, pages, LONG_MAX, nr_pages, &offset);
        if (unlikely(size <= 0))
                return size ? size : -EFAULT;
-       nr_pages = (size + offset + PAGE_SIZE - 1) / PAGE_SIZE;
+       idx = nr_pages = (size + offset + PAGE_SIZE - 1) / PAGE_SIZE;
 
        /*
         * Deep magic below:  We need to walk the pinned pages backwards
@@ -934,21 +936,46 @@ int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
        bio->bi_iter.bi_size += size;
        bio->bi_vcnt += nr_pages;
 
-       diff = (nr_pages * PAGE_SIZE - offset) - size;
-       while (nr_pages--) {
-               bv[nr_pages].bv_page = pages[nr_pages];
-               bv[nr_pages].bv_len = PAGE_SIZE;
-               bv[nr_pages].bv_offset = 0;
+       while (idx--) {
+               bv[idx].bv_page = pages[idx];
+               bv[idx].bv_len = PAGE_SIZE;
+               bv[idx].bv_offset = 0;
        }
 
        bv[0].bv_offset += offset;
        bv[0].bv_len -= offset;
-       if (diff)
-               bv[bio->bi_vcnt - 1].bv_len -= diff;
+       bv[nr_pages - 1].bv_len -= nr_pages * PAGE_SIZE - offset - size;
 
        iov_iter_advance(iter, size);
        return 0;
 }
+
+/**
+ * bio_iov_iter_get_pages - pin user or kernel pages and add them to a bio
+ * @bio: bio to add pages to
+ * @iter: iov iterator describing the region to be mapped
+ *
+ * Pins pages from *iter and appends them to @bio's bvec array. The
+ * pages will have to be released using put_page() when done.
+ * The function tries, but does not guarantee, to pin as many pages as
+ * fit into the bio, or are requested in *iter, whichever is smaller.
+ * If MM encounters an error pinning the requested pages, it stops.
+ * An error is returned only if no pages could be pinned at all.
+ */
+int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
+{
+       unsigned short orig_vcnt = bio->bi_vcnt;
+
+       do {
+               int ret = __bio_iov_iter_get_pages(bio, iter);
+
+               if (unlikely(ret))
+                       return bio->bi_vcnt > orig_vcnt ? 0 : ret;
+
+       } while (iov_iter_count(iter) && !bio_full(bio));
+
+       return 0;
+}
 EXPORT_SYMBOL_GPL(bio_iov_iter_get_pages);
 
 static void submit_bio_wait_endio(struct bio *bio)
@@ -1866,6 +1893,7 @@ struct bio *bio_split(struct bio *bio, int sectors,
                bio_integrity_trim(split);
 
        bio_advance(bio, split->bi_iter.bi_size);
+       bio->bi_iter.bi_done = 0;
 
        if (bio_flagged(bio, BIO_TRACE_COMPLETION))
                bio_set_flag(split, BIO_TRACE_COMPLETION);
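
With the rework above, bio_iov_iter_get_pages() keeps calling
__bio_iov_iter_get_pages() until the bio is full or the iterator is
drained, and fails only when not a single page was pinned. A hedged sketch
of a caller loop (drive_iter() and submit_one() are hypothetical helpers,
not part of this patch):

        static int drive_iter(struct iov_iter *iter)
        {
                while (iov_iter_count(iter)) {
                        struct bio *bio = bio_alloc(GFP_KERNEL, BIO_MAX_PAGES);
                        int ret = bio_iov_iter_get_pages(bio, iter);

                        if (ret) {              /* not even one page pinned */
                                bio_put(bio);
                                return ret;
                        }
                        submit_one(bio);        /* hypothetical: submit and release */
                }
                return 0;
        }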
index f84a9b7..ee33590 100644 (file)
@@ -2155,11 +2155,12 @@ static inline bool bio_check_ro(struct bio *bio, struct hd_struct *part)
        if (part->policy && op_is_write(bio_op(bio))) {
                char b[BDEVNAME_SIZE];
 
-               printk(KERN_ERR
+               WARN_ONCE(1,
                       "generic_make_request: Trying to write "
                        "to read-only block-device %s (partno %d)\n",
                        bio_devname(bio, b), part->partno);
-               return true;
+               /* Older lvm-tools actually trigger this */
+               return false;
        }
 
        return false;
index 09b2ee6..3de0836 100644 (file)
@@ -271,7 +271,7 @@ static bool bt_tags_iter(struct sbitmap *bitmap, unsigned int bitnr, void *data)
         * test and set the bit before assigning ->rqs[].
         */
        rq = tags->rqs[bitnr];
-       if (rq && blk_mq_rq_state(rq) == MQ_RQ_IN_FLIGHT)
+       if (rq && blk_mq_request_started(rq))
                iter_data->fn(rq, iter_data->data, reserved);
 
        return true;
index 9591926..654b0dc 100644 (file)
@@ -558,10 +558,8 @@ static void __blk_mq_complete_request(struct request *rq)
        bool shared = false;
        int cpu;
 
-       if (cmpxchg(&rq->state, MQ_RQ_IN_FLIGHT, MQ_RQ_COMPLETE) !=
-                       MQ_RQ_IN_FLIGHT)
+       if (!blk_mq_mark_complete(rq))
                return;
-
        if (rq->internal_tag != -1)
                blk_mq_sched_completed_request(rq);
 
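
The removed cmpxchg() now lives behind blk_mq_mark_complete(), so exactly
one of the competing paths (normal completion vs. a timeout handler) can
claim the request. A sketch of the helper, assuming it wraps the same
state transition shown in the removed lines:

        static inline bool blk_mq_mark_complete(struct request *rq)
        {
                /* true only for the caller that wins the transition */
                return cmpxchg(&rq->state, MQ_RQ_IN_FLIGHT, MQ_RQ_COMPLETE) ==
                                MQ_RQ_IN_FLIGHT;
        }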
index f8fecfe..9706613 100644 (file)
@@ -879,6 +879,7 @@ static void acpi_lpss_dismiss(struct device *dev)
 #define LPSS_GPIODEF0_DMA_LLP          BIT(13)
 
 static DEFINE_MUTEX(lpss_iosf_mutex);
+static bool lpss_iosf_d3_entered;
 
 static void lpss_iosf_enter_d3_state(void)
 {
@@ -921,6 +922,9 @@ static void lpss_iosf_enter_d3_state(void)
 
        iosf_mbi_modify(LPSS_IOSF_UNIT_LPIOEP, MBI_CR_WRITE,
                        LPSS_IOSF_GPIODEF0, value1, mask1);
+
+       lpss_iosf_d3_entered = true;
+
 exit:
        mutex_unlock(&lpss_iosf_mutex);
 }
@@ -935,6 +939,11 @@ static void lpss_iosf_exit_d3_state(void)
 
        mutex_lock(&lpss_iosf_mutex);
 
+       if (!lpss_iosf_d3_entered)
+               goto exit;
+
+       lpss_iosf_d3_entered = false;
+
        iosf_mbi_modify(LPSS_IOSF_UNIT_LPIOEP, MBI_CR_WRITE,
                        LPSS_IOSF_GPIODEF0, value1, mask1);
 
@@ -944,13 +953,13 @@ static void lpss_iosf_exit_d3_state(void)
        iosf_mbi_modify(LPSS_IOSF_UNIT_LPIO1, MBI_CFG_WRITE,
                        LPSS_IOSF_PMCSR, value2, mask2);
 
+exit:
        mutex_unlock(&lpss_iosf_mutex);
 }
 
-static int acpi_lpss_suspend(struct device *dev, bool runtime)
+static int acpi_lpss_suspend(struct device *dev, bool wakeup)
 {
        struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));
-       bool wakeup = runtime || device_may_wakeup(dev);
        int ret;
 
        if (pdata->dev_desc->flags & LPSS_SAVE_CTX)
@@ -963,14 +972,14 @@ static int acpi_lpss_suspend(struct device *dev, bool runtime)
         * wrong status for devices being about to be powered off. See
         * lpss_iosf_enter_d3_state() for further information.
         */
-       if ((runtime || !pm_suspend_via_firmware()) &&
+       if (acpi_target_system_state() == ACPI_STATE_S0 &&
            lpss_quirks & LPSS_QUIRK_ALWAYS_POWER_ON && iosf_mbi_available())
                lpss_iosf_enter_d3_state();
 
        return ret;
 }
 
-static int acpi_lpss_resume(struct device *dev, bool runtime)
+static int acpi_lpss_resume(struct device *dev)
 {
        struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));
        int ret;
@@ -979,8 +988,7 @@ static int acpi_lpss_resume(struct device *dev, bool runtime)
         * This call is kept first to be in symmetry with
         * acpi_lpss_runtime_suspend() one.
         */
-       if ((runtime || !pm_resume_via_firmware()) &&
-           lpss_quirks & LPSS_QUIRK_ALWAYS_POWER_ON && iosf_mbi_available())
+       if (lpss_quirks & LPSS_QUIRK_ALWAYS_POWER_ON && iosf_mbi_available())
                lpss_iosf_exit_d3_state();
 
        ret = acpi_dev_resume(dev);
@@ -1004,12 +1012,12 @@ static int acpi_lpss_suspend_late(struct device *dev)
                return 0;
 
        ret = pm_generic_suspend_late(dev);
-       return ret ? ret : acpi_lpss_suspend(dev, false);
+       return ret ? ret : acpi_lpss_suspend(dev, device_may_wakeup(dev));
 }
 
 static int acpi_lpss_resume_early(struct device *dev)
 {
-       int ret = acpi_lpss_resume(dev, false);
+       int ret = acpi_lpss_resume(dev);
 
        return ret ? ret : pm_generic_resume_early(dev);
 }
@@ -1024,7 +1032,7 @@ static int acpi_lpss_runtime_suspend(struct device *dev)
 
 static int acpi_lpss_runtime_resume(struct device *dev)
 {
-       int ret = acpi_lpss_resume(dev, true);
+       int ret = acpi_lpss_resume(dev);
 
        return ret ? ret : pm_generic_runtime_resume(dev);
 }
index bc5f059..44f35ab 100644 (file)
@@ -497,6 +497,18 @@ acpi_status acpi_ps_parse_loop(struct acpi_walk_state *walk_state)
                        status =
                            acpi_ps_create_op(walk_state, aml_op_start, &op);
                        if (ACPI_FAILURE(status)) {
+                               /*
+                                * ACPI_PARSE_MODULE_LEVEL means that we are loading a table by
+                                * executing it as a control method. However, if we encounter
+                                * an error while loading the table, we need to keep trying to
+                                * load the table rather than aborting the table load. Set the
+                                * status to AE_OK to proceed with the table load.
+                                */
+                               if ((walk_state->
+                                    parse_flags & ACPI_PARSE_MODULE_LEVEL)
+                                   && status == AE_ALREADY_EXISTS) {
+                                       status = AE_OK;
+                               }
                                if (status == AE_CTRL_PARSE_CONTINUE) {
                                        continue;
                                }
@@ -694,6 +706,25 @@ acpi_status acpi_ps_parse_loop(struct acpi_walk_state *walk_state)
                            acpi_ps_next_parse_state(walk_state, op, status);
                        if (status == AE_CTRL_PENDING) {
                                status = AE_OK;
+                       } else
+                           if ((walk_state->
+                                parse_flags & ACPI_PARSE_MODULE_LEVEL)
+                               && status != AE_CTRL_TRANSFER
+                               && ACPI_FAILURE(status)) {
+                               /*
+                                * ACPI_PARSE_MODULE_LEVEL flag means that we are currently
+                                * loading a table by executing it as a control method.
+                                * However, if we encounter an error while loading the table,
+                                * we need to keep trying to load the table rather than
+                                * aborting the table load (setting the status to AE_OK
+                                * continues the table load). If we get a failure at this
+                                * point, it means that the dispatcher got an error while
+                                * processing Op (most likely an AML operand error) or a
+                                * control method was called from module level and the
+                                * dispatcher returned AE_CTRL_TRANSFER. In the latter case,
+                                * leave the status alone, there's nothing wrong with it.
+                                */
+                               status = AE_OK;
                        }
                }
 
index 1435d72..6ebcd65 100644 (file)
@@ -434,14 +434,6 @@ re_probe:
                        goto probe_failed;
        }
 
-       /*
-        * Ensure devices are listed in devices_kset in correct order
-        * It's important to move Dev to the end of devices_kset before
-        * calling .probe, because it could be recursive and parent Dev
-        * should always go first
-        */
-       devices_kset_move_last(dev);
-
        if (dev->bus->probe) {
                ret = dev->bus->probe(dev);
                if (ret)
index 74a0556..3fb95c8 100644 (file)
@@ -112,12 +112,16 @@ struct nbd_device {
        struct task_struct *task_setup;
 };
 
+#define NBD_CMD_REQUEUED       1
+
 struct nbd_cmd {
        struct nbd_device *nbd;
+       struct mutex lock;
        int index;
        int cookie;
-       struct completion send_complete;
        blk_status_t status;
+       unsigned long flags;
+       u32 cmd_cookie;
 };
 
 #if IS_ENABLED(CONFIG_DEBUG_FS)
@@ -146,6 +150,35 @@ static inline struct device *nbd_to_dev(struct nbd_device *nbd)
        return disk_to_dev(nbd->disk);
 }
 
+static void nbd_requeue_cmd(struct nbd_cmd *cmd)
+{
+       struct request *req = blk_mq_rq_from_pdu(cmd);
+
+       if (!test_and_set_bit(NBD_CMD_REQUEUED, &cmd->flags))
+               blk_mq_requeue_request(req, true);
+}
+
+#define NBD_COOKIE_BITS 32
+
+static u64 nbd_cmd_handle(struct nbd_cmd *cmd)
+{
+       struct request *req = blk_mq_rq_from_pdu(cmd);
+       u32 tag = blk_mq_unique_tag(req);
+       u64 cookie = cmd->cmd_cookie;
+
+       return (cookie << NBD_COOKIE_BITS) | tag;
+}
+
+static u32 nbd_handle_to_tag(u64 handle)
+{
+       return (u32)handle;
+}
+
+static u32 nbd_handle_to_cookie(u64 handle)
+{
+       return (u32)(handle >> NBD_COOKIE_BITS);
+}
+
 static const char *nbdcmd_to_ascii(int cmd)
 {
        switch (cmd) {
@@ -319,6 +352,9 @@ static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req,
        }
        config = nbd->config;
 
+       if (!mutex_trylock(&cmd->lock))
+               return BLK_EH_RESET_TIMER;
+
        if (config->num_connections > 1) {
                dev_err_ratelimited(nbd_to_dev(nbd),
                                    "Connection timed out, retrying (%d/%d alive)\n",
@@ -343,7 +379,8 @@ static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req,
                                        nbd_mark_nsock_dead(nbd, nsock, 1);
                                mutex_unlock(&nsock->tx_lock);
                        }
-                       blk_mq_requeue_request(req, true);
+                       mutex_unlock(&cmd->lock);
+                       nbd_requeue_cmd(cmd);
                        nbd_config_put(nbd);
                        return BLK_EH_DONE;
                }
@@ -353,6 +390,7 @@ static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req,
        }
        set_bit(NBD_TIMEDOUT, &config->runtime_flags);
        cmd->status = BLK_STS_IOERR;
+       mutex_unlock(&cmd->lock);
        sock_shutdown(nbd);
        nbd_config_put(nbd);
 done:
@@ -430,9 +468,9 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
        struct iov_iter from;
        unsigned long size = blk_rq_bytes(req);
        struct bio *bio;
+       u64 handle;
        u32 type;
        u32 nbd_cmd_flags = 0;
-       u32 tag = blk_mq_unique_tag(req);
        int sent = nsock->sent, skip = 0;
 
        iov_iter_kvec(&from, WRITE | ITER_KVEC, &iov, 1, sizeof(request));
@@ -474,6 +512,8 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
                        goto send_pages;
                }
                iov_iter_advance(&from, sent);
+       } else {
+               cmd->cmd_cookie++;
        }
        cmd->index = index;
        cmd->cookie = nsock->cookie;
@@ -482,7 +522,8 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
                request.from = cpu_to_be64((u64)blk_rq_pos(req) << 9);
                request.len = htonl(size);
        }
-       memcpy(request.handle, &tag, sizeof(tag));
+       handle = nbd_cmd_handle(cmd);
+       memcpy(request.handle, &handle, sizeof(handle));
 
        dev_dbg(nbd_to_dev(nbd), "request %p: sending control (%s@%llu,%uB)\n",
                req, nbdcmd_to_ascii(type),
@@ -500,6 +541,7 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
                                nsock->pending = req;
                                nsock->sent = sent;
                        }
+                       set_bit(NBD_CMD_REQUEUED, &cmd->flags);
                        return BLK_STS_RESOURCE;
                }
                dev_err_ratelimited(disk_to_dev(nbd->disk),
@@ -541,6 +583,7 @@ send_pages:
                                         */
                                        nsock->pending = req;
                                        nsock->sent = sent;
+                                       set_bit(NBD_CMD_REQUEUED, &cmd->flags);
                                        return BLK_STS_RESOURCE;
                                }
                                dev_err(disk_to_dev(nbd->disk),
@@ -573,10 +616,12 @@ static struct nbd_cmd *nbd_read_stat(struct nbd_device *nbd, int index)
        struct nbd_reply reply;
        struct nbd_cmd *cmd;
        struct request *req = NULL;
+       u64 handle;
        u16 hwq;
        u32 tag;
        struct kvec iov = {.iov_base = &reply, .iov_len = sizeof(reply)};
        struct iov_iter to;
+       int ret = 0;
 
        reply.magic = 0;
        iov_iter_kvec(&to, READ | ITER_KVEC, &iov, 1, sizeof(reply));
@@ -594,8 +639,8 @@ static struct nbd_cmd *nbd_read_stat(struct nbd_device *nbd, int index)
                return ERR_PTR(-EPROTO);
        }
 
-       memcpy(&tag, reply.handle, sizeof(u32));
-
+       memcpy(&handle, reply.handle, sizeof(handle));
+       tag = nbd_handle_to_tag(handle);
        hwq = blk_mq_unique_tag_to_hwq(tag);
        if (hwq < nbd->tag_set.nr_hw_queues)
                req = blk_mq_tag_to_rq(nbd->tag_set.tags[hwq],
@@ -606,11 +651,25 @@ static struct nbd_cmd *nbd_read_stat(struct nbd_device *nbd, int index)
                return ERR_PTR(-ENOENT);
        }
        cmd = blk_mq_rq_to_pdu(req);
+
+       mutex_lock(&cmd->lock);
+       if (cmd->cmd_cookie != nbd_handle_to_cookie(handle)) {
+               dev_err(disk_to_dev(nbd->disk), "Double reply on req %p, cmd_cookie %u, handle cookie %u\n",
+                       req, cmd->cmd_cookie, nbd_handle_to_cookie(handle));
+               ret = -ENOENT;
+               goto out;
+       }
+       if (test_bit(NBD_CMD_REQUEUED, &cmd->flags)) {
+               dev_err(disk_to_dev(nbd->disk), "Raced with timeout on req %p\n",
+                       req);
+               ret = -ENOENT;
+               goto out;
+       }
        if (ntohl(reply.error)) {
                dev_err(disk_to_dev(nbd->disk), "Other side returned error (%d)\n",
                        ntohl(reply.error));
                cmd->status = BLK_STS_IOERR;
-               return cmd;
+               goto out;
        }
 
        dev_dbg(nbd_to_dev(nbd), "request %p: got reply\n", req);
@@ -635,18 +694,18 @@ static struct nbd_cmd *nbd_read_stat(struct nbd_device *nbd, int index)
                                if (nbd_disconnected(config) ||
                                    config->num_connections <= 1) {
                                        cmd->status = BLK_STS_IOERR;
-                                       return cmd;
+                                       goto out;
                                }
-                               return ERR_PTR(-EIO);
+                               ret = -EIO;
+                               goto out;
                        }
                        dev_dbg(nbd_to_dev(nbd), "request %p: got %d bytes data\n",
                                req, bvec.bv_len);
                }
-       } else {
-               /* See the comment in nbd_queue_rq. */
-               wait_for_completion(&cmd->send_complete);
        }
-       return cmd;
+out:
+       mutex_unlock(&cmd->lock);
+       return ret ? ERR_PTR(ret) : cmd;
 }
 
 static void recv_work(struct work_struct *work)
@@ -805,7 +864,7 @@ again:
         */
        blk_mq_start_request(req);
        if (unlikely(nsock->pending && nsock->pending != req)) {
-               blk_mq_requeue_request(req, true);
+               nbd_requeue_cmd(cmd);
                ret = 0;
                goto out;
        }
@@ -818,7 +877,7 @@ again:
                dev_err_ratelimited(disk_to_dev(nbd->disk),
                                    "Request send failed, requeueing\n");
                nbd_mark_nsock_dead(nbd, nsock, 1);
-               blk_mq_requeue_request(req, true);
+               nbd_requeue_cmd(cmd);
                ret = 0;
        }
 out:
@@ -842,7 +901,8 @@ static blk_status_t nbd_queue_rq(struct blk_mq_hw_ctx *hctx,
         * that the server is misbehaving (or there was an error) before we're
         * done sending everything over the wire.
         */
-       init_completion(&cmd->send_complete);
+       mutex_lock(&cmd->lock);
+       clear_bit(NBD_CMD_REQUEUED, &cmd->flags);
 
        /* We can be called directly from the user space process, which means we
         * could possibly have signals pending so our sendmsg will fail.  In
@@ -854,7 +914,7 @@ static blk_status_t nbd_queue_rq(struct blk_mq_hw_ctx *hctx,
                ret = BLK_STS_IOERR;
        else if (!ret)
                ret = BLK_STS_OK;
-       complete(&cmd->send_complete);
+       mutex_unlock(&cmd->lock);
 
        return ret;
 }
@@ -1460,6 +1520,8 @@ static int nbd_init_request(struct blk_mq_tag_set *set, struct request *rq,
 {
        struct nbd_cmd *cmd = blk_mq_rq_to_pdu(rq);
        cmd->nbd = set->driver_data;
+       cmd->flags = 0;
+       mutex_init(&cmd->lock);
        return 0;
 }
 
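
The reworked wire handle packs the per-command reissue cookie into the
upper 32 bits and the blk-mq unique tag into the lower 32, which is how
nbd_read_stat() can reject a reply whose cookie no longer matches a
requeued command. A small round-trip check with illustrative values:

        static void check_handle_roundtrip(void)
        {
                u32 tag = 0x00010007;           /* e.g. blk_mq_unique_tag(req) */
                u64 cookie = 5;                 /* e.g. cmd->cmd_cookie */
                u64 handle = (cookie << NBD_COOKIE_BITS) | tag;

                WARN_ON(nbd_handle_to_tag(handle) != tag);              /* low half */
                WARN_ON(nbd_handle_to_cookie(handle) != (u32)cookie);   /* high half */
        }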
index 7436b2d..a390c6d 100644 (file)
@@ -298,7 +298,8 @@ static void reset_bdev(struct zram *zram)
        zram->backing_dev = NULL;
        zram->old_block_size = 0;
        zram->bdev = NULL;
-
+       zram->disk->queue->backing_dev_info->capabilities |=
+                               BDI_CAP_SYNCHRONOUS_IO;
        kvfree(zram->bitmap);
        zram->bitmap = NULL;
 }
@@ -400,6 +401,18 @@ static ssize_t backing_dev_store(struct device *dev,
        zram->backing_dev = backing_dev;
        zram->bitmap = bitmap;
        zram->nr_pages = nr_pages;
+       /*
+        * With the writeback feature, zram does asynchronous IO, so it is no
+        * longer a synchronous device; remove the synchronous io flag.
+        * Otherwise, the upper layer (e.g., swap) could wait for IO
+        * completion rather than submit-and-return, making the system
+        * sluggish. Furthermore, when the IO function returns (e.g.,
+        * swap_readpage), the upper layer assumes the IO is done and may
+        * free the page while the IO is in fact still in flight, eventually
+        * causing a use-after-free once the IO really completes.
+        */
+       zram->disk->queue->backing_dev_info->capabilities &=
+                       ~BDI_CAP_SYNCHRONOUS_IO;
        up_write(&zram->init_lock);
 
        pr_info("setup backing device %s\n", file_name);
index ffeb60d..df66a9d 100644 (file)
@@ -708,6 +708,7 @@ static int mmap_zero(struct file *file, struct vm_area_struct *vma)
 #endif
        if (vma->vm_flags & VM_SHARED)
                return shmem_zero_setup(vma);
+       vma_set_anonymous(vma);
        return 0;
 }
 
index cd888d4..bd449ad 100644 (file)
@@ -1895,14 +1895,22 @@ static int
 write_pool(struct entropy_store *r, const char __user *buffer, size_t count)
 {
        size_t bytes;
-       __u32 buf[16];
+       __u32 t, buf[16];
        const char __user *p = buffer;
 
        while (count > 0) {
+               int b, i = 0;
+
                bytes = min(count, sizeof(buf));
                if (copy_from_user(&buf, p, bytes))
                        return -EFAULT;
 
+               for (b = bytes; b > 0; b -= sizeof(__u32), i++) {
+                       if (!arch_get_random_int(&t))
+                               break;
+                       buf[i] ^= t;
+               }
+
                count -= bytes;
                p += bytes;
 
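
The new loop folds hardware randomness into each 32-bit word written from
user space, so a write to the pool cannot fully determine its input;
arch_get_random_int() returns false when no arch RNG is present, in which
case the words are mixed in unmodified. A sketch of that contract,
mirroring the loop body above (mix_word() is illustrative):

        static void mix_word(u32 *word)
        {
                u32 t;

                if (arch_get_random_int(&t))    /* false if no arch RNG */
                        *word ^= t;             /* fold in e.g. RDRAND output */
                /* else: the user-supplied word is used as written */
        }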
index 38b366b..7b70a07 100644 (file)
@@ -24,7 +24,7 @@
 #define ASPEED_MPLL_PARAM      0x20
 #define ASPEED_HPLL_PARAM      0x24
 #define  AST2500_HPLL_BYPASS_EN        BIT(20)
-#define  AST2400_HPLL_STRAPPED BIT(18)
+#define  AST2400_HPLL_PROGRAMMED BIT(18)
 #define  AST2400_HPLL_BYPASS_EN        BIT(17)
 #define ASPEED_MISC_CTRL       0x2c
 #define  UART_DIV13_EN         BIT(12)
@@ -91,8 +91,8 @@ static const struct aspeed_gate_data aspeed_gates[] = {
        [ASPEED_CLK_GATE_GCLK] =        {  1,  7, "gclk-gate",          NULL,   0 }, /* 2D engine */
        [ASPEED_CLK_GATE_MCLK] =        {  2, -1, "mclk-gate",          "mpll", CLK_IS_CRITICAL }, /* SDRAM */
        [ASPEED_CLK_GATE_VCLK] =        {  3,  6, "vclk-gate",          NULL,   0 }, /* Video Capture */
-       [ASPEED_CLK_GATE_BCLK] =        {  4,  8, "bclk-gate",          "bclk", 0 }, /* PCIe/PCI */
-       [ASPEED_CLK_GATE_DCLK] =        {  5, -1, "dclk-gate",          NULL,   0 }, /* DAC */
+       [ASPEED_CLK_GATE_BCLK] =        {  4,  8, "bclk-gate",          "bclk", CLK_IS_CRITICAL }, /* PCIe/PCI */
+       [ASPEED_CLK_GATE_DCLK] =        {  5, -1, "dclk-gate",          NULL,   CLK_IS_CRITICAL }, /* DAC */
        [ASPEED_CLK_GATE_REFCLK] =      {  6, -1, "refclk-gate",        "clkin", CLK_IS_CRITICAL },
        [ASPEED_CLK_GATE_USBPORT2CLK] = {  7,  3, "usb-port2-gate",     NULL,   0 }, /* USB2.0 Host port 2 */
        [ASPEED_CLK_GATE_LCLK] =        {  8,  5, "lclk-gate",          NULL,   0 }, /* LPC */
@@ -212,9 +212,22 @@ static int aspeed_clk_is_enabled(struct clk_hw *hw)
 {
        struct aspeed_clk_gate *gate = to_aspeed_clk_gate(hw);
        u32 clk = BIT(gate->clock_idx);
+       u32 rst = BIT(gate->reset_idx);
        u32 enval = (gate->flags & CLK_GATE_SET_TO_DISABLE) ? 0 : clk;
        u32 reg;
 
+       /*
+        * If the IP is in reset, treat the clock as not enabled,
+        * this happens with some clocks such as the USB one when
+        * coming from cold reset. Without this, aspeed_clk_enable()
+        * will fail to lift the reset.
+        */
+       if (gate->reset_idx >= 0) {
+               regmap_read(gate->map, ASPEED_RESET_CTRL, &reg);
+               if (reg & rst)
+                       return 0;
+       }
+
        regmap_read(gate->map, ASPEED_CLK_STOP_CTRL, &reg);
 
        return ((reg & clk) == enval) ? 1 : 0;
@@ -565,29 +578,45 @@ builtin_platform_driver(aspeed_clk_driver);
 static void __init aspeed_ast2400_cc(struct regmap *map)
 {
        struct clk_hw *hw;
-       u32 val, freq, div;
+       u32 val, div, clkin, hpll;
+       const u16 hpll_rates[][4] = {
+               {384, 360, 336, 408},
+               {400, 375, 350, 425},
+       };
+       int rate;
 
        /*
         * CLKIN is the crystal oscillator, 24, 48 or 25MHz selected by
         * strapping
         */
        regmap_read(map, ASPEED_STRAP, &val);
-       if (val & CLKIN_25MHZ_EN)
-               freq = 25000000;
-       else if (val & AST2400_CLK_SOURCE_SEL)
-               freq = 48000000;
-       else
-               freq = 24000000;
-       hw = clk_hw_register_fixed_rate(NULL, "clkin", NULL, 0, freq);
-       pr_debug("clkin @%u MHz\n", freq / 1000000);
+       rate = (val >> 8) & 3;
+       if (val & CLKIN_25MHZ_EN) {
+               clkin = 25000000;
+               hpll = hpll_rates[1][rate];
+       } else if (val & AST2400_CLK_SOURCE_SEL) {
+               clkin = 48000000;
+               hpll = hpll_rates[0][rate];
+       } else {
+               clkin = 24000000;
+               hpll = hpll_rates[0][rate];
+       }
+       hw = clk_hw_register_fixed_rate(NULL, "clkin", NULL, 0, clkin);
+       pr_debug("clkin @%u MHz\n", clkin / 1000000);
 
        /*
         * High-speed PLL clock derived from the crystal. This is the CPU clock,
-        * and we assume that it is enabled
+        * and we assume that it is enabled. It can be configured through the
+        * HPLL_PARAM register, or set to a specified frequency by strapping.
         */
        regmap_read(map, ASPEED_HPLL_PARAM, &val);
-       WARN(val & AST2400_HPLL_STRAPPED, "hpll is strapped not configured");
-       aspeed_clk_data->hws[ASPEED_CLK_HPLL] = aspeed_ast2400_calc_pll("hpll", val);
+       if (val & AST2400_HPLL_PROGRAMMED)
+               hw = aspeed_ast2400_calc_pll("hpll", val);
+       else
+               hw = clk_hw_register_fixed_rate(NULL, "hpll", "clkin", 0,
+                               hpll * 1000000);
+
+       aspeed_clk_data->hws[ASPEED_CLK_HPLL] = hw;
 
        /*
         * Strap bits 11:10 define the CPU/AHB clock frequency ratio (aka HCLK)
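
When the HPLL is strapped rather than programmed, strap bits 9:8 select an
entry within a row of hpll_rates[] and the crystal choice selects the row,
exactly as the code above reads them. A toy decode, treating the local
hpll_rates table as visible for illustration:

        /* Toy decode of the strapping (values in MHz). */
        static u32 strapped_hpll_mhz(u32 strap)
        {
                int rate = (strap >> 8) & 3;    /* strap bits 9:8 */

                return (strap & CLKIN_25MHZ_EN) ? hpll_rates[1][rate]
                                                : hpll_rates[0][rate];
        }
        /* e.g. a 25 MHz crystal with rate field 1 -> 375 MHz fixed HPLL */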
index 9760b52..e2ed078 100644 (file)
@@ -24,7 +24,6 @@
 #include <linux/pm_runtime.h>
 #include <linux/sched.h>
 #include <linux/clkdev.h>
-#include <linux/stringify.h>
 
 #include "clk.h"
 
@@ -2559,7 +2558,7 @@ static const struct {
        unsigned long flag;
        const char *name;
 } clk_flags[] = {
-#define ENTRY(f) { f, __stringify(f) }
+#define ENTRY(f) { f, #f }
        ENTRY(CLK_SET_RATE_GATE),
        ENTRY(CLK_SET_PARENT_GATE),
        ENTRY(CLK_SET_RATE_PARENT),
index 58f546e..e4cf96b 100644 (file)
@@ -51,7 +51,7 @@ static unsigned long audio_divider_recalc_rate(struct clk_hw *hw,
        struct meson_clk_audio_div_data *adiv = meson_clk_audio_div_data(clk);
        unsigned long divider;
 
-       divider = meson_parm_read(clk->map, &adiv->div);
+       divider = meson_parm_read(clk->map, &adiv->div) + 1;
 
        return DIV_ROUND_UP_ULL((u64)parent_rate, divider);
 }
index 2406584..177fffb 100644 (file)
@@ -498,6 +498,7 @@ static struct clk_regmap gxbb_fclk_div2 = {
                .ops = &clk_regmap_gate_ops,
                .parent_names = (const char *[]){ "fclk_div2_div" },
                .num_parents = 1,
+               .flags = CLK_IS_CRITICAL,
        },
 };
 
index 6860bd5..44e4e27 100644 (file)
@@ -35,6 +35,7 @@
 #define CLK_SEL                0x10
 #define CLK_DIS                0x14
 
+#define  ARMADA_37XX_DVFS_LOAD_1 1
 #define LOAD_LEVEL_NR  4
 
 #define ARMADA_37XX_NB_L0L1    0x18
@@ -507,6 +508,40 @@ static long clk_pm_cpu_round_rate(struct clk_hw *hw, unsigned long rate,
        return -EINVAL;
 }
 
+/*
+ * Switching the CPU from the L2 or L3 frequencies (300 and 200 MHz
+ * respectively) to the L0 frequency (1.2 GHz) requires a significant
+ * amount of time to let VDD stabilize to the appropriate
+ * voltage. This amount of time is large enough that it cannot be
+ * covered by the hardware countdown register. Due to this, the CPU
+ * might start operating at L0 before the voltage is stabilized,
+ * leading to CPU stalls.
+ *
+ * To work around this problem, we prevent switching directly from the
+ * L2/L3 frequencies to the L0 frequency, and instead switch to the L1
+ * frequency in between. The sequence therefore becomes:
+ * 1. First switch from L2/L3 (200/300 MHz) to L1 (600 MHz)
+ * 2. Sleep 20 ms to let the VDD voltage stabilize
+ * 3. Then switch from L1 (600 MHz) to L0 (1200 MHz).
+ */
+static void clk_pm_cpu_set_rate_wa(unsigned long rate, struct regmap *base)
+{
+       unsigned int cur_level;
+
+       if (rate != 1200 * 1000 * 1000)
+               return;
+
+       regmap_read(base, ARMADA_37XX_NB_CPU_LOAD, &cur_level);
+       cur_level &= ARMADA_37XX_NB_CPU_LOAD_MASK;
+       if (cur_level <= ARMADA_37XX_DVFS_LOAD_1)
+               return;
+
+       regmap_update_bits(base, ARMADA_37XX_NB_CPU_LOAD,
+                          ARMADA_37XX_NB_CPU_LOAD_MASK,
+                          ARMADA_37XX_DVFS_LOAD_1);
+       msleep(20);
+}
+
 static int clk_pm_cpu_set_rate(struct clk_hw *hw, unsigned long rate,
                               unsigned long parent_rate)
 {
@@ -537,6 +572,9 @@ static int clk_pm_cpu_set_rate(struct clk_hw *hw, unsigned long rate,
                         */
                        reg = ARMADA_37XX_NB_CPU_LOAD;
                        mask = ARMADA_37XX_NB_CPU_LOAD_MASK;
+
+                       clk_pm_cpu_set_rate_wa(rate, base);
+
                        regmap_update_bits(base, reg, mask, load_level);
 
                        return rate;
index 9f35b3f..ff8d66f 100644 (file)
@@ -2781,6 +2781,7 @@ static struct clk_branch gcc_ufs_rx_cfg_clk = {
 
 static struct clk_branch gcc_ufs_tx_symbol_0_clk = {
        .halt_reg = 0x75018,
+       .halt_check = BRANCH_HALT_SKIP,
        .clkr = {
                .enable_reg = 0x75018,
                .enable_mask = BIT(0),
index 1a25ee4..4b20d1b 100644 (file)
@@ -2910,6 +2910,7 @@ static struct gdsc mmagic_bimc_gdsc = {
                .name = "mmagic_bimc",
        },
        .pwrsts = PWRSTS_OFF_ON,
+       .flags = ALWAYS_ON,
 };
 
 static struct gdsc mmagic_video_gdsc = {
index 3c39712..d4ed002 100644 (file)
@@ -311,12 +311,20 @@ static DEFINE_MUTEX(intel_pstate_limits_lock);
 
 #ifdef CONFIG_ACPI
 
-static bool intel_pstate_get_ppc_enable_status(void)
+static bool intel_pstate_acpi_pm_profile_server(void)
 {
        if (acpi_gbl_FADT.preferred_profile == PM_ENTERPRISE_SERVER ||
            acpi_gbl_FADT.preferred_profile == PM_PERFORMANCE_SERVER)
                return true;
 
+       return false;
+}
+
+static bool intel_pstate_get_ppc_enable_status(void)
+{
+       if (intel_pstate_acpi_pm_profile_server())
+               return true;
+
        return acpi_ppc;
 }
 
@@ -459,6 +467,11 @@ static inline void intel_pstate_init_acpi_perf_limits(struct cpufreq_policy *pol
 static inline void intel_pstate_exit_perf_limits(struct cpufreq_policy *policy)
 {
 }
+
+static inline bool intel_pstate_acpi_pm_profile_server(void)
+{
+       return false;
+}
 #endif
 
 static inline void update_turbo_state(void)
@@ -1841,7 +1854,7 @@ static int intel_pstate_init_cpu(unsigned int cpunum)
                intel_pstate_hwp_enable(cpu);
 
                id = x86_match_cpu(intel_pstate_hwp_boost_ids);
-               if (id)
+               if (id && intel_pstate_acpi_pm_profile_server())
                        hwp_boost = true;
        }
 
index 29389ac..efc9a7a 100644 (file)
@@ -183,6 +183,7 @@ static struct platform_driver qcom_cpufreq_kryo_driver = {
 static const struct of_device_id qcom_cpufreq_kryo_match_list[] __initconst = {
        { .compatible = "qcom,apq8096", },
        { .compatible = "qcom,msm8996", },
+       {}
 };
 
 /*
index 1c6cbda..09d823d 100644 (file)
@@ -266,6 +266,8 @@ static inline void padlock_xcrypt_ecb(const u8 *input, u8 *output, void *key,
                return;
        }
 
+       count -= initial;
+
        if (initial)
                asm volatile (".byte 0xf3,0x0f,0xa7,0xc8"       /* rep xcryptecb */
                              : "+S"(input), "+D"(output)
@@ -273,7 +275,7 @@ static inline void padlock_xcrypt_ecb(const u8 *input, u8 *output, void *key,
 
        asm volatile (".byte 0xf3,0x0f,0xa7,0xc8"       /* rep xcryptecb */
                      : "+S"(input), "+D"(output)
-                     : "d"(control_word), "b"(key), "c"(count - initial));
+                     : "d"(control_word), "b"(key), "c"(count));
 }
 
 static inline u8 *padlock_xcrypt_cbc(const u8 *input, u8 *output, void *key,
@@ -284,6 +286,8 @@ static inline u8 *padlock_xcrypt_cbc(const u8 *input, u8 *output, void *key,
        if (count < cbc_fetch_blocks)
                return cbc_crypt(input, output, key, iv, control_word, count);
 
+       count -= initial;
+
        if (initial)
                asm volatile (".byte 0xf3,0x0f,0xa7,0xd0"       /* rep xcryptcbc */
                              : "+S" (input), "+D" (output), "+a" (iv)
@@ -291,7 +295,7 @@ static inline u8 *padlock_xcrypt_cbc(const u8 *input, u8 *output, void *key,
 
        asm volatile (".byte 0xf3,0x0f,0xa7,0xd0"       /* rep xcryptcbc */
                      : "+S" (input), "+D" (output), "+a" (iv)
-                     : "d" (control_word), "b" (key), "c" (count-initial));
+                     : "d" (control_word), "b" (key), "c" (count));
        return iv;
 }
 
index 781a4a3..d8e159f 100644 (file)
@@ -87,6 +87,18 @@ config EFI_RUNTIME_WRAPPERS
 config EFI_ARMSTUB
        bool
 
+config EFI_ARMSTUB_DTB_LOADER
+       bool "Enable the DTB loader"
+       depends on EFI_ARMSTUB
+       help
+         Select this config option to add support for the dtb= command
+         line parameter, allowing a device tree blob to be loaded into
+         memory from the EFI System Partition by the stub.
+
+         The device tree is typically provided by the platform or by
+         the bootloader, so this option is mostly for development
+         purposes only.
+
 config EFI_BOOTLOADER_CONTROL
        tristate "EFI Bootloader Control"
        depends on EFI_VARS
index 3bf0dca..a7902fc 100644 (file)
@@ -48,8 +48,21 @@ u64 cper_next_record_id(void)
 {
        static atomic64_t seq;
 
-       if (!atomic64_read(&seq))
-               atomic64_set(&seq, ((u64)get_seconds()) << 32);
+       if (!atomic64_read(&seq)) {
+               time64_t time = ktime_get_real_seconds();
+
+               /*
+                * This code is unlikely to still be needed in year 2106,
+                * but just in case, let's use a few more bits for timestamps
+                * after y2038 to be sure they keep increasing monotonically
+                * for the next few hundred years...
+                */
+               if (time < 0x80000000)
+                       atomic64_set(&seq, (u64)time << 32);
+               else
+                       atomic64_set(&seq, 0x8000000000000000ull |
+                                          (u64)time << 24);
+       }
 
        return atomic64_inc_return(&seq);
 }
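
Because the seed layout is self-describing (MSB clear: seconds in bits
63:32; MSB set: post-2038 seconds in bits 62:24), the timestamp a record-ID
sequence was seeded with can be recovered from any ID. A hedged decode
sketch (the helper name is illustrative):

        /* Illustrative decode; the low bits also hold the running counter,
         * so the recovered time is approximate. */
        static time64_t record_id_seed_time(u64 id)
        {
                if (id & 0x8000000000000000ull)         /* post-2038 layout */
                        return (id & 0x7fffffffffffffffull) >> 24;
                return id >> 32;                        /* pre-2038 layout */
        }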
@@ -459,7 +472,7 @@ cper_estatus_print_section(const char *pfx, struct acpi_hest_generic_data *gdata
                else
                        goto err_section_too_small;
 #if defined(CONFIG_ARM64) || defined(CONFIG_ARM)
-       } else if (!uuid_le_cmp(*sec_type, CPER_SEC_PROC_ARM)) {
+       } else if (guid_equal(sec_type, &CPER_SEC_PROC_ARM)) {
                struct cper_sec_proc_arm *arm_err = acpi_hest_get_payload(gdata);
 
                printk("%ssection_type: ARM processor error\n", newpfx);
index 232f491..d8a33a7 100644 (file)
@@ -84,6 +84,8 @@ struct mm_struct efi_mm = {
        .mmlist                 = LIST_HEAD_INIT(efi_mm.mmlist),
 };
 
+struct workqueue_struct *efi_rts_wq;
+
 static bool disable_runtime;
 static int __init setup_noefi(char *arg)
 {
@@ -337,6 +339,18 @@ static int __init efisubsys_init(void)
        if (!efi_enabled(EFI_BOOT))
                return 0;
 
+       /*
+        * Since we process only one efi_runtime_service() at a time, an
+        * ordered workqueue (which creates only one execution context)
+        * should suffice for all our needs.
+        */
+       efi_rts_wq = alloc_ordered_workqueue("efi_rts_wq", 0);
+       if (!efi_rts_wq) {
+               pr_err("Creating efi_rts_wq failed, EFI runtime services disabled.\n");
+               clear_bit(EFI_RUNTIME_SERVICES, &efi.flags);
+               return 0;
+       }
+
        /* We register the efi directory at /sys/firmware/efi */
        efi_kobj = kobject_create_and_add("efi", firmware_kobj);
        if (!efi_kobj) {
@@ -388,7 +402,7 @@ subsys_initcall(efisubsys_init);
  * and if so, populate the supplied memory descriptor with the appropriate
  * data.
  */
-int __init efi_mem_desc_lookup(u64 phys_addr, efi_memory_desc_t *out_md)
+int efi_mem_desc_lookup(u64 phys_addr, efi_memory_desc_t *out_md)
 {
        efi_memory_desc_t *md;
 
@@ -406,12 +420,6 @@ int __init efi_mem_desc_lookup(u64 phys_addr, efi_memory_desc_t *out_md)
                u64 size;
                u64 end;
 
-               if (!(md->attribute & EFI_MEMORY_RUNTIME) &&
-                   md->type != EFI_BOOT_SERVICES_DATA &&
-                   md->type != EFI_RUNTIME_SERVICES_DATA) {
-                       continue;
-               }
-
                size = md->num_pages << EFI_PAGE_SHIFT;
                end = md->phys_addr + size;
                if (phys_addr >= md->phys_addr && phys_addr < end) {
index 1ab80e0..5d06bd2 100644 (file)
@@ -250,7 +250,10 @@ void __init efi_esrt_init(void)
                return;
 
        rc = efi_mem_desc_lookup(efi.esrt, &md);
-       if (rc < 0) {
+       if (rc < 0 ||
+           (!(md.attribute & EFI_MEMORY_RUNTIME) &&
+            md.type != EFI_BOOT_SERVICES_DATA &&
+            md.type != EFI_RUNTIME_SERVICES_DATA)) {
                pr_warn("ESRT header is not in the memory map.\n");
                return;
        }
@@ -326,7 +329,8 @@ void __init efi_esrt_init(void)
 
        end = esrt_data + size;
        pr_info("Reserving ESRT space from %pa to %pa.\n", &esrt_data, &end);
-       efi_mem_reserve(esrt_data, esrt_data_size);
+       if (md.type == EFI_BOOT_SERVICES_DATA)
+               efi_mem_reserve(esrt_data, esrt_data_size);
 
        pr_debug("esrt-init: loaded.\n");
 }
index 01a9d78..6920033 100644 (file)
 
 static u64 virtmap_base = EFI_RT_VIRTUAL_BASE;
 
-efi_status_t efi_open_volume(efi_system_table_t *sys_table_arg,
-                            void *__image, void **__fh)
-{
-       efi_file_io_interface_t *io;
-       efi_loaded_image_t *image = __image;
-       efi_file_handle_t *fh;
-       efi_guid_t fs_proto = EFI_FILE_SYSTEM_GUID;
-       efi_status_t status;
-       void *handle = (void *)(unsigned long)image->device_handle;
-
-       status = sys_table_arg->boottime->handle_protocol(handle,
-                                &fs_proto, (void **)&io);
-       if (status != EFI_SUCCESS) {
-               efi_printk(sys_table_arg, "Failed to handle fs_proto\n");
-               return status;
-       }
-
-       status = io->open_volume(io, &fh);
-       if (status != EFI_SUCCESS)
-               efi_printk(sys_table_arg, "Failed to open volume\n");
-
-       *__fh = fh;
-       return status;
-}
-
 void efi_char16_printk(efi_system_table_t *sys_table_arg,
                              efi_char16_t *str)
 {
@@ -202,9 +177,10 @@ unsigned long efi_entry(void *handle, efi_system_table_t *sys_table,
         * 'dtb=' unless UEFI Secure Boot is disabled.  We assume that secure
         * boot is enabled if we can't determine its state.
         */
-       if (secure_boot != efi_secureboot_mode_disabled &&
-           strstr(cmdline_ptr, "dtb=")) {
-               pr_efi(sys_table, "Ignoring DTB from command line.\n");
+       if (!IS_ENABLED(CONFIG_EFI_ARMSTUB_DTB_LOADER) ||
+            secure_boot != efi_secureboot_mode_disabled) {
+               if (strstr(cmdline_ptr, "dtb="))
+                       pr_efi(sys_table, "Ignoring DTB from command line.\n");
        } else {
                status = handle_cmdline_files(sys_table, image, cmdline_ptr,
                                              "dtb=",
index 50a9cab..e94975f 100644 (file)
@@ -413,6 +413,34 @@ static efi_status_t efi_file_close(void *handle)
        return efi_call_proto(efi_file_handle, close, handle);
 }
 
+static efi_status_t efi_open_volume(efi_system_table_t *sys_table_arg,
+                                   efi_loaded_image_t *image,
+                                   efi_file_handle_t **__fh)
+{
+       efi_file_io_interface_t *io;
+       efi_file_handle_t *fh;
+       efi_guid_t fs_proto = EFI_FILE_SYSTEM_GUID;
+       efi_status_t status;
+       void *handle = (void *)(unsigned long)efi_table_attr(efi_loaded_image,
+                                                            device_handle,
+                                                            image);
+
+       status = efi_call_early(handle_protocol, handle,
+                               &fs_proto, (void **)&io);
+       if (status != EFI_SUCCESS) {
+               efi_printk(sys_table_arg, "Failed to handle fs_proto\n");
+               return status;
+       }
+
+       status = efi_call_proto(efi_file_io_interface, open_volume, io, &fh);
+       if (status != EFI_SUCCESS)
+               efi_printk(sys_table_arg, "Failed to open volume\n");
+       else
+               *__fh = fh;
+
+       return status;
+}
+
 /*
  * Parse the ASCII string 'cmdline' for EFI options, denoted by the efi=
  * option, e.g. efi=nochunk.
@@ -563,8 +591,7 @@ efi_status_t handle_cmdline_files(efi_system_table_t *sys_table_arg,
 
                /* Only open the volume once. */
                if (!i) {
-                       status = efi_open_volume(sys_table_arg, image,
-                                                (void **)&fh);
+                       status = efi_open_volume(sys_table_arg, image, &fh);
                        if (status != EFI_SUCCESS)
                                goto free_files;
                }
index f59564b..32799cf 100644 (file)
@@ -36,9 +36,6 @@ extern int __pure is_quiet(void);
 
 void efi_char16_printk(efi_system_table_t *, efi_char16_t *);
 
-efi_status_t efi_open_volume(efi_system_table_t *sys_table_arg, void *__image,
-                            void **__fh);
-
 unsigned long get_dram_base(efi_system_table_t *sys_table_arg);
 
 efi_status_t allocate_new_fdt_and_exit_boot(efi_system_table_t *sys_table,
index ae54870..aa66cbf 100644 (file)
@@ -1,6 +1,15 @@
 /*
  * runtime-wrappers.c - Runtime Services function call wrappers
  *
+ * Implementation summary:
+ * -----------------------
+ * 1. When a user/kernel thread requests to execute efi_runtime_service(),
+ * enqueue work to efi_rts_wq.
+ * 2. The caller thread waits until the work is finished, because it
+ * depends on the return status and side effects of the
+ * efi_runtime_service() call; get_variable() and get_next_variable()
+ * are examples.
+ *
  * Copyright (C) 2014 Linaro Ltd. <ard.biesheuvel@linaro.org>
  *
  * Split off from arch/x86/platform/efi/efi.c
@@ -22,6 +31,9 @@
 #include <linux/mutex.h>
 #include <linux/semaphore.h>
 #include <linux/stringify.h>
+#include <linux/workqueue.h>
+#include <linux/completion.h>
+
 #include <asm/efi.h>
 
 /*
 #define __efi_call_virt(f, args...) \
        __efi_call_virt_pointer(efi.systab->runtime, f, args)
 
+/* efi_runtime_service() function identifiers */
+enum efi_rts_ids {
+       GET_TIME,
+       SET_TIME,
+       GET_WAKEUP_TIME,
+       SET_WAKEUP_TIME,
+       GET_VARIABLE,
+       GET_NEXT_VARIABLE,
+       SET_VARIABLE,
+       QUERY_VARIABLE_INFO,
+       GET_NEXT_HIGH_MONO_COUNT,
+       UPDATE_CAPSULE,
+       QUERY_CAPSULE_CAPS,
+};
+
+/*
+ * efi_runtime_work:   Details of EFI Runtime Service work
+ * @arg<1-5>:          EFI Runtime Service function arguments
+ * @status:            Status of executing EFI Runtime Service
+ * @work:              Work entry queued onto efi_rts_wq
+ * @efi_rts_id:                EFI Runtime Service function identifier
+ * @efi_rts_comp:      Completion used to wait for the work to finish
+ */
+struct efi_runtime_work {
+       void *arg1;
+       void *arg2;
+       void *arg3;
+       void *arg4;
+       void *arg5;
+       efi_status_t status;
+       struct work_struct work;
+       enum efi_rts_ids efi_rts_id;
+       struct completion efi_rts_comp;
+};
+
+/*
+ * efi_queue_work:     Queue efi_runtime_service() and wait until it's done
+ * @rts:               efi_runtime_service() function identifier
+ * @rts_arg<1-5>:      efi_runtime_service() function arguments
+ *
+ * Accesses to efi_runtime_services() are serialized by a binary
+ * semaphore (efi_runtime_lock), and the caller waits until the work
+ * is finished, hence _only_ one work item is queued at a time.
+ */
+#define efi_queue_work(_rts, _arg1, _arg2, _arg3, _arg4, _arg5)                \
+({                                                                     \
+       struct efi_runtime_work efi_rts_work;                           \
+       efi_rts_work.status = EFI_ABORTED;                              \
+                                                                       \
+       init_completion(&efi_rts_work.efi_rts_comp);                    \
+       INIT_WORK_ONSTACK(&efi_rts_work.work, efi_call_rts);            \
+       efi_rts_work.arg1 = _arg1;                                      \
+       efi_rts_work.arg2 = _arg2;                                      \
+       efi_rts_work.arg3 = _arg3;                                      \
+       efi_rts_work.arg4 = _arg4;                                      \
+       efi_rts_work.arg5 = _arg5;                                      \
+       efi_rts_work.efi_rts_id = _rts;                                 \
+                                                                       \
+       /*                                                              \
+        * queue_work() returns 0 if work was already on queue,         \
+        * _ideally_ this should never happen.                          \
+        */                                                             \
+       if (queue_work(efi_rts_wq, &efi_rts_work.work))                 \
+               wait_for_completion(&efi_rts_work.efi_rts_comp);        \
+       else                                                            \
+               pr_err("Failed to queue work to efi_rts_wq.\n");        \
+                                                                       \
+       efi_rts_work.status;                                            \
+})
+
 void efi_call_virt_check_flags(unsigned long flags, const char *call)
 {
        unsigned long cur_flags, mismatch;
@@ -90,13 +172,98 @@ void efi_call_virt_check_flags(unsigned long flags, const char *call)
  */
 static DEFINE_SEMAPHORE(efi_runtime_lock);
 
+/*
+ * Calls the appropriate efi_runtime_service() with the appropriate
+ * arguments.
+ *
+ * How efi_call_rts() interprets the arguments stored in efi_runtime_work:
+ * 1. If the argument was a pointer, cast it back from void pointer to
+ * its original pointer type.
+ * 2. If the argument was a value, cast it back from void pointer to a
+ * pointer of its original type, then dereference it.
+ */
+static void efi_call_rts(struct work_struct *work)
+{
+       struct efi_runtime_work *efi_rts_work;
+       void *arg1, *arg2, *arg3, *arg4, *arg5;
+       efi_status_t status = EFI_NOT_FOUND;
+
+       efi_rts_work = container_of(work, struct efi_runtime_work, work);
+       arg1 = efi_rts_work->arg1;
+       arg2 = efi_rts_work->arg2;
+       arg3 = efi_rts_work->arg3;
+       arg4 = efi_rts_work->arg4;
+       arg5 = efi_rts_work->arg5;
+
+       switch (efi_rts_work->efi_rts_id) {
+       case GET_TIME:
+               status = efi_call_virt(get_time, (efi_time_t *)arg1,
+                                      (efi_time_cap_t *)arg2);
+               break;
+       case SET_TIME:
+               status = efi_call_virt(set_time, (efi_time_t *)arg1);
+               break;
+       case GET_WAKEUP_TIME:
+               status = efi_call_virt(get_wakeup_time, (efi_bool_t *)arg1,
+                                      (efi_bool_t *)arg2, (efi_time_t *)arg3);
+               break;
+       case SET_WAKEUP_TIME:
+               status = efi_call_virt(set_wakeup_time, *(efi_bool_t *)arg1,
+                                      (efi_time_t *)arg2);
+               break;
+       case GET_VARIABLE:
+               status = efi_call_virt(get_variable, (efi_char16_t *)arg1,
+                                      (efi_guid_t *)arg2, (u32 *)arg3,
+                                      (unsigned long *)arg4, (void *)arg5);
+               break;
+       case GET_NEXT_VARIABLE:
+               status = efi_call_virt(get_next_variable, (unsigned long *)arg1,
+                                      (efi_char16_t *)arg2,
+                                      (efi_guid_t *)arg3);
+               break;
+       case SET_VARIABLE:
+               status = efi_call_virt(set_variable, (efi_char16_t *)arg1,
+                                      (efi_guid_t *)arg2, *(u32 *)arg3,
+                                      *(unsigned long *)arg4, (void *)arg5);
+               break;
+       case QUERY_VARIABLE_INFO:
+               status = efi_call_virt(query_variable_info, *(u32 *)arg1,
+                                      (u64 *)arg2, (u64 *)arg3, (u64 *)arg4);
+               break;
+       case GET_NEXT_HIGH_MONO_COUNT:
+               status = efi_call_virt(get_next_high_mono_count, (u32 *)arg1);
+               break;
+       case UPDATE_CAPSULE:
+               status = efi_call_virt(update_capsule,
+                                      (efi_capsule_header_t **)arg1,
+                                      *(unsigned long *)arg2,
+                                      *(unsigned long *)arg3);
+               break;
+       case QUERY_CAPSULE_CAPS:
+               status = efi_call_virt(query_capsule_caps,
+                                      (efi_capsule_header_t **)arg1,
+                                      *(unsigned long *)arg2, (u64 *)arg3,
+                                      (int *)arg4);
+               break;
+       default:
+               /*
+                * Ideally, we should never reach here because a caller of this
+                * function should have put the right efi_runtime_service()
+                * function identifier into efi_rts_work->efi_rts_id
+                */
+               pr_err("Requested executing invalid EFI Runtime Service.\n");
+       }
+       efi_rts_work->status = status;
+       complete(&efi_rts_work->efi_rts_comp);
+}
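/*
 * Illustrative sketch (not part of the patch): the round trip that
 * efi_queue_work() and efi_call_rts() implement.  The caller packs its
 * arguments (value arguments are passed by address and dereferenced by
 * the worker, per the convention above), queues the work and blocks on
 * a completion; the worker runs in process context and signals back.
 * All names below are hypothetical.
 */
#include <linux/completion.h>
#include <linux/kernel.h>
#include <linux/workqueue.h>

struct demo_work {
	struct work_struct work;
	struct completion done;
	long status;
};

static void demo_fn(struct work_struct *work)
{
	struct demo_work *dw = container_of(work, struct demo_work, work);

	dw->status = 0;			/* the real service call goes here */
	complete(&dw->done);		/* wake the waiting caller */
}

static long demo_call(struct workqueue_struct *wq)
{
	struct demo_work dw = { .status = -1 };	/* left at -1 if not queued */

	INIT_WORK_ONSTACK(&dw.work, demo_fn);
	init_completion(&dw.done);
	if (queue_work(wq, &dw.work))
		wait_for_completion(&dw.done);
	destroy_work_on_stack(&dw.work);
	return dw.status;
}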
+
 static efi_status_t virt_efi_get_time(efi_time_t *tm, efi_time_cap_t *tc)
 {
        efi_status_t status;
 
        if (down_interruptible(&efi_runtime_lock))
                return EFI_ABORTED;
-       status = efi_call_virt(get_time, tm, tc);
+       status = efi_queue_work(GET_TIME, tm, tc, NULL, NULL, NULL);
        up(&efi_runtime_lock);
        return status;
 }
@@ -107,7 +274,7 @@ static efi_status_t virt_efi_set_time(efi_time_t *tm)
 
        if (down_interruptible(&efi_runtime_lock))
                return EFI_ABORTED;
-       status = efi_call_virt(set_time, tm);
+       status = efi_queue_work(SET_TIME, tm, NULL, NULL, NULL, NULL);
        up(&efi_runtime_lock);
        return status;
 }
@@ -120,7 +287,8 @@ static efi_status_t virt_efi_get_wakeup_time(efi_bool_t *enabled,
 
        if (down_interruptible(&efi_runtime_lock))
                return EFI_ABORTED;
-       status = efi_call_virt(get_wakeup_time, enabled, pending, tm);
+       status = efi_queue_work(GET_WAKEUP_TIME, enabled, pending, tm, NULL,
+                               NULL);
        up(&efi_runtime_lock);
        return status;
 }
@@ -131,7 +299,8 @@ static efi_status_t virt_efi_set_wakeup_time(efi_bool_t enabled, efi_time_t *tm)
 
        if (down_interruptible(&efi_runtime_lock))
                return EFI_ABORTED;
-       status = efi_call_virt(set_wakeup_time, enabled, tm);
+       status = efi_queue_work(SET_WAKEUP_TIME, &enabled, tm, NULL, NULL,
+                               NULL);
        up(&efi_runtime_lock);
        return status;
 }
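/*
 * Illustrative note (not part of the patch): set_wakeup_time() takes
 * 'enabled' by value, which is why the wrapper above passes &enabled
 * and efi_call_rts() dereferences it as *(efi_bool_t *)arg1, following
 * the pointer/value convention documented earlier.
 */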
@@ -146,8 +315,8 @@ static efi_status_t virt_efi_get_variable(efi_char16_t *name,
 
        if (down_interruptible(&efi_runtime_lock))
                return EFI_ABORTED;
-       status = efi_call_virt(get_variable, name, vendor, attr, data_size,
-                              data);
+       status = efi_queue_work(GET_VARIABLE, name, vendor, attr, data_size,
+                               data);
        up(&efi_runtime_lock);
        return status;
 }
@@ -160,7 +329,8 @@ static efi_status_t virt_efi_get_next_variable(unsigned long *name_size,
 
        if (down_interruptible(&efi_runtime_lock))
                return EFI_ABORTED;
-       status = efi_call_virt(get_next_variable, name_size, name, vendor);
+       status = efi_queue_work(GET_NEXT_VARIABLE, name_size, name, vendor,
+                               NULL, NULL);
        up(&efi_runtime_lock);
        return status;
 }
@@ -175,8 +345,8 @@ static efi_status_t virt_efi_set_variable(efi_char16_t *name,
 
        if (down_interruptible(&efi_runtime_lock))
                return EFI_ABORTED;
-       status = efi_call_virt(set_variable, name, vendor, attr, data_size,
-                              data);
+       status = efi_queue_work(SET_VARIABLE, name, vendor, &attr, &data_size,
+                               data);
        up(&efi_runtime_lock);
        return status;
 }
@@ -210,8 +380,8 @@ static efi_status_t virt_efi_query_variable_info(u32 attr,
 
        if (down_interruptible(&efi_runtime_lock))
                return EFI_ABORTED;
-       status = efi_call_virt(query_variable_info, attr, storage_space,
-                              remaining_space, max_variable_size);
+       status = efi_queue_work(QUERY_VARIABLE_INFO, &attr, storage_space,
+                               remaining_space, max_variable_size, NULL);
        up(&efi_runtime_lock);
        return status;
 }
@@ -242,7 +412,8 @@ static efi_status_t virt_efi_get_next_high_mono_count(u32 *count)
 
        if (down_interruptible(&efi_runtime_lock))
                return EFI_ABORTED;
-       status = efi_call_virt(get_next_high_mono_count, count);
+       status = efi_queue_work(GET_NEXT_HIGH_MONO_COUNT, count, NULL, NULL,
+                               NULL, NULL);
        up(&efi_runtime_lock);
        return status;
 }
@@ -272,7 +443,8 @@ static efi_status_t virt_efi_update_capsule(efi_capsule_header_t **capsules,
 
        if (down_interruptible(&efi_runtime_lock))
                return EFI_ABORTED;
-       status = efi_call_virt(update_capsule, capsules, count, sg_list);
+       status = efi_queue_work(UPDATE_CAPSULE, capsules, &count, &sg_list,
+                               NULL, NULL);
        up(&efi_runtime_lock);
        return status;
 }
@@ -289,8 +461,8 @@ static efi_status_t virt_efi_query_capsule_caps(efi_capsule_header_t **capsules,
 
        if (down_interruptible(&efi_runtime_lock))
                return EFI_ABORTED;
-       status = efi_call_virt(query_capsule_caps, capsules, count, max_size,
-                              reset_type);
+       status = efi_queue_work(QUERY_CAPSULE_CAPS, capsules, &count,
+                               max_size, reset_type, NULL);
        up(&efi_runtime_lock);
        return status;
 }
index d3cf950..58faeb1 100644 (file)
@@ -181,7 +181,11 @@ static int uniphier_gpio_to_irq(struct gpio_chip *chip, unsigned int offset)
        fwspec.fwnode = of_node_to_fwnode(chip->parent->of_node);
        fwspec.param_count = 2;
        fwspec.param[0] = offset - UNIPHIER_GPIO_IRQ_OFFSET;
-       fwspec.param[1] = IRQ_TYPE_NONE;
+       /*
+        * IRQ_TYPE_NONE is rejected by the parent irq domain. Set LEVEL_HIGH
+        * temporarily. Anyway, ->irq_set_type() will override it later.
+        */
+       fwspec.param[1] = IRQ_TYPE_LEVEL_HIGH;
 
        return irq_create_fwspec_mapping(&fwspec);
 }
index e2232cb..addd9fe 100644 (file)
@@ -25,6 +25,7 @@
 
 struct acpi_gpio_event {
        struct list_head node;
+       struct list_head initial_sync_list;
        acpi_handle handle;
        unsigned int pin;
        unsigned int irq;
@@ -50,6 +51,9 @@ struct acpi_gpio_chip {
        struct list_head events;
 };
 
+static LIST_HEAD(acpi_gpio_initial_sync_list);
+static DEFINE_MUTEX(acpi_gpio_initial_sync_list_lock);
+
 static int acpi_gpiochip_find(struct gpio_chip *gc, void *data)
 {
        if (!gc->parent)
@@ -85,6 +89,21 @@ static struct gpio_desc *acpi_get_gpiod(char *path, int pin)
        return gpiochip_get_desc(chip, pin);
 }
 
+static void acpi_gpio_add_to_initial_sync_list(struct acpi_gpio_event *event)
+{
+       mutex_lock(&acpi_gpio_initial_sync_list_lock);
+       list_add(&event->initial_sync_list, &acpi_gpio_initial_sync_list);
+       mutex_unlock(&acpi_gpio_initial_sync_list_lock);
+}
+
+static void acpi_gpio_del_from_initial_sync_list(struct acpi_gpio_event *event)
+{
+       mutex_lock(&acpi_gpio_initial_sync_list_lock);
+       if (!list_empty(&event->initial_sync_list))
+               list_del_init(&event->initial_sync_list);
+       mutex_unlock(&acpi_gpio_initial_sync_list_lock);
+}
+
 static irqreturn_t acpi_gpio_irq_handler(int irq, void *data)
 {
        struct acpi_gpio_event *event = data;
@@ -136,7 +155,7 @@ static acpi_status acpi_gpiochip_request_interrupt(struct acpi_resource *ares,
        irq_handler_t handler = NULL;
        struct gpio_desc *desc;
        unsigned long irqflags;
-       int ret, pin, irq;
+       int ret, pin, irq, value;
 
        if (!acpi_gpio_get_irq_resource(ares, &agpio))
                return AE_OK;
@@ -167,6 +186,8 @@ static acpi_status acpi_gpiochip_request_interrupt(struct acpi_resource *ares,
 
        gpiod_direction_input(desc);
 
+       value = gpiod_get_value(desc);
+
        ret = gpiochip_lock_as_irq(chip, pin);
        if (ret) {
                dev_err(chip->parent, "Failed to lock GPIO as interrupt\n");
@@ -208,6 +229,7 @@ static acpi_status acpi_gpiochip_request_interrupt(struct acpi_resource *ares,
        event->irq = irq;
        event->pin = pin;
        event->desc = desc;
+       INIT_LIST_HEAD(&event->initial_sync_list);
 
        ret = request_threaded_irq(event->irq, NULL, handler, irqflags,
                                   "ACPI:Event", event);
@@ -222,6 +244,18 @@ static acpi_status acpi_gpiochip_request_interrupt(struct acpi_resource *ares,
                enable_irq_wake(irq);
 
        list_add_tail(&event->node, &acpi_gpio->events);
+
+       /*
+        * Make sure we trigger the initial state of the IRQ when using RISING
+        * or FALLING.  Note we run the handlers at late_init, since the
+        * AML code may refer to OperationRegions from other (builtin)
+        * drivers which may be probed after us.
+        */
+       if (handler == acpi_gpio_irq_handler &&
+           (((irqflags & IRQF_TRIGGER_RISING) && value == 1) ||
+            ((irqflags & IRQF_TRIGGER_FALLING) && value == 0)))
+               acpi_gpio_add_to_initial_sync_list(event);
+
        return AE_OK;
 
 fail_free_event:
@@ -294,6 +328,8 @@ void acpi_gpiochip_free_interrupts(struct gpio_chip *chip)
        list_for_each_entry_safe_reverse(event, ep, &acpi_gpio->events, node) {
                struct gpio_desc *desc;
 
+               acpi_gpio_del_from_initial_sync_list(event);
+
                if (irqd_is_wakeup_set(irq_get_irq_data(event->irq)))
                        disable_irq_wake(event->irq);
 
@@ -1158,3 +1194,21 @@ bool acpi_can_fallback_to_crs(struct acpi_device *adev, const char *con_id)
 
        return con_id == NULL;
 }
+
+/* Sync the initial state of handlers after all builtin drivers have probed */
+static int acpi_gpio_initial_sync(void)
+{
+       struct acpi_gpio_event *event, *ep;
+
+       mutex_lock(&acpi_gpio_initial_sync_list_lock);
+       list_for_each_entry_safe(event, ep, &acpi_gpio_initial_sync_list,
+                                initial_sync_list) {
+               acpi_evaluate_object(event->handle, NULL, NULL, NULL);
+               list_del_init(&event->initial_sync_list);
+       }
+       mutex_unlock(&acpi_gpio_initial_sync_list_lock);
+
+       return 0;
+}
+/* We must use _sync so that this runs after the first deferred_probe run */
+late_initcall_sync(acpi_gpio_initial_sync);
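/*
 * Illustrative note (not part of the patch): initcalls run level by
 * level, and the _sync variant of a level runs after all plain calls
 * of that level.  The deferred-probe flush is itself a late_initcall,
 * so late_initcall_sync() is the earliest point guaranteed to run
 * after it, which is what the handlers above rely on.
 */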
index 28d9680..53a14ee 100644 (file)
@@ -64,7 +64,8 @@ static void of_gpio_flags_quirks(struct device_node *np,
         * Note that active low is the default.
         */
        if (IS_ENABLED(CONFIG_REGULATOR) &&
-           (of_device_is_compatible(np, "reg-fixed-voltage") ||
+           (of_device_is_compatible(np, "regulator-fixed") ||
+            of_device_is_compatible(np, "reg-fixed-voltage") ||
             of_device_is_compatible(np, "regulator-gpio"))) {
                /*
                 * The regulator GPIO handles are specified such that the
index 73021b3..dd3ff2f 100644 (file)
@@ -429,6 +429,18 @@ static void adv7511_hpd_work(struct work_struct *work)
        else
                status = connector_status_disconnected;
 
+       /*
+        * The bridge resets its registers on unplug. So when we get a plug
+        * event and we're already supposed to be powered, cycle the bridge to
+        * restore its state.
+        */
+       if (status == connector_status_connected &&
+           adv7511->connector.status == connector_status_disconnected &&
+           adv7511->powered) {
+               regcache_mark_dirty(adv7511->regmap);
+               adv7511_power_on(adv7511);
+       }
+
        if (adv7511->connector.status != status) {
                adv7511->connector.status = status;
                if (status == connector_status_disconnected)
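/*
 * Illustrative sketch (not part of the patch): the regmap cache cycle
 * the hunk above relies on.  Marking the cache dirty makes a later
 * regcache_sync() rewrite every cached register to the hardware; here
 * adv7511_power_on() is assumed to end up performing that sync.  The
 * helper below is hypothetical.
 */
#include <linux/regmap.h>

static int demo_restore_regs(struct regmap *map)
{
	regcache_mark_dirty(map);	/* cache no longer matches the HW */
	return regcache_sync(map);	/* replay cached values to the HW */
}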
index 130da51..81e3219 100644 (file)
@@ -1510,8 +1510,9 @@ int drm_atomic_helper_async_check(struct drm_device *dev,
 {
        struct drm_crtc *crtc;
        struct drm_crtc_state *crtc_state;
-       struct drm_plane *plane;
-       struct drm_plane_state *old_plane_state, *new_plane_state;
+       struct drm_plane *plane = NULL;
+       struct drm_plane_state *old_plane_state = NULL;
+       struct drm_plane_state *new_plane_state = NULL;
        const struct drm_plane_helper_funcs *funcs;
        int i, n_planes = 0;
 
@@ -1527,7 +1528,8 @@ int drm_atomic_helper_async_check(struct drm_device *dev,
        if (n_planes != 1)
                return -EINVAL;
 
-       if (!new_plane_state->crtc)
+       if (!new_plane_state->crtc ||
+           old_plane_state->crtc != new_plane_state->crtc)
                return -EINVAL;
 
        funcs = plane->helper_private;
index 3c4000f..f973d28 100644 (file)
@@ -372,7 +372,7 @@ int drm_legacy_addctx(struct drm_device *dev, void *data,
                ctx->handle = drm_legacy_ctxbitmap_next(dev);
        }
        DRM_DEBUG("%d\n", ctx->handle);
-       if (ctx->handle == -1) {
+       if (ctx->handle < 0) {
                DRM_DEBUG("Not enough free contexts.\n");
                /* Should this return -EBUSY instead? */
                return -ENOMEM;
index 52f3b91..71e1aa5 100644 (file)
@@ -652,6 +652,7 @@ enum intel_sbi_destination {
 #define QUIRK_BACKLIGHT_PRESENT (1<<3)
 #define QUIRK_PIN_SWIZZLED_PAGES (1<<5)
 #define QUIRK_INCREASE_T12_DELAY (1<<6)
+#define QUIRK_INCREASE_DDI_DISABLED_TIME (1<<7)
 
 struct intel_fbdev;
 struct intel_fbc_work;
index f4a8598..fed26d6 100644 (file)
@@ -1782,15 +1782,24 @@ void intel_ddi_enable_transcoder_func(const struct intel_crtc_state *crtc_state)
        I915_WRITE(TRANS_DDI_FUNC_CTL(cpu_transcoder), temp);
 }
 
-void intel_ddi_disable_transcoder_func(struct drm_i915_private *dev_priv,
-                                      enum transcoder cpu_transcoder)
+void intel_ddi_disable_transcoder_func(const struct intel_crtc_state *crtc_state)
 {
+       struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+       enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
        i915_reg_t reg = TRANS_DDI_FUNC_CTL(cpu_transcoder);
        uint32_t val = I915_READ(reg);
 
        val &= ~(TRANS_DDI_FUNC_ENABLE | TRANS_DDI_PORT_MASK | TRANS_DDI_DP_VC_PAYLOAD_ALLOC);
        val |= TRANS_DDI_PORT_NONE;
        I915_WRITE(reg, val);
+
+       if (dev_priv->quirks & QUIRK_INCREASE_DDI_DISABLED_TIME &&
+           intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
+               DRM_DEBUG_KMS("Quirk Increase DDI disabled time\n");
+               /* Quirk time at 100ms for reliable operation */
+               msleep(100);
+       }
 }
 
 int intel_ddi_toggle_hdcp_signalling(struct intel_encoder *intel_encoder,
index 2cc6faa..dec0d60 100644 (file)
@@ -5809,7 +5809,7 @@ static void haswell_crtc_disable(struct intel_crtc_state *old_crtc_state,
                intel_ddi_set_vc_payload_alloc(intel_crtc->config, false);
 
        if (!transcoder_is_dsi(cpu_transcoder))
-               intel_ddi_disable_transcoder_func(dev_priv, cpu_transcoder);
+               intel_ddi_disable_transcoder_func(old_crtc_state);
 
        if (INTEL_GEN(dev_priv) >= 9)
                skylake_scaler_disable(intel_crtc);
@@ -14646,6 +14646,18 @@ static void quirk_increase_t12_delay(struct drm_device *dev)
        DRM_INFO("Applying T12 delay quirk\n");
 }
 
+/*
+ * GeminiLake NUC HDMI outputs require additional off time;
+ * this allows the onboard retimer to correctly sync to the signal.
+ */
+static void quirk_increase_ddi_disabled_time(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = to_i915(dev);
+
+       dev_priv->quirks |= QUIRK_INCREASE_DDI_DISABLED_TIME;
+       DRM_INFO("Applying Increase DDI Disabled quirk\n");
+}
+
 struct intel_quirk {
        int device;
        int subsystem_vendor;
@@ -14732,6 +14744,13 @@ static struct intel_quirk intel_quirks[] = {
 
        /* Toshiba Satellite P50-C-18C */
        { 0x191B, 0x1179, 0xF840, quirk_increase_t12_delay },
+
+       /* GeminiLake NUC */
+       { 0x3185, 0x8086, 0x2072, quirk_increase_ddi_disabled_time },
+       { 0x3184, 0x8086, 0x2072, quirk_increase_ddi_disabled_time },
+       /* ASRock ITX */
+       { 0x3185, 0x1849, 0x2212, quirk_increase_ddi_disabled_time },
+       { 0x3184, 0x1849, 0x2212, quirk_increase_ddi_disabled_time },
 };
 
 static void intel_init_quirks(struct drm_device *dev)
index 0361130..b8eefbf 100644 (file)
@@ -1388,8 +1388,7 @@ void hsw_fdi_link_train(struct intel_crtc *crtc,
 void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port);
 bool intel_ddi_get_hw_state(struct intel_encoder *encoder, enum pipe *pipe);
 void intel_ddi_enable_transcoder_func(const struct intel_crtc_state *crtc_state);
-void intel_ddi_disable_transcoder_func(struct drm_i915_private *dev_priv,
-                                      enum transcoder cpu_transcoder);
+void intel_ddi_disable_transcoder_func(const struct intel_crtc_state *crtc_state);
 void intel_ddi_enable_pipe_clock(const struct intel_crtc_state *crtc_state);
 void intel_ddi_disable_pipe_clock(const  struct intel_crtc_state *crtc_state);
 struct intel_encoder *
index 56dd7a9..dd5312b 100644 (file)
@@ -612,6 +612,9 @@ static int imx_ldb_bind(struct device *dev, struct device *master, void *data)
                return PTR_ERR(imx_ldb->regmap);
        }
 
+       /* disable LDB by resetting the control register to POR default */
+       regmap_write(imx_ldb->regmap, IOMUXC_GPR2, 0);
+
        imx_ldb->dev = dev;
 
        if (of_id)
@@ -652,14 +655,14 @@ static int imx_ldb_bind(struct device *dev, struct device *master, void *data)
                if (ret || i < 0 || i > 1)
                        return -EINVAL;
 
+               if (!of_device_is_available(child))
+                       continue;
+
                if (dual && i > 0) {
                        dev_warn(dev, "dual-channel mode, ignoring second output\n");
                        continue;
                }
 
-               if (!of_device_is_available(child))
-                       continue;
-
                channel = &imx_ldb->channel[i];
                channel->ldb = imx_ldb;
                channel->chno = i;
index 1d34619..a951ec7 100644 (file)
@@ -320,6 +320,9 @@ static int vc4_plane_setup_clipping_and_scaling(struct drm_plane_state *state)
                        vc4_state->x_scaling[0] = VC4_SCALING_TPZ;
                if (vc4_state->y_scaling[0] == VC4_SCALING_NONE)
                        vc4_state->y_scaling[0] = VC4_SCALING_TPZ;
+       } else {
+               vc4_state->x_scaling[1] = VC4_SCALING_NONE;
+               vc4_state->y_scaling[1] = VC4_SCALING_NONE;
        }
 
        vc4_state->is_unity = (vc4_state->x_scaling[0] == VC4_SCALING_NONE &&
index caa05b0..5450a2d 100644 (file)
@@ -339,7 +339,8 @@ static void fill_csi_bus_cfg(struct ipu_csi_bus_config *csicfg,
                break;
        case V4L2_MBUS_BT656:
                csicfg->ext_vsync = 0;
-               if (V4L2_FIELD_HAS_BOTH(mbus_fmt->field))
+               if (V4L2_FIELD_HAS_BOTH(mbus_fmt->field) ||
+                   mbus_fmt->field == V4L2_FIELD_ALTERNATE)
                        csicfg->clk_mode = IPU_CSI_CLK_MODE_CCIR656_INTERLACED;
                else
                        csicfg->clk_mode = IPU_CSI_CLK_MODE_CCIR656_PROGRESSIVE;
index 75d6ab1..7379043 100644 (file)
@@ -237,12 +237,16 @@ static void i2c_davinci_calc_clk_dividers(struct davinci_i2c_dev *dev)
        /*
         * It's not always possible to have 1 to 2 ratio when d=7, so fall back
         * to minimal possible clkh in this case.
+        *
+        * Note:
+        * CLKH is not allowed to be 0; in that case the I2C clock is not
+        * generated at all.
         */
-       if (clk >= clkl + d) {
+       if (clk > clkl + d) {
                clkh = clk - clkl - d;
                clkl -= d;
        } else {
-               clkh = 0;
+               clkh = 1;
                clkl = clk - (d << 1);
        }
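/*
 * Illustrative sketch (not part of the patch): the divider arithmetic
 * changed above, as plain C.  The clkl seed below (a 1:2 high/low split)
 * and the sample values are hypothetical; what matters is the boundary
 * case clk == clkl + d, where the old '>=' test produced clkh = 0 and
 * thus no SCL at all.
 */
#include <stdio.h>

static void calc(unsigned int clk, unsigned int d)
{
	unsigned int clkl = (2 * clk) / 3;	/* aim for a 1:2 ratio */
	unsigned int clkh;

	if (clk > clkl + d) {
		clkh = clk - clkl - d;
		clkl -= d;
	} else {
		clkh = 1;	/* CLKH == 0 would stop clock generation */
		clkl = clk - (d << 1);
	}
	printf("clk=%u d=%u -> clkh=%u clkl=%u\n", clk, d, clkh, clkl);
}

int main(void)
{
	calc(21, 7);	/* boundary: the old '>=' test yielded clkh = 0 */
	calc(60, 7);	/* normal case: clkh = 13, clkl = 33 */
	return 0;
}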
 
index 0207e19..498c5e8 100644 (file)
@@ -368,6 +368,7 @@ static int i2c_imx_dma_xfer(struct imx_i2c_struct *i2c_imx,
                goto err_desc;
        }
 
+       reinit_completion(&dma->cmd_complete);
        txdesc->callback = i2c_imx_dma_callback;
        txdesc->callback_param = i2c_imx;
        if (dma_submit_error(dmaengine_submit(txdesc))) {
@@ -622,7 +623,6 @@ static int i2c_imx_dma_write(struct imx_i2c_struct *i2c_imx,
         * The first byte must be transmitted by the CPU.
         */
        imx_i2c_write_reg(i2c_8bit_addr_from_msg(msgs), i2c_imx, IMX_I2C_I2DR);
-       reinit_completion(&i2c_imx->dma->cmd_complete);
        time_left = wait_for_completion_timeout(
                                &i2c_imx->dma->cmd_complete,
                                msecs_to_jiffies(DMA_TIMEOUT));
@@ -681,7 +681,6 @@ static int i2c_imx_dma_read(struct imx_i2c_struct *i2c_imx,
        if (result)
                return result;
 
-       reinit_completion(&i2c_imx->dma->cmd_complete);
        time_left = wait_for_completion_timeout(
                                &i2c_imx->dma->cmd_complete,
                                msecs_to_jiffies(DMA_TIMEOUT));
@@ -1010,7 +1009,7 @@ static int i2c_imx_init_recovery_info(struct imx_i2c_struct *i2c_imx,
        i2c_imx->pinctrl_pins_gpio = pinctrl_lookup_state(i2c_imx->pinctrl,
                        "gpio");
        rinfo->sda_gpiod = devm_gpiod_get(&pdev->dev, "sda", GPIOD_IN);
-       rinfo->scl_gpiod = devm_gpiod_get(&pdev->dev, "scl", GPIOD_OUT_HIGH);
+       rinfo->scl_gpiod = devm_gpiod_get(&pdev->dev, "scl", GPIOD_OUT_HIGH_OPEN_DRAIN);
 
        if (PTR_ERR(rinfo->sda_gpiod) == -EPROBE_DEFER ||
            PTR_ERR(rinfo->scl_gpiod) == -EPROBE_DEFER) {
index 5e310ef..3c1c817 100644 (file)
@@ -32,6 +32,7 @@
 #include <linux/of_device.h>
 #include <linux/platform_device.h>
 #include <linux/pm_runtime.h>
+#include <linux/reset.h>
 #include <linux/slab.h>
 
 /* register offsets */
 #define ID_ARBLOST     (1 << 3)
 #define ID_NACK                (1 << 4)
 /* persistent flags */
+#define ID_P_NO_RXDMA  (1 << 30) /* HW forbids RXDMA sometimes */
 #define ID_P_PM_BLOCKED        (1 << 31)
-#define ID_P_MASK      ID_P_PM_BLOCKED
+#define ID_P_MASK      (ID_P_PM_BLOCKED | ID_P_NO_RXDMA)
 
 enum rcar_i2c_type {
        I2C_RCAR_GEN1,
@@ -141,6 +143,8 @@ struct rcar_i2c_priv {
        struct dma_chan *dma_rx;
        struct scatterlist sg;
        enum dma_data_direction dma_direction;
+
+       struct reset_control *rstc;
 };
 
 #define rcar_i2c_priv_to_dev(p)                ((p)->adap.dev.parent)
@@ -370,6 +374,11 @@ static void rcar_i2c_dma_unmap(struct rcar_i2c_priv *priv)
        dma_unmap_single(chan->device->dev, sg_dma_address(&priv->sg),
                         sg_dma_len(&priv->sg), priv->dma_direction);
 
+       /* Gen3 can only do one RXDMA per transfer and we just completed it */
+       if (priv->devtype == I2C_RCAR_GEN3 &&
+           priv->dma_direction == DMA_FROM_DEVICE)
+               priv->flags |= ID_P_NO_RXDMA;
+
        priv->dma_direction = DMA_NONE;
 }
 
@@ -407,8 +416,9 @@ static void rcar_i2c_dma(struct rcar_i2c_priv *priv)
        unsigned char *buf;
        int len;
 
-       /* Do not use DMA if it's not available or for messages < 8 bytes */
-       if (IS_ERR(chan) || msg->len < 8 || !(msg->flags & I2C_M_DMA_SAFE))
+       /* Do various checks to see if DMA is feasible at all */
+       if (IS_ERR(chan) || msg->len < 8 || !(msg->flags & I2C_M_DMA_SAFE) ||
+           (read && priv->flags & ID_P_NO_RXDMA))
                return;
 
        if (read) {
@@ -739,6 +749,25 @@ static void rcar_i2c_release_dma(struct rcar_i2c_priv *priv)
        }
 }
 
+/* I2C is a special case: we need to poll the status of a reset */
+static int rcar_i2c_do_reset(struct rcar_i2c_priv *priv)
+{
+       int i, ret;
+
+       ret = reset_control_reset(priv->rstc);
+       if (ret)
+               return ret;
+
+       for (i = 0; i < LOOP_TIMEOUT; i++) {
+               ret = reset_control_status(priv->rstc);
+               if (ret == 0)
+                       return 0;
+               udelay(1);
+       }
+
+       return -ETIMEDOUT;
+}
+
 static int rcar_i2c_master_xfer(struct i2c_adapter *adap,
                                struct i2c_msg *msgs,
                                int num)
@@ -750,6 +779,16 @@ static int rcar_i2c_master_xfer(struct i2c_adapter *adap,
 
        pm_runtime_get_sync(dev);
 
+       /* Gen3 needs a reset before allowing RXDMA once */
+       if (priv->devtype == I2C_RCAR_GEN3) {
+               priv->flags |= ID_P_NO_RXDMA;
+               if (!IS_ERR(priv->rstc)) {
+                       ret = rcar_i2c_do_reset(priv);
+                       if (ret == 0)
+                               priv->flags &= ~ID_P_NO_RXDMA;
+               }
+       }
+
        rcar_i2c_init(priv);
 
        ret = rcar_i2c_bus_barrier(priv);
@@ -920,6 +959,15 @@ static int rcar_i2c_probe(struct platform_device *pdev)
        if (ret < 0)
                goto out_pm_put;
 
+       if (priv->devtype == I2C_RCAR_GEN3) {
+               priv->rstc = devm_reset_control_get_exclusive(&pdev->dev, NULL);
+               if (!IS_ERR(priv->rstc)) {
+                       ret = reset_control_status(priv->rstc);
+                       if (ret < 0)
+                               priv->rstc = ERR_PTR(-ENOTSUPP);
+               }
+       }
+
        /* Stay always active when multi-master to keep arbitration working */
        if (of_property_read_bool(dev->of_node, "multi-master"))
                priv->flags |= ID_P_PM_BLOCKED;
index 1f41a4f..8a87397 100644 (file)
@@ -191,28 +191,43 @@ static void xlp9xx_i2c_drain_rx_fifo(struct xlp9xx_i2c_dev *priv)
        if (priv->len_recv) {
                /* read length byte */
                rlen = xlp9xx_read_i2c_reg(priv, XLP9XX_I2C_MRXFIFO);
+
+               /*
+                * We expect at least 2 interrupts for I2C_M_RECV_LEN
+                * transactions. The length is updated during the first
+                * interrupt, and the buffer contents are only copied
+                * during subsequent interrupts. If the interrupts get
+                * merged, we would complete the transaction without
+                * copying out the bytes from the RX fifo. To avoid this,
+                * we now drain the fifo as soon as data is available.
+                * We have already drained the rlen byte, so decrement the
+                * total length by one.
+                */
+
+               len--;
                if (rlen > I2C_SMBUS_BLOCK_MAX || rlen == 0) {
                        rlen = 0;       /*abort transfer */
                        priv->msg_buf_remaining = 0;
                        priv->msg_len = 0;
-               } else {
-                       *buf++ = rlen;
-                       if (priv->client_pec)
-                               ++rlen; /* account for error check byte */
-                       /* update remaining bytes and message length */
-                       priv->msg_buf_remaining = rlen;
-                       priv->msg_len = rlen + 1;
+                       xlp9xx_i2c_update_rlen(priv);
+                       return;
                }
+
+               *buf++ = rlen;
+               if (priv->client_pec)
+                       ++rlen; /* account for error check byte */
+               /* update remaining bytes and message length */
+               priv->msg_buf_remaining = rlen;
+               priv->msg_len = rlen + 1;
                xlp9xx_i2c_update_rlen(priv);
                priv->len_recv = false;
-       } else {
-               len = min(priv->msg_buf_remaining, len);
-               for (i = 0; i < len; i++, buf++)
-                       *buf = xlp9xx_read_i2c_reg(priv, XLP9XX_I2C_MRXFIFO);
-
-               priv->msg_buf_remaining -= len;
        }
 
+       len = min(priv->msg_buf_remaining, len);
+       for (i = 0; i < len; i++, buf++)
+               *buf = xlp9xx_read_i2c_reg(priv, XLP9XX_I2C_MRXFIFO);
+
+       priv->msg_buf_remaining -= len;
        priv->msg_buf = buf;
 
        if (priv->msg_buf_remaining)
index 301285c..15c95aa 100644 (file)
@@ -624,7 +624,7 @@ static int i2c_check_addr_busy(struct i2c_adapter *adapter, int addr)
 static void i2c_adapter_lock_bus(struct i2c_adapter *adapter,
                                 unsigned int flags)
 {
-       rt_mutex_lock(&adapter->bus_lock);
+       rt_mutex_lock_nested(&adapter->bus_lock, i2c_adapter_depth(adapter));
 }
 
 /**
index 300ab4b..29646aa 100644 (file)
@@ -144,7 +144,7 @@ static void i2c_mux_lock_bus(struct i2c_adapter *adapter, unsigned int flags)
        struct i2c_mux_priv *priv = adapter->algo_data;
        struct i2c_adapter *parent = priv->muxc->parent;
 
-       rt_mutex_lock(&parent->mux_lock);
+       rt_mutex_lock_nested(&parent->mux_lock, i2c_adapter_depth(adapter));
        if (!(flags & I2C_LOCK_ROOT_ADAPTER))
                return;
        i2c_lock_bus(parent, flags);
@@ -181,7 +181,7 @@ static void i2c_parent_lock_bus(struct i2c_adapter *adapter,
        struct i2c_mux_priv *priv = adapter->algo_data;
        struct i2c_adapter *parent = priv->muxc->parent;
 
-       rt_mutex_lock(&parent->mux_lock);
+       rt_mutex_lock_nested(&parent->mux_lock, i2c_adapter_depth(adapter));
        i2c_lock_bus(parent, flags);
 }
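/*
 * Illustrative sketch (not part of the patch): the lockdep pattern used
 * above.  Parent and mux adapters share one lock class, so taking their
 * locks along a chain looks recursive to lockdep unless each nesting
 * level gets its own subclass (i2c_adapter_depth() supplies it above).
 * The demo_node type and helper are hypothetical.
 */
#include <linux/mutex.h>

struct demo_node {
	struct mutex lock;
	struct demo_node *parent;
};

/* Lock a node and all its ancestors, one lockdep subclass per level
 * (subclasses must stay below MAX_LOCKDEP_SUBCLASSES, i.e. 8). */
static void demo_lock_up(struct demo_node *n)
{
	unsigned int subclass = 0;

	for (; n; n = n->parent)
		mutex_lock_nested(&n->lock, subclass++);
}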
 
index cc06e84..583d3a1 100644 (file)
@@ -1984,15 +1984,64 @@ static int modify_qp(struct ib_uverbs_file *file,
                goto release_qp;
        }
 
-       if ((cmd->base.attr_mask & IB_QP_AV) &&
-           !rdma_is_port_valid(qp->device, cmd->base.dest.port_num)) {
-               ret = -EINVAL;
-               goto release_qp;
+       if ((cmd->base.attr_mask & IB_QP_AV)) {
+               if (!rdma_is_port_valid(qp->device, cmd->base.dest.port_num)) {
+                       ret = -EINVAL;
+                       goto release_qp;
+               }
+
+               if (cmd->base.attr_mask & IB_QP_STATE &&
+                   cmd->base.qp_state == IB_QPS_RTR) {
+               /* We are in the INIT->RTR transition (if we are not,
+                * this transition will be rejected in subsequent checks).
+                * In the INIT->RTR transition, we cannot have IB_QP_PORT set,
+                * but the IB_QP_STATE flag is required.
+                *
+                * Since kernel 3.14 (commit dbf727de7440), the uverbs driver,
+                * when IB_QP_AV is set, has required inclusion of a valid
+                * port number in the primary AV. (AVs are created and handled
+                * differently for infiniband and ethernet (RoCE) ports).
+                *
+                * Check the port number included in the primary AV against
+                * the port number in the qp struct, which was set (and saved)
+                * in the RST->INIT transition.
+                */
+                       if (cmd->base.dest.port_num != qp->real_qp->port) {
+                               ret = -EINVAL;
+                               goto release_qp;
+                       }
+               } else {
+               /* We are in SQD->SQD. (If we are not, this transition will
+                * be rejected later in the verbs layer checks).
+                * Check for both IB_QP_PORT and IB_QP_AV, these can be set
+                * together in the SQD->SQD transition.
+                *
+                * If only IB_QP_AV was set, add in IB_QP_PORT as well (the
+                * verbs layer driver does not track primary port changes
+                * resulting from path migration. Thus, in SQD, if the primary
+                * AV is modified, the primary port should also be modified).
+                *
+                * Note that in this transition, the IB_QP_STATE flag
+                * is not allowed.
+                */
+                       if (((cmd->base.attr_mask & (IB_QP_AV | IB_QP_PORT))
+                            == (IB_QP_AV | IB_QP_PORT)) &&
+                           cmd->base.port_num != cmd->base.dest.port_num) {
+                               ret = -EINVAL;
+                               goto release_qp;
+                       }
+                       if ((cmd->base.attr_mask & (IB_QP_AV | IB_QP_PORT))
+                           == IB_QP_AV) {
+                               cmd->base.attr_mask |= IB_QP_PORT;
+                               cmd->base.port_num = cmd->base.dest.port_num;
+                       }
+               }
        }
 
        if ((cmd->base.attr_mask & IB_QP_ALT_PATH) &&
            (!rdma_is_port_valid(qp->device, cmd->base.alt_port_num) ||
-           !rdma_is_port_valid(qp->device, cmd->base.alt_dest.port_num))) {
+           !rdma_is_port_valid(qp->device, cmd->base.alt_dest.port_num) ||
+           cmd->base.alt_port_num != cmd->base.alt_dest.port_num)) {
                ret = -EINVAL;
                goto release_qp;
        }
index a4e404a..5c7afde 100644 (file)
@@ -57,8 +57,8 @@ MODULE_LICENSE("GPL v2");
  #define HIL_DATA              0x1
  #define HIL_CMD               0x3
  #define HIL_IRQ               2
- #define hil_readb(p)          readb(p)
- #define hil_writeb(v,p)       writeb((v),(p))
+ #define hil_readb(p)          readb((const volatile void __iomem *)(p))
+ #define hil_writeb(v, p)      writeb((v), (volatile void __iomem *)(p))
 
 #else
 #error "HIL is not supported on this platform"
index 1f9cd7d..f5ae248 100644 (file)
@@ -1346,6 +1346,8 @@ static const struct acpi_device_id elan_acpi_id[] = {
        { "ELAN0611", 0 },
        { "ELAN0612", 0 },
        { "ELAN0618", 0 },
+       { "ELAN061D", 0 },
+       { "ELAN0622", 0 },
        { "ELAN1000", 0 },
        { }
 };
index b353d49..136f6e7 100644 (file)
@@ -527,6 +527,13 @@ static const struct dmi_system_id __initconst i8042_dmi_nomux_table[] = {
                        DMI_MATCH(DMI_PRODUCT_NAME, "N24_25BU"),
                },
        },
+       {
+               /* Lenovo LaVie Z */
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+                       DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo LaVie Z"),
+               },
+       },
        { }
 };
 
index e9233db..d564d21 100644 (file)
@@ -8,7 +8,7 @@ config ARM_GIC
        bool
        select IRQ_DOMAIN
        select IRQ_DOMAIN_HIERARCHY
-       select MULTI_IRQ_HANDLER
+       select GENERIC_IRQ_MULTI_HANDLER
        select GENERIC_IRQ_EFFECTIVE_AFF_MASK
 
 config ARM_GIC_PM
@@ -34,7 +34,7 @@ config GIC_NON_BANKED
 config ARM_GIC_V3
        bool
        select IRQ_DOMAIN
-       select MULTI_IRQ_HANDLER
+       select GENERIC_IRQ_MULTI_HANDLER
        select IRQ_DOMAIN_HIERARCHY
        select PARTITION_PERCPU
        select GENERIC_IRQ_EFFECTIVE_AFF_MASK
@@ -66,7 +66,7 @@ config ARM_NVIC
 config ARM_VIC
        bool
        select IRQ_DOMAIN
-       select MULTI_IRQ_HANDLER
+       select GENERIC_IRQ_MULTI_HANDLER
 
 config ARM_VIC_NR
        int
@@ -93,14 +93,14 @@ config ATMEL_AIC_IRQ
        bool
        select GENERIC_IRQ_CHIP
        select IRQ_DOMAIN
-       select MULTI_IRQ_HANDLER
+       select GENERIC_IRQ_MULTI_HANDLER
        select SPARSE_IRQ
 
 config ATMEL_AIC5_IRQ
        bool
        select GENERIC_IRQ_CHIP
        select IRQ_DOMAIN
-       select MULTI_IRQ_HANDLER
+       select GENERIC_IRQ_MULTI_HANDLER
        select SPARSE_IRQ
 
 config I8259
@@ -137,7 +137,7 @@ config DW_APB_ICTL
 config FARADAY_FTINTC010
        bool
        select IRQ_DOMAIN
-       select MULTI_IRQ_HANDLER
+       select GENERIC_IRQ_MULTI_HANDLER
        select SPARSE_IRQ
 
 config HISILICON_IRQ_MBIGEN
@@ -162,7 +162,7 @@ config CLPS711X_IRQCHIP
        bool
        depends on ARCH_CLPS711X
        select IRQ_DOMAIN
-       select MULTI_IRQ_HANDLER
+       select GENERIC_IRQ_MULTI_HANDLER
        select SPARSE_IRQ
        default y
 
@@ -181,7 +181,7 @@ config OMAP_IRQCHIP
 config ORION_IRQCHIP
        bool
        select IRQ_DOMAIN
-       select MULTI_IRQ_HANDLER
+       select GENERIC_IRQ_MULTI_HANDLER
 
 config PIC32_EVIC
        bool
index 4eca5c7..606efa6 100644 (file)
@@ -45,6 +45,9 @@ static int its_fsl_mc_msi_prepare(struct irq_domain *msi_domain,
         */
        info->scratchpad[0].ul = mc_bus_dev->icid;
        msi_info = msi_get_domain_info(msi_domain->parent);
+
+       /* Allocate at least 32 MSIs, and always as a power of 2 */
+       nvec = max_t(int, 32, roundup_pow_of_two(nvec));
        return msi_info->ops->msi_prepare(msi_domain->parent, dev, nvec, info);
 }
 
index 25a98de..8d6d009 100644 (file)
@@ -66,7 +66,7 @@ static int its_pci_msi_prepare(struct irq_domain *domain, struct device *dev,
 {
        struct pci_dev *pdev, *alias_dev;
        struct msi_domain_info *msi_info;
-       int alias_count = 0;
+       int alias_count = 0, minnvec = 1;
 
        if (!dev_is_pci(dev))
                return -EINVAL;
@@ -86,8 +86,18 @@ static int its_pci_msi_prepare(struct irq_domain *domain, struct device *dev,
        /* ITS specific DeviceID, as the core ITS ignores dev. */
        info->scratchpad[0].ul = pci_msi_domain_get_msi_rid(domain, pdev);
 
-       return msi_info->ops->msi_prepare(domain->parent,
-                                         dev, max(nvec, alias_count), info);
+       /*
+        * Always allocate a power of 2, and special case device 0 for
+        * broken systems where the DevID is not wired (and all devices
+        * appear as DevID 0). For that reason, we generously allocate a
+        * minimum of 32 MSIs for DevID 0. If you want more because all
+        * your devices are aliasing to DevID 0, consider fixing your HW.
+        */
+       nvec = max(nvec, alias_count);
+       if (!info->scratchpad[0].ul)
+               minnvec = 32;
+       nvec = max_t(int, minnvec, roundup_pow_of_two(nvec));
+       return msi_info->ops->msi_prepare(domain->parent, dev, nvec, info);
 }
 
 static struct msi_domain_ops its_pci_msi_ops = {
index 8881a05..7b8e87b 100644 (file)
@@ -73,6 +73,8 @@ static int its_pmsi_prepare(struct irq_domain *domain, struct device *dev,
        /* ITS specific DeviceID, as the core ITS ignores dev. */
        info->scratchpad[0].ul = dev_id;
 
+       /* Allocate at least 32 MSIs, and always as a power of 2 */
+       nvec = max_t(int, 32, roundup_pow_of_two(nvec));
        return msi_info->ops->msi_prepare(domain->parent,
                                          dev, nvec, info);
 }
index d7842d3..316a575 100644 (file)
@@ -23,6 +23,8 @@
 #include <linux/dma-iommu.h>
 #include <linux/interrupt.h>
 #include <linux/irqdomain.h>
+#include <linux/list.h>
+#include <linux/list_sort.h>
 #include <linux/log2.h>
 #include <linux/mm.h>
 #include <linux/msi.h>
@@ -160,7 +162,7 @@ static struct {
 } vpe_proxy;
 
 static LIST_HEAD(its_nodes);
-static DEFINE_SPINLOCK(its_lock);
+static DEFINE_RAW_SPINLOCK(its_lock);
 static struct rdists *gic_rdists;
 static struct irq_domain *its_parent;
 
@@ -1421,112 +1423,176 @@ static struct irq_chip its_irq_chip = {
        .irq_set_vcpu_affinity  = its_irq_set_vcpu_affinity,
 };
 
+
 /*
  * How we allocate LPIs:
  *
- * The GIC has id_bits bits for interrupt identifiers. From there, we
- * must subtract 8192 which are reserved for SGIs/PPIs/SPIs. Then, as
- * we allocate LPIs by chunks of 32, we can shift the whole thing by 5
- * bits to the right.
+ * lpi_range_list contains ranges of LPIs that are available to
+ * allocate from. To allocate LPIs, just pick the first range that
+ * fits the required allocation, and reduce it by the required
+ * amount. Once empty, remove the range from the list.
+ *
+ * To free a range of LPIs, add a free range to the list, sort it and
+ * merge the result if the new range happens to be adjacent to an
+ * already free block.
  *
- * This gives us (((1UL << id_bits) - 8192) >> 5) possible allocations.
+ * The consequence of the above is that allocation cost is low, but
+ * freeing is expensive. We assume that freeing rarely occurs.
  */
-#define IRQS_PER_CHUNK_SHIFT   5
-#define IRQS_PER_CHUNK         (1UL << IRQS_PER_CHUNK_SHIFT)
-#define ITS_MAX_LPI_NRBITS     16 /* 64K LPIs */
 
-static unsigned long *lpi_bitmap;
-static u32 lpi_chunks;
-static DEFINE_SPINLOCK(lpi_lock);
+static DEFINE_MUTEX(lpi_range_lock);
+static LIST_HEAD(lpi_range_list);
+
+struct lpi_range {
+       struct list_head        entry;
+       u32                     base_id;
+       u32                     span;
+};
 
-static int its_lpi_to_chunk(int lpi)
+static struct lpi_range *mk_lpi_range(u32 base, u32 span)
 {
-       return (lpi - 8192) >> IRQS_PER_CHUNK_SHIFT;
+       struct lpi_range *range;
+
+       range = kzalloc(sizeof(*range), GFP_KERNEL);
+       if (range) {
+               INIT_LIST_HEAD(&range->entry);
+               range->base_id = base;
+               range->span = span;
+       }
+
+       return range;
 }
 
-static int its_chunk_to_lpi(int chunk)
+static int lpi_range_cmp(void *priv, struct list_head *a, struct list_head *b)
 {
-       return (chunk << IRQS_PER_CHUNK_SHIFT) + 8192;
+       struct lpi_range *ra, *rb;
+
+       ra = container_of(a, struct lpi_range, entry);
+       rb = container_of(b, struct lpi_range, entry);
+
+       return rb->base_id - ra->base_id;
 }
 
-static int __init its_lpi_init(u32 id_bits)
+static void merge_lpi_ranges(void)
 {
-       lpi_chunks = its_lpi_to_chunk(1UL << id_bits);
+       struct lpi_range *range, *tmp;
 
-       lpi_bitmap = kcalloc(BITS_TO_LONGS(lpi_chunks), sizeof(long),
-                            GFP_KERNEL);
-       if (!lpi_bitmap) {
-               lpi_chunks = 0;
-               return -ENOMEM;
+       list_for_each_entry_safe(range, tmp, &lpi_range_list, entry) {
+               if (!list_is_last(&range->entry, &lpi_range_list) &&
+                   (tmp->base_id == (range->base_id + range->span))) {
+                       tmp->base_id = range->base_id;
+                       tmp->span += range->span;
+                       list_del(&range->entry);
+                       kfree(range);
+               }
        }
+}
 
-       pr_info("ITS: Allocated %d chunks for LPIs\n", (int)lpi_chunks);
-       return 0;
+static int alloc_lpi_range(u32 nr_lpis, u32 *base)
+{
+       struct lpi_range *range, *tmp;
+       int err = -ENOSPC;
+
+       mutex_lock(&lpi_range_lock);
+
+       list_for_each_entry_safe(range, tmp, &lpi_range_list, entry) {
+               if (range->span >= nr_lpis) {
+                       *base = range->base_id;
+                       range->base_id += nr_lpis;
+                       range->span -= nr_lpis;
+
+                       if (range->span == 0) {
+                               list_del(&range->entry);
+                               kfree(range);
+                       }
+
+                       err = 0;
+                       break;
+               }
+       }
+
+       mutex_unlock(&lpi_range_lock);
+
+       pr_debug("ITS: alloc %u:%u\n", *base, nr_lpis);
+       return err;
 }
 
-static unsigned long *its_lpi_alloc_chunks(int nr_irqs, int *base, int *nr_ids)
+static int free_lpi_range(u32 base, u32 nr_lpis)
 {
-       unsigned long *bitmap = NULL;
-       int chunk_id;
-       int nr_chunks;
-       int i;
+       struct lpi_range *new;
+       int err = 0;
+
+       mutex_lock(&lpi_range_lock);
+
+       new = mk_lpi_range(base, nr_lpis);
+       if (!new) {
+               err = -ENOMEM;
+               goto out;
+       }
+
+       list_add(&new->entry, &lpi_range_list);
+       list_sort(NULL, &lpi_range_list, lpi_range_cmp);
+       merge_lpi_ranges();
+out:
+       mutex_unlock(&lpi_range_lock);
+       return err;
+}
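/*
 * Illustrative trace (not part of the patch), assuming the allocator
 * starts with the single free range [8192, 8192 + 65536):
 *
 *   alloc_lpi_range(32, &base)  ->  base = 8192, free list: [8224, ...)
 *   alloc_lpi_range(64, &base)  ->  base = 8224, free list: [8288, ...)
 *   free_lpi_range(8192, 32)    ->  free list: [8192, 8224), [8288, ...)
 *   free_lpi_range(8224, 64)    ->  adjacent ranges merge back into
 *                                   the single range [8192, ...)
 *
 * First-fit keeps allocation cheap; the sort-and-merge on free is why
 * the comment above treats freeing as the expensive, rare path.
 */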
+
+static int __init its_lpi_init(u32 id_bits)
+{
+       u32 lpis = (1UL << id_bits) - 8192;
+       u32 numlpis;
+       int err;
+
+       numlpis = 1UL << GICD_TYPER_NUM_LPIS(gic_rdists->gicd_typer);
+
+       if (numlpis > 2 && !WARN_ON(numlpis > lpis)) {
+               lpis = numlpis;
+               pr_info("ITS: Using hypervisor restricted LPI range [%u]\n",
+                       lpis);
+       }
 
-       nr_chunks = DIV_ROUND_UP(nr_irqs, IRQS_PER_CHUNK);
+       /*
+        * Initializing the allocator is just the same as freeing the
+        * full range of LPIs.
+        */
+       err = free_lpi_range(8192, lpis);
+       pr_debug("ITS: Allocator initialized for %u LPIs\n", lpis);
+       return err;
+}
 
-       spin_lock(&lpi_lock);
+static unsigned long *its_lpi_alloc(int nr_irqs, u32 *base, int *nr_ids)
+{
+       unsigned long *bitmap = NULL;
+       int err = 0;
 
        do {
-               chunk_id = bitmap_find_next_zero_area(lpi_bitmap, lpi_chunks,
-                                                     0, nr_chunks, 0);
-               if (chunk_id < lpi_chunks)
+               err = alloc_lpi_range(nr_irqs, base);
+               if (!err)
                        break;
 
-               nr_chunks--;
-       } while (nr_chunks > 0);
+               nr_irqs /= 2;
+       } while (nr_irqs > 0);
 
-       if (!nr_chunks)
+       if (err)
                goto out;
 
-       bitmap = kcalloc(BITS_TO_LONGS(nr_chunks * IRQS_PER_CHUNK),
-                        sizeof(long),
-                        GFP_ATOMIC);
+       bitmap = kcalloc(BITS_TO_LONGS(nr_irqs), sizeof (long), GFP_ATOMIC);
        if (!bitmap)
                goto out;
 
-       for (i = 0; i < nr_chunks; i++)
-               set_bit(chunk_id + i, lpi_bitmap);
-
-       *base = its_chunk_to_lpi(chunk_id);
-       *nr_ids = nr_chunks * IRQS_PER_CHUNK;
+       *nr_ids = nr_irqs;
 
 out:
-       spin_unlock(&lpi_lock);
-
        if (!bitmap)
                *base = *nr_ids = 0;
 
        return bitmap;
 }
 
-static void its_lpi_free_chunks(unsigned long *bitmap, int base, int nr_ids)
+static void its_lpi_free(unsigned long *bitmap, u32 base, u32 nr_ids)
 {
-       int lpi;
-
-       spin_lock(&lpi_lock);
-
-       for (lpi = base; lpi < (base + nr_ids); lpi += IRQS_PER_CHUNK) {
-               int chunk = its_lpi_to_chunk(lpi);
-
-               BUG_ON(chunk > lpi_chunks);
-               if (test_bit(chunk, lpi_bitmap)) {
-                       clear_bit(chunk, lpi_bitmap);
-               } else {
-                       pr_err("Bad LPI chunk %d\n", chunk);
-               }
-       }
-
-       spin_unlock(&lpi_lock);
-
+       WARN_ON(free_lpi_range(base, nr_ids));
        kfree(bitmap);
 }
 
@@ -1559,7 +1625,7 @@ static int __init its_alloc_lpi_tables(void)
 {
        phys_addr_t paddr;
 
-       lpi_id_bits = min_t(u32, gic_rdists->id_bits, ITS_MAX_LPI_NRBITS);
+       lpi_id_bits = GICD_TYPER_ID_BITS(gic_rdists->gicd_typer);
        gic_rdists->prop_page = its_allocate_prop_table(GFP_NOWAIT);
        if (!gic_rdists->prop_page) {
                pr_err("Failed to allocate PROPBASE\n");
@@ -1997,12 +2063,12 @@ static void its_cpu_init_collections(void)
 {
        struct its_node *its;
 
-       spin_lock(&its_lock);
+       raw_spin_lock(&its_lock);
 
        list_for_each_entry(its, &its_nodes, entry)
                its_cpu_init_collection(its);
 
-       spin_unlock(&its_lock);
+       raw_spin_unlock(&its_lock);
 }
 
 static struct its_device *its_find_device(struct its_node *its, u32 dev_id)
@@ -2134,17 +2200,20 @@ static struct its_device *its_create_device(struct its_node *its, u32 dev_id,
        if (!its_alloc_device_table(its, dev_id))
                return NULL;
 
+       if (WARN_ON(!is_power_of_2(nvecs)))
+               nvecs = roundup_pow_of_two(nvecs);
+
        dev = kzalloc(sizeof(*dev), GFP_KERNEL);
        /*
-        * We allocate at least one chunk worth of LPIs bet device,
-        * and thus that many ITEs. The device may require less though.
+        * Even if the device wants a single LPI, the ITT must be
+        * sized as a power of two (and you need at least one bit...).
         */
-       nr_ites = max(IRQS_PER_CHUNK, roundup_pow_of_two(nvecs));
+       nr_ites = max(2, nvecs);
        sz = nr_ites * its->ite_size;
        sz = max(sz, ITS_ITT_ALIGN) + ITS_ITT_ALIGN - 1;
        itt = kzalloc(sz, GFP_KERNEL);
        if (alloc_lpis) {
-               lpi_map = its_lpi_alloc_chunks(nvecs, &lpi_base, &nr_lpis);
+               lpi_map = its_lpi_alloc(nvecs, &lpi_base, &nr_lpis);
                if (lpi_map)
                        col_map = kcalloc(nr_lpis, sizeof(*col_map),
                                          GFP_KERNEL);
@@ -2379,9 +2448,9 @@ static void its_irq_domain_free(struct irq_domain *domain, unsigned int virq,
        /* If all interrupts have been freed, start mopping the floor */
        if (bitmap_empty(its_dev->event_map.lpi_map,
                         its_dev->event_map.nr_lpis)) {
-               its_lpi_free_chunks(its_dev->event_map.lpi_map,
-                                   its_dev->event_map.lpi_base,
-                                   its_dev->event_map.nr_lpis);
+               its_lpi_free(its_dev->event_map.lpi_map,
+                            its_dev->event_map.lpi_base,
+                            its_dev->event_map.nr_lpis);
                kfree(its_dev->event_map.col_map);
 
                /* Unmap device/itt */
@@ -2780,7 +2849,7 @@ static void its_vpe_irq_domain_free(struct irq_domain *domain,
        }
 
        if (bitmap_empty(vm->db_bitmap, vm->nr_db_lpis)) {
-               its_lpi_free_chunks(vm->db_bitmap, vm->db_lpi_base, vm->nr_db_lpis);
+               its_lpi_free(vm->db_bitmap, vm->db_lpi_base, vm->nr_db_lpis);
                its_free_prop_table(vm->vprop_page);
        }
 }
@@ -2795,18 +2864,18 @@ static int its_vpe_irq_domain_alloc(struct irq_domain *domain, unsigned int virq
 
        BUG_ON(!vm);
 
-       bitmap = its_lpi_alloc_chunks(nr_irqs, &base, &nr_ids);
+       bitmap = its_lpi_alloc(roundup_pow_of_two(nr_irqs), &base, &nr_ids);
        if (!bitmap)
                return -ENOMEM;
 
        if (nr_ids < nr_irqs) {
-               its_lpi_free_chunks(bitmap, base, nr_ids);
+               its_lpi_free(bitmap, base, nr_ids);
                return -ENOMEM;
        }
 
        vprop_page = its_allocate_prop_table(GFP_KERNEL);
        if (!vprop_page) {
-               its_lpi_free_chunks(bitmap, base, nr_ids);
+               its_lpi_free(bitmap, base, nr_ids);
                return -ENOMEM;
        }
 
@@ -2833,7 +2902,7 @@ static int its_vpe_irq_domain_alloc(struct irq_domain *domain, unsigned int virq
                if (i > 0)
                        its_vpe_irq_domain_free(domain, virq, i - 1);
 
-               its_lpi_free_chunks(bitmap, base, nr_ids);
+               its_lpi_free(bitmap, base, nr_ids);
                its_free_prop_table(vprop_page);
        }
 
@@ -3070,7 +3139,7 @@ static int its_save_disable(void)
        struct its_node *its;
        int err = 0;
 
-       spin_lock(&its_lock);
+       raw_spin_lock(&its_lock);
        list_for_each_entry(its, &its_nodes, entry) {
                void __iomem *base;
 
@@ -3102,7 +3171,7 @@ err:
                        writel_relaxed(its->ctlr_save, base + GITS_CTLR);
                }
        }
-       spin_unlock(&its_lock);
+       raw_spin_unlock(&its_lock);
 
        return err;
 }
@@ -3112,7 +3181,7 @@ static void its_restore_enable(void)
        struct its_node *its;
        int ret;
 
-       spin_lock(&its_lock);
+       raw_spin_lock(&its_lock);
        list_for_each_entry(its, &its_nodes, entry) {
                void __iomem *base;
                int i;
@@ -3164,7 +3233,7 @@ static void its_restore_enable(void)
                    GITS_TYPER_HCC(gic_read_typer(base + GITS_TYPER)))
                        its_cpu_init_collection(its);
        }
-       spin_unlock(&its_lock);
+       raw_spin_unlock(&its_lock);
 }
 
 static struct syscore_ops its_syscore_ops = {
@@ -3398,9 +3467,9 @@ static int __init its_probe_one(struct resource *res,
        if (err)
                goto out_free_tables;
 
-       spin_lock(&its_lock);
+       raw_spin_lock(&its_lock);
        list_add(&its->entry, &its_nodes);
-       spin_unlock(&its_lock);
+       raw_spin_unlock(&its_lock);
 
        return 0;
 
index 76ea56d..e214181 100644 (file)
@@ -877,7 +877,7 @@ static struct irq_chip gic_eoimode1_chip = {
        .flags                  = IRQCHIP_SET_TYPE_MASKED,
 };
 
-#define GIC_ID_NR              (1U << gic_data.rdists.id_bits)
+#define GIC_ID_NR      (1U << GICD_TYPER_ID_BITS(gic_data.rdists.gicd_typer))
 
 static int gic_irq_domain_map(struct irq_domain *d, unsigned int irq,
                              irq_hw_number_t hw)
@@ -1091,7 +1091,7 @@ static int __init gic_init_bases(void __iomem *dist_base,
         * The GIC only supports up to 1020 interrupt sources (SGI+PPI+SPI)
         */
        typer = readl_relaxed(gic_data.dist_base + GICD_TYPER);
-       gic_data.rdists.id_bits = GICD_TYPER_ID_BITS(typer);
+       gic_data.rdists.gicd_typer = typer;
        gic_irqs = GICD_TYPER_IRQS(typer);
        if (gic_irqs > 1020)
                gic_irqs = 1020;
index fc5953d..2ff0898 100644 (file)
@@ -165,6 +165,7 @@ static int __init intc_1chip_of_init(struct device_node *node,
        return ingenic_intc_of_init(node, 1);
 }
 IRQCHIP_DECLARE(jz4740_intc, "ingenic,jz4740-intc", intc_1chip_of_init);
+IRQCHIP_DECLARE(jz4725b_intc, "ingenic,jz4725b-intc", intc_1chip_of_init);
 
 static int __init intc_2chip_of_init(struct device_node *node,
        struct device_node *parent)
index 3a7e890..3df527f 100644 (file)
@@ -159,6 +159,7 @@ static const struct stm32_exti_bank *stm32mp1_exti_banks[] = {
 };
 
 static const struct stm32_desc_irq stm32mp1_desc_irq[] = {
+       { .exti = 0, .irq_parent = 6 },
        { .exti = 1, .irq_parent = 7 },
        { .exti = 2, .irq_parent = 8 },
        { .exti = 3, .irq_parent = 9 },
index edb35a5..a99fc0c 100644 (file)
@@ -728,9 +728,6 @@ EXPORT_SYMBOL_GPL(vsp1_du_setup_lif);
  */
 void vsp1_du_atomic_begin(struct device *dev, unsigned int pipe_index)
 {
-       struct vsp1_device *vsp1 = dev_get_drvdata(dev);
-
-       mutex_lock(&vsp1->drm->lock);
 }
 EXPORT_SYMBOL_GPL(vsp1_du_atomic_begin);
 
@@ -846,6 +843,7 @@ void vsp1_du_atomic_flush(struct device *dev, unsigned int pipe_index,
 
        drm_pipe->crc = cfg->crc;
 
+       mutex_lock(&vsp1->drm->lock);
        vsp1_du_pipeline_setup_inputs(vsp1, pipe);
        vsp1_du_pipeline_configure(pipe);
        mutex_unlock(&vsp1->drm->lock);
index fcfab66..81b150e 100644 (file)
@@ -174,6 +174,7 @@ static int lirc_bpf_detach(struct rc_dev *rcdev, struct bpf_prog *prog)
 
        rcu_assign_pointer(raw->progs, new_array);
        bpf_prog_array_free(old_array);
+       bpf_prog_put(prog);
 unlock:
        mutex_unlock(&ir_raw_handler_lock);
        return ret;
index 2e0066b..e794890 100644 (file)
@@ -30,13 +30,13 @@ static int ir_raw_event_thread(void *data)
                while (kfifo_out(&raw->kfifo, &ev, 1)) {
                        if (is_timing_event(ev)) {
                                if (ev.duration == 0)
-                                       dev_err(&dev->dev, "nonsensical timing event of duration 0");
+                                       dev_warn_once(&dev->dev, "nonsensical timing event of duration 0");
                                if (is_timing_event(raw->prev_ev) &&
                                    !is_transition(&ev, &raw->prev_ev))
-                                       dev_err(&dev->dev, "two consecutive events of type %s",
-                                               TO_STR(ev.pulse));
+                                       dev_warn_once(&dev->dev, "two consecutive events of type %s",
+                                                     TO_STR(ev.pulse));
                                if (raw->prev_ev.reset && ev.pulse == 0)
-                                       dev_err(&dev->dev, "timing event after reset should be pulse");
+                                       dev_warn_once(&dev->dev, "timing event after reset should be pulse");
                        }
                        list_for_each_entry(handler, &ir_raw_handler_list, list)
                                if (dev->enabled_protocols &
index 2e222d9..ca68e1d 100644 (file)
@@ -679,6 +679,14 @@ static void ir_timer_repeat(struct timer_list *t)
        spin_unlock_irqrestore(&dev->keylock, flags);
 }
 
+static unsigned int repeat_period(int protocol)
+{
+       if (protocol >= ARRAY_SIZE(protocols))
+               return 100;
+
+       return protocols[protocol].repeat_period;
+}
+
 /**
  * rc_repeat() - signals that a key is still pressed
  * @dev:       the struct rc_dev descriptor of the device
@@ -691,7 +699,7 @@ void rc_repeat(struct rc_dev *dev)
 {
        unsigned long flags;
        unsigned int timeout = nsecs_to_jiffies(dev->timeout) +
-               msecs_to_jiffies(protocols[dev->last_protocol].repeat_period);
+               msecs_to_jiffies(repeat_period(dev->last_protocol));
        struct lirc_scancode sc = {
                .scancode = dev->last_scancode, .rc_proto = dev->last_protocol,
                .keycode = dev->keypressed ? dev->last_keycode : KEY_RESERVED,
@@ -803,7 +811,7 @@ void rc_keydown(struct rc_dev *dev, enum rc_proto protocol, u32 scancode,
 
        if (dev->keypressed) {
                dev->keyup_jiffies = jiffies + nsecs_to_jiffies(dev->timeout) +
-                       msecs_to_jiffies(protocols[protocol].repeat_period);
+                       msecs_to_jiffies(repeat_period(protocol));
                mod_timer(&dev->timer_keyup, dev->keyup_jiffies);
        }
        spin_unlock_irqrestore(&dev->keylock, flags);
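
The new repeat_period() helper bounds the protocols[] lookup: a protocol
number at or beyond the end of the table falls back to a 100 ms default
instead of indexing past the array, and both rc_repeat() and rc_keydown() now
go through it. A generic sketch of the same guard, with a hypothetical table
and values invented for illustration:

    #include <stddef.h>

    #define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

    /* Hypothetical per-protocol repeat periods, in milliseconds. */
    static const unsigned int repeat_period_ms[] = { 114, 108, 125 };

    static unsigned int repeat_period(int protocol)
    {
            /* Clamp unknown protocols to a safe default instead of
             * reading past the end of the table. */
            if (protocol < 0 ||
                (size_t)protocol >= ARRAY_SIZE(repeat_period_ms))
                    return 100;

            return repeat_period_ms[protocol];
    }
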
index 75f781c..de4e6e5 100644 (file)
@@ -293,9 +293,10 @@ static void mxcmci_swap_buffers(struct mmc_data *data)
        int i;
 
        for_each_sg(data->sg, sg, data->sg_len, i) {
-               void *buf = kmap_atomic(sg_page(sg) + sg->offset;
+               void *buf = kmap_atomic(sg_page(sg) + sg->offset);
                buffer_swap32(buf, sg->length);
                kunmap_atomic(buf);
+       }
 }
 #else
 static inline void mxcmci_swap_buffers(struct mmc_data *data) {}
index 63e3844..217b790 100644 (file)
@@ -1717,6 +1717,8 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev,
                goto err_upper_unlink;
        }
 
+       bond->nest_level = dev_get_nest_level(bond_dev) + 1;
+
        /* If the mode uses primary, then the following is handled by
         * bond_change_active_slave().
         */
@@ -1764,7 +1766,6 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev,
        if (bond_mode_can_use_xmit_hash(bond))
                bond_update_slave_arr(bond, NULL);
 
-       bond->nest_level = dev_get_nest_level(bond_dev);
 
        netdev_info(bond_dev, "Enslaving %s as %s interface with %s link\n",
                    slave_dev->name,
@@ -3415,6 +3416,13 @@ static void bond_fold_stats(struct rtnl_link_stats64 *_res,
        }
 }
 
+static int bond_get_nest_level(struct net_device *bond_dev)
+{
+       struct bonding *bond = netdev_priv(bond_dev);
+
+       return bond->nest_level;
+}
+
 static void bond_get_stats(struct net_device *bond_dev,
                           struct rtnl_link_stats64 *stats)
 {
@@ -3423,7 +3431,7 @@ static void bond_get_stats(struct net_device *bond_dev,
        struct list_head *iter;
        struct slave *slave;
 
-       spin_lock(&bond->stats_lock);
+       spin_lock_nested(&bond->stats_lock, bond_get_nest_level(bond_dev));
        memcpy(stats, &bond->bond_stats, sizeof(*stats));
 
        rcu_read_lock();
@@ -4227,6 +4235,7 @@ static const struct net_device_ops bond_netdev_ops = {
        .ndo_neigh_setup        = bond_neigh_setup,
        .ndo_vlan_rx_add_vid    = bond_vlan_rx_add_vid,
        .ndo_vlan_rx_kill_vid   = bond_vlan_rx_kill_vid,
+       .ndo_get_lock_subclass  = bond_get_nest_level,
 #ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_netpoll_setup      = bond_netpoll_setup,
        .ndo_netpoll_cleanup    = bond_netpoll_cleanup,
@@ -4725,6 +4734,7 @@ static int bond_init(struct net_device *bond_dev)
        if (!bond->wq)
                return -ENOMEM;
 
+       bond->nest_level = SINGLE_DEPTH_NESTING;
        netdev_lockdep_set_classes(bond_dev);
 
        list_add_tail(&bond->bond_list, &bn->dev_list);
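
The bonding hunks above record the device's nesting depth once at enslave
time (one level deeper than the device being enslaved), default it to
SINGLE_DEPTH_NESTING at init, and feed it to spin_lock_nested() via the new
ndo_get_lock_subclass hook, so lockdep can tell the stats_lock of a bond
stacked on another bond apart from that of the lower bond. A toy model of the
depth bookkeeping (struct and names invented for illustration):

    #include <stdio.h>

    /* Each stacked device records how deep it sits, so a per-class
     * lock can be taken with a distinct lockdep subclass per level. */
    struct toy_bond {
            int nest_level;
            struct toy_bond *lower; /* device enslaved under this bond */
    };

    static int dev_get_nest_level(const struct toy_bond *dev)
    {
            return dev ? dev->nest_level : 0;
    }

    int main(void)
    {
            struct toy_bond bond0 = { .nest_level = 1, .lower = NULL };
            struct toy_bond bond1 = { .lower = &bond0 };

            /* Mirrors the hunk above: one deeper than the enslaved dev. */
            bond1.nest_level = dev_get_nest_level(bond1.lower) + 1;

            printf("bond1 lock subclass = %d\n", bond1.nest_level); /* 2 */
            return 0;
    }
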
index 98663c5..4d5d01c 100644 (file)
@@ -743,15 +743,20 @@ const struct bond_option *bond_opt_get(unsigned int option)
 static int bond_option_mode_set(struct bonding *bond,
                                const struct bond_opt_value *newval)
 {
-       if (!bond_mode_uses_arp(newval->value) && bond->params.arp_interval) {
-               netdev_dbg(bond->dev, "%s mode is incompatible with arp monitoring, start mii monitoring\n",
-                          newval->string);
-               /* disable arp monitoring */
-               bond->params.arp_interval = 0;
-               /* set miimon to default value */
-               bond->params.miimon = BOND_DEFAULT_MIIMON;
-               netdev_dbg(bond->dev, "Setting MII monitoring interval to %d\n",
-                          bond->params.miimon);
+       if (!bond_mode_uses_arp(newval->value)) {
+               if (bond->params.arp_interval) {
+                       netdev_dbg(bond->dev, "%s mode is incompatible with arp monitoring, start mii monitoring\n",
+                                  newval->string);
+                       /* disable arp monitoring */
+                       bond->params.arp_interval = 0;
+               }
+
+               if (!bond->params.miimon) {
+                       /* set miimon to default value */
+                       bond->params.miimon = BOND_DEFAULT_MIIMON;
+                       netdev_dbg(bond->dev, "Setting MII monitoring interval to %d\n",
+                                  bond->params.miimon);
+               }
        }
 
        if (newval->value == BOND_MODE_ALB)
index b397a33..9b44940 100644 (file)
@@ -634,10 +634,12 @@ static int m_can_clk_start(struct m_can_priv *priv)
        int err;
 
        err = pm_runtime_get_sync(priv->device);
-       if (err)
+       if (err < 0) {
                pm_runtime_put_noidle(priv->device);
+               return err;
+       }
 
-       return err;
+       return 0;
 }
 
 static void m_can_clk_stop(struct m_can_priv *priv)
@@ -1109,7 +1111,8 @@ static void m_can_chip_config(struct net_device *dev)
 
        } else {
        /* Version 3.1.x or 3.2.x */
-               cccr &= ~(CCCR_TEST | CCCR_MON | CCCR_BRSE | CCCR_FDOE);
+               cccr &= ~(CCCR_TEST | CCCR_MON | CCCR_BRSE | CCCR_FDOE |
+                         CCCR_NISO);
 
                /* Only 3.2.x has NISO Bit implemented */
                if (priv->can.ctrlmode & CAN_CTRLMODE_FD_NON_ISO)
@@ -1642,8 +1645,6 @@ static int m_can_plat_probe(struct platform_device *pdev)
        priv->can.clock.freq = clk_get_rate(cclk);
        priv->mram_base = mram_addr;
 
-       m_can_of_parse_mram(priv, mram_config_vals);
-
        platform_set_drvdata(pdev, dev);
        SET_NETDEV_DEV(dev, &pdev->dev);
 
@@ -1666,6 +1667,8 @@ static int m_can_plat_probe(struct platform_device *pdev)
                goto clk_disable;
        }
 
+       m_can_of_parse_mram(priv, mram_config_vals);
+
        devm_can_led_init(dev);
 
        of_can_transceiver(dev);
@@ -1687,8 +1690,6 @@ failed_ret:
        return ret;
 }
 
-/* TODO: runtime PM with power down or sleep mode  */
-
 static __maybe_unused int m_can_suspend(struct device *dev)
 {
        struct net_device *ndev = dev_get_drvdata(dev);
@@ -1715,8 +1716,6 @@ static __maybe_unused int m_can_resume(struct device *dev)
 
        pinctrl_pm_select_default_state(dev);
 
-       m_can_init_ram(priv);
-
        priv->can.state = CAN_STATE_ERROR_ACTIVE;
 
        if (netif_running(ndev)) {
@@ -1726,6 +1725,7 @@ static __maybe_unused int m_can_resume(struct device *dev)
                if (ret)
                        return ret;
 
+               m_can_init_ram(priv);
                m_can_start(ndev);
                netif_device_attach(ndev);
                netif_start_queue(ndev);
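
The m_can_clk_start() fix hinges on pm_runtime_get_sync() semantics: it
returns a negative errno on failure but may return 1 ("already active") on
success, so the old `if (err)` treated a successful resume as an error. The
new code fails only on negative values, balances the usage count with
pm_runtime_put_noidle() in that case, and never leaks the positive return. A
hedged sketch of the corrected pattern, with stubs standing in for the PM
core:

    #include <stdio.h>

    /* Stub modelling pm_runtime_get_sync(): 0 or 1 on success,
     * negative errno on failure. */
    static int pm_get_sync_stub(void *dev)    { (void)dev; return 1; }
    static void pm_put_noidle_stub(void *dev) { (void)dev; }

    static int clk_start(void *dev)
    {
            int err = pm_get_sync_stub(dev);

            if (err < 0) {                   /* only negative is an error */
                    pm_put_noidle_stub(dev); /* balance the usage count */
                    return err;
            }

            return 0; /* don't leak the positive "already active" value */
    }

    int main(void)
    {
            printf("clk_start: %d\n", clk_start(NULL)); /* prints 0, not 1 */
            return 0;
    }
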
index c7427bd..2949a38 100644 (file)
@@ -86,6 +86,11 @@ static u32 mpc52xx_can_get_clock(struct platform_device *ofdev,
                return 0;
        }
        cdm = of_iomap(np_cdm, 0);
+       if (!cdm) {
+               of_node_put(np_cdm);
+               dev_err(&ofdev->dev, "can't map clock node!\n");
+               return 0;
+       }
 
        if (in_8(&cdm->ipb_clk_sel) & 0x1)
                freq *= 2;
index b9e2857..455a379 100644 (file)
@@ -58,6 +58,10 @@ MODULE_LICENSE("GPL v2");
 #define PCIEFD_REG_SYS_VER1            0x0040  /* version reg #1 */
 #define PCIEFD_REG_SYS_VER2            0x0044  /* version reg #2 */
 
+#define PCIEFD_FW_VERSION(x, y, z)     (((u32)(x) << 24) | \
+                                        ((u32)(y) << 16) | \
+                                        ((u32)(z) << 8))
+
 /* System Control Registers Bits */
 #define PCIEFD_SYS_CTL_TS_RST          0x00000001      /* timestamp clock */
 #define PCIEFD_SYS_CTL_CLK_EN          0x00000002      /* system clock */
@@ -782,6 +786,21 @@ static int peak_pciefd_probe(struct pci_dev *pdev,
                 "%ux CAN-FD PCAN-PCIe FPGA v%u.%u.%u:\n", can_count,
                 hw_ver_major, hw_ver_minor, hw_ver_sub);
 
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+       /* The FW < v3.3.0 DMA logic doesn't correctly handle the mix of
+        * 32-bit and 64-bit logical addresses: this workaround forces usage
+        * of 32-bit DMA addresses only when such a firmware is detected.
+        */
+       if (PCIEFD_FW_VERSION(hw_ver_major, hw_ver_minor, hw_ver_sub) <
+           PCIEFD_FW_VERSION(3, 3, 0)) {
+               err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+               if (err)
+                       dev_warn(&pdev->dev,
+                                "warning: can't set DMA mask %llxh (err %d)\n",
+                                DMA_BIT_MASK(32), err);
+       }
+#endif
+
        /* stop system clock */
        pciefd_sys_writereg(pciefd, PCIEFD_SYS_CTL_CLK_EN,
                            PCIEFD_REG_SYS_CTL_CLR);
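
PCIEFD_FW_VERSION() packs major/minor/sub into bits 31-24, 23-16 and 15-8 of
a single u32, so firmware revisions compare correctly with one integer `<`;
firmware older than 3.3.0 is then restricted to a 32-bit DMA mask. A small
self-contained illustration of the packing (macro copied from the hunk,
version numbers invented):

    #include <stdint.h>
    #include <stdio.h>

    #define PCIEFD_FW_VERSION(x, y, z)  (((uint32_t)(x) << 24) | \
                                         ((uint32_t)(y) << 16) | \
                                         ((uint32_t)(z) << 8))

    int main(void)
    {
            /* Lexicographic version order maps to integer order
             * because each field occupies its own byte. */
            if (PCIEFD_FW_VERSION(3, 2, 9) < PCIEFD_FW_VERSION(3, 3, 0))
                    printf("fw 3.2.9 predates 3.3.0: force 32-bit DMA\n");
            return 0;
    }
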
index 12ff002..b7dfd41 100644 (file)
@@ -1072,6 +1072,7 @@ static void ems_usb_disconnect(struct usb_interface *intf)
                usb_free_urb(dev->intr_urb);
 
                kfree(dev->intr_in_buffer);
+               kfree(dev->tx_msg_buffer);
        }
 }
 
index 89aec07..5a24039 100644 (file)
@@ -2,6 +2,7 @@
  *
  * Copyright (C) 2012 - 2014 Xilinx, Inc.
  * Copyright (C) 2009 PetaLogix. All rights reserved.
+ * Copyright (C) 2017 Sandvik Mining and Construction Oy
  *
  * Description:
  * This driver is developed for Axi CAN IP and for Zynq CANPS Controller.
 #include <linux/module.h>
 #include <linux/netdevice.h>
 #include <linux/of.h>
+#include <linux/of_device.h>
 #include <linux/platform_device.h>
 #include <linux/skbuff.h>
+#include <linux/spinlock.h>
 #include <linux/string.h>
 #include <linux/types.h>
 #include <linux/can/dev.h>
@@ -101,7 +104,7 @@ enum xcan_reg {
 #define XCAN_INTR_ALL          (XCAN_IXR_TXOK_MASK | XCAN_IXR_BSOFF_MASK |\
                                 XCAN_IXR_WKUP_MASK | XCAN_IXR_SLP_MASK | \
                                 XCAN_IXR_RXNEMP_MASK | XCAN_IXR_ERROR_MASK | \
-                                XCAN_IXR_ARBLST_MASK | XCAN_IXR_RXOK_MASK)
+                                XCAN_IXR_RXOFLW_MASK | XCAN_IXR_ARBLST_MASK)
 
 /* CAN register bit shift - XCAN_<REG>_<BIT>_SHIFT */
 #define XCAN_BTR_SJW_SHIFT             7  /* Synchronous jump width */
@@ -118,6 +121,7 @@ enum xcan_reg {
 /**
  * struct xcan_priv - This definition defines the CAN driver instance
  * @can:                       CAN private data structure.
 + * @tx_lock:                   Lock for synchronizing TX interrupt handling
  * @tx_head:                   Tx CAN packets ready to send on the queue
  * @tx_tail:                   Tx CAN packets successfully sent on the queue
  * @tx_max:                    Maximum number of packets the driver can send
@@ -132,6 +136,7 @@ enum xcan_reg {
  */
 struct xcan_priv {
        struct can_priv can;
+       spinlock_t tx_lock;
        unsigned int tx_head;
        unsigned int tx_tail;
        unsigned int tx_max;
@@ -159,6 +164,11 @@ static const struct can_bittiming_const xcan_bittiming_const = {
        .brp_inc = 1,
 };
 
+#define XCAN_CAP_WATERMARK     0x0001
+struct xcan_devtype_data {
+       unsigned int caps;
+};
+
 /**
  * xcan_write_reg_le - Write a value to the device register little endian
  * @priv:      Driver private data structure
@@ -238,6 +248,10 @@ static int set_reset_mode(struct net_device *ndev)
                usleep_range(500, 10000);
        }
 
+       /* reset clears FIFOs */
+       priv->tx_head = 0;
+       priv->tx_tail = 0;
+
        return 0;
 }
 
@@ -392,6 +406,7 @@ static int xcan_start_xmit(struct sk_buff *skb, struct net_device *ndev)
        struct net_device_stats *stats = &ndev->stats;
        struct can_frame *cf = (struct can_frame *)skb->data;
        u32 id, dlc, data[2] = {0, 0};
+       unsigned long flags;
 
        if (can_dropped_invalid_skb(ndev, skb))
                return NETDEV_TX_OK;
@@ -439,6 +454,9 @@ static int xcan_start_xmit(struct sk_buff *skb, struct net_device *ndev)
                data[1] = be32_to_cpup((__be32 *)(cf->data + 4));
 
        can_put_echo_skb(skb, ndev, priv->tx_head % priv->tx_max);
+
+       spin_lock_irqsave(&priv->tx_lock, flags);
+
        priv->tx_head++;
 
        /* Write the Frame to Xilinx CAN TX FIFO */
@@ -454,10 +472,16 @@ static int xcan_start_xmit(struct sk_buff *skb, struct net_device *ndev)
                stats->tx_bytes += cf->can_dlc;
        }
 
+       /* Clear TX-FIFO-empty interrupt for xcan_tx_interrupt() */
+       if (priv->tx_max > 1)
+               priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_TXFEMP_MASK);
+
        /* Check if the TX buffer is full */
        if ((priv->tx_head - priv->tx_tail) == priv->tx_max)
                netif_stop_queue(ndev);
 
+       spin_unlock_irqrestore(&priv->tx_lock, flags);
+
        return NETDEV_TX_OK;
 }
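
The new tx_lock covers the head increment, the FIFO write, the TXFEMP clear
and the queue-full check in one critical section; the TX interrupt below
takes the same lock, so netif_stop_queue() here can no longer race
netif_wake_queue() there and strand a stopped queue. A toy model of that
pairing, with pthreads standing in for the kernel primitives and all names
invented:

    #include <pthread.h>
    #include <stdbool.h>

    struct toy_can {
            pthread_mutex_t tx_lock;
            unsigned int tx_head, tx_tail, tx_max;
            bool queue_stopped;
    };

    static void toy_xmit(struct toy_can *c)
    {
            pthread_mutex_lock(&c->tx_lock);
            c->tx_head++;                    /* frame queued to FIFO */
            if (c->tx_head - c->tx_tail == c->tx_max)
                    c->queue_stopped = true; /* netif_stop_queue() */
            pthread_mutex_unlock(&c->tx_lock);
    }

    static void toy_tx_irq(struct toy_can *c, unsigned int frames_sent)
    {
            pthread_mutex_lock(&c->tx_lock);
            c->tx_tail += frames_sent;
            c->queue_stopped = false;        /* netif_wake_queue() */
            pthread_mutex_unlock(&c->tx_lock);
    }

    int main(void)
    {
            struct toy_can c = { PTHREAD_MUTEX_INITIALIZER, 0, 0, 2, false };

            toy_xmit(&c);
            toy_xmit(&c);      /* FIFO full: queue stops */
            toy_tx_irq(&c, 2); /* both frames done: queue wakes */
            return c.queue_stopped; /* 0 */
    }
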
 
@@ -529,6 +553,123 @@ static int xcan_rx(struct net_device *ndev)
        return 1;
 }
 
+/**
+ * xcan_current_error_state - Get current error state from HW
+ * @ndev:      Pointer to net_device structure
+ *
+ * Checks the current CAN error state from the HW. Note that this
+ * only checks for ERROR_PASSIVE and ERROR_WARNING.
+ *
+ * Return:
+ * ERROR_PASSIVE or ERROR_WARNING if either is active, ERROR_ACTIVE
+ * otherwise.
+ */
+static enum can_state xcan_current_error_state(struct net_device *ndev)
+{
+       struct xcan_priv *priv = netdev_priv(ndev);
+       u32 status = priv->read_reg(priv, XCAN_SR_OFFSET);
+
+       if ((status & XCAN_SR_ESTAT_MASK) == XCAN_SR_ESTAT_MASK)
+               return CAN_STATE_ERROR_PASSIVE;
+       else if (status & XCAN_SR_ERRWRN_MASK)
+               return CAN_STATE_ERROR_WARNING;
+       else
+               return CAN_STATE_ERROR_ACTIVE;
+}
+
+/**
+ * xcan_set_error_state - Set new CAN error state
+ * @ndev:      Pointer to net_device structure
+ * @new_state: The new CAN state to be set
+ * @cf:                Error frame to be populated or NULL
+ *
+ * Set new CAN error state for the device, updating statistics and
+ * populating the error frame if given.
+ */
+static void xcan_set_error_state(struct net_device *ndev,
+                                enum can_state new_state,
+                                struct can_frame *cf)
+{
+       struct xcan_priv *priv = netdev_priv(ndev);
+       u32 ecr = priv->read_reg(priv, XCAN_ECR_OFFSET);
+       u32 txerr = ecr & XCAN_ECR_TEC_MASK;
+       u32 rxerr = (ecr & XCAN_ECR_REC_MASK) >> XCAN_ESR_REC_SHIFT;
+
+       priv->can.state = new_state;
+
+       if (cf) {
+               cf->can_id |= CAN_ERR_CRTL;
+               cf->data[6] = txerr;
+               cf->data[7] = rxerr;
+       }
+
+       switch (new_state) {
+       case CAN_STATE_ERROR_PASSIVE:
+               priv->can.can_stats.error_passive++;
+               if (cf)
+                       cf->data[1] = (rxerr > 127) ?
+                                       CAN_ERR_CRTL_RX_PASSIVE :
+                                       CAN_ERR_CRTL_TX_PASSIVE;
+               break;
+       case CAN_STATE_ERROR_WARNING:
+               priv->can.can_stats.error_warning++;
+               if (cf)
+                       cf->data[1] |= (txerr > rxerr) ?
+                                       CAN_ERR_CRTL_TX_WARNING :
+                                       CAN_ERR_CRTL_RX_WARNING;
+               break;
+       case CAN_STATE_ERROR_ACTIVE:
+               if (cf)
+                       cf->data[1] |= CAN_ERR_CRTL_ACTIVE;
+               break;
+       default:
+               /* non-ERROR states are handled elsewhere */
+               WARN_ON(1);
+               break;
+       }
+}
+
+/**
+ * xcan_update_error_state_after_rxtx - Update CAN error state after RX/TX
+ * @ndev:      Pointer to net_device structure
+ *
+ * If the device is in an ERROR-WARNING or ERROR-PASSIVE state, check if
+ * the performed RX/TX has caused it to drop to a less severe state, and
+ * set the interface state accordingly.
+ */
+static void xcan_update_error_state_after_rxtx(struct net_device *ndev)
+{
+       struct xcan_priv *priv = netdev_priv(ndev);
+       enum can_state old_state = priv->can.state;
+       enum can_state new_state;
+
+       /* changing error state due to successful frame RX/TX can only
+        * occur from these states
+        */
+       if (old_state != CAN_STATE_ERROR_WARNING &&
+           old_state != CAN_STATE_ERROR_PASSIVE)
+               return;
+
+       new_state = xcan_current_error_state(ndev);
+
+       if (new_state != old_state) {
+               struct sk_buff *skb;
+               struct can_frame *cf;
+
+               skb = alloc_can_err_skb(ndev, &cf);
+
+               xcan_set_error_state(ndev, new_state, skb ? cf : NULL);
+
+               if (skb) {
+                       struct net_device_stats *stats = &ndev->stats;
+
+                       stats->rx_packets++;
+                       stats->rx_bytes += cf->can_dlc;
+                       netif_rx(skb);
+               }
+       }
+}
+
 /**
  * xcan_err_interrupt - error frame Isr
  * @ndev:      net_device pointer
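
The refactor above separates "read the state the hardware is in right now"
(xcan_current_error_state) from "commit a new state, bump statistics and fill
the error frame" (xcan_set_error_state), letting the error ISR and the new
after-RX/TX recovery path share one implementation. A sketch of the decode
step only; the mask values here are invented placeholders, not the real
XCAN_SR_* definitions:

    #include <stdint.h>

    enum can_state { ERROR_ACTIVE, ERROR_WARNING, ERROR_PASSIVE };

    #define SR_ESTAT_MASK  0x00000180u /* placeholder error-status bits */
    #define SR_ERRWRN_MASK 0x00000004u /* placeholder warning bit */

    /* Pure decode of a status-register snapshot; bus-off is handled
     * on a separate ISR path, so only passive/warning appear here. */
    static enum can_state current_error_state(uint32_t status)
    {
            if ((status & SR_ESTAT_MASK) == SR_ESTAT_MASK)
                    return ERROR_PASSIVE;
            else if (status & SR_ERRWRN_MASK)
                    return ERROR_WARNING;
            return ERROR_ACTIVE;
    }

    int main(void)
    {
            return current_error_state(SR_ERRWRN_MASK) == ERROR_WARNING
                    ? 0 : 1;
    }
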
@@ -544,16 +685,12 @@ static void xcan_err_interrupt(struct net_device *ndev, u32 isr)
        struct net_device_stats *stats = &ndev->stats;
        struct can_frame *cf;
        struct sk_buff *skb;
-       u32 err_status, status, txerr = 0, rxerr = 0;
+       u32 err_status;
 
        skb = alloc_can_err_skb(ndev, &cf);
 
        err_status = priv->read_reg(priv, XCAN_ESR_OFFSET);
        priv->write_reg(priv, XCAN_ESR_OFFSET, err_status);
-       txerr = priv->read_reg(priv, XCAN_ECR_OFFSET) & XCAN_ECR_TEC_MASK;
-       rxerr = ((priv->read_reg(priv, XCAN_ECR_OFFSET) &
-                       XCAN_ECR_REC_MASK) >> XCAN_ESR_REC_SHIFT);
-       status = priv->read_reg(priv, XCAN_SR_OFFSET);
 
        if (isr & XCAN_IXR_BSOFF_MASK) {
                priv->can.state = CAN_STATE_BUS_OFF;
@@ -563,28 +700,10 @@ static void xcan_err_interrupt(struct net_device *ndev, u32 isr)
                can_bus_off(ndev);
                if (skb)
                        cf->can_id |= CAN_ERR_BUSOFF;
-       } else if ((status & XCAN_SR_ESTAT_MASK) == XCAN_SR_ESTAT_MASK) {
-               priv->can.state = CAN_STATE_ERROR_PASSIVE;
-               priv->can.can_stats.error_passive++;
-               if (skb) {
-                       cf->can_id |= CAN_ERR_CRTL;
-                       cf->data[1] = (rxerr > 127) ?
-                                       CAN_ERR_CRTL_RX_PASSIVE :
-                                       CAN_ERR_CRTL_TX_PASSIVE;
-                       cf->data[6] = txerr;
-                       cf->data[7] = rxerr;
-               }
-       } else if (status & XCAN_SR_ERRWRN_MASK) {
-               priv->can.state = CAN_STATE_ERROR_WARNING;
-               priv->can.can_stats.error_warning++;
-               if (skb) {
-                       cf->can_id |= CAN_ERR_CRTL;
-                       cf->data[1] |= (txerr > rxerr) ?
-                                       CAN_ERR_CRTL_TX_WARNING :
-                                       CAN_ERR_CRTL_RX_WARNING;
-                       cf->data[6] = txerr;
-                       cf->data[7] = rxerr;
-               }
+       } else {
+               enum can_state new_state = xcan_current_error_state(ndev);
+
+               xcan_set_error_state(ndev, new_state, skb ? cf : NULL);
        }
 
        /* Check for Arbitration lost interrupt */
@@ -600,7 +719,6 @@ static void xcan_err_interrupt(struct net_device *ndev, u32 isr)
        if (isr & XCAN_IXR_RXOFLW_MASK) {
                stats->rx_over_errors++;
                stats->rx_errors++;
-               priv->write_reg(priv, XCAN_SRR_OFFSET, XCAN_SRR_RESET_MASK);
                if (skb) {
                        cf->can_id |= CAN_ERR_CRTL;
                        cf->data[1] |= CAN_ERR_CRTL_RX_OVERFLOW;
@@ -709,26 +827,20 @@ static int xcan_rx_poll(struct napi_struct *napi, int quota)
 
        isr = priv->read_reg(priv, XCAN_ISR_OFFSET);
        while ((isr & XCAN_IXR_RXNEMP_MASK) && (work_done < quota)) {
-               if (isr & XCAN_IXR_RXOK_MASK) {
-                       priv->write_reg(priv, XCAN_ICR_OFFSET,
-                               XCAN_IXR_RXOK_MASK);
-                       work_done += xcan_rx(ndev);
-               } else {
-                       priv->write_reg(priv, XCAN_ICR_OFFSET,
-                               XCAN_IXR_RXNEMP_MASK);
-                       break;
-               }
+               work_done += xcan_rx(ndev);
                priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_RXNEMP_MASK);
                isr = priv->read_reg(priv, XCAN_ISR_OFFSET);
        }
 
-       if (work_done)
+       if (work_done) {
                can_led_event(ndev, CAN_LED_EVENT_RX);
+               xcan_update_error_state_after_rxtx(ndev);
+       }
 
        if (work_done < quota) {
                napi_complete_done(napi, work_done);
                ier = priv->read_reg(priv, XCAN_IER_OFFSET);
-               ier |= (XCAN_IXR_RXOK_MASK | XCAN_IXR_RXNEMP_MASK);
+               ier |= XCAN_IXR_RXNEMP_MASK;
                priv->write_reg(priv, XCAN_IER_OFFSET, ier);
        }
        return work_done;
@@ -743,18 +855,71 @@ static void xcan_tx_interrupt(struct net_device *ndev, u32 isr)
 {
        struct xcan_priv *priv = netdev_priv(ndev);
        struct net_device_stats *stats = &ndev->stats;
+       unsigned int frames_in_fifo;
+       int frames_sent = 1; /* TXOK => at least 1 frame was sent */
+       unsigned long flags;
+       int retries = 0;
+
+       /* Synchronize with xmit as we need to know the exact number
+        * of frames in the FIFO to stay in sync due to the TXFEMP
+        * handling.
+        * This also prevents a race between netif_wake_queue() and
+        * netif_stop_queue().
+        */
+       spin_lock_irqsave(&priv->tx_lock, flags);
+
+       frames_in_fifo = priv->tx_head - priv->tx_tail;
+
+       if (WARN_ON_ONCE(frames_in_fifo == 0)) {
+               /* clear TXOK anyway to avoid getting back here */
+               priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_TXOK_MASK);
+               spin_unlock_irqrestore(&priv->tx_lock, flags);
+               return;
+       }
+
+       /* Check if 2 frames were sent (TXOK only means that at least 1
+        * frame was sent).
+        */
+       if (frames_in_fifo > 1) {
+               WARN_ON(frames_in_fifo > priv->tx_max);
+
+               /* Synchronize TXOK and isr so that after the loop:
+                * (1) isr variable is up-to-date at least up to TXOK clear
+                *     time. This avoids us clearing a TXOK of a second frame
+                *     but not noticing that the FIFO is now empty and thus
+                *     marking only a single frame as sent.
+                * (2) No TXOK is left. Having one could mean leaving a
+                *     stray TXOK as we might process the associated frame
+                *     via TXFEMP handling as we read TXFEMP *after* TXOK
+                *     clear to satisfy (1).
+                */
+               while ((isr & XCAN_IXR_TXOK_MASK) && !WARN_ON(++retries == 100)) {
+                       priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_TXOK_MASK);
+                       isr = priv->read_reg(priv, XCAN_ISR_OFFSET);
+               }
 
-       while ((priv->tx_head - priv->tx_tail > 0) &&
-                       (isr & XCAN_IXR_TXOK_MASK)) {
+               if (isr & XCAN_IXR_TXFEMP_MASK) {
+                       /* nothing in FIFO anymore */
+                       frames_sent = frames_in_fifo;
+               }
+       } else {
+               /* single frame in fifo, just clear TXOK */
                priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_TXOK_MASK);
+       }
+
+       while (frames_sent--) {
                can_get_echo_skb(ndev, priv->tx_tail %
                                        priv->tx_max);
                priv->tx_tail++;
                stats->tx_packets++;
-               isr = priv->read_reg(priv, XCAN_ISR_OFFSET);
        }
-       can_led_event(ndev, CAN_LED_EVENT_TX);
+
        netif_wake_queue(ndev);
+
+       spin_unlock_irqrestore(&priv->tx_lock, flags);
+
+       can_led_event(ndev, CAN_LED_EVENT_TX);
+       xcan_update_error_state_after_rxtx(ndev);
 }
 
 /**
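
With up to two frames in flight, TXOK alone cannot say how many completed:
it only means "at least one". The loop above therefore drains TXOK while
re-reading the ISR, then consults TXFEMP (FIFO empty) to decide between "one
frame done" and "everything done". The decision distilled into a pure
function (names invented, hardware reads replaced by parameters):

    /* TXOK => at least one frame completed; when more than one frame
     * was queued, a FIFO-empty reading taken after the TXOK drain
     * disambiguates "one done" from "all done". */
    static unsigned int frames_completed(unsigned int frames_in_fifo,
                                         int fifo_empty_after_drain)
    {
            unsigned int frames_sent = 1;

            if (frames_in_fifo > 1 && fifo_empty_after_drain)
                    frames_sent = frames_in_fifo;

            return frames_sent;
    }

    int main(void)
    {
            /* two queued, FIFO empty after the drain => both are done */
            return frames_completed(2, 1) == 2 ? 0 : 1;
    }
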
@@ -773,6 +938,7 @@ static irqreturn_t xcan_interrupt(int irq, void *dev_id)
        struct net_device *ndev = (struct net_device *)dev_id;
        struct xcan_priv *priv = netdev_priv(ndev);
        u32 isr, ier;
+       u32 isr_errors;
 
        /* Get the interrupt status from Xilinx CAN */
        isr = priv->read_reg(priv, XCAN_ISR_OFFSET);
@@ -791,18 +957,17 @@ static irqreturn_t xcan_interrupt(int irq, void *dev_id)
                xcan_tx_interrupt(ndev, isr);
 
        /* Check for the type of error interrupt and process it */
-       if (isr & (XCAN_IXR_ERROR_MASK | XCAN_IXR_RXOFLW_MASK |
-                       XCAN_IXR_BSOFF_MASK | XCAN_IXR_ARBLST_MASK)) {
-               priv->write_reg(priv, XCAN_ICR_OFFSET, (XCAN_IXR_ERROR_MASK |
-                               XCAN_IXR_RXOFLW_MASK | XCAN_IXR_BSOFF_MASK |
-                               XCAN_IXR_ARBLST_MASK));
+       isr_errors = isr & (XCAN_IXR_ERROR_MASK | XCAN_IXR_RXOFLW_MASK |
+                           XCAN_IXR_BSOFF_MASK | XCAN_IXR_ARBLST_MASK);
+       if (isr_errors) {
+               priv->write_reg(priv, XCAN_ICR_OFFSET, isr_errors);
                xcan_err_interrupt(ndev, isr);
        }
 
        /* Check for the type of receive interrupt and process it */
-       if (isr & (XCAN_IXR_RXNEMP_MASK | XCAN_IXR_RXOK_MASK)) {
+       if (isr & XCAN_IXR_RXNEMP_MASK) {
                ier = priv->read_reg(priv, XCAN_IER_OFFSET);
-               ier &= ~(XCAN_IXR_RXNEMP_MASK | XCAN_IXR_RXOK_MASK);
+               ier &= ~XCAN_IXR_RXNEMP_MASK;
                priv->write_reg(priv, XCAN_IER_OFFSET, ier);
                napi_schedule(&priv->napi);
        }
@@ -819,13 +984,9 @@ static irqreturn_t xcan_interrupt(int irq, void *dev_id)
 static void xcan_chip_stop(struct net_device *ndev)
 {
        struct xcan_priv *priv = netdev_priv(ndev);
-       u32 ier;
 
        /* Disable interrupts and leave the can in configuration mode */
-       ier = priv->read_reg(priv, XCAN_IER_OFFSET);
-       ier &= ~XCAN_INTR_ALL;
-       priv->write_reg(priv, XCAN_IER_OFFSET, ier);
-       priv->write_reg(priv, XCAN_SRR_OFFSET, XCAN_SRR_RESET_MASK);
+       set_reset_mode(ndev);
        priv->can.state = CAN_STATE_STOPPED;
 }
 
@@ -958,10 +1119,15 @@ static const struct net_device_ops xcan_netdev_ops = {
  */
 static int __maybe_unused xcan_suspend(struct device *dev)
 {
-       if (!device_may_wakeup(dev))
-               return pm_runtime_force_suspend(dev);
+       struct net_device *ndev = dev_get_drvdata(dev);
 
-       return 0;
+       if (netif_running(ndev)) {
+               netif_stop_queue(ndev);
+               netif_device_detach(ndev);
+               xcan_chip_stop(ndev);
+       }
+
+       return pm_runtime_force_suspend(dev);
 }
 
 /**
@@ -973,11 +1139,27 @@ static int __maybe_unused xcan_suspend(struct device *dev)
  */
 static int __maybe_unused xcan_resume(struct device *dev)
 {
-       if (!device_may_wakeup(dev))
-               return pm_runtime_force_resume(dev);
+       struct net_device *ndev = dev_get_drvdata(dev);
+       int ret;
 
-       return 0;
+       ret = pm_runtime_force_resume(dev);
+       if (ret) {
+               dev_err(dev, "pm_runtime_force_resume failed on resume\n");
+               return ret;
+       }
+
+       if (netif_running(ndev)) {
+               ret = xcan_chip_start(ndev);
+               if (ret) {
+                       dev_err(dev, "xcan_chip_start failed on resume\n");
+                       return ret;
+               }
+
+               netif_device_attach(ndev);
+               netif_start_queue(ndev);
+       }
 
+       return 0;
 }
 
 /**
@@ -992,14 +1174,6 @@ static int __maybe_unused xcan_runtime_suspend(struct device *dev)
        struct net_device *ndev = dev_get_drvdata(dev);
        struct xcan_priv *priv = netdev_priv(ndev);
 
-       if (netif_running(ndev)) {
-               netif_stop_queue(ndev);
-               netif_device_detach(ndev);
-       }
-
-       priv->write_reg(priv, XCAN_MSR_OFFSET, XCAN_MSR_SLEEP_MASK);
-       priv->can.state = CAN_STATE_SLEEPING;
-
        clk_disable_unprepare(priv->bus_clk);
        clk_disable_unprepare(priv->can_clk);
 
@@ -1018,7 +1192,6 @@ static int __maybe_unused xcan_runtime_resume(struct device *dev)
        struct net_device *ndev = dev_get_drvdata(dev);
        struct xcan_priv *priv = netdev_priv(ndev);
        int ret;
-       u32 isr, status;
 
        ret = clk_prepare_enable(priv->bus_clk);
        if (ret) {
@@ -1032,27 +1205,6 @@ static int __maybe_unused xcan_runtime_resume(struct device *dev)
                return ret;
        }
 
-       priv->write_reg(priv, XCAN_SRR_OFFSET, XCAN_SRR_RESET_MASK);
-       isr = priv->read_reg(priv, XCAN_ISR_OFFSET);
-       status = priv->read_reg(priv, XCAN_SR_OFFSET);
-
-       if (netif_running(ndev)) {
-               if (isr & XCAN_IXR_BSOFF_MASK) {
-                       priv->can.state = CAN_STATE_BUS_OFF;
-                       priv->write_reg(priv, XCAN_SRR_OFFSET,
-                                       XCAN_SRR_RESET_MASK);
-               } else if ((status & XCAN_SR_ESTAT_MASK) ==
-                                       XCAN_SR_ESTAT_MASK) {
-                       priv->can.state = CAN_STATE_ERROR_PASSIVE;
-               } else if (status & XCAN_SR_ERRWRN_MASK) {
-                       priv->can.state = CAN_STATE_ERROR_WARNING;
-               } else {
-                       priv->can.state = CAN_STATE_ERROR_ACTIVE;
-               }
-               netif_device_attach(ndev);
-               netif_start_queue(ndev);
-       }
-
        return 0;
 }
 
@@ -1061,6 +1213,18 @@ static const struct dev_pm_ops xcan_dev_pm_ops = {
        SET_RUNTIME_PM_OPS(xcan_runtime_suspend, xcan_runtime_resume, NULL)
 };
 
+static const struct xcan_devtype_data xcan_zynq_data = {
+       .caps = XCAN_CAP_WATERMARK,
+};
+
+/* Match table for OF platform binding */
+static const struct of_device_id xcan_of_match[] = {
+       { .compatible = "xlnx,zynq-can-1.0", .data = &xcan_zynq_data },
+       { .compatible = "xlnx,axi-can-1.00.a", },
+       { /* end of list */ },
+};
+MODULE_DEVICE_TABLE(of, xcan_of_match);
+
 /**
  * xcan_probe - Platform registration call
  * @pdev:      Handle to the platform device structure
@@ -1075,8 +1239,10 @@ static int xcan_probe(struct platform_device *pdev)
        struct resource *res; /* IO mem resources */
        struct net_device *ndev;
        struct xcan_priv *priv;
+       const struct of_device_id *of_id;
+       int caps = 0;
        void __iomem *addr;
-       int ret, rx_max, tx_max;
+       int ret, rx_max, tx_max, tx_fifo_depth;
 
        /* Get the virtual base address for the device */
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -1086,7 +1252,8 @@ static int xcan_probe(struct platform_device *pdev)
                goto err;
        }
 
-       ret = of_property_read_u32(pdev->dev.of_node, "tx-fifo-depth", &tx_max);
+       ret = of_property_read_u32(pdev->dev.of_node, "tx-fifo-depth",
+                                  &tx_fifo_depth);
        if (ret < 0)
                goto err;
 
@@ -1094,6 +1261,30 @@ static int xcan_probe(struct platform_device *pdev)
        if (ret < 0)
                goto err;
 
+       of_id = of_match_device(xcan_of_match, &pdev->dev);
+       if (of_id) {
+               const struct xcan_devtype_data *devtype_data = of_id->data;
+
+               if (devtype_data)
+                       caps = devtype_data->caps;
+       }
+
+       /* There is no way to directly figure out how many frames have been
+        * sent when the TXOK interrupt is processed. If watermark programming
+        * is supported, we can have 2 frames in the FIFO and use TXFEMP
+        * to determine if 1 or 2 frames have been sent.
+        * Theoretically we should be able to use TXFWMEMP to determine up
+        * to 3 frames, but it seems that after putting a second frame in the
+        * FIFO, with watermark at 2 frames, it can happen that TXFWMEMP (less
+        * than 2 frames in FIFO) is set anyway with no TXOK (a frame was
+        * sent), which is not a sensible state - possibly TXFWMEMP is not
+        * completely synchronized with the rest of the bits?
+        */
+       if (caps & XCAN_CAP_WATERMARK)
+               tx_max = min(tx_fifo_depth, 2);
+       else
+               tx_max = 1;
+
        /* Create a CAN device instance */
        ndev = alloc_candev(sizeof(struct xcan_priv), tx_max);
        if (!ndev)
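
Because the completion accounting can only tell "one frame done" from "all
frames done", the probe caps the usable FIFO depth: two frames when the
watermark capability lets TXFEMP disambiguate, otherwise one, regardless of
the tx-fifo-depth the device tree advertises. The cap as a standalone helper
(a sketch mirroring the logic above):

    #define XCAN_CAP_WATERMARK 0x0001

    static int usable_tx_max(int tx_fifo_depth, unsigned int caps)
    {
            if (caps & XCAN_CAP_WATERMARK)
                    return tx_fifo_depth < 2 ? tx_fifo_depth : 2;
            return 1; /* no watermark: one frame in flight at a time */
    }

    int main(void)
    {
            return usable_tx_max(16, XCAN_CAP_WATERMARK) == 2 ? 0 : 1;
    }
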
@@ -1108,6 +1299,7 @@ static int xcan_probe(struct platform_device *pdev)
                                        CAN_CTRLMODE_BERR_REPORTING;
        priv->reg_base = addr;
        priv->tx_max = tx_max;
+       spin_lock_init(&priv->tx_lock);
 
        /* Get IRQ for the device */
        ndev->irq = platform_get_irq(pdev, 0);
@@ -1172,9 +1364,9 @@ static int xcan_probe(struct platform_device *pdev)
 
        pm_runtime_put(&pdev->dev);
 
-       netdev_dbg(ndev, "reg_base=0x%p irq=%d clock=%d, tx fifo depth:%d\n",
+       netdev_dbg(ndev, "reg_base=0x%p irq=%d clock=%d, tx fifo depth: actual %d, using %d\n",
                        priv->reg_base, ndev->irq, priv->can.clock.freq,
-                       priv->tx_max);
+                       tx_fifo_depth, priv->tx_max);
 
        return 0;
 
@@ -1208,14 +1400,6 @@ static int xcan_remove(struct platform_device *pdev)
        return 0;
 }
 
-/* Match table for OF platform binding */
-static const struct of_device_id xcan_of_match[] = {
-       { .compatible = "xlnx,zynq-can-1.0", },
-       { .compatible = "xlnx,axi-can-1.00.a", },
-       { /* end of list */ },
-};
-MODULE_DEVICE_TABLE(of, xcan_of_match);
-
 static struct platform_driver xcan_driver = {
        .probe = xcan_probe,
        .remove = xcan_remove,
index 437cd6e..bb28c70 100644 (file)
@@ -343,6 +343,7 @@ static const struct irq_domain_ops mv88e6xxx_g1_irq_domain_ops = {
        .xlate  = irq_domain_xlate_twocell,
 };
 
+/* To be called with reg_lock held */
 static void mv88e6xxx_g1_irq_free_common(struct mv88e6xxx_chip *chip)
 {
        int irq, virq;
@@ -362,9 +363,15 @@ static void mv88e6xxx_g1_irq_free_common(struct mv88e6xxx_chip *chip)
 
 static void mv88e6xxx_g1_irq_free(struct mv88e6xxx_chip *chip)
 {
-       mv88e6xxx_g1_irq_free_common(chip);
-
+       /*
+        * free_irq must be called without reg_lock taken because the irq
+        * handler takes this lock, too.
+        */
        free_irq(chip->irq, chip);
+
+       mutex_lock(&chip->reg_lock);
+       mv88e6xxx_g1_irq_free_common(chip);
+       mutex_unlock(&chip->reg_lock);
 }
 
 static int mv88e6xxx_g1_irq_setup_common(struct mv88e6xxx_chip *chip)
@@ -469,10 +476,12 @@ static int mv88e6xxx_irq_poll_setup(struct mv88e6xxx_chip *chip)
 
 static void mv88e6xxx_irq_poll_free(struct mv88e6xxx_chip *chip)
 {
-       mv88e6xxx_g1_irq_free_common(chip);
-
        kthread_cancel_delayed_work_sync(&chip->irq_poll_work);
        kthread_destroy_worker(chip->kworker);
+
+       mutex_lock(&chip->reg_lock);
+       mv88e6xxx_g1_irq_free_common(chip);
+       mutex_unlock(&chip->reg_lock);
 }
 
 int mv88e6xxx_wait(struct mv88e6xxx_chip *chip, int addr, int reg, u16 mask)
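
free_irq() waits for any running handler to return, and the handler takes
reg_lock, so calling free_irq() with reg_lock held can deadlock. The two
hunks above therefore release the IRQ (or stop the polling worker) first and
take reg_lock only around the common teardown; the probe/remove hunks further
down drop their outer lock/unlock pair to match. The ordering, loosely
modelled with a pthread mutex (all names invented):

    #include <pthread.h>

    struct toy_chip {
            pthread_mutex_t reg_lock;
    };

    /* Models free_irq(): blocks until the handler, which itself takes
     * reg_lock, has finished. Must not be called under reg_lock. */
    static void wait_for_handler(struct toy_chip *chip)
    {
            pthread_mutex_lock(&chip->reg_lock);
            pthread_mutex_unlock(&chip->reg_lock);
    }

    static void irq_free(struct toy_chip *chip)
    {
            wait_for_handler(chip);          /* reg_lock NOT held here */

            pthread_mutex_lock(&chip->reg_lock);
            /* ... dispose of virq mappings, remove the domain ... */
            pthread_mutex_unlock(&chip->reg_lock);
    }

    int main(void)
    {
            struct toy_chip chip = { PTHREAD_MUTEX_INITIALIZER };

            irq_free(&chip); /* would deadlock if called under reg_lock */
            return 0;
    }
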
@@ -2608,7 +2617,6 @@ static const struct mv88e6xxx_ops mv88e6085_ops = {
        .rmu_disable = mv88e6085_g1_rmu_disable,
        .vtu_getnext = mv88e6352_g1_vtu_getnext,
        .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
-       .serdes_power = mv88e6341_serdes_power,
 };
 
 static const struct mv88e6xxx_ops mv88e6095_ops = {
@@ -2774,6 +2782,7 @@ static const struct mv88e6xxx_ops mv88e6141_ops = {
        .reset = mv88e6352_g1_reset,
        .vtu_getnext = mv88e6352_g1_vtu_getnext,
        .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
+       .serdes_power = mv88e6341_serdes_power,
        .gpio_ops = &mv88e6352_gpio_ops,
 };
 
@@ -2951,7 +2960,6 @@ static const struct mv88e6xxx_ops mv88e6175_ops = {
        .reset = mv88e6352_g1_reset,
        .vtu_getnext = mv88e6352_g1_vtu_getnext,
        .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
-       .serdes_power = mv88e6341_serdes_power,
 };
 
 static const struct mv88e6xxx_ops mv88e6176_ops = {
@@ -3327,6 +3335,7 @@ static const struct mv88e6xxx_ops mv88e6341_ops = {
        .reset = mv88e6352_g1_reset,
        .vtu_getnext = mv88e6352_g1_vtu_getnext,
        .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
+       .serdes_power = mv88e6341_serdes_power,
        .gpio_ops = &mv88e6352_gpio_ops,
        .avb_ops = &mv88e6390_avb_ops,
 };
@@ -4506,12 +4515,10 @@ out_g2_irq:
        if (chip->info->g2_irqs > 0)
                mv88e6xxx_g2_irq_free(chip);
 out_g1_irq:
-       mutex_lock(&chip->reg_lock);
        if (chip->irq > 0)
                mv88e6xxx_g1_irq_free(chip);
        else
                mv88e6xxx_irq_poll_free(chip);
-       mutex_unlock(&chip->reg_lock);
 out:
        if (pdata)
                dev_put(pdata->netdev);
@@ -4539,12 +4546,10 @@ static void mv88e6xxx_remove(struct mdio_device *mdiodev)
        if (chip->info->g2_irqs > 0)
                mv88e6xxx_g2_irq_free(chip);
 
-       mutex_lock(&chip->reg_lock);
        if (chip->irq > 0)
                mv88e6xxx_g1_irq_free(chip);
        else
                mv88e6xxx_irq_poll_free(chip);
-       mutex_unlock(&chip->reg_lock);
 }
 
 static const struct of_device_id mv88e6xxx_of_match[] = {
index 5b7658b..5c3ef9f 100644 (file)
@@ -32,7 +32,7 @@ config EL3
 
 config 3C515
        tristate "3c515 ISA \"Fast EtherLink\""
-       depends on ISA && ISA_DMA_API
+       depends on ISA && ISA_DMA_API && !PPC32
        ---help---
          If you have a 3Com ISA EtherLink XL "Corkscrew" 3c515 Fast Ethernet
          network card, say Y here.
index b6d735b..342ae08 100644 (file)
@@ -153,9 +153,6 @@ static void dayna_block_input(struct net_device *dev, int count,
 static void dayna_block_output(struct net_device *dev, int count,
                               const unsigned char *buf, int start_page);
 
-#define memcpy_fromio(a, b, c) memcpy((a), (void *)(b), (c))
-#define memcpy_toio(a, b, c)   memcpy((void *)(a), (b), (c))
-
 #define memcmp_withio(a, b, c) memcmp((a), (void *)(b), (c))
 
 /* Slow Sane (16-bit chunk memory read/write) Cabletron uses this */
@@ -239,7 +236,7 @@ static enum mac8390_access mac8390_testio(unsigned long membase)
        unsigned long outdata = 0xA5A0B5B0;
        unsigned long indata =  0x00000000;
        /* Try writing 32 bits */
-       memcpy_toio(membase, &outdata, 4);
+       memcpy_toio((void __iomem *)membase, &outdata, 4);
        /* Now compare them */
        if (memcmp_withio(&outdata, membase, 4) == 0)
                return ACCESS_32;
@@ -711,7 +708,7 @@ static void sane_get_8390_hdr(struct net_device *dev,
                              struct e8390_pkt_hdr *hdr, int ring_page)
 {
        unsigned long hdr_start = (ring_page - WD_START_PG)<<8;
-       memcpy_fromio(hdr, dev->mem_start + hdr_start, 4);
+       memcpy_fromio(hdr, (void __iomem *)dev->mem_start + hdr_start, 4);
        /* Fix endianness */
        hdr->count = swab16(hdr->count);
 }
@@ -725,13 +722,16 @@ static void sane_block_input(struct net_device *dev, int count,
        if (xfer_start + count > ei_status.rmem_end) {
                /* We must wrap the input move. */
                int semi_count = ei_status.rmem_end - xfer_start;
-               memcpy_fromio(skb->data, dev->mem_start + xfer_base,
+               memcpy_fromio(skb->data,
+                             (void __iomem *)dev->mem_start + xfer_base,
                              semi_count);
                count -= semi_count;
-               memcpy_fromio(skb->data + semi_count, ei_status.rmem_start,
-                             count);
+               memcpy_fromio(skb->data + semi_count,
+                             (void __iomem *)ei_status.rmem_start, count);
        } else {
-               memcpy_fromio(skb->data, dev->mem_start + xfer_base, count);
+               memcpy_fromio(skb->data,
+                             (void __iomem *)dev->mem_start + xfer_base,
+                             count);
        }
 }
 
@@ -740,7 +740,7 @@ static void sane_block_output(struct net_device *dev, int count,
 {
        long shmem = (start_page - WD_START_PG)<<8;
 
-       memcpy_toio(dev->mem_start + shmem, buf, count);
+       memcpy_toio((void __iomem *)dev->mem_start + shmem, buf, count);
 }
 
 /* dayna block input/output */
index 1b9d313..17f12c1 100644 (file)
@@ -333,6 +333,7 @@ static int ena_com_init_io_sq(struct ena_com_dev *ena_dev,
 
        memset(&io_sq->desc_addr, 0x0, sizeof(io_sq->desc_addr));
 
+       io_sq->dma_addr_bits = ena_dev->dma_addr_bits;
        io_sq->desc_entry_size =
                (io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX) ?
                sizeof(struct ena_eth_io_tx_desc) :
index f273af1..9e5cf55 100644 (file)
@@ -44,7 +44,7 @@ config AMD8111_ETH
 
 config LANCE
        tristate "AMD LANCE and PCnet (AT1500 and NE2100) support"
-       depends on ISA && ISA_DMA_API && !ARM
+       depends on ISA && ISA_DMA_API && !ARM && !PPC32
        ---help---
          If you have a network (Ethernet) card of this type, say Y here.
          Some LinkSys cards are of this type.
@@ -138,7 +138,7 @@ config PCMCIA_NMCLAN
 
 config NI65
        tristate "NI6510 support"
-       depends on ISA && ISA_DMA_API && !ARM
+       depends on ISA && ISA_DMA_API && !ARM && !PPC32
        ---help---
          If you have a network (Ethernet) card of this type, say Y here.
 
index 4b5d625..8a3a60b 100644 (file)
@@ -1111,14 +1111,14 @@ static void xgbe_phy_adjust_link(struct xgbe_prv_data *pdata)
 
                if (pdata->tx_pause != pdata->phy.tx_pause) {
                        new_state = 1;
-                       pdata->hw_if.config_tx_flow_control(pdata);
                        pdata->tx_pause = pdata->phy.tx_pause;
+                       pdata->hw_if.config_tx_flow_control(pdata);
                }
 
                if (pdata->rx_pause != pdata->phy.rx_pause) {
                        new_state = 1;
-                       pdata->hw_if.config_rx_flow_control(pdata);
                        pdata->rx_pause = pdata->phy.rx_pause;
+                       pdata->hw_if.config_rx_flow_control(pdata);
                }
 
                /* Speed support */
index 956860a..3bdab97 100644 (file)
@@ -762,7 +762,7 @@ static int hw_atl_b0_hw_packet_filter_set(struct aq_hw_s *self,
 
        hw_atl_rpfl2promiscuous_mode_en_set(self, IS_FILTER_ENABLED(IFF_PROMISC));
        hw_atl_rpfl2multicast_flr_en_set(self,
-                                        IS_FILTER_ENABLED(IFF_MULTICAST), 0);
+                                        IS_FILTER_ENABLED(IFF_ALLMULTI), 0);
 
        hw_atl_rpfl2_accept_all_mc_packets_set(self,
                                               IS_FILTER_ENABLED(IFF_ALLMULTI));
index 94270f6..7087b88 100644 (file)
@@ -1686,6 +1686,7 @@ static struct sk_buff *atl1c_alloc_skb(struct atl1c_adapter *adapter)
        skb = build_skb(page_address(page) + adapter->rx_page_offset,
                        adapter->rx_frag_size);
        if (likely(skb)) {
+               skb_reserve(skb, NET_SKB_PAD);
                adapter->rx_page_offset += adapter->rx_frag_size;
                if (adapter->rx_page_offset >= PAGE_SIZE)
                        adapter->rx_page = NULL;
index da18aa2..a4a90b6 100644 (file)
@@ -3388,14 +3388,18 @@ static int bnx2x_set_rss_flags(struct bnx2x *bp, struct ethtool_rxnfc *info)
                        DP(BNX2X_MSG_ETHTOOL,
                           "rss re-configured, UDP 4-tupple %s\n",
                           udp_rss_requested ? "enabled" : "disabled");
-                       return bnx2x_rss(bp, &bp->rss_conf_obj, false, true);
+                       if (bp->state == BNX2X_STATE_OPEN)
+                               return bnx2x_rss(bp, &bp->rss_conf_obj, false,
+                                                true);
                } else if ((info->flow_type == UDP_V6_FLOW) &&
                           (bp->rss_conf_obj.udp_rss_v6 != udp_rss_requested)) {
                        bp->rss_conf_obj.udp_rss_v6 = udp_rss_requested;
                        DP(BNX2X_MSG_ETHTOOL,
                           "rss re-configured, UDP 4-tupple %s\n",
                           udp_rss_requested ? "enabled" : "disabled");
-                       return bnx2x_rss(bp, &bp->rss_conf_obj, false, true);
+                       if (bp->state == BNX2X_STATE_OPEN)
+                               return bnx2x_rss(bp, &bp->rss_conf_obj, false,
+                                                true);
                }
                return 0;
 
@@ -3509,7 +3513,10 @@ static int bnx2x_set_rxfh(struct net_device *dev, const u32 *indir,
                bp->rss_conf_obj.ind_table[i] = indir[i] + bp->fp->cl_id;
        }
 
-       return bnx2x_config_rss_eth(bp, false);
+       if (bp->state == BNX2X_STATE_OPEN)
+               return bnx2x_config_rss_eth(bp, false);
+
+       return 0;
 }
 
 /**
index 5d08d2a..e337da6 100644 (file)
@@ -1083,6 +1083,8 @@ static int bgx_lmac_enable(struct bgx *bgx, u8 lmacid)
        lmac->dmacs_count = (RX_DMAC_COUNT / bgx->lmac_count);
        lmac->dmacs = kcalloc(lmac->dmacs_count, sizeof(*lmac->dmacs),
                              GFP_KERNEL);
+       if (!lmac->dmacs)
+               return -ENOMEM;
 
        /* Enable lmac */
        bgx_reg_modify(bgx, lmacid, BGX_CMRX_CFG, CMR_EN);
index 00fc5f1..7dddb9e 100644 (file)
@@ -1038,10 +1038,8 @@ static void mk_act_open_req(struct filter_entry *f, struct sk_buff *skb,
        OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_ACT_OPEN_REQ, qid_filterid));
        req->local_port = cpu_to_be16(f->fs.val.lport);
        req->peer_port = cpu_to_be16(f->fs.val.fport);
-       req->local_ip = f->fs.val.lip[0] | f->fs.val.lip[1] << 8 |
-               f->fs.val.lip[2] << 16 | f->fs.val.lip[3] << 24;
-       req->peer_ip = f->fs.val.fip[0] | f->fs.val.fip[1] << 8 |
-               f->fs.val.fip[2] << 16 | f->fs.val.fip[3] << 24;
+       memcpy(&req->local_ip, f->fs.val.lip, 4);
+       memcpy(&req->peer_ip, f->fs.val.fip, 4);
        req->opt0 = cpu_to_be64(NAGLE_V(f->fs.newvlan == VLAN_REMOVE ||
                                        f->fs.newvlan == VLAN_REWRITE) |
                                DELACK_V(f->fs.hitcnts) |
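
The removed shift-and-OR assembled a host-endian u32 from the four IP bytes
before storing it into a field the hardware consumes as a byte stream, which
goes wrong on big-endian machines; memcpy() keeps the bytes in wire order on
any host. A quick demonstration of the difference:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
            const uint8_t ip[4] = { 192, 0, 2, 1 };
            uint32_t shifted, copied;

            /* Host-endian assembly: where each byte lands depends on
             * the CPU (192 ends up in the low-order byte here). */
            shifted = ip[0] | ip[1] << 8 | ip[2] << 16 |
                      (uint32_t)ip[3] << 24;

            /* memcpy preserves wire order regardless of endianness. */
            memcpy(&copied, ip, 4);

            printf("shifted=%08x copied=%08x (equal only on LE)\n",
                   shifted, copied);
            return 0;
    }
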
index bc03c17..a8926e9 100644 (file)
@@ -3072,6 +3072,7 @@ static void cxgb_del_udp_tunnel(struct net_device *netdev,
 
                adapter->geneve_port = 0;
                t4_write_reg(adapter, MPS_RX_GENEVE_TYPE_A, 0);
+               break;
        default:
                return;
        }
@@ -3157,6 +3158,7 @@ static void cxgb_add_udp_tunnel(struct net_device *netdev,
 
                t4_write_reg(adapter, MPS_RX_GENEVE_TYPE_A,
                             GENEVE_V(be16_to_cpu(ti->port)) | GENEVE_EN_F);
+               break;
        default:
                return;
        }
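
Both switches above were missing a break: after handling the GENEVE case,
control fell through into `default: return;`, so the code following the
switch (the part that actually pushes the updated port state to the adapter)
never ran. A reduced reproduction of the bug shape (names invented):

    #include <stdio.h>

    enum tunnel { VXLAN, GENEVE, OTHER };

    static void del_tunnel(enum tunnel t)
    {
            switch (t) {
            case VXLAN:
                    printf("clear vxlan port\n");
                    break;
            case GENEVE:
                    printf("clear geneve port\n");
                    break; /* without this, control falls into the
                            * default return and skips the code below */
            default:
                    return;
            }

            printf("push updated tunnel port list to hardware\n");
    }

    int main(void)
    {
            del_tunnel(GENEVE); /* now reaches the post-switch work */
            return 0;
    }
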
index 5ab9129..ec0b545 100644 (file)
@@ -19,6 +19,7 @@ if NET_VENDOR_CIRRUS
 config CS89x0
        tristate "CS89x0 support"
        depends on ISA || EISA || ARM
+       depends on !PPC32
        ---help---
          Support for CS89x0 chipset based Ethernet cards. If you have a
          network (Ethernet) card of this type, say Y and read the file
index 90c645b..60641e2 100644 (file)
@@ -2047,28 +2047,42 @@ static int enic_stop(struct net_device *netdev)
        return 0;
 }
 
+static int _enic_change_mtu(struct net_device *netdev, int new_mtu)
+{
+       bool running = netif_running(netdev);
+       int err = 0;
+
+       ASSERT_RTNL();
+       if (running) {
+               err = enic_stop(netdev);
+               if (err)
+                       return err;
+       }
+
+       netdev->mtu = new_mtu;
+
+       if (running) {
+               err = enic_open(netdev);
+               if (err)
+                       return err;
+       }
+
+       return 0;
+}
+
 static int enic_change_mtu(struct net_device *netdev, int new_mtu)
 {
        struct enic *enic = netdev_priv(netdev);
-       int running = netif_running(netdev);
 
        if (enic_is_dynamic(enic) || enic_is_sriov_vf(enic))
                return -EOPNOTSUPP;
 
-       if (running)
-               enic_stop(netdev);
-
-       netdev->mtu = new_mtu;
-
        if (netdev->mtu > enic->port_mtu)
                netdev_warn(netdev,
-                       "interface MTU (%d) set higher than port MTU (%d)\n",
-                       netdev->mtu, enic->port_mtu);
+                           "interface MTU (%d) set higher than port MTU (%d)\n",
+                           netdev->mtu, enic->port_mtu);
 
-       if (running)
-               enic_open(netdev);
-
-       return 0;
+       return _enic_change_mtu(netdev, new_mtu);
 }
 
 static void enic_change_mtu_work(struct work_struct *work)
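
Folding the MTU logic into _enic_change_mtu() replaces the worker's
hand-rolled RQ drain/refill with the driver's own stop/open pair, checks the
return codes the old path ignored, and asserts rtnl is held, so both
ndo_change_mtu and the firmware-triggered worker share one implementation.
The quiesce/change/resume shape in isolation (toy types, invented names):

    #include <stdio.h>

    struct toy_netdev {
            int mtu;
            int running;
    };

    static int toy_stop(struct toy_netdev *nd) { nd->running = 0; return 0; }
    static int toy_open(struct toy_netdev *nd) { nd->running = 1; return 0; }

    static int change_mtu(struct toy_netdev *nd, int new_mtu)
    {
            int was_running = nd->running;
            int err;

            if (was_running) {
                    err = toy_stop(nd);
                    if (err)
                            return err; /* the old worker ignored errors */
            }

            nd->mtu = new_mtu;

            if (was_running) {
                    err = toy_open(nd);
                    if (err)
                            return err;
            }

            return 0;
    }

    int main(void)
    {
            struct toy_netdev nd = { .mtu = 1500, .running = 1 };

            change_mtu(&nd, 9000);
            printf("mtu=%d running=%d\n", nd.mtu, nd.running);
            return 0;
    }
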
@@ -2076,47 +2090,9 @@ static void enic_change_mtu_work(struct work_struct *work)
        struct enic *enic = container_of(work, struct enic, change_mtu_work);
        struct net_device *netdev = enic->netdev;
        int new_mtu = vnic_dev_mtu(enic->vdev);
-       int err;
-       unsigned int i;
-
-       new_mtu = max_t(int, ENIC_MIN_MTU, min_t(int, ENIC_MAX_MTU, new_mtu));
 
        rtnl_lock();
-
-       /* Stop RQ */
-       del_timer_sync(&enic->notify_timer);
-
-       for (i = 0; i < enic->rq_count; i++)
-               napi_disable(&enic->napi[i]);
-
-       vnic_intr_mask(&enic->intr[0]);
-       enic_synchronize_irqs(enic);
-       err = vnic_rq_disable(&enic->rq[0]);
-       if (err) {
-               rtnl_unlock();
-               netdev_err(netdev, "Unable to disable RQ.\n");
-               return;
-       }
-       vnic_rq_clean(&enic->rq[0], enic_free_rq_buf);
-       vnic_cq_clean(&enic->cq[0]);
-       vnic_intr_clean(&enic->intr[0]);
-
-       /* Fill RQ with new_mtu-sized buffers */
-       netdev->mtu = new_mtu;
-       vnic_rq_fill(&enic->rq[0], enic_rq_alloc_buf);
-       /* Need at least one buffer on ring to get going */
-       if (vnic_rq_desc_used(&enic->rq[0]) == 0) {
-               rtnl_unlock();
-               netdev_err(netdev, "Unable to alloc receive buffers.\n");
-               return;
-       }
-
-       /* Start RQ */
-       vnic_rq_enable(&enic->rq[0]);
-       napi_enable(&enic->napi[0]);
-       vnic_intr_unmask(&enic->intr[0]);
-       enic_notify_timer_start(enic);
-
+       (void)_enic_change_mtu(netdev, new_mtu);
        rtnl_unlock();
 
        netdev_info(netdev, "interface MTU set as %d\n", netdev->mtu);
@@ -2916,7 +2892,6 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
         */
 
        enic->port_mtu = enic->config.mtu;
-       (void)enic_change_mtu(netdev, enic->port_mtu);
 
        err = enic_set_mac_addr(netdev, enic->mac_addr);
        if (err) {
@@ -3006,6 +2981,7 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        /* MTU range: 68 - 9000 */
        netdev->min_mtu = ENIC_MIN_MTU;
        netdev->max_mtu = ENIC_MAX_MTU;
+       netdev->mtu     = enic->port_mtu;
 
        err = register_netdev(netdev);
        if (err) {
index 5b12272..09e9da1 100644 (file)
@@ -983,6 +983,7 @@ static int nic_dev_init(struct pci_dev *pdev)
        hinic_hwdev_cb_register(nic_dev->hwdev, HINIC_MGMT_MSG_CMD_LINK_STATUS,
                                nic_dev, link_status_event_handler);
 
+       SET_NETDEV_DEV(netdev, &pdev->dev);
        err = register_netdev(netdev);
        if (err) {
                dev_err(&pdev->dev, "Failed to register netdev\n");
index 9128858..2353ec8 100644 (file)
@@ -229,6 +229,7 @@ netdev_tx_t hinic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
                txq->txq_stats.tx_busy++;
                u64_stats_update_end(&txq->txq_stats.syncp);
                err = NETDEV_TX_BUSY;
+               wqe_size = 0;
                goto flush_skbs;
        }
 
index 7b1b5ac..31bd567 100644 (file)
@@ -2958,7 +2958,7 @@ int mlx4_RST2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
        u32 srqn = qp_get_srqn(qpc) & 0xffffff;
        int use_srq = (qp_get_srqn(qpc) >> 24) & 1;
        struct res_srq *srq;
-       int local_qpn = be32_to_cpu(qpc->local_qpn) & 0xffffff;
+       int local_qpn = vhcr->in_modifier & 0xffffff;
 
        err = adjust_qp_sched_queue(dev, slave, qpc, inbox);
        if (err)
index 323ffe8..456f300 100644 (file)
@@ -123,7 +123,7 @@ int mlx5_frag_buf_alloc_node(struct mlx5_core_dev *dev, int size,
        int i;
 
        buf->size = size;
-       buf->npages = 1 << get_order(size);
+       buf->npages = DIV_ROUND_UP(size, PAGE_SIZE);
        buf->page_shift = PAGE_SHIFT;
        buf->frags = kcalloc(buf->npages, sizeof(struct mlx5_buf_list),
                             GFP_KERNEL);
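
`1 << get_order(size)` rounds the page count up to the next power of two, so
a buffer spanning five pages reserved eight fragment slots;
DIV_ROUND_UP(size, PAGE_SIZE) yields the exact count. A standalone
comparison of the two formulas (get_order reimplemented here the way the
kernel computes it):

    #include <stdio.h>

    #define PAGE_SIZE 4096u
    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    /* Smallest order such that 2^order pages cover `size` bytes. */
    static unsigned int get_order(unsigned int size)
    {
            unsigned int order = 0;

            size = (size - 1) / PAGE_SIZE;
            while (size) {
                    order++;
                    size >>= 1;
            }
            return order;
    }

    int main(void)
    {
            unsigned int size = 5 * PAGE_SIZE;

            printf("1 << get_order: %u pages\n",
                   1u << get_order(size));                     /* 8 */
            printf("DIV_ROUND_UP:   %u pages\n",
                   DIV_ROUND_UP(size, PAGE_SIZE));             /* 5 */
            return 0;
    }
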
index eb9eb7a..405236c 100644 (file)
@@ -858,8 +858,6 @@ struct mlx5e_profile {
                mlx5e_fp_handle_rx_cqe handle_rx_cqe;
                mlx5e_fp_handle_rx_cqe handle_rx_cqe_mpwqe;
        } rx_handlers;
-       void    (*netdev_registered_init)(struct mlx5e_priv *priv);
-       void    (*netdev_registered_remove)(struct mlx5e_priv *priv);
        int     max_tc;
 };
 
index 75e4308..d258bb6 100644 (file)
@@ -381,14 +381,14 @@ static void arfs_may_expire_flow(struct mlx5e_priv *priv)
        HLIST_HEAD(del_list);
        spin_lock_bh(&priv->fs.arfs.arfs_lock);
        mlx5e_for_each_arfs_rule(arfs_rule, htmp, priv->fs.arfs.arfs_tables, i, j) {
-               if (quota++ > MLX5E_ARFS_EXPIRY_QUOTA)
-                       break;
                if (!work_pending(&arfs_rule->arfs_work) &&
                    rps_may_expire_flow(priv->netdev,
                                        arfs_rule->rxq, arfs_rule->flow_id,
                                        arfs_rule->filter_id)) {
                        hlist_del_init(&arfs_rule->hlist);
                        hlist_add_head(&arfs_rule->hlist, &del_list);
+                       if (quota++ > MLX5E_ARFS_EXPIRY_QUOTA)
+                               break;
                }
        }
        spin_unlock_bh(&priv->fs.arfs.arfs_lock);
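
Previously every visited rule was charged against MLX5E_ARFS_EXPIRY_QUOTA,
so a long run of live rules could exhaust the quota before the scan reached
anything expirable; the fix charges the quota only for rules actually moved
to the delete list. The principle (bound the work done, not the items
scanned) in a reduced form, with invented data:

    #include <stdio.h>

    #define EXPIRY_QUOTA 3

    static int expire_flows(const int *expirable, int n)
    {
            int quota = 0, reclaimed = 0;

            for (int i = 0; i < n; i++) {
                    if (!expirable[i])
                            continue;  /* scanning live rules is free */
                    reclaimed++;
                    if (quota++ > EXPIRY_QUOTA)
                            break;     /* bound the reclaim per pass */
            }
            return reclaimed;
    }

    int main(void)
    {
            const int flows[] = { 0, 0, 0, 0, 0, 1, 1, 0, 1 };

            printf("reclaimed %d\n",
                   expire_flows(flows, sizeof(flows) / sizeof(flows[0])));
            return 0;
    }
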
@@ -711,6 +711,9 @@ int mlx5e_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
            skb->protocol != htons(ETH_P_IPV6))
                return -EPROTONOSUPPORT;
 
+       if (skb->encapsulation)
+               return -EPROTONOSUPPORT;
+
        arfs_t = arfs_get_table(arfs, arfs_get_ip_proto(skb), skb->protocol);
        if (!arfs_t)
                return -EPROTONOSUPPORT;
index 0a52f31..722998d 100644 (file)
@@ -275,7 +275,8 @@ int mlx5e_dcbnl_ieee_setets_core(struct mlx5e_priv *priv, struct ieee_ets *ets)
 }
 
 static int mlx5e_dbcnl_validate_ets(struct net_device *netdev,
-                                   struct ieee_ets *ets)
+                                   struct ieee_ets *ets,
+                                   bool zero_sum_allowed)
 {
        bool have_ets_tc = false;
        int bw_sum = 0;
@@ -300,8 +301,9 @@ static int mlx5e_dbcnl_validate_ets(struct net_device *netdev,
        }
 
        if (have_ets_tc && bw_sum != 100) {
-               netdev_err(netdev,
-                          "Failed to validate ETS: BW sum is illegal\n");
+               if (bw_sum || (!bw_sum && !zero_sum_allowed))
+                       netdev_err(netdev,
+                                  "Failed to validate ETS: BW sum is illegal\n");
                return -EINVAL;
        }
        return 0;
@@ -316,7 +318,7 @@ static int mlx5e_dcbnl_ieee_setets(struct net_device *netdev,
        if (!MLX5_CAP_GEN(priv->mdev, ets))
                return -EOPNOTSUPP;
 
-       err = mlx5e_dbcnl_validate_ets(netdev, ets);
+       err = mlx5e_dbcnl_validate_ets(netdev, ets, false);
        if (err)
                return err;
 
@@ -441,16 +443,12 @@ static int mlx5e_dcbnl_ieee_setapp(struct net_device *dev, struct dcb_app *app)
        bool is_new;
        int err;
 
-       if (app->selector != IEEE_8021QAZ_APP_SEL_DSCP)
-               return -EINVAL;
-
-       if (!MLX5_CAP_GEN(priv->mdev, vport_group_manager))
-               return -EINVAL;
-
-       if (!MLX5_DSCP_SUPPORTED(priv->mdev))
-               return -EINVAL;
+       if (!MLX5_CAP_GEN(priv->mdev, vport_group_manager) ||
+           !MLX5_DSCP_SUPPORTED(priv->mdev))
+               return -EOPNOTSUPP;
 
-       if (app->protocol >= MLX5E_MAX_DSCP)
+       if ((app->selector != IEEE_8021QAZ_APP_SEL_DSCP) ||
+           (app->protocol >= MLX5E_MAX_DSCP))
                return -EINVAL;
 
        /* Save the old entry info */
@@ -498,16 +496,12 @@ static int mlx5e_dcbnl_ieee_delapp(struct net_device *dev, struct dcb_app *app)
        struct mlx5e_priv *priv = netdev_priv(dev);
        int err;
 
-       if (app->selector != IEEE_8021QAZ_APP_SEL_DSCP)
-               return -EINVAL;
-
-       if (!MLX5_CAP_GEN(priv->mdev, vport_group_manager))
-               return -EINVAL;
-
-       if (!MLX5_DSCP_SUPPORTED(priv->mdev))
-               return -EINVAL;
+       if  (!MLX5_CAP_GEN(priv->mdev, vport_group_manager) ||
+            !MLX5_DSCP_SUPPORTED(priv->mdev))
+               return -EOPNOTSUPP;
 
-       if (app->protocol >= MLX5E_MAX_DSCP)
+       if ((app->selector != IEEE_8021QAZ_APP_SEL_DSCP) ||
+           (app->protocol >= MLX5E_MAX_DSCP))
                return -EINVAL;
 
        /* Skip if no dscp app entry */
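
[Note: both the setapp and delapp paths now distinguish two failure classes: a missing device capability reports -EOPNOTSUPP (this hardware can never honor the request), while a malformed request (wrong selector, out-of-range protocol) keeps -EINVAL. A sketch of the convention, with made-up names (validate_app, APP_SEL_DSCP, MAX_DSCP):

    #include <errno.h>
    #include <stdbool.h>

    #define APP_SEL_DSCP 5
    #define MAX_DSCP     64

    /* Capability failures and argument failures get different errnos. */
    static int validate_app(bool hw_supported, int selector, int protocol)
    {
            if (!hw_supported)
                    return -EOPNOTSUPP; /* the device cannot do this, ever */
            if (selector != APP_SEL_DSCP || protocol >= MAX_DSCP)
                    return -EINVAL;     /* the caller passed bad arguments */
            return 0;
    }
]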
@@ -642,12 +636,9 @@ static u8 mlx5e_dcbnl_setall(struct net_device *netdev)
                          ets.prio_tc[i]);
        }
 
-       err = mlx5e_dbcnl_validate_ets(netdev, &ets);
-       if (err) {
-               netdev_err(netdev,
-                          "%s, Failed to validate ETS: %d\n", __func__, err);
+       err = mlx5e_dbcnl_validate_ets(netdev, &ets, true);
+       if (err)
                goto out;
-       }
 
        err = mlx5e_dcbnl_ieee_setets_core(priv, &ets);
        if (err) {
@@ -1147,7 +1138,7 @@ static int mlx5e_set_trust_state(struct mlx5e_priv *priv, u8 trust_state)
 {
        int err;
 
-       err =  mlx5_set_trust_state(priv->mdev, trust_state);
+       err = mlx5_set_trust_state(priv->mdev, trust_state);
        if (err)
                return err;
        priv->dcbx_dp.trust_state = trust_state;
@@ -1173,6 +1164,8 @@ static int mlx5e_trust_initialize(struct mlx5e_priv *priv)
        struct mlx5_core_dev *mdev = priv->mdev;
        int err;
 
+       priv->dcbx_dp.trust_state = MLX5_QPTS_TRUST_PCP;
+
        if (!MLX5_DSCP_SUPPORTED(mdev))
                return 0;
 
index dae4156..c592678 100644
@@ -3712,7 +3712,8 @@ int mlx5e_change_mtu(struct net_device *netdev, int new_mtu,
 
        if (!reset) {
                params->sw_mtu = new_mtu;
-               set_mtu_cb(priv);
+               if (set_mtu_cb)
+                       set_mtu_cb(priv);
                netdev->mtu = params->sw_mtu;
                goto out;
        }
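
[Note: mlx5e_change_mtu() takes the pre-activation hook as a function pointer, and callers may pass NULL when no hook is needed, so the non-reset path has to guard the call. The general shape of an optional-callback parameter, as a self-contained sketch (mtu_cb_t and change_mtu are illustrative names):

    #include <stdio.h>

    typedef void (*mtu_cb_t)(void *priv);

    /* Invoke an optional hook only when the caller provided one. */
    static void change_mtu(void *priv, int new_mtu, mtu_cb_t cb)
    {
            if (cb)
                    cb(priv);   /* NULL means "no pre-activation work" */
            printf("mtu set to %d\n", new_mtu);
    }

    int main(void)
    {
            change_mtu(NULL, 1500, NULL);   /* safe: no callback registered */
            return 0;
    }
]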
index 0edf475..dfbcda0 100644
@@ -1957,6 +1957,10 @@ static bool actions_match_supported(struct mlx5e_priv *priv,
        else
                actions = flow->nic_attr->action;
 
+       if (flow->flags & MLX5E_TC_FLOW_EGRESS &&
+           !(actions & MLX5_FLOW_CONTEXT_ACTION_DECAP))
+               return false;
+
        if (actions & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
                return modify_header_match_supported(&parse_attr->spec, exts);
 
@@ -1966,15 +1970,15 @@ static bool actions_match_supported(struct mlx5e_priv *priv,
 static bool same_hw_devs(struct mlx5e_priv *priv, struct mlx5e_priv *peer_priv)
 {
        struct mlx5_core_dev *fmdev, *pmdev;
-       u16 func_id, peer_id;
+       u64 fsystem_guid, psystem_guid;
 
        fmdev = priv->mdev;
        pmdev = peer_priv->mdev;
 
-       func_id = (u16)((fmdev->pdev->bus->number << 8) | PCI_SLOT(fmdev->pdev->devfn));
-       peer_id = (u16)((pmdev->pdev->bus->number << 8) | PCI_SLOT(pmdev->pdev->devfn));
+       mlx5_query_nic_vport_system_image_guid(fmdev, &fsystem_guid);
+       mlx5_query_nic_vport_system_image_guid(pmdev, &psystem_guid);
 
-       return (func_id == peer_id);
+       return (fsystem_guid == psystem_guid);
 }
 
 static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
index b79d748..40dba9e 100644
@@ -1696,7 +1696,7 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev)
        int vport_num;
        int err;
 
-       if (!MLX5_VPORT_MANAGER(dev))
+       if (!MLX5_ESWITCH_MANAGER(dev))
                return 0;
 
        esw_info(dev,
@@ -1765,7 +1765,7 @@ abort:
 
 void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw)
 {
-       if (!esw || !MLX5_VPORT_MANAGER(esw->dev))
+       if (!esw || !MLX5_ESWITCH_MANAGER(esw->dev))
                return;
 
        esw_info(esw->dev, "cleanup\n");
@@ -2216,6 +2216,6 @@ free_out:
 
 u8 mlx5_eswitch_mode(struct mlx5_eswitch *esw)
 {
-       return esw->mode;
+       return ESW_ALLOWED(esw) ? esw->mode : SRIOV_NONE;
 }
 EXPORT_SYMBOL_GPL(mlx5_eswitch_mode);
index f1a86ce..6ddb256 100644
@@ -1887,7 +1887,7 @@ mlx5_add_flow_rules(struct mlx5_flow_table *ft,
        if (flow_act->action == MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO) {
                if (!fwd_next_prio_supported(ft))
                        return ERR_PTR(-EOPNOTSUPP);
-               if (dest)
+               if (dest_num)
                        return ERR_PTR(-EINVAL);
                mutex_lock(&root->chain_lock);
                next_ft = find_next_chained_ft(prio);
index af3bb2f..b7c21eb 100644
@@ -76,6 +76,7 @@ void mlx5i_init(struct mlx5_core_dev *mdev,
                void *ppriv)
 {
        struct mlx5e_priv *priv  = mlx5i_epriv(netdev);
+       u16 max_mtu;
 
        /* priv init */
        priv->mdev        = mdev;
@@ -84,6 +85,9 @@ void mlx5i_init(struct mlx5_core_dev *mdev,
        priv->ppriv       = ppriv;
        mutex_init(&priv->state_lock);
 
+       mlx5_query_port_max_mtu(mdev, &max_mtu, 1);
+       netdev->mtu = max_mtu;
+
        mlx5e_build_nic_params(mdev, &priv->channels.params,
                               profile->max_nch(mdev), netdev->mtu);
        mlx5i_build_nic_params(mdev, &priv->channels.params);
index 1e062e6..3f767cd 100644
@@ -488,6 +488,7 @@ void mlx5_pps_event(struct mlx5_core_dev *mdev,
 void mlx5_init_clock(struct mlx5_core_dev *mdev)
 {
        struct mlx5_clock *clock = &mdev->clock;
+       u64 overflow_cycles;
        u64 ns;
        u64 frac = 0;
        u32 dev_freq;
@@ -511,10 +512,17 @@ void mlx5_init_clock(struct mlx5_core_dev *mdev)
 
        /* Calculate period in seconds to call the overflow watchdog - to make
         * sure counter is checked at least once every wrap around.
+        * The period is calculated as the minimum between max HW cycles count
+        * (The clock source mask) and max amount of cycles that can be
+        * multiplied by clock multiplier where the result doesn't exceed
+        * 64bits.
         */
-       ns = cyclecounter_cyc2ns(&clock->cycles, clock->cycles.mask,
+       overflow_cycles = div64_u64(~0ULL >> 1, clock->cycles.mult);
+       overflow_cycles = min(overflow_cycles, clock->cycles.mask >> 1);
+
+       ns = cyclecounter_cyc2ns(&clock->cycles, overflow_cycles,
                                 frac, &frac);
-       do_div(ns, NSEC_PER_SEC / 2 / HZ);
+       do_div(ns, NSEC_PER_SEC / HZ);
        clock->overflow_period = ns;
 
        mdev->clock_info_page = alloc_page(GFP_KERNEL);
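
[Note: the rewritten comment states the two bounds on the watchdog period. Cyclecounter arithmetic multiplies a cycle delta by clock->cycles.mult into a 64-bit product, so the delta must stay below 2^63 / mult, and independently below half the counter mask so a wrap is still detectable; the do_div() by NSEC_PER_SEC / HZ then converts the resulting nanoseconds into jiffies. A standalone arithmetic check with assumed (not real-device) parameters:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            /* Assumed example parameters, not real device values. */
            uint64_t mult = 1ULL << 21;             /* cyclecounter multiplier */
            uint64_t mask = (1ULL << 41) - 1;       /* 41-bit hardware counter */
            unsigned int shift = 21, hz = 250;

            /* Largest delta whose product with mult still fits in 63 bits. */
            uint64_t overflow_cycles = (~0ULL >> 1) / mult;

            if (overflow_cycles > mask >> 1)
                    overflow_cycles = mask >> 1;    /* at most half a wrap */

            uint64_t ns = (overflow_cycles * mult) >> shift; /* cyc2ns, frac = 0 */
            uint64_t jiffies = ns / (1000000000ULL / hz);

            printf("run the overflow check every %llu jiffies\n",
                   (unsigned long long)jiffies);
            return 0;
    }
]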
index b97bb72..86478a6 100644
@@ -113,35 +113,45 @@ err_db_free:
        return err;
 }
 
-static void mlx5e_qp_set_frag_buf(struct mlx5_frag_buf *buf,
-                                 struct mlx5_wq_qp *qp)
+static void mlx5_qp_set_frag_buf(struct mlx5_frag_buf *buf,
+                                struct mlx5_wq_qp *qp)
 {
+       struct mlx5_frag_buf_ctrl *sq_fbc;
        struct mlx5_frag_buf *rqb, *sqb;
 
-       rqb = &qp->rq.fbc.frag_buf;
+       rqb  = &qp->rq.fbc.frag_buf;
        *rqb = *buf;
        rqb->size   = mlx5_wq_cyc_get_byte_size(&qp->rq);
-       rqb->npages = 1 << get_order(rqb->size);
+       rqb->npages = DIV_ROUND_UP(rqb->size, PAGE_SIZE);
 
-       sqb = &qp->sq.fbc.frag_buf;
-       *sqb = *buf;
-       sqb->size   = mlx5_wq_cyc_get_byte_size(&qp->rq);
-       sqb->npages = 1 << get_order(sqb->size);
+       sq_fbc = &qp->sq.fbc;
+       sqb    = &sq_fbc->frag_buf;
+       *sqb   = *buf;
+       sqb->size   = mlx5_wq_cyc_get_byte_size(&qp->sq);
+       sqb->npages = DIV_ROUND_UP(sqb->size, PAGE_SIZE);
        sqb->frags += rqb->npages; /* first part is for the rq */
+       if (sq_fbc->strides_offset)
+               sqb->frags--;
 }
 
 int mlx5_wq_qp_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
                      void *qpc, struct mlx5_wq_qp *wq,
                      struct mlx5_wq_ctrl *wq_ctrl)
 {
+       u32 sq_strides_offset;
        int err;
 
        mlx5_fill_fbc(MLX5_GET(qpc, qpc, log_rq_stride) + 4,
                      MLX5_GET(qpc, qpc, log_rq_size),
                      &wq->rq.fbc);
-       mlx5_fill_fbc(ilog2(MLX5_SEND_WQE_BB),
-                     MLX5_GET(qpc, qpc, log_sq_size),
-                     &wq->sq.fbc);
+
+       sq_strides_offset =
+               ((wq->rq.fbc.frag_sz_m1 + 1) % PAGE_SIZE) / MLX5_SEND_WQE_BB;
+
+       mlx5_fill_fbc_offset(ilog2(MLX5_SEND_WQE_BB),
+                            MLX5_GET(qpc, qpc, log_sq_size),
+                            sq_strides_offset,
+                            &wq->sq.fbc);
 
        err = mlx5_db_alloc_node(mdev, &wq_ctrl->db, param->db_numa_node);
        if (err) {
@@ -156,7 +166,7 @@ int mlx5_wq_qp_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
                goto err_db_free;
        }
 
-       mlx5e_qp_set_frag_buf(&wq_ctrl->buf, wq);
+       mlx5_qp_set_frag_buf(&wq_ctrl->buf, wq);
 
        wq->rq.db  = &wq_ctrl->db.db[MLX5_RCV_DBR];
        wq->sq.db  = &wq_ctrl->db.db[MLX5_SND_DBR];
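
[Note: two fixes land in mlx5_qp_set_frag_buf(): the SQ byte size was previously computed from &qp->rq (a copy-paste slip), and 1 << get_order(size) rounds the page count up to a power of two, over-counting fragments whenever the exact count is not one. DIV_ROUND_UP(size, PAGE_SIZE) counts pages exactly. A quick numeric check, assuming 4 KiB pages and a userspace stand-in for get_order():

    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)
    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    /* Userspace stand-in for the kernel's get_order(). */
    static int get_order(unsigned long size)
    {
            int order = 0;

            size = (size - 1) >> PAGE_SHIFT;
            while (size) {
                    order++;
                    size >>= 1;
            }
            return order;
    }

    int main(void)
    {
            unsigned long size = 5 * PAGE_SIZE;     /* a 20 KiB work queue */

            printf("power-of-two pages: %lu\n", 1UL << get_order(size));        /* 8 */
            printf("exact page count:   %lu\n", DIV_ROUND_UP(size, PAGE_SIZE)); /* 5 */
            return 0;
    }
]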
index 3c0d882..f6f6a56 100644
@@ -327,12 +327,16 @@ static void mlxsw_afa_resource_add(struct mlxsw_afa_block *block,
        list_add(&resource->list, &block->resource_list);
 }
 
+static void mlxsw_afa_resource_del(struct mlxsw_afa_resource *resource)
+{
+       list_del(&resource->list);
+}
+
 static void mlxsw_afa_resources_destroy(struct mlxsw_afa_block *block)
 {
        struct mlxsw_afa_resource *resource, *tmp;
 
        list_for_each_entry_safe(resource, tmp, &block->resource_list, list) {
-               list_del(&resource->list);
                resource->destructor(block, resource);
        }
 }
@@ -530,6 +534,7 @@ static void
 mlxsw_afa_fwd_entry_ref_destroy(struct mlxsw_afa_block *block,
                                struct mlxsw_afa_fwd_entry_ref *fwd_entry_ref)
 {
+       mlxsw_afa_resource_del(&fwd_entry_ref->resource);
        mlxsw_afa_fwd_entry_put(block->afa, fwd_entry_ref->fwd_entry);
        kfree(fwd_entry_ref);
 }
@@ -579,6 +584,7 @@ static void
 mlxsw_afa_counter_destroy(struct mlxsw_afa_block *block,
                          struct mlxsw_afa_counter *counter)
 {
+       mlxsw_afa_resource_del(&counter->resource);
        block->afa->ops->counter_index_put(block->afa->ops_priv,
                                           counter->counter_index);
        kfree(counter);
@@ -626,8 +632,8 @@ static char *mlxsw_afa_block_append_action(struct mlxsw_afa_block *block,
        char *oneact;
        char *actions;
 
-       if (WARN_ON(block->finished))
-               return NULL;
+       if (block->finished)
+               return ERR_PTR(-EINVAL);
        if (block->cur_act_index + action_size >
            block->afa->max_acts_per_set) {
                struct mlxsw_afa_set *set;
@@ -637,7 +643,7 @@ static char *mlxsw_afa_block_append_action(struct mlxsw_afa_block *block,
                 */
                set = mlxsw_afa_set_create(false);
                if (!set)
-                       return NULL;
+                       return ERR_PTR(-ENOBUFS);
                set->prev = block->cur_set;
                block->cur_act_index = 0;
                block->cur_set->next = set;
@@ -724,8 +730,8 @@ int mlxsw_afa_block_append_vlan_modify(struct mlxsw_afa_block *block,
                                                  MLXSW_AFA_VLAN_CODE,
                                                  MLXSW_AFA_VLAN_SIZE);
 
-       if (!act)
-               return -ENOBUFS;
+       if (IS_ERR(act))
+               return PTR_ERR(act);
        mlxsw_afa_vlan_pack(act, MLXSW_AFA_VLAN_VLAN_TAG_CMD_NOP,
                            MLXSW_AFA_VLAN_CMD_SET_OUTER, vid,
                            MLXSW_AFA_VLAN_CMD_SET_OUTER, pcp,
@@ -806,8 +812,8 @@ int mlxsw_afa_block_append_drop(struct mlxsw_afa_block *block)
                                                  MLXSW_AFA_TRAPDISC_CODE,
                                                  MLXSW_AFA_TRAPDISC_SIZE);
 
-       if (!act)
-               return -ENOBUFS;
+       if (IS_ERR(act))
+               return PTR_ERR(act);
        mlxsw_afa_trapdisc_pack(act, MLXSW_AFA_TRAPDISC_TRAP_ACTION_NOP,
                                MLXSW_AFA_TRAPDISC_FORWARD_ACTION_DISCARD, 0);
        return 0;
@@ -820,8 +826,8 @@ int mlxsw_afa_block_append_trap(struct mlxsw_afa_block *block, u16 trap_id)
                                                  MLXSW_AFA_TRAPDISC_CODE,
                                                  MLXSW_AFA_TRAPDISC_SIZE);
 
-       if (!act)
-               return -ENOBUFS;
+       if (IS_ERR(act))
+               return PTR_ERR(act);
        mlxsw_afa_trapdisc_pack(act, MLXSW_AFA_TRAPDISC_TRAP_ACTION_TRAP,
                                MLXSW_AFA_TRAPDISC_FORWARD_ACTION_DISCARD,
                                trap_id);
@@ -836,8 +842,8 @@ int mlxsw_afa_block_append_trap_and_forward(struct mlxsw_afa_block *block,
                                                  MLXSW_AFA_TRAPDISC_CODE,
                                                  MLXSW_AFA_TRAPDISC_SIZE);
 
-       if (!act)
-               return -ENOBUFS;
+       if (IS_ERR(act))
+               return PTR_ERR(act);
        mlxsw_afa_trapdisc_pack(act, MLXSW_AFA_TRAPDISC_TRAP_ACTION_TRAP,
                                MLXSW_AFA_TRAPDISC_FORWARD_ACTION_FORWARD,
                                trap_id);
@@ -856,6 +862,7 @@ static void
 mlxsw_afa_mirror_destroy(struct mlxsw_afa_block *block,
                         struct mlxsw_afa_mirror *mirror)
 {
+       mlxsw_afa_resource_del(&mirror->resource);
        block->afa->ops->mirror_del(block->afa->ops_priv,
                                    mirror->local_in_port,
                                    mirror->span_id,
@@ -908,8 +915,8 @@ mlxsw_afa_block_append_allocated_mirror(struct mlxsw_afa_block *block,
        char *act = mlxsw_afa_block_append_action(block,
                                                  MLXSW_AFA_TRAPDISC_CODE,
                                                  MLXSW_AFA_TRAPDISC_SIZE);
-       if (!act)
-               return -ENOBUFS;
+       if (IS_ERR(act))
+               return PTR_ERR(act);
        mlxsw_afa_trapdisc_pack(act, MLXSW_AFA_TRAPDISC_TRAP_ACTION_NOP,
                                MLXSW_AFA_TRAPDISC_FORWARD_ACTION_FORWARD, 0);
        mlxsw_afa_trapdisc_mirror_pack(act, true, mirror_agent);
@@ -996,8 +1003,8 @@ int mlxsw_afa_block_append_fwd(struct mlxsw_afa_block *block,
 
        act = mlxsw_afa_block_append_action(block, MLXSW_AFA_FORWARD_CODE,
                                            MLXSW_AFA_FORWARD_SIZE);
-       if (!act) {
-               err = -ENOBUFS;
+       if (IS_ERR(act)) {
+               err = PTR_ERR(act);
                goto err_append_action;
        }
        mlxsw_afa_forward_pack(act, MLXSW_AFA_FORWARD_TYPE_PBS,
@@ -1052,8 +1059,8 @@ int mlxsw_afa_block_append_allocated_counter(struct mlxsw_afa_block *block,
 {
        char *act = mlxsw_afa_block_append_action(block, MLXSW_AFA_POLCNT_CODE,
                                                  MLXSW_AFA_POLCNT_SIZE);
-       if (!act)
-               return -ENOBUFS;
+       if (IS_ERR(act))
+               return PTR_ERR(act);
        mlxsw_afa_polcnt_pack(act, MLXSW_AFA_POLCNT_COUNTER_SET_TYPE_PACKETS_BYTES,
                              counter_index);
        return 0;
@@ -1123,8 +1130,8 @@ int mlxsw_afa_block_append_fid_set(struct mlxsw_afa_block *block, u16 fid)
        char *act = mlxsw_afa_block_append_action(block,
                                                  MLXSW_AFA_VIRFWD_CODE,
                                                  MLXSW_AFA_VIRFWD_SIZE);
-       if (!act)
-               return -ENOBUFS;
+       if (IS_ERR(act))
+               return PTR_ERR(act);
        mlxsw_afa_virfwd_pack(act, MLXSW_AFA_VIRFWD_FID_CMD_SET, fid);
        return 0;
 }
@@ -1193,8 +1200,8 @@ int mlxsw_afa_block_append_mcrouter(struct mlxsw_afa_block *block,
        char *act = mlxsw_afa_block_append_action(block,
                                                  MLXSW_AFA_MCROUTER_CODE,
                                                  MLXSW_AFA_MCROUTER_SIZE);
-       if (!act)
-               return -ENOBUFS;
+       if (IS_ERR(act))
+               return PTR_ERR(act);
        mlxsw_afa_mcrouter_pack(act, MLXSW_AFA_MCROUTER_RPF_ACTION_TRAP,
                                expected_irif, min_mtu, rmid_valid, kvdl_index);
        return 0;
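
[Note: all the mlxsw hunks above follow from one change: mlxsw_afa_block_append_action() now encodes the failure reason in the returned pointer instead of collapsing everything to NULL, so callers can propagate -EINVAL (appending to a finished block) distinctly from -ENOBUFS (no room for a new set). The kernel's ERR_PTR()/IS_ERR()/PTR_ERR() helpers from <linux/err.h> make this idiom cheap; a self-contained sketch with userspace stand-ins:

    #include <stdio.h>
    #include <errno.h>

    /* Userspace stand-ins for the kernel's <linux/err.h> helpers. */
    #define MAX_ERRNO 4095
    static void *ERR_PTR(long err) { return (void *)err; }
    static long PTR_ERR(const void *p) { return (long)p; }
    static int IS_ERR(const void *p)
    {
            return (unsigned long)p >= (unsigned long)-MAX_ERRNO;
    }

    static char slot[16];

    /* Hypothetical append: the pointer carries *why* it failed. */
    static char *append_action(int finished, int fits)
    {
            if (finished)
                    return ERR_PTR(-EINVAL);
            if (!fits)
                    return ERR_PTR(-ENOBUFS);
            return slot;
    }

    int main(void)
    {
            char *act = append_action(1, 1);

            if (IS_ERR(act))
                    printf("append failed: %ld\n", PTR_ERR(act)); /* -22 */
            return 0;
    }
]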
index 1decf3a..e57d237 100644
@@ -80,7 +80,7 @@ nfp_flower_repr_get_type_and_port(struct nfp_app *app, u32 port_id, u8 *port)
                        return NFP_REPR_TYPE_VF;
        }
 
-       return NFP_FLOWER_CMSG_PORT_TYPE_UNSPEC;
+       return __NFP_REPR_TYPE_MAX;
 }
 
 static struct net_device *
@@ -91,6 +91,8 @@ nfp_flower_repr_get(struct nfp_app *app, u32 port_id)
        u8 port = 0;
 
        repr_type = nfp_flower_repr_get_type_and_port(app, port_id, &port);
+       if (repr_type > NFP_REPR_TYPE_MAX)
+               return NULL;
 
        reprs = rcu_dereference(app->reprs[repr_type]);
        if (!reprs)
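
[Note: the returned type is used directly as an index into app->reprs[], so the "unknown" sentinel must lie outside the valid range and be rejected before the array access; the old sentinel came from an unrelated enum and was not a safe index. A generic sketch of the sentinel-plus-bounds-check shape (names are illustrative):

    #include <stdio.h>

    enum repr_type { REPR_PHYS, REPR_PF, REPR_VF, __REPR_TYPE_MAX };
    #define REPR_TYPE_MAX (__REPR_TYPE_MAX - 1)

    static const char * const reprs[__REPR_TYPE_MAX] = { "phys", "pf", "vf" };

    /* Returns an out-of-range sentinel for ports it cannot classify. */
    static enum repr_type classify(unsigned int port_id)
    {
            if (port_id < __REPR_TYPE_MAX)
                    return (enum repr_type)port_id;
            return __REPR_TYPE_MAX;
    }

    int main(void)
    {
            enum repr_type t = classify(7);

            if (t > REPR_TYPE_MAX) {        /* validate BEFORE indexing */
                    printf("unknown port\n");
                    return 0;
            }
            printf("repr: %s\n", reprs[t]);
            return 0;
    }
]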
index 78afe75..382bb93 100644
@@ -317,7 +317,7 @@ nfp_tun_write_neigh(struct net_device *netdev, struct nfp_app *app,
        payload.dst_ipv4 = flow->daddr;
 
        /* If entry has expired send dst IP with all other fields 0. */
-       if (!(neigh->nud_state & NUD_VALID)) {
+       if (!(neigh->nud_state & NUD_VALID) || neigh->dead) {
                nfp_tun_del_route_from_cache(app, payload.dst_ipv4);
                /* Trigger ARP to verify invalid neighbour state. */
                neigh_event_send(neigh, NULL);
index 99973e1..5ede640 100644
@@ -665,7 +665,7 @@ qed_sp_update_mcast_bin(struct qed_hwfn *p_hwfn,
 
        p_ramrod->common.update_approx_mcast_flg = 1;
        for (i = 0; i < ETH_MULTICAST_MAC_BINS_IN_REGS; i++) {
-               u32 *p_bins = (u32 *)p_params->bins;
+               u32 *p_bins = p_params->bins;
 
                p_ramrod->approx_mcast.bins[i] = cpu_to_le32(p_bins[i]);
        }
@@ -1476,8 +1476,8 @@ qed_sp_eth_filter_mcast(struct qed_hwfn *p_hwfn,
                        enum spq_mode comp_mode,
                        struct qed_spq_comp_cb *p_comp_data)
 {
-       unsigned long bins[ETH_MULTICAST_MAC_BINS_IN_REGS];
        struct vport_update_ramrod_data *p_ramrod = NULL;
+       u32 bins[ETH_MULTICAST_MAC_BINS_IN_REGS];
        struct qed_spq_entry *p_ent = NULL;
        struct qed_sp_init_data init_data;
        u8 abs_vport_id = 0;
@@ -1513,26 +1513,25 @@ qed_sp_eth_filter_mcast(struct qed_hwfn *p_hwfn,
        /* explicitly clear out the entire vector */
        memset(&p_ramrod->approx_mcast.bins, 0,
               sizeof(p_ramrod->approx_mcast.bins));
-       memset(bins, 0, sizeof(unsigned long) *
-              ETH_MULTICAST_MAC_BINS_IN_REGS);
+       memset(bins, 0, sizeof(bins));
        /* filter ADD op is explicit set op and it removes
         *  any existing filters for the vport
         */
        if (p_filter_cmd->opcode == QED_FILTER_ADD) {
                for (i = 0; i < p_filter_cmd->num_mc_addrs; i++) {
-                       u32 bit;
+                       u32 bit, nbits;
 
                        bit = qed_mcast_bin_from_mac(p_filter_cmd->mac[i]);
-                       __set_bit(bit, bins);
+                       nbits = sizeof(u32) * BITS_PER_BYTE;
+                       bins[bit / nbits] |= 1 << (bit % nbits);
                }
 
                /* Convert to correct endianity */
                for (i = 0; i < ETH_MULTICAST_MAC_BINS_IN_REGS; i++) {
                        struct vport_update_ramrod_mcast *p_ramrod_bins;
-                       u32 *p_bins = (u32 *)bins;
 
                        p_ramrod_bins = &p_ramrod->approx_mcast;
-                       p_ramrod_bins->bins[i] = cpu_to_le32(p_bins[i]);
+                       p_ramrod_bins->bins[i] = cpu_to_le32(bins[i]);
                }
        }
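
[Note: the bins move from unsigned long to u32 because __set_bit() lays bits out in native word size: on a 64-bit big-endian host, reading the bitmap back as an array of u32 words does not yield the words the fixed device ABI expects, and the (u32 *) cast hid that. Setting bits by hand in a u32 array gives the same layout on every host. Standalone check:

    #include <stdio.h>
    #include <stdint.h>

    #define BINS_IN_REGS 8

    int main(void)
    {
            uint32_t bins[BINS_IN_REGS] = { 0 };
            unsigned int bit = 77;                  /* example multicast bin */
            unsigned int nbits = 32;                /* bits per u32 word */

            /* Word-size independent: bit 77 is bit 13 of bins[2]. */
            bins[bit / nbits] |= 1U << (bit % nbits);

            printf("bins[2] = 0x%08x\n", bins[2]);  /* 0x00002000 */
            return 0;
    }
]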
 
index 806a8da..8d80f10 100644
@@ -215,7 +215,7 @@ struct qed_sp_vport_update_params {
        u8                              anti_spoofing_en;
        u8                              update_accept_any_vlan_flg;
        u8                              accept_any_vlan;
-       unsigned long                   bins[8];
+       u32                             bins[8];
        struct qed_rss_params           *rss_params;
        struct qed_filter_accept_flags  accept_flags;
        struct qed_sge_tpa_params       *sge_tpa_params;
index 9d9e533..cdd6450 100644
@@ -1211,6 +1211,7 @@ static void qed_mcp_handle_link_change(struct qed_hwfn *p_hwfn,
                break;
        default:
                p_link->speed = 0;
+               p_link->link_up = 0;
        }
 
        if (p_link->link_up && p_link->speed)
@@ -1308,9 +1309,15 @@ int qed_mcp_set_link(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, bool b_up)
        phy_cfg.pause |= (params->pause.forced_tx) ? ETH_PAUSE_TX : 0;
        phy_cfg.adv_speed = params->speed.advertised_speeds;
        phy_cfg.loopback_mode = params->loopback_mode;
-       if (p_hwfn->mcp_info->capabilities & FW_MB_PARAM_FEATURE_SUPPORT_EEE) {
-               if (params->eee.enable)
-                       phy_cfg.eee_cfg |= EEE_CFG_EEE_ENABLED;
+
+       /* There are MFWs that share this capability regardless of whether
+        * this is feasible or not. And given that at the very least adv_caps
+        * would be set internally by qed, we want to make sure LFA would
+        * still work.
+        */
+       if ((p_hwfn->mcp_info->capabilities &
+            FW_MB_PARAM_FEATURE_SUPPORT_EEE) && params->eee.enable) {
+               phy_cfg.eee_cfg |= EEE_CFG_EEE_ENABLED;
                if (params->eee.tx_lpi_enable)
                        phy_cfg.eee_cfg |= EEE_CFG_TX_LPI;
                if (params->eee.adv_caps & QED_EEE_1G_ADV)
index fd59cf4..26e918d 100644
@@ -2831,7 +2831,7 @@ qed_iov_vp_update_mcast_bin_param(struct qed_hwfn *p_hwfn,
 
        p_data->update_approx_mcast_flg = 1;
        memcpy(p_data->bins, p_mcast_tlv->bins,
-              sizeof(unsigned long) * ETH_MULTICAST_MAC_BINS_IN_REGS);
+              sizeof(u32) * ETH_MULTICAST_MAC_BINS_IN_REGS);
        *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_MCAST;
 }
 
index 2d7fcd6..be6ddde 100644
@@ -1126,7 +1126,7 @@ int qed_vf_pf_vport_update(struct qed_hwfn *p_hwfn,
                resp_size += sizeof(struct pfvf_def_resp_tlv);
 
                memcpy(p_mcast_tlv->bins, p_params->bins,
-                      sizeof(unsigned long) * ETH_MULTICAST_MAC_BINS_IN_REGS);
+                      sizeof(u32) * ETH_MULTICAST_MAC_BINS_IN_REGS);
        }
 
        update_rx = p_params->accept_flags.update_rx_mode_config;
@@ -1272,7 +1272,7 @@ void qed_vf_pf_filter_mcast(struct qed_hwfn *p_hwfn,
                        u32 bit;
 
                        bit = qed_mcast_bin_from_mac(p_filter_cmd->mac[i]);
-                       __set_bit(bit, sp_params.bins);
+                       sp_params.bins[bit / 32] |= 1 << (bit % 32);
                }
        }
 
index 4f05d5e..033409d 100644
@@ -392,7 +392,12 @@ struct vfpf_vport_update_mcast_bin_tlv {
        struct channel_tlv tl;
        u8 padding[4];
 
-       u64 bins[8];
+       /* There are only 256 approx bins, and in HSI they're divided into
+        * 32-bit values. As old VFs used to set-bit to the values on its side,
+        * the upper half of the array is never expected to contain any data.
+        */
+       u64 bins[4];
+       u64 obsolete_bins[4];
 };
 
 struct vfpf_vport_update_accept_param_tlv {
index a3f6990..eaedc11 100644
@@ -7734,8 +7734,7 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
                return rc;
        }
 
-       /* override BIOS settings, use userspace tools to enable WOL */
-       __rtl8169_set_wol(tp, 0);
+       tp->saved_wolopts = __rtl8169_get_wol(tp);
 
        if (rtl_tbi_enabled(tp)) {
                tp->set_speed = rtl8169_set_speed_tbi;
index 60f59ab..ef6a8d3 100644
@@ -53,7 +53,7 @@
 #include "dwmac1000.h"
 #include "hwif.h"
 
-#define STMMAC_ALIGN(x)        L1_CACHE_ALIGN(x)
+#define        STMMAC_ALIGN(x)         __ALIGN_KERNEL(x, SMP_CACHE_BYTES)
 #define        TSO_MAX_BUFF_SIZE       (SZ_16K - 1)
 
 /* Module parameters */
index 8d375e5..6a393b1 100644
@@ -257,7 +257,7 @@ static int stmmac_pci_probe(struct pci_dev *pdev,
                return -ENOMEM;
 
        /* Enable pci device */
-       ret = pcim_enable_device(pdev);
+       ret = pci_enable_device(pdev);
        if (ret) {
                dev_err(&pdev->dev, "%s: ERROR: failed to enable device\n",
                        __func__);
@@ -300,9 +300,45 @@ static int stmmac_pci_probe(struct pci_dev *pdev,
 static void stmmac_pci_remove(struct pci_dev *pdev)
 {
        stmmac_dvr_remove(&pdev->dev);
+       pci_disable_device(pdev);
 }
 
-static SIMPLE_DEV_PM_OPS(stmmac_pm_ops, stmmac_suspend, stmmac_resume);
+static int stmmac_pci_suspend(struct device *dev)
+{
+       struct pci_dev *pdev = to_pci_dev(dev);
+       int ret;
+
+       ret = stmmac_suspend(dev);
+       if (ret)
+               return ret;
+
+       ret = pci_save_state(pdev);
+       if (ret)
+               return ret;
+
+       pci_disable_device(pdev);
+       pci_wake_from_d3(pdev, true);
+       return 0;
+}
+
+static int stmmac_pci_resume(struct device *dev)
+{
+       struct pci_dev *pdev = to_pci_dev(dev);
+       int ret;
+
+       pci_restore_state(pdev);
+       pci_set_power_state(pdev, PCI_D0);
+
+       ret = pci_enable_device(pdev);
+       if (ret)
+               return ret;
+
+       pci_set_master(pdev);
+
+       return stmmac_resume(dev);
+}
+
+static SIMPLE_DEV_PM_OPS(stmmac_pm_ops, stmmac_pci_suspend, stmmac_pci_resume);
 
 /* synthetic ID, no official vendor */
 #define PCI_VENDOR_ID_STMMAC 0x700
index 358edab..3e34cb8 100644
@@ -2086,14 +2086,16 @@ static int cpsw_ndo_vlan_rx_add_vid(struct net_device *ndev,
                int i;
 
                for (i = 0; i < cpsw->data.slaves; i++) {
-                       if (vid == cpsw->slaves[i].port_vlan)
-                               return -EINVAL;
+                       if (vid == cpsw->slaves[i].port_vlan) {
+                               ret = -EINVAL;
+                               goto err;
+                       }
                }
        }
 
        dev_info(priv->dev, "Adding vlanid %d to vlan filter\n", vid);
        ret = cpsw_add_vlan_ale_entry(priv, vid);
-
+err:
        pm_runtime_put(cpsw->dev);
        return ret;
 }
@@ -2119,22 +2121,17 @@ static int cpsw_ndo_vlan_rx_kill_vid(struct net_device *ndev,
 
                for (i = 0; i < cpsw->data.slaves; i++) {
                        if (vid == cpsw->slaves[i].port_vlan)
-                               return -EINVAL;
+                               goto err;
                }
        }
 
        dev_info(priv->dev, "removing vlanid %d from vlan filter\n", vid);
        ret = cpsw_ale_del_vlan(cpsw->ale, vid, 0);
-       if (ret != 0)
-               return ret;
-
-       ret = cpsw_ale_del_ucast(cpsw->ale, priv->mac_addr,
-                                HOST_PORT_NUM, ALE_VLAN, vid);
-       if (ret != 0)
-               return ret;
-
-       ret = cpsw_ale_del_mcast(cpsw->ale, priv->ndev->broadcast,
-                                0, ALE_VLAN, vid);
+       ret |= cpsw_ale_del_ucast(cpsw->ale, priv->mac_addr,
+                                 HOST_PORT_NUM, ALE_VLAN, vid);
+       ret |= cpsw_ale_del_mcast(cpsw->ale, priv->ndev->broadcast,
+                                 0, ALE_VLAN, vid);
+err:
        pm_runtime_put(cpsw->dev);
        return ret;
 }
index 93dc05c..5766225 100644
@@ -394,7 +394,7 @@ int cpsw_ale_del_mcast(struct cpsw_ale *ale, u8 *addr, int port_mask,
 
        idx = cpsw_ale_match_addr(ale, addr, (flags & ALE_VLAN) ? vid : 0);
        if (idx < 0)
-               return -EINVAL;
+               return -ENOENT;
 
        cpsw_ale_read(ale, idx, ale_entry);
 
index 16c3bfb..757a3b3 100644
@@ -218,6 +218,7 @@ issue:
        ret = of_mdiobus_register(bus, np1);
        if (ret) {
                mdiobus_free(bus);
+               lp->mii_bus = NULL;
                return ret;
        }
        return 0;
index ba663e5..5135fc3 100644
@@ -207,6 +207,7 @@ void nsim_devlink_teardown(struct netdevsim *ns)
                struct net *net = nsim_to_net(ns);
                bool *reg_devlink = net_generic(net, nsim_devlink_id);
 
+               devlink_resources_unregister(ns->devlink, NULL);
                devlink_unregister(ns->devlink);
                devlink_free(ns->devlink);
                ns->devlink = NULL;
index 0831b71..0c5b68e 100644
@@ -218,7 +218,7 @@ out:
 
 static int mdio_mux_iproc_remove(struct platform_device *pdev)
 {
-       struct iproc_mdiomux_desc *md = dev_get_platdata(&pdev->dev);
+       struct iproc_mdiomux_desc *md = platform_get_drvdata(pdev);
 
        mdio_mux_uninit(md->mux_handle);
        mdiobus_unregister(md->mii_bus);
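
[Note: the remove path was reading dev_get_platdata() (board-supplied platform data, NULL for this device) where the probe routine had stored its state with platform_set_drvdata(); the getter must mirror the setter. The usual pairing, as a sketch with a hypothetical driver (struct my_desc, my_probe, my_remove):

    #include <linux/platform_device.h>
    #include <linux/slab.h>

    struct my_desc {
            int dummy;
    };

    static int my_probe(struct platform_device *pdev)
    {
            struct my_desc *md;

            md = devm_kzalloc(&pdev->dev, sizeof(*md), GFP_KERNEL);
            if (!md)
                    return -ENOMEM;
            platform_set_drvdata(pdev, md);  /* stash for later callbacks */
            return 0;
    }

    static int my_remove(struct platform_device *pdev)
    {
            /* Mirrors the setter; dev_get_platdata() is a different pointer. */
            struct my_desc *md = platform_get_drvdata(pdev);

            (void)md;       /* release md's resources here */
            return 0;
    }
]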
index 537297d..6c9b24f 100644
@@ -514,7 +514,7 @@ static int phy_start_aneg_priv(struct phy_device *phydev, bool sync)
         * negotiation may already be done and aneg interrupt may not be
         * generated.
         */
-       if (phy_interrupt_is_valid(phydev) && (phydev->state == PHY_AN)) {
+       if (phydev->irq != PHY_POLL && phydev->state == PHY_AN) {
                err = phy_aneg_done(phydev);
                if (err > 0) {
                        trigger = true;
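
[Note: phy_interrupt_is_valid() is true only for a genuine IRQ number, so a PHY configured as PHY_IGNORE_INTERRUPT (link events delivered by other means, e.g. the MAC) skipped the "aneg may already have completed" path; comparing against PHY_POLL covers both interrupt-driven modes. A sketch of the distinction, reusing the kernel's conventional sentinel values (PHY_POLL is -1, PHY_IGNORE_INTERRUPT is -2):

    #include <stdio.h>

    #define PHY_POLL             (-1)
    #define PHY_IGNORE_INTERRUPT (-2)

    /* "Not polling" must include both real and externally handled IRQs. */
    static int uses_interrupts(int irq)
    {
            return irq != PHY_POLL;
    }

    int main(void)
    {
            printf("irq 42: %d\n", uses_interrupts(42));                   /* 1 */
            printf("ignore: %d\n", uses_interrupts(PHY_IGNORE_INTERRUPT)); /* 1 */
            printf("poll:   %d\n", uses_interrupts(PHY_POLL));             /* 0 */
            return 0;
    }
]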
index ed10d49..aeca484 100644
@@ -1242,6 +1242,8 @@ static int lan78xx_link_reset(struct lan78xx_net *dev)
                        mod_timer(&dev->stat_monitor,
                                  jiffies + STAT_UPDATE_TIMER);
                }
+
+               tasklet_schedule(&dev->bh);
        }
 
        return ret;
index 3850280..cb0cc30 100644
@@ -1246,7 +1246,7 @@ static const struct usb_device_id products[] = {
        {QMI_FIXED_INTF(0x413c, 0x81b3, 8)},    /* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card (rev3) */
        {QMI_FIXED_INTF(0x413c, 0x81b6, 8)},    /* Dell Wireless 5811e */
        {QMI_FIXED_INTF(0x413c, 0x81b6, 10)},   /* Dell Wireless 5811e */
-       {QMI_FIXED_INTF(0x413c, 0x81d7, 1)},    /* Dell Wireless 5821e */
+       {QMI_FIXED_INTF(0x413c, 0x81d7, 0)},    /* Dell Wireless 5821e */
        {QMI_FIXED_INTF(0x03f0, 0x4e1d, 8)},    /* HP lt4111 LTE/EV-DO/HSPA+ Gobi 4G Module */
        {QMI_FIXED_INTF(0x03f0, 0x9d1d, 1)},    /* HP lt4120 Snapdragon X5 LTE */
        {QMI_FIXED_INTF(0x22de, 0x9061, 3)},    /* WeTelecom WPD-600N */
index 53085c6..2b6ec92 100644
@@ -586,7 +586,8 @@ static struct sk_buff *receive_small(struct net_device *dev,
                                     struct receive_queue *rq,
                                     void *buf, void *ctx,
                                     unsigned int len,
-                                    unsigned int *xdp_xmit)
+                                    unsigned int *xdp_xmit,
+                                    unsigned int *rbytes)
 {
        struct sk_buff *skb;
        struct bpf_prog *xdp_prog;
@@ -601,6 +602,7 @@ static struct sk_buff *receive_small(struct net_device *dev,
        int err;
 
        len -= vi->hdr_len;
+       *rbytes += len;
 
        rcu_read_lock();
        xdp_prog = rcu_dereference(rq->xdp_prog);
@@ -705,11 +707,13 @@ static struct sk_buff *receive_big(struct net_device *dev,
                                   struct virtnet_info *vi,
                                   struct receive_queue *rq,
                                   void *buf,
-                                  unsigned int len)
+                                  unsigned int len,
+                                  unsigned int *rbytes)
 {
        struct page *page = buf;
        struct sk_buff *skb = page_to_skb(vi, rq, page, 0, len, PAGE_SIZE);
 
+       *rbytes += len - vi->hdr_len;
        if (unlikely(!skb))
                goto err;
 
@@ -727,7 +731,8 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
                                         void *buf,
                                         void *ctx,
                                         unsigned int len,
-                                        unsigned int *xdp_xmit)
+                                        unsigned int *xdp_xmit,
+                                        unsigned int *rbytes)
 {
        struct virtio_net_hdr_mrg_rxbuf *hdr = buf;
        u16 num_buf = virtio16_to_cpu(vi->vdev, hdr->num_buffers);
@@ -740,6 +745,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
        int err;
 
        head_skb = NULL;
+       *rbytes += len - vi->hdr_len;
 
        rcu_read_lock();
        xdp_prog = rcu_dereference(rq->xdp_prog);
@@ -877,6 +883,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
                        goto err_buf;
                }
 
+               *rbytes += len;
                page = virt_to_head_page(buf);
 
                truesize = mergeable_ctx_to_truesize(ctx);
@@ -932,6 +939,7 @@ err_skb:
                        dev->stats.rx_length_errors++;
                        break;
                }
+               *rbytes += len;
                page = virt_to_head_page(buf);
                put_page(page);
        }
@@ -942,14 +950,13 @@ xdp_xmit:
        return NULL;
 }
 
-static int receive_buf(struct virtnet_info *vi, struct receive_queue *rq,
-                      void *buf, unsigned int len, void **ctx,
-                      unsigned int *xdp_xmit)
+static void receive_buf(struct virtnet_info *vi, struct receive_queue *rq,
+                       void *buf, unsigned int len, void **ctx,
+                       unsigned int *xdp_xmit, unsigned int *rbytes)
 {
        struct net_device *dev = vi->dev;
        struct sk_buff *skb;
        struct virtio_net_hdr_mrg_rxbuf *hdr;
-       int ret;
 
        if (unlikely(len < vi->hdr_len + ETH_HLEN)) {
                pr_debug("%s: short packet %i\n", dev->name, len);
@@ -961,23 +968,22 @@ static int receive_buf(struct virtnet_info *vi, struct receive_queue *rq,
                } else {
                        put_page(virt_to_head_page(buf));
                }
-               return 0;
+               return;
        }
 
        if (vi->mergeable_rx_bufs)
-               skb = receive_mergeable(dev, vi, rq, buf, ctx, len, xdp_xmit);
+               skb = receive_mergeable(dev, vi, rq, buf, ctx, len, xdp_xmit,
+                                       rbytes);
        else if (vi->big_packets)
-               skb = receive_big(dev, vi, rq, buf, len);
+               skb = receive_big(dev, vi, rq, buf, len, rbytes);
        else
-               skb = receive_small(dev, vi, rq, buf, ctx, len, xdp_xmit);
+               skb = receive_small(dev, vi, rq, buf, ctx, len, xdp_xmit, rbytes);
 
        if (unlikely(!skb))
-               return 0;
+               return;
 
        hdr = skb_vnet_hdr(skb);
 
-       ret = skb->len;
-
        if (hdr->hdr.flags & VIRTIO_NET_HDR_F_DATA_VALID)
                skb->ip_summed = CHECKSUM_UNNECESSARY;
 
@@ -994,12 +1000,11 @@ static int receive_buf(struct virtnet_info *vi, struct receive_queue *rq,
                 ntohs(skb->protocol), skb->len, skb->pkt_type);
 
        napi_gro_receive(&rq->napi, skb);
-       return ret;
+       return;
 
 frame_err:
        dev->stats.rx_frame_errors++;
        dev_kfree_skb(skb);
-       return 0;
 }
 
 /* Unlike mergeable buffers, all buffers are allocated to the
@@ -1249,13 +1254,13 @@ static int virtnet_receive(struct receive_queue *rq, int budget,
 
                while (received < budget &&
                       (buf = virtqueue_get_buf_ctx(rq->vq, &len, &ctx))) {
-                       bytes += receive_buf(vi, rq, buf, len, ctx, xdp_xmit);
+                       receive_buf(vi, rq, buf, len, ctx, xdp_xmit, &bytes);
                        received++;
                }
        } else {
                while (received < budget &&
                       (buf = virtqueue_get_buf(rq->vq, &len)) != NULL) {
-                       bytes += receive_buf(vi, rq, buf, len, NULL, xdp_xmit);
+                       receive_buf(vi, rq, buf, len, NULL, xdp_xmit, &bytes);
                        received++;
                }
        }
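
[Note: receive_buf() used to report consumed bytes through its return value, which dropped the length of buffers that error out or are consumed by XDP before an skb exists; threading an rbytes out-parameter lets every receive path add exactly what it pulled off the ring, keeping the rx byte statistics honest. The accumulation pattern in isolation (receive_one is an illustrative name):

    #include <stdio.h>

    /* Accumulate into the out-parameter on every path, including the
     * ones that never deliver a packet. */
    static void receive_one(unsigned int len, int drop, unsigned int *rbytes)
    {
            *rbytes += len;         /* counted even if dropped below */
            if (drop)
                    return;         /* error/XDP path: no skb delivered */
            /* ... hand the packet up the stack ... */
    }

    int main(void)
    {
            unsigned int bytes = 0;

            receive_one(1500, 0, &bytes);
            receive_one(60, 1, &bytes);      /* dropped, still counted */
            printf("rx bytes: %u\n", bytes); /* 1560 */
            return 0;
    }
]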
index f6bb1d5..e857cb3 100644
@@ -636,8 +636,61 @@ static int vxlan_gro_complete(struct sock *sk, struct sk_buff *skb, int nhoff)
        return eth_gro_complete(skb, nhoff + sizeof(struct vxlanhdr));
 }
 
-/* Add new entry to forwarding table -- assumes lock held */
+static struct vxlan_fdb *vxlan_fdb_alloc(struct vxlan_dev *vxlan,
+                                        const u8 *mac, __u16 state,
+                                        __be32 src_vni, __u8 ndm_flags)
+{
+       struct vxlan_fdb *f;
+
+       f = kmalloc(sizeof(*f), GFP_ATOMIC);
+       if (!f)
+               return NULL;
+       f->state = state;
+       f->flags = ndm_flags;
+       f->updated = f->used = jiffies;
+       f->vni = src_vni;
+       INIT_LIST_HEAD(&f->remotes);
+       memcpy(f->eth_addr, mac, ETH_ALEN);
+
+       return f;
+}
+
 static int vxlan_fdb_create(struct vxlan_dev *vxlan,
+                           const u8 *mac, union vxlan_addr *ip,
+                           __u16 state, __be16 port, __be32 src_vni,
+                           __be32 vni, __u32 ifindex, __u8 ndm_flags,
+                           struct vxlan_fdb **fdb)
+{
+       struct vxlan_rdst *rd = NULL;
+       struct vxlan_fdb *f;
+       int rc;
+
+       if (vxlan->cfg.addrmax &&
+           vxlan->addrcnt >= vxlan->cfg.addrmax)
+               return -ENOSPC;
+
+       netdev_dbg(vxlan->dev, "add %pM -> %pIS\n", mac, ip);
+       f = vxlan_fdb_alloc(vxlan, mac, state, src_vni, ndm_flags);
+       if (!f)
+               return -ENOMEM;
+
+       rc = vxlan_fdb_append(f, ip, port, vni, ifindex, &rd);
+       if (rc < 0) {
+               kfree(f);
+               return rc;
+       }
+
+       ++vxlan->addrcnt;
+       hlist_add_head_rcu(&f->hlist,
+                          vxlan_fdb_head(vxlan, mac, src_vni));
+
+       *fdb = f;
+
+       return 0;
+}
+
+/* Add new entry to forwarding table -- assumes lock held */
+static int vxlan_fdb_update(struct vxlan_dev *vxlan,
                            const u8 *mac, union vxlan_addr *ip,
                            __u16 state, __u16 flags,
                            __be16 port, __be32 src_vni, __be32 vni,
@@ -687,37 +740,17 @@ static int vxlan_fdb_create(struct vxlan_dev *vxlan,
                if (!(flags & NLM_F_CREATE))
                        return -ENOENT;
 
-               if (vxlan->cfg.addrmax &&
-                   vxlan->addrcnt >= vxlan->cfg.addrmax)
-                       return -ENOSPC;
-
                /* Disallow replace to add a multicast entry */
                if ((flags & NLM_F_REPLACE) &&
                    (is_multicast_ether_addr(mac) || is_zero_ether_addr(mac)))
                        return -EOPNOTSUPP;
 
                netdev_dbg(vxlan->dev, "add %pM -> %pIS\n", mac, ip);
-               f = kmalloc(sizeof(*f), GFP_ATOMIC);
-               if (!f)
-                       return -ENOMEM;
-
-               notify = 1;
-               f->state = state;
-               f->flags = ndm_flags;
-               f->updated = f->used = jiffies;
-               f->vni = src_vni;
-               INIT_LIST_HEAD(&f->remotes);
-               memcpy(f->eth_addr, mac, ETH_ALEN);
-
-               rc = vxlan_fdb_append(f, ip, port, vni, ifindex, &rd);
-               if (rc < 0) {
-                       kfree(f);
+               rc = vxlan_fdb_create(vxlan, mac, ip, state, port, src_vni,
+                                     vni, ifindex, ndm_flags, &f);
+               if (rc < 0)
                        return rc;
-               }
-
-               ++vxlan->addrcnt;
-               hlist_add_head_rcu(&f->hlist,
-                                  vxlan_fdb_head(vxlan, mac, src_vni));
+               notify = 1;
        }
 
        if (notify) {
@@ -741,13 +774,15 @@ static void vxlan_fdb_free(struct rcu_head *head)
        kfree(f);
 }
 
-static void vxlan_fdb_destroy(struct vxlan_dev *vxlan, struct vxlan_fdb *f)
+static void vxlan_fdb_destroy(struct vxlan_dev *vxlan, struct vxlan_fdb *f,
+                             bool do_notify)
 {
        netdev_dbg(vxlan->dev,
                    "delete %pM\n", f->eth_addr);
 
        --vxlan->addrcnt;
-       vxlan_fdb_notify(vxlan, f, first_remote_rtnl(f), RTM_DELNEIGH);
+       if (do_notify)
+               vxlan_fdb_notify(vxlan, f, first_remote_rtnl(f), RTM_DELNEIGH);
 
        hlist_del_rcu(&f->hlist);
        call_rcu(&f->rcu, vxlan_fdb_free);
@@ -863,7 +898,7 @@ static int vxlan_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
                return -EAFNOSUPPORT;
 
        spin_lock_bh(&vxlan->hash_lock);
-       err = vxlan_fdb_create(vxlan, addr, &ip, ndm->ndm_state, flags,
+       err = vxlan_fdb_update(vxlan, addr, &ip, ndm->ndm_state, flags,
                               port, src_vni, vni, ifindex, ndm->ndm_flags);
        spin_unlock_bh(&vxlan->hash_lock);
 
@@ -897,7 +932,7 @@ static int __vxlan_fdb_delete(struct vxlan_dev *vxlan,
                goto out;
        }
 
-       vxlan_fdb_destroy(vxlan, f);
+       vxlan_fdb_destroy(vxlan, f, true);
 
 out:
        return 0;
@@ -1006,7 +1041,7 @@ static bool vxlan_snoop(struct net_device *dev,
 
                /* close off race between vxlan_flush and incoming packets */
                if (netif_running(dev))
-                       vxlan_fdb_create(vxlan, src_mac, src_ip,
+                       vxlan_fdb_update(vxlan, src_mac, src_ip,
                                         NUD_REACHABLE,
                                         NLM_F_EXCL|NLM_F_CREATE,
                                         vxlan->cfg.dst_port,
@@ -2364,7 +2399,7 @@ static void vxlan_cleanup(struct timer_list *t)
                                           "garbage collect %pM\n",
                                           f->eth_addr);
                                f->state = NUD_STALE;
-                               vxlan_fdb_destroy(vxlan, f);
+                               vxlan_fdb_destroy(vxlan, f, true);
                        } else if (time_before(timeout, next_timer))
                                next_timer = timeout;
                }
@@ -2415,7 +2450,7 @@ static void vxlan_fdb_delete_default(struct vxlan_dev *vxlan, __be32 vni)
        spin_lock_bh(&vxlan->hash_lock);
        f = __vxlan_find_mac(vxlan, all_zeros_mac, vni);
        if (f)
-               vxlan_fdb_destroy(vxlan, f);
+               vxlan_fdb_destroy(vxlan, f, true);
        spin_unlock_bh(&vxlan->hash_lock);
 }
 
@@ -2469,7 +2504,7 @@ static void vxlan_flush(struct vxlan_dev *vxlan, bool do_all)
                                continue;
                        /* the all_zeros_mac entry is deleted at vxlan_uninit */
                        if (!is_zero_ether_addr(f->eth_addr))
-                               vxlan_fdb_destroy(vxlan, f);
+                               vxlan_fdb_destroy(vxlan, f, true);
                }
        }
        spin_unlock_bh(&vxlan->hash_lock);
@@ -3160,6 +3195,7 @@ static int __vxlan_dev_create(struct net *net, struct net_device *dev,
 {
        struct vxlan_net *vn = net_generic(net, vxlan_net_id);
        struct vxlan_dev *vxlan = netdev_priv(dev);
+       struct vxlan_fdb *f = NULL;
        int err;
 
        err = vxlan_dev_configure(net, dev, conf, false, extack);
@@ -3173,24 +3209,35 @@ static int __vxlan_dev_create(struct net *net, struct net_device *dev,
                err = vxlan_fdb_create(vxlan, all_zeros_mac,
                                       &vxlan->default_dst.remote_ip,
                                       NUD_REACHABLE | NUD_PERMANENT,
-                                      NLM_F_EXCL | NLM_F_CREATE,
                                       vxlan->cfg.dst_port,
                                       vxlan->default_dst.remote_vni,
                                       vxlan->default_dst.remote_vni,
                                       vxlan->default_dst.remote_ifindex,
-                                      NTF_SELF);
+                                      NTF_SELF, &f);
                if (err)
                        return err;
        }
 
        err = register_netdevice(dev);
+       if (err)
+               goto errout;
+
+       err = rtnl_configure_link(dev, NULL);
        if (err) {
-               vxlan_fdb_delete_default(vxlan, vxlan->default_dst.remote_vni);
-               return err;
+               unregister_netdevice(dev);
+               goto errout;
        }
 
+       /* notify default fdb entry */
+       if (f)
+               vxlan_fdb_notify(vxlan, f, first_remote_rtnl(f), RTM_NEWNEIGH);
+
        list_add(&vxlan->next, &vn->vxlan_list);
        return 0;
+errout:
+       if (f)
+               vxlan_fdb_destroy(vxlan, f, false);
+       return err;
 }
 
 static int vxlan_nl2conf(struct nlattr *tb[], struct nlattr *data[],
@@ -3425,6 +3472,7 @@ static int vxlan_changelink(struct net_device *dev, struct nlattr *tb[],
        struct vxlan_rdst *dst = &vxlan->default_dst;
        struct vxlan_rdst old_dst;
        struct vxlan_config conf;
+       struct vxlan_fdb *f = NULL;
        int err;
 
        err = vxlan_nl2conf(tb, data,
@@ -3453,16 +3501,16 @@ static int vxlan_changelink(struct net_device *dev, struct nlattr *tb[],
                        err = vxlan_fdb_create(vxlan, all_zeros_mac,
                                               &dst->remote_ip,
                                               NUD_REACHABLE | NUD_PERMANENT,
-                                              NLM_F_CREATE | NLM_F_APPEND,
                                               vxlan->cfg.dst_port,
                                               dst->remote_vni,
                                               dst->remote_vni,
                                               dst->remote_ifindex,
-                                              NTF_SELF);
+                                              NTF_SELF, &f);
                        if (err) {
                                spin_unlock_bh(&vxlan->hash_lock);
                                return err;
                        }
+                       vxlan_fdb_notify(vxlan, f, first_remote_rtnl(f), RTM_NEWNEIGH);
                }
                spin_unlock_bh(&vxlan->hash_lock);
        }
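
[Note: the thread running through all the vxlan hunks: creation of the default all-zeros fdb entry is split out of the netlink-update path (vxlan_fdb_create() vs. vxlan_fdb_update()) so it can happen before register_netdevice() without emitting RTM_NEWNEIGH for a device userspace has not yet been told about. The notification is sent only after rtnl_configure_link() succeeds, and the new do_notify flag lets the error path tear the entry down without an RTM_DELNEIGH for something never announced. The ordering, sketched with hypothetical helpers (fdb_create_silently, fdb_notify_new, fdb_destroy stand in for the driver's functions):

    /* Sketch of the notify-after-registration ordering; the fdb_* helpers
     * are hypothetical stand-ins, not the driver's functions. */
    static int my_dev_create(struct net_device *dev, struct vxlan_dev *vxlan)
    {
            struct vxlan_fdb *f = NULL;
            int err;

            err = fdb_create_silently(vxlan, &f);   /* no netlink event yet */
            if (err)
                    return err;

            err = register_netdevice(dev);
            if (err)
                    goto destroy_silently;

            err = rtnl_configure_link(dev, NULL);
            if (err) {
                    unregister_netdevice(dev);
                    goto destroy_silently;
            }

            fdb_notify_new(vxlan, f);   /* userspace sees the device first */
            return 0;

    destroy_silently:
            fdb_destroy(vxlan, f, false); /* never announced, so no DELNEIGH */
            return err;
    }
]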
index 90a4ad9..b3a1b6f 100644
@@ -1362,7 +1362,7 @@ static irqreturn_t lmc_interrupt (int irq, void *dev_instance) /*fold00*/
             case 0x001:
                 printk(KERN_WARNING "%s: Master Abort (naughty)\n", dev->name);
                 break;
-            case 0x010:
+            case 0x002:
                 printk(KERN_WARNING "%s: Target Abort (not so naughty)\n", dev->name);
                 break;
             default:
index 45928b5..4fffa69 100644
@@ -1785,7 +1785,8 @@ brcmf_pcie_prepare_fw_request(struct brcmf_pciedev_info *devinfo)
        fwreq->items[BRCMF_PCIE_FW_CODE].type = BRCMF_FW_TYPE_BINARY;
        fwreq->items[BRCMF_PCIE_FW_NVRAM].type = BRCMF_FW_TYPE_NVRAM;
        fwreq->items[BRCMF_PCIE_FW_NVRAM].flags = BRCMF_FW_REQF_OPTIONAL;
-       fwreq->domain_nr = pci_domain_nr(devinfo->pdev->bus);
+       /* NVRAM reserves PCI domain 0 for Broadcom's SDK faked bus */
+       fwreq->domain_nr = pci_domain_nr(devinfo->pdev->bus) + 1;
        fwreq->bus_nr = devinfo->pdev->bus->number;
 
        return fwreq;
index e20c30b..c8ea63d 100644
@@ -178,6 +178,17 @@ const struct iwl_cfg iwl9260_2ac_cfg = {
        .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
 };
 
+const struct iwl_cfg iwl9260_killer_2ac_cfg = {
+       .name = "Killer (R) Wireless-AC 1550 Wireless Network Adapter (9260NGW)",
+       .fw_name_pre = IWL9260A_FW_PRE,
+       .fw_name_pre_b_or_c_step = IWL9260B_FW_PRE,
+       IWL_DEVICE_9000,
+       .ht_params = &iwl9000_ht_params,
+       .nvm_ver = IWL9000_NVM_VERSION,
+       .nvm_calib_ver = IWL9000_TX_POWER_VERSION,
+       .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
+};
+
 const struct iwl_cfg iwl9270_2ac_cfg = {
        .name = "Intel(R) Dual Band Wireless AC 9270",
        .fw_name_pre = IWL9260A_FW_PRE,
@@ -267,6 +278,34 @@ const struct iwl_cfg iwl9560_2ac_cfg_soc = {
        .soc_latency = 5000,
 };
 
+const struct iwl_cfg iwl9560_killer_2ac_cfg_soc = {
+       .name = "Killer (R) Wireless-AC 1550i Wireless Network Adapter (9560NGW)",
+       .fw_name_pre = IWL9000A_FW_PRE,
+       .fw_name_pre_b_or_c_step = IWL9000B_FW_PRE,
+       .fw_name_pre_rf_next_step = IWL9000RFB_FW_PRE,
+       IWL_DEVICE_9000,
+       .ht_params = &iwl9000_ht_params,
+       .nvm_ver = IWL9000_NVM_VERSION,
+       .nvm_calib_ver = IWL9000_TX_POWER_VERSION,
+       .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
+       .integrated = true,
+       .soc_latency = 5000,
+};
+
+const struct iwl_cfg iwl9560_killer_s_2ac_cfg_soc = {
+       .name = "Killer (R) Wireless-AC 1550s Wireless Network Adapter (9560NGW)",
+       .fw_name_pre = IWL9000A_FW_PRE,
+       .fw_name_pre_b_or_c_step = IWL9000B_FW_PRE,
+       .fw_name_pre_rf_next_step = IWL9000RFB_FW_PRE,
+       IWL_DEVICE_9000,
+       .ht_params = &iwl9000_ht_params,
+       .nvm_ver = IWL9000_NVM_VERSION,
+       .nvm_calib_ver = IWL9000_TX_POWER_VERSION,
+       .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
+       .integrated = true,
+       .soc_latency = 5000,
+};
+
 const struct iwl_cfg iwl9460_2ac_cfg_shared_clk = {
        .name = "Intel(R) Dual Band Wireless AC 9460",
        .fw_name_pre = IWL9000A_FW_PRE,
@@ -327,6 +366,36 @@ const struct iwl_cfg iwl9560_2ac_cfg_shared_clk = {
        .extra_phy_cfg_flags = FW_PHY_CFG_SHARED_CLK
 };
 
+const struct iwl_cfg iwl9560_killer_2ac_cfg_shared_clk = {
+       .name = "Killer (R) Wireless-AC 1550i Wireless Network Adapter (9560NGW)",
+       .fw_name_pre = IWL9000A_FW_PRE,
+       .fw_name_pre_b_or_c_step = IWL9000B_FW_PRE,
+       .fw_name_pre_rf_next_step = IWL9000RFB_FW_PRE,
+       IWL_DEVICE_9000,
+       .ht_params = &iwl9000_ht_params,
+       .nvm_ver = IWL9000_NVM_VERSION,
+       .nvm_calib_ver = IWL9000_TX_POWER_VERSION,
+       .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
+       .integrated = true,
+       .soc_latency = 5000,
+       .extra_phy_cfg_flags = FW_PHY_CFG_SHARED_CLK
+};
+
+const struct iwl_cfg iwl9560_killer_s_2ac_cfg_shared_clk = {
+       .name = "Killer (R) Wireless-AC 1550s Wireless Network Adapter (9560NGW)",
+       .fw_name_pre = IWL9000A_FW_PRE,
+       .fw_name_pre_b_or_c_step = IWL9000B_FW_PRE,
+       .fw_name_pre_rf_next_step = IWL9000RFB_FW_PRE,
+       IWL_DEVICE_9000,
+       .ht_params = &iwl9000_ht_params,
+       .nvm_ver = IWL9000_NVM_VERSION,
+       .nvm_calib_ver = IWL9000_TX_POWER_VERSION,
+       .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
+       .integrated = true,
+       .soc_latency = 5000,
+       .extra_phy_cfg_flags = FW_PHY_CFG_SHARED_CLK
+};
+
 MODULE_FIRMWARE(IWL9000A_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX));
 MODULE_FIRMWARE(IWL9000B_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX));
 MODULE_FIRMWARE(IWL9000RFB_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX));
index c503b26..84a8168 100644
@@ -551,6 +551,7 @@ extern const struct iwl_cfg iwl8275_2ac_cfg;
 extern const struct iwl_cfg iwl4165_2ac_cfg;
 extern const struct iwl_cfg iwl9160_2ac_cfg;
 extern const struct iwl_cfg iwl9260_2ac_cfg;
+extern const struct iwl_cfg iwl9260_killer_2ac_cfg;
 extern const struct iwl_cfg iwl9270_2ac_cfg;
 extern const struct iwl_cfg iwl9460_2ac_cfg;
 extern const struct iwl_cfg iwl9560_2ac_cfg;
@@ -558,10 +559,14 @@ extern const struct iwl_cfg iwl9460_2ac_cfg_soc;
 extern const struct iwl_cfg iwl9461_2ac_cfg_soc;
 extern const struct iwl_cfg iwl9462_2ac_cfg_soc;
 extern const struct iwl_cfg iwl9560_2ac_cfg_soc;
+extern const struct iwl_cfg iwl9560_killer_2ac_cfg_soc;
+extern const struct iwl_cfg iwl9560_killer_s_2ac_cfg_soc;
 extern const struct iwl_cfg iwl9460_2ac_cfg_shared_clk;
 extern const struct iwl_cfg iwl9461_2ac_cfg_shared_clk;
 extern const struct iwl_cfg iwl9462_2ac_cfg_shared_clk;
 extern const struct iwl_cfg iwl9560_2ac_cfg_shared_clk;
+extern const struct iwl_cfg iwl9560_killer_2ac_cfg_shared_clk;
+extern const struct iwl_cfg iwl9560_killer_s_2ac_cfg_shared_clk;
 extern const struct iwl_cfg iwl22000_2ac_cfg_hr;
 extern const struct iwl_cfg iwl22000_2ac_cfg_hr_cdb;
 extern const struct iwl_cfg iwl22000_2ac_cfg_jf;
index 38234bd..8520523 100644
@@ -545,6 +545,9 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
        {IWL_PCI_DEVICE(0x2526, 0x1210, iwl9260_2ac_cfg)},
        {IWL_PCI_DEVICE(0x2526, 0x1410, iwl9270_2ac_cfg)},
        {IWL_PCI_DEVICE(0x2526, 0x1420, iwl9460_2ac_cfg_soc)},
+       {IWL_PCI_DEVICE(0x2526, 0x1550, iwl9260_killer_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x2526, 0x1551, iwl9560_killer_s_2ac_cfg_soc)},
+       {IWL_PCI_DEVICE(0x2526, 0x1552, iwl9560_killer_2ac_cfg_soc)},
        {IWL_PCI_DEVICE(0x2526, 0x1610, iwl9270_2ac_cfg)},
        {IWL_PCI_DEVICE(0x2526, 0x2030, iwl9560_2ac_cfg_soc)},
        {IWL_PCI_DEVICE(0x2526, 0x2034, iwl9560_2ac_cfg_soc)},
@@ -554,6 +557,7 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
        {IWL_PCI_DEVICE(0x2526, 0x40A4, iwl9460_2ac_cfg)},
        {IWL_PCI_DEVICE(0x2526, 0x4234, iwl9560_2ac_cfg_soc)},
        {IWL_PCI_DEVICE(0x2526, 0x42A4, iwl9462_2ac_cfg_soc)},
+       {IWL_PCI_DEVICE(0x2526, 0x8014, iwl9260_2ac_cfg)},
        {IWL_PCI_DEVICE(0x2526, 0xA014, iwl9260_2ac_cfg)},
        {IWL_PCI_DEVICE(0x271B, 0x0010, iwl9160_2ac_cfg)},
        {IWL_PCI_DEVICE(0x271B, 0x0014, iwl9160_2ac_cfg)},
@@ -578,6 +582,8 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
        {IWL_PCI_DEVICE(0x2720, 0x1010, iwl9260_2ac_cfg)},
        {IWL_PCI_DEVICE(0x2720, 0x1030, iwl9560_2ac_cfg_soc)},
        {IWL_PCI_DEVICE(0x2720, 0x1210, iwl9260_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x2720, 0x1551, iwl9560_killer_s_2ac_cfg_soc)},
+       {IWL_PCI_DEVICE(0x2720, 0x1552, iwl9560_killer_2ac_cfg_soc)},
        {IWL_PCI_DEVICE(0x2720, 0x2030, iwl9560_2ac_cfg_soc)},
        {IWL_PCI_DEVICE(0x2720, 0x2034, iwl9560_2ac_cfg_soc)},
        {IWL_PCI_DEVICE(0x2720, 0x4030, iwl9560_2ac_cfg)},
@@ -604,6 +610,8 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
        {IWL_PCI_DEVICE(0x30DC, 0x1010, iwl9260_2ac_cfg)},
        {IWL_PCI_DEVICE(0x30DC, 0x1030, iwl9560_2ac_cfg_soc)},
        {IWL_PCI_DEVICE(0x30DC, 0x1210, iwl9260_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x30DC, 0x1551, iwl9560_killer_s_2ac_cfg_soc)},
+       {IWL_PCI_DEVICE(0x30DC, 0x1552, iwl9560_killer_2ac_cfg_soc)},
        {IWL_PCI_DEVICE(0x30DC, 0x2030, iwl9560_2ac_cfg_soc)},
        {IWL_PCI_DEVICE(0x30DC, 0x2034, iwl9560_2ac_cfg_soc)},
        {IWL_PCI_DEVICE(0x30DC, 0x4030, iwl9560_2ac_cfg_soc)},
@@ -630,6 +638,8 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
        {IWL_PCI_DEVICE(0x31DC, 0x1010, iwl9260_2ac_cfg)},
        {IWL_PCI_DEVICE(0x31DC, 0x1030, iwl9560_2ac_cfg_shared_clk)},
        {IWL_PCI_DEVICE(0x31DC, 0x1210, iwl9260_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x31DC, 0x1551, iwl9560_killer_s_2ac_cfg_shared_clk)},
+       {IWL_PCI_DEVICE(0x31DC, 0x1552, iwl9560_killer_2ac_cfg_shared_clk)},
        {IWL_PCI_DEVICE(0x31DC, 0x2030, iwl9560_2ac_cfg_shared_clk)},
        {IWL_PCI_DEVICE(0x31DC, 0x2034, iwl9560_2ac_cfg_shared_clk)},
        {IWL_PCI_DEVICE(0x31DC, 0x4030, iwl9560_2ac_cfg_shared_clk)},
@@ -656,6 +666,8 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
        {IWL_PCI_DEVICE(0x34F0, 0x1010, iwl9260_2ac_cfg)},
        {IWL_PCI_DEVICE(0x34F0, 0x1030, iwl9560_2ac_cfg_soc)},
        {IWL_PCI_DEVICE(0x34F0, 0x1210, iwl9260_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x34F0, 0x1551, iwl9560_killer_s_2ac_cfg_soc)},
+       {IWL_PCI_DEVICE(0x34F0, 0x1552, iwl9560_killer_2ac_cfg_soc)},
        {IWL_PCI_DEVICE(0x34F0, 0x2030, iwl9560_2ac_cfg_soc)},
        {IWL_PCI_DEVICE(0x34F0, 0x2034, iwl9560_2ac_cfg_soc)},
        {IWL_PCI_DEVICE(0x34F0, 0x4030, iwl9560_2ac_cfg_soc)},
@@ -682,6 +694,8 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
        {IWL_PCI_DEVICE(0x3DF0, 0x1010, iwl9260_2ac_cfg)},
        {IWL_PCI_DEVICE(0x3DF0, 0x1030, iwl9560_2ac_cfg_soc)},
        {IWL_PCI_DEVICE(0x3DF0, 0x1210, iwl9260_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x3DF0, 0x1551, iwl9560_killer_s_2ac_cfg_soc)},
+       {IWL_PCI_DEVICE(0x3DF0, 0x1552, iwl9560_killer_2ac_cfg_soc)},
        {IWL_PCI_DEVICE(0x3DF0, 0x2030, iwl9560_2ac_cfg_soc)},
        {IWL_PCI_DEVICE(0x3DF0, 0x2034, iwl9560_2ac_cfg_soc)},
        {IWL_PCI_DEVICE(0x3DF0, 0x4030, iwl9560_2ac_cfg_soc)},
@@ -708,6 +722,8 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
        {IWL_PCI_DEVICE(0x43F0, 0x1010, iwl9260_2ac_cfg)},
        {IWL_PCI_DEVICE(0x43F0, 0x1030, iwl9560_2ac_cfg_soc)},
        {IWL_PCI_DEVICE(0x43F0, 0x1210, iwl9260_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x43F0, 0x1551, iwl9560_killer_s_2ac_cfg_soc)},
+       {IWL_PCI_DEVICE(0x43F0, 0x1552, iwl9560_killer_2ac_cfg_soc)},
        {IWL_PCI_DEVICE(0x43F0, 0x2030, iwl9560_2ac_cfg_soc)},
        {IWL_PCI_DEVICE(0x43F0, 0x2034, iwl9560_2ac_cfg_soc)},
        {IWL_PCI_DEVICE(0x43F0, 0x4030, iwl9560_2ac_cfg_soc)},
@@ -743,6 +759,8 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
        {IWL_PCI_DEVICE(0x9DF0, 0x1010, iwl9260_2ac_cfg)},
        {IWL_PCI_DEVICE(0x9DF0, 0x1030, iwl9560_2ac_cfg_soc)},
        {IWL_PCI_DEVICE(0x9DF0, 0x1210, iwl9260_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x9DF0, 0x1551, iwl9560_killer_s_2ac_cfg_soc)},
+       {IWL_PCI_DEVICE(0x9DF0, 0x1552, iwl9560_killer_2ac_cfg_soc)},
        {IWL_PCI_DEVICE(0x9DF0, 0x2010, iwl9460_2ac_cfg_soc)},
        {IWL_PCI_DEVICE(0x9DF0, 0x2030, iwl9560_2ac_cfg_soc)},
        {IWL_PCI_DEVICE(0x9DF0, 0x2034, iwl9560_2ac_cfg_soc)},
@@ -771,6 +789,8 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
        {IWL_PCI_DEVICE(0xA0F0, 0x1010, iwl9260_2ac_cfg)},
        {IWL_PCI_DEVICE(0xA0F0, 0x1030, iwl9560_2ac_cfg_soc)},
        {IWL_PCI_DEVICE(0xA0F0, 0x1210, iwl9260_2ac_cfg)},
+       {IWL_PCI_DEVICE(0xA0F0, 0x1551, iwl9560_killer_s_2ac_cfg_soc)},
+       {IWL_PCI_DEVICE(0xA0F0, 0x1552, iwl9560_killer_2ac_cfg_soc)},
        {IWL_PCI_DEVICE(0xA0F0, 0x2030, iwl9560_2ac_cfg_soc)},
        {IWL_PCI_DEVICE(0xA0F0, 0x2034, iwl9560_2ac_cfg_soc)},
        {IWL_PCI_DEVICE(0xA0F0, 0x4030, iwl9560_2ac_cfg_soc)},
@@ -797,6 +817,8 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
        {IWL_PCI_DEVICE(0xA370, 0x1010, iwl9260_2ac_cfg)},
        {IWL_PCI_DEVICE(0xA370, 0x1030, iwl9560_2ac_cfg_soc)},
        {IWL_PCI_DEVICE(0xA370, 0x1210, iwl9260_2ac_cfg)},
+       {IWL_PCI_DEVICE(0xA370, 0x1551, iwl9560_killer_s_2ac_cfg_soc)},
+       {IWL_PCI_DEVICE(0xA370, 0x1552, iwl9560_killer_2ac_cfg_soc)},
        {IWL_PCI_DEVICE(0xA370, 0x2030, iwl9560_2ac_cfg_soc)},
        {IWL_PCI_DEVICE(0xA370, 0x2034, iwl9560_2ac_cfg_soc)},
        {IWL_PCI_DEVICE(0xA370, 0x4030, iwl9560_2ac_cfg_soc)},
index a57daec..9dd2ca6 100644 (file)
@@ -87,6 +87,7 @@ struct netfront_cb {
 /* IRQ name is queue name with "-tx" or "-rx" appended */
 #define IRQ_NAME_SIZE (QUEUE_NAME_SIZE + 3)
 
+static DECLARE_WAIT_QUEUE_HEAD(module_load_q);
 static DECLARE_WAIT_QUEUE_HEAD(module_unload_q);
 
 struct netfront_stats {
@@ -893,7 +894,6 @@ static RING_IDX xennet_fill_frags(struct netfront_queue *queue,
                                  struct sk_buff *skb,
                                  struct sk_buff_head *list)
 {
-       struct skb_shared_info *shinfo = skb_shinfo(skb);
        RING_IDX cons = queue->rx.rsp_cons;
        struct sk_buff *nskb;
 
@@ -902,15 +902,16 @@ static RING_IDX xennet_fill_frags(struct netfront_queue *queue,
                        RING_GET_RESPONSE(&queue->rx, ++cons);
                skb_frag_t *nfrag = &skb_shinfo(nskb)->frags[0];
 
-               if (shinfo->nr_frags == MAX_SKB_FRAGS) {
+               if (skb_shinfo(skb)->nr_frags == MAX_SKB_FRAGS) {
                        unsigned int pull_to = NETFRONT_SKB_CB(skb)->pull_to;
 
                        BUG_ON(pull_to <= skb_headlen(skb));
                        __pskb_pull_tail(skb, pull_to - skb_headlen(skb));
                }
-               BUG_ON(shinfo->nr_frags >= MAX_SKB_FRAGS);
+               BUG_ON(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS);
 
-               skb_add_rx_frag(skb, shinfo->nr_frags, skb_frag_page(nfrag),
+               skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
+                               skb_frag_page(nfrag),
                                rx->offset, rx->status, PAGE_SIZE);
 
                skb_shinfo(nskb)->nr_frags = 0;
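
A note on the hunk above: __pskb_pull_tail() can reallocate the skb's data area, so the skb_shared_info pointer that had been cached in shinfo could be left dangling once the pull ran. Re-deriving it through skb_shinfo(skb) after any potentially reallocating call is the safe pattern; a two-line sketch (delta is illustrative):

	__pskb_pull_tail(skb, delta);           /* may move the skb's data area */
	nr = skb_shinfo(skb)->nr_frags;         /* re-read; never reuse a cached pointer */
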
@@ -1330,6 +1331,11 @@ static struct net_device *xennet_create_dev(struct xenbus_device *dev)
        netif_carrier_off(netdev);
 
        xenbus_switch_state(dev, XenbusStateInitialising);
+       wait_event(module_load_q,
+                          xenbus_read_driver_state(dev->otherend) !=
+                          XenbusStateClosed &&
+                          xenbus_read_driver_state(dev->otherend) !=
+                          XenbusStateUnknown);
        return netdev;
 
  exit:
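
xennet_create_dev() now parks on module_load_q until the backend leaves the Closed/Unknown states, so device creation does not complete while the backend is still absent. A minimal sketch of the wait-queue pattern, with illustrative names (my_wq, my_ready); the wake_up() that pairs with module_load_q lives in the backend state-change handler, outside this hunk:

	#include <linux/wait.h>

	static DECLARE_WAIT_QUEUE_HEAD(my_wq);
	static bool my_ready;

	static void signal_ready(void)
	{
		my_ready = true;
		wake_up(&my_wq);             /* sleepers wake and re-test the condition */
	}

	static void wait_until_ready(void)
	{
		wait_event(my_wq, my_ready); /* uninterruptible sleep until true */
	}
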
index a59b6c4..ad3d17c 100644 (file)
@@ -5,6 +5,7 @@
 // Copyright (C) 2017 Finn Thain
 
 #include <linux/device.h>
+#include <linux/dma-mapping.h>
 #include <linux/list.h>
 #include <linux/nubus.h>
 #include <linux/seq_file.h>
@@ -93,6 +94,8 @@ int nubus_device_register(struct nubus_board *board)
        board->dev.release = nubus_device_release;
        board->dev.bus = &nubus_bus_type;
        dev_set_name(&board->dev, "slot.%X", board->slot);
+       board->dev.dma_mask = &board->dev.coherent_dma_mask;
+       dma_set_mask(&board->dev, DMA_BIT_MASK(32));
        return device_register(&board->dev);
 }
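
The nubus hunk points dev.dma_mask at the device's own coherent_dma_mask before setting a 32-bit mask; dma_set_mask() dereferences dev->dma_mask and fails with -EIO when it is NULL, so without this a NuBus board's device cannot use the DMA API at all. The same setup for a generic struct device, as a sketch (dma_set_mask_and_coherent() is the usual single-call form):

	#include <linux/device.h>
	#include <linux/dma-mapping.h>

	static int setup_dma(struct device *dev)
	{
		/* dma_set_mask() requires a non-NULL dev->dma_mask pointer */
		dev->dma_mask = &dev->coherent_dma_mask;
		return dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
	}
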
 
index 903eb45..f7efe5a 100644 (file)
@@ -539,14 +539,18 @@ static struct nvmf_transport_ops *nvmf_lookup_transport(
 /*
  * For commands we're not in a state to send to the device, the default action
  * is to busy them and retry after the controller state recovers.  However,
- * anything marked for failfast or nvme multipath is immediately failed.
+ * if the controller is deleting, or anything is marked for failfast or
+ * NVMe multipath, the command is failed immediately.
  *
  * Note: commands used to initialize the controller will be marked for failfast.
  * Note: nvme cli/ioctl commands are marked for failfast.
  */
-blk_status_t nvmf_fail_nonready_command(struct request *rq)
+blk_status_t nvmf_fail_nonready_command(struct nvme_ctrl *ctrl,
+               struct request *rq)
 {
-       if (!blk_noretry_request(rq) && !(rq->cmd_flags & REQ_NVME_MPATH))
+       if (ctrl->state != NVME_CTRL_DELETING &&
+           ctrl->state != NVME_CTRL_DEAD &&
+           !blk_noretry_request(rq) && !(rq->cmd_flags & REQ_NVME_MPATH))
                return BLK_STS_RESOURCE;
        nvme_req(rq)->status = NVME_SC_ABORT_REQ;
        return BLK_STS_IOERR;
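
Threading the controller into nvmf_fail_nonready_command() lets the fabrics layer fail commands outright once teardown has begun, rather than returning BLK_STS_RESOURCE and requeueing against a controller that will never recover. The decision reduces to something like this sketch (should_requeue() and the simplified state enum are illustrative, not kernel symbols):

	enum ctrl_state { CTRL_LIVE, CTRL_DELETING, CTRL_DEAD };

	static bool should_requeue(enum ctrl_state st, bool noretry, bool mpath)
	{
		/* requeue only while recovery is still possible and allowed */
		return st != CTRL_DELETING && st != CTRL_DEAD &&
		       !noretry && !mpath;
	}
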
index e1818a2..aa2fdb2 100644 (file)
@@ -162,7 +162,8 @@ void nvmf_unregister_transport(struct nvmf_transport_ops *ops);
 void nvmf_free_options(struct nvmf_ctrl_options *opts);
 int nvmf_get_address(struct nvme_ctrl *ctrl, char *buf, int size);
 bool nvmf_should_reconnect(struct nvme_ctrl *ctrl);
-blk_status_t nvmf_fail_nonready_command(struct request *rq);
+blk_status_t nvmf_fail_nonready_command(struct nvme_ctrl *ctrl,
+               struct request *rq);
 bool __nvmf_check_ready(struct nvme_ctrl *ctrl, struct request *rq,
                bool queue_live);
 
index 41d45a1..9bac912 100644 (file)
@@ -2272,7 +2272,7 @@ nvme_fc_queue_rq(struct blk_mq_hw_ctx *hctx,
 
        if (ctrl->rport->remoteport.port_state != FC_OBJSTATE_ONLINE ||
            !nvmf_check_ready(&queue->ctrl->ctrl, rq, queue_ready))
-               return nvmf_fail_nonready_command(rq);
+               return nvmf_fail_nonready_command(&queue->ctrl->ctrl, rq);
 
        ret = nvme_setup_cmd(ns, rq, sqe);
        if (ret)
index 518c5b0..66ec598 100644 (file)
@@ -1639,7 +1639,7 @@ static blk_status_t nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx,
        WARN_ON_ONCE(rq->tag < 0);
 
        if (!nvmf_check_ready(&queue->ctrl->ctrl, rq, queue_ready))
-               return nvmf_fail_nonready_command(rq);
+               return nvmf_fail_nonready_command(&queue->ctrl->ctrl, rq);
 
        dev = queue->device->dev;
        ib_dma_sync_single_for_cpu(dev, sqe->dma,
index d3f3b3e..ebea137 100644 (file)
@@ -282,6 +282,7 @@ static ssize_t nvmet_ns_device_path_store(struct config_item *item,
 {
        struct nvmet_ns *ns = to_nvmet_ns(item);
        struct nvmet_subsys *subsys = ns->subsys;
+       size_t len;
        int ret;
 
        mutex_lock(&subsys->lock);
@@ -289,10 +290,14 @@ static ssize_t nvmet_ns_device_path_store(struct config_item *item,
        if (ns->enabled)
                goto out_unlock;
 
-       kfree(ns->device_path);
+       ret = -EINVAL;
+       len = strcspn(page, "\n");
+       if (!len)
+               goto out_unlock;
 
+       kfree(ns->device_path);
        ret = -ENOMEM;
-       ns->device_path = kstrndup(page, strcspn(page, "\n"), GFP_KERNEL);
+       ns->device_path = kstrndup(page, len, GFP_KERNEL);
        if (!ns->device_path)
                goto out_unlock;
 
index 74d4b78..9838103 100644 (file)
@@ -339,7 +339,7 @@ int nvmet_ns_enable(struct nvmet_ns *ns)
                goto out_unlock;
 
        ret = nvmet_bdev_ns_enable(ns);
-       if (ret)
+       if (ret == -ENOTBLK)
                ret = nvmet_file_ns_enable(ns);
        if (ret)
                goto out_unlock;
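
Restricting the fallback to -ENOTBLK means the file backend is tried only when the path genuinely is not a block device; real bdev failures (missing device, permissions) now propagate instead of being masked by a second open attempt. The chain, as a generic sketch (try_block/try_file are placeholder callbacks):

	#include <linux/errno.h>

	static int enable_backend(const char *path,
				  int (*try_block)(const char *),
				  int (*try_file)(const char *))
	{
		int ret = try_block(path);

		if (ret == -ENOTBLK)    /* only "not a block device" falls through */
			ret = try_file(path);
		return ret;
	}
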
index 408279c..29b4b23 100644 (file)
@@ -58,8 +58,8 @@ struct nvmet_fc_ls_iod {
        struct work_struct              work;
 } __aligned(sizeof(unsigned long long));
 
+/* desired maximum for a single sequence - if sg list allows it */
 #define NVMET_FC_MAX_SEQ_LENGTH                (256 * 1024)
-#define NVMET_FC_MAX_XFR_SGENTS                (NVMET_FC_MAX_SEQ_LENGTH / PAGE_SIZE)
 
 enum nvmet_fcp_datadir {
        NVMET_FCP_NODATA,
@@ -74,6 +74,7 @@ struct nvmet_fc_fcp_iod {
        struct nvme_fc_cmd_iu           cmdiubuf;
        struct nvme_fc_ersp_iu          rspiubuf;
        dma_addr_t                      rspdma;
+       struct scatterlist              *next_sg;
        struct scatterlist              *data_sg;
        int                             data_sg_cnt;
        u32                             offset;
@@ -1025,8 +1026,7 @@ nvmet_fc_register_targetport(struct nvmet_fc_port_info *pinfo,
        INIT_LIST_HEAD(&newrec->assoc_list);
        kref_init(&newrec->ref);
        ida_init(&newrec->assoc_cnt);
-       newrec->max_sg_cnt = min_t(u32, NVMET_FC_MAX_XFR_SGENTS,
-                                       template->max_sgl_segments);
+       newrec->max_sg_cnt = template->max_sgl_segments;
 
        ret = nvmet_fc_alloc_ls_iodlist(newrec);
        if (ret) {
@@ -1722,6 +1722,7 @@ nvmet_fc_alloc_tgt_pgs(struct nvmet_fc_fcp_iod *fod)
                                ((fod->io_dir == NVMET_FCP_WRITE) ?
                                        DMA_FROM_DEVICE : DMA_TO_DEVICE));
                                /* note: write from initiator perspective */
+       fod->next_sg = fod->data_sg;
 
        return 0;
 
@@ -1866,24 +1867,49 @@ nvmet_fc_transfer_fcp_data(struct nvmet_fc_tgtport *tgtport,
                                struct nvmet_fc_fcp_iod *fod, u8 op)
 {
        struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq;
+       struct scatterlist *sg = fod->next_sg;
        unsigned long flags;
-       u32 tlen;
+       u32 remaininglen = fod->req.transfer_len - fod->offset;
+       u32 tlen = 0;
        int ret;
 
        fcpreq->op = op;
        fcpreq->offset = fod->offset;
        fcpreq->timeout = NVME_FC_TGTOP_TIMEOUT_SEC;
 
-       tlen = min_t(u32, tgtport->max_sg_cnt * PAGE_SIZE,
-                       (fod->req.transfer_len - fod->offset));
+       /*
+        * For the next sequence:
+        *  break at an sg element boundary, and
+        *  try to cap the sequence length at NVMET_FC_MAX_SEQ_LENGTH,
+        *    but allow a sequence to run longer when a single sg
+        *    element is larger than that amount. This avoids having
+        *    to create a new sg list for the tgtport API.
+        */
+       fcpreq->sg = sg;
+       fcpreq->sg_cnt = 0;
+       while (tlen < remaininglen &&
+              fcpreq->sg_cnt < tgtport->max_sg_cnt &&
+              tlen + sg_dma_len(sg) < NVMET_FC_MAX_SEQ_LENGTH) {
+               fcpreq->sg_cnt++;
+               tlen += sg_dma_len(sg);
+               sg = sg_next(sg);
+       }
+       if (tlen < remaininglen && fcpreq->sg_cnt == 0) {
+               fcpreq->sg_cnt++;
+               tlen += min_t(u32, sg_dma_len(sg), remaininglen);
+               sg = sg_next(sg);
+       }
+       if (tlen < remaininglen)
+               fod->next_sg = sg;
+       else
+               fod->next_sg = NULL;
+
        fcpreq->transfer_length = tlen;
        fcpreq->transferred_length = 0;
        fcpreq->fcp_error = 0;
        fcpreq->rsplen = 0;
 
-       fcpreq->sg = &fod->data_sg[fod->offset / PAGE_SIZE];
-       fcpreq->sg_cnt = DIV_ROUND_UP(tlen, PAGE_SIZE);
-
        /*
         * If the last READDATA request: check if LLDD supports
         * combined xfr with response.
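
The rewritten transfer loop replaces the old fixed PAGE_SIZE arithmetic with a walk over the DMA-mapped scatterlist itself, which stays correct when elements are not page-sized. A simplified sketch of the same walk (the fod->next_sg cursor bookkeeping is omitted):

	#include <linux/kernel.h>
	#include <linux/scatterlist.h>

	static u32 build_sequence(struct scatterlist *sg, u32 remaining,
				  u32 max_ents, u32 max_len, u32 *nents)
	{
		u32 tlen = 0;

		*nents = 0;
		/* whole elements only, bounded by length, element and cap budgets */
		while (sg && tlen < remaining && *nents < max_ents &&
		       tlen + sg_dma_len(sg) < max_len) {
			tlen += sg_dma_len(sg);
			(*nents)++;
			sg = sg_next(sg);
		}
		/* a single oversized element still goes out as one long sequence */
		if (sg && tlen < remaining && *nents == 0) {
			tlen += min_t(u32, sg_dma_len(sg), remaining);
			(*nents)++;
		}
		return tlen;
	}
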
index d8d91f0..ae7586b 100644 (file)
@@ -162,7 +162,7 @@ static blk_status_t nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx,
        blk_status_t ret;
 
        if (!nvmf_check_ready(&queue->ctrl->ctrl, req, queue_ready))
-               return nvmf_fail_nonready_command(req);
+               return nvmf_fail_nonready_command(&queue->ctrl->ctrl, req);
 
        ret = nvme_setup_cmd(ns, req, &iod->cmd);
        if (ret)
index 35b7fc8..5cb40b2 100644 (file)
@@ -330,7 +330,7 @@ void pci_bus_add_device(struct pci_dev *dev)
                return;
        }
 
-       dev->is_added = 1;
+       pci_dev_assign_added(dev, true);
 }
 EXPORT_SYMBOL_GPL(pci_bus_add_device);
 
@@ -347,14 +347,14 @@ void pci_bus_add_devices(const struct pci_bus *bus)
 
        list_for_each_entry(dev, &bus->devices, bus_list) {
                /* Skip already-added devices */
-               if (dev->is_added)
+               if (pci_dev_is_added(dev))
                        continue;
                pci_bus_add_device(dev);
        }
 
        list_for_each_entry(dev, &bus->devices, bus_list) {
                /* Skip if device attach failed */
-               if (!dev->is_added)
+               if (!pci_dev_is_added(dev))
                        continue;
                child = dev->subordinate;
                if (child)
index 4d6c20e..cf0aa7c 100644 (file)
 #define CFG_WINDOW_TYPE        0
 #define IO_WINDOW_TYPE         1
 #define MEM_WINDOW_TYPE        2
-#define IB_WIN_SIZE            (256 * 1024 * 1024 * 1024)
+#define IB_WIN_SIZE            ((u64)256 * 1024 * 1024 * 1024)
 #define MAX_PIO_WINDOWS        8
 
 /* Parameters for the waiting for link up routine */
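
The IB_WIN_SIZE change fixes an integer-promotion overflow: every operand in 256 * 1024 * 1024 * 1024 has type int, so the product (2^38) wraps in 32-bit arithmetic before any widening can occur, while casting the first operand to u64 forces the whole chain into 64 bits. A standalone illustration (the first initializer deliberately reproduces the bug and is undefined behaviour; it typically yields 0):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint64_t bad  = 256 * 1024 * 1024 * 1024;           /* int overflow */
		uint64_t good = (uint64_t)256 * 1024 * 1024 * 1024; /* 274877906944 */

		printf("bad=%llu good=%llu\n",
		       (unsigned long long)bad, (unsigned long long)good);
		return 0;
	}
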
index 3a17b29..ef0b1b6 100644 (file)
@@ -509,7 +509,7 @@ static void enable_slot(struct acpiphp_slot *slot)
 
        list_for_each_entry(dev, &bus->devices, bus_list) {
                /* Assume that newly added devices are powered on already. */
-               if (!dev->is_added)
+               if (!pci_dev_is_added(dev))
                        dev->current_state = PCI_D0;
        }
 
index 882f1f9..0881725 100644 (file)
@@ -288,6 +288,7 @@ struct pci_sriov {
 
 /* pci_dev priv_flags */
 #define PCI_DEV_DISCONNECTED 0
+#define PCI_DEV_ADDED 1
 
 static inline int pci_dev_set_disconnected(struct pci_dev *dev, void *unused)
 {
@@ -300,6 +301,16 @@ static inline bool pci_dev_is_disconnected(const struct pci_dev *dev)
        return test_bit(PCI_DEV_DISCONNECTED, &dev->priv_flags);
 }
 
+static inline void pci_dev_assign_added(struct pci_dev *dev, bool added)
+{
+       assign_bit(PCI_DEV_ADDED, &dev->priv_flags, added);
+}
+
+static inline bool pci_dev_is_added(const struct pci_dev *dev)
+{
+       return test_bit(PCI_DEV_ADDED, &dev->priv_flags);
+}
+
 #ifdef CONFIG_PCI_ATS
 void pci_restore_ats_state(struct pci_dev *dev);
 #else
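
Moving is_added out of the struct pci_dev bitfields and into priv_flags matters because bitfield updates are read-modify-write on the containing word and can race with concurrent writes to neighbouring bitfields, whereas test_bit()/assign_bit() on an unsigned long are atomic. The accessor pattern in isolation, with illustrative names:

	#include <linux/bitops.h>

	#define MY_ADDED	1	/* bit number within the flags word */

	struct my_dev {
		unsigned long priv_flags;
	};

	static inline void my_dev_assign_added(struct my_dev *d, bool added)
	{
		assign_bit(MY_ADDED, &d->priv_flags, added);
	}

	static inline bool my_dev_is_added(const struct my_dev *d)
	{
		return test_bit(MY_ADDED, &d->priv_flags);
	}
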
index f7ce0cb..f02e334 100644 (file)
@@ -295,6 +295,7 @@ void pcie_do_fatal_recovery(struct pci_dev *dev, u32 service)
 
        parent = udev->subordinate;
        pci_lock_rescan_remove();
+       pci_dev_get(dev);
        list_for_each_entry_safe_reverse(pdev, temp, &parent->devices,
                                         bus_list) {
                pci_dev_get(pdev);
@@ -328,6 +329,7 @@ void pcie_do_fatal_recovery(struct pci_dev *dev, u32 service)
                pci_info(dev, "Device recovery from fatal error failed\n");
        }
 
+       pci_dev_put(dev);
        pci_unlock_rescan_remove();
 }
 
index ac876e3..611adcd 100644 (file)
@@ -2433,13 +2433,13 @@ int pci_scan_slot(struct pci_bus *bus, int devfn)
        dev = pci_scan_single_device(bus, devfn);
        if (!dev)
                return 0;
-       if (!dev->is_added)
+       if (!pci_dev_is_added(dev))
                nr++;
 
        for (fn = next_fn(bus, dev, 0); fn > 0; fn = next_fn(bus, dev, fn)) {
                dev = pci_scan_single_device(bus, devfn + fn);
                if (dev) {
-                       if (!dev->is_added)
+                       if (!pci_dev_is_added(dev))
                                nr++;
                        dev->multifunction = 1;
                }
index 6f072ea..5e3d0dc 100644 (file)
@@ -19,11 +19,12 @@ static void pci_stop_dev(struct pci_dev *dev)
 {
        pci_pme_active(dev, false);
 
-       if (dev->is_added) {
+       if (pci_dev_is_added(dev)) {
                device_release_driver(&dev->dev);
                pci_proc_detach_device(dev);
                pci_remove_sysfs_dev_files(dev);
-               dev->is_added = 0;
+
+               pci_dev_assign_added(dev, false);
        }
 
        if (dev->bus->self)
index 1b7febc..29d2c3b 100644 (file)
@@ -962,6 +962,10 @@ void brcm_usb_init_xhci(struct brcm_usb_init_params *params)
 {
        void __iomem *ctrl = params->ctrl_regs;
 
+       USB_CTRL_UNSET(ctrl, USB30_PCTL, PHY3_IDDQ_OVERRIDE);
+       /* 1 millisecond - for USB clocks to settle down */
+       usleep_range(1000, 2000);
+
        if (BRCM_ID(params->family_id) == 0x7366) {
                /*
                 * The PHY3_SOFT_RESETB bits default to the wrong state.
index 23705e1..0075fb0 100644 (file)
@@ -182,13 +182,13 @@ static void phy_mdm6600_status(struct work_struct *work)
        ddata = container_of(work, struct phy_mdm6600, status_work.work);
        dev = ddata->dev;
 
-       error = gpiod_get_array_value_cansleep(PHY_MDM6600_NR_CMD_LINES,
+       error = gpiod_get_array_value_cansleep(PHY_MDM6600_NR_STATUS_LINES,
                                               ddata->status_gpios->desc,
                                               values);
        if (error)
                return;
 
-       for (i = 0; i < PHY_MDM6600_NR_CMD_LINES; i++) {
+       for (i = 0; i < PHY_MDM6600_NR_STATUS_LINES; i++) {
                val |= values[i] << i;
                dev_dbg(ddata->dev, "XXX %s: i: %i values[i]: %i val: %i\n",
                        __func__, i, values[i], val);
index ea23c8d..ffec695 100644 (file)
@@ -754,9 +754,9 @@ int fcoe_ctlr_els_send(struct fcoe_ctlr *fip, struct fc_lport *lport,
        case ELS_LOGO:
                if (fip->mode == FIP_MODE_VN2VN) {
                        if (fip->state != FIP_ST_VNMP_UP)
-                               return -EINVAL;
+                               goto drop;
                        if (ntoh24(fh->fh_d_id) == FC_FID_FLOGI)
-                               return -EINVAL;
+                               goto drop;
                } else {
                        if (fip->state != FIP_ST_ENABLED)
                                return 0;
@@ -799,9 +799,9 @@ int fcoe_ctlr_els_send(struct fcoe_ctlr *fip, struct fc_lport *lport,
        fip->send(fip, skb);
        return -EINPROGRESS;
 drop:
-       kfree_skb(skb);
        LIBFCOE_FIP_DBG(fip, "drop els_send op %u d_id %x\n",
                        op, ntoh24(fh->fh_d_id));
+       kfree_skb(skb);
        return -EINVAL;
 }
 EXPORT_SYMBOL(fcoe_ctlr_els_send);
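
The drop-path reorder above is a use-after-free fix: the debug line dereferences fh, which points into the skb's data, so the skb must not be freed until after the message is formatted. Generic shape of the fix:

	#include <linux/printk.h>
	#include <linux/skbuff.h>

	static int drop_frame(struct sk_buff *skb)
	{
		/* read everything needed from the skb before releasing it */
		pr_debug("dropping frame, len=%u\n", skb->len);
		kfree_skb(skb);
		return -EINVAL;
	}
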
index 31d31aa..89b1f1a 100644 (file)
@@ -2164,6 +2164,7 @@ static void fc_rport_recv_logo_req(struct fc_lport *lport, struct fc_frame *fp)
                FC_RPORT_DBG(rdata, "Received LOGO request while in state %s\n",
                             fc_rport_state(rdata));
 
+               rdata->flags &= ~FC_RP_STARTED;
                fc_rport_enter_delete(rdata, RPORT_EV_STOP);
                mutex_unlock(&rdata->rp_mutex);
                kref_put(&rdata->kref, fc_rport_destroy);
index d609383..c972cc2 100644 (file)
@@ -284,11 +284,11 @@ static int iscsi_check_tmf_restrictions(struct iscsi_task *task, int opcode)
                 */
                if (opcode != ISCSI_OP_SCSI_DATA_OUT) {
                        iscsi_conn_printk(KERN_INFO, conn,
-                                         "task [op %x/%x itt "
+                                         "task [op %x itt "
                                          "0x%x/0x%x] "
                                          "rejected.\n",
-                                         task->hdr->opcode, opcode,
-                                         task->itt, task->hdr_itt);
+                                         opcode, task->itt,
+                                         task->hdr_itt);
                        return -EACCES;
                }
                /*
@@ -297,10 +297,10 @@ static int iscsi_check_tmf_restrictions(struct iscsi_task *task, int opcode)
                 */
                if (conn->session->fast_abort) {
                        iscsi_conn_printk(KERN_INFO, conn,
-                                         "task [op %x/%x itt "
+                                         "task [op %x itt "
                                          "0x%x/0x%x] fast abort.\n",
-                                         task->hdr->opcode, opcode,
-                                         task->itt, task->hdr_itt);
+                                         opcode, task->itt,
+                                         task->hdr_itt);
                        return -EACCES;
                }
                break;
index 569392d..e44c91e 100644 (file)
@@ -3343,11 +3343,10 @@ _base_mpi_ep_writeq(__u64 b, volatile void __iomem *addr,
                                        spinlock_t *writeq_lock)
 {
        unsigned long flags;
-       __u64 data_out = b;
 
        spin_lock_irqsave(writeq_lock, flags);
-       writel((u32)(data_out), addr);
-       writel((u32)(data_out >> 32), (addr + 4));
+       __raw_writel((u32)(b), addr);
+       __raw_writel((u32)(b >> 32), (addr + 4));
        mmiowb();
        spin_unlock_irqrestore(writeq_lock, flags);
 }
@@ -3367,7 +3366,8 @@ _base_mpi_ep_writeq(__u64 b, volatile void __iomem *addr,
 static inline void
 _base_writeq(__u64 b, volatile void __iomem *addr, spinlock_t *writeq_lock)
 {
-       writeq(b, addr);
+       __raw_writeq(b, addr);
+       mmiowb();
 }
 #else
 static inline void
@@ -5268,7 +5268,7 @@ _base_handshake_req_reply_wait(struct MPT3SAS_ADAPTER *ioc, int request_bytes,
 
        /* send message 32-bits at a time */
        for (i = 0, failed = 0; i < request_bytes/4 && !failed; i++) {
-               writel((u32)(request[i]), &ioc->chip->Doorbell);
+               writel(cpu_to_le32(request[i]), &ioc->chip->Doorbell);
                if ((_base_wait_for_doorbell_ack(ioc, 5)))
                        failed = 1;
        }
@@ -5289,7 +5289,7 @@ _base_handshake_req_reply_wait(struct MPT3SAS_ADAPTER *ioc, int request_bytes,
        }
 
        /* read the first two 16-bits, it gives the total length of the reply */
-       reply[0] = (u16)(readl(&ioc->chip->Doorbell)
+       reply[0] = le16_to_cpu(readl(&ioc->chip->Doorbell)
            & MPI2_DOORBELL_DATA_MASK);
        writel(0, &ioc->chip->HostInterruptStatus);
        if ((_base_wait_for_doorbell_int(ioc, 5))) {
@@ -5298,7 +5298,7 @@ _base_handshake_req_reply_wait(struct MPT3SAS_ADAPTER *ioc, int request_bytes,
                        ioc->name, __LINE__);
                return -EFAULT;
        }
-       reply[1] = (u16)(readl(&ioc->chip->Doorbell)
+       reply[1] = le16_to_cpu(readl(&ioc->chip->Doorbell)
            & MPI2_DOORBELL_DATA_MASK);
        writel(0, &ioc->chip->HostInterruptStatus);
 
@@ -5312,7 +5312,7 @@ _base_handshake_req_reply_wait(struct MPT3SAS_ADAPTER *ioc, int request_bytes,
                if (i >=  reply_bytes/2) /* overflow case */
                        readl(&ioc->chip->Doorbell);
                else
-                       reply[i] = (u16)(readl(&ioc->chip->Doorbell)
+                       reply[i] = le16_to_cpu(readl(&ioc->chip->Doorbell)
                            & MPI2_DOORBELL_DATA_MASK);
                writel(0, &ioc->chip->HostInterruptStatus);
        }
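
These mpt3sas hunks switch the doorbell path to the __raw_* MMIO accessors, which perform no byte swapping, and compensate by converting the payload explicitly with cpu_to_le32()/le16_to_cpu(); the added mmiowb() keeps the writes ordered ahead of the spinlock release. A sketch of the split 64-bit write (the lock, held by the caller in the driver, is omitted):

	#include <linux/io.h>
	#include <linux/types.h>

	static void doorbell_writeq(u64 v, void __iomem *addr)
	{
		__raw_writel((u32)v, addr);             /* low 32 bits */
		__raw_writel((u32)(v >> 32), addr + 4); /* high 32 bits */
		mmiowb();                               /* order MMIO before unlock */
	}
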
index 091ec12..cff83b9 100644 (file)
@@ -888,7 +888,7 @@ static void qedi_get_boot_tgt_info(struct nvm_iscsi_block *block,
        ipv6_en = !!(block->generic.ctrl_flags &
                     NVM_ISCSI_CFG_GEN_IPV6_ENABLED);
 
-       snprintf(tgt->iscsi_name, NVM_ISCSI_CFG_ISCSI_NAME_MAX_LEN, "%s\n",
+       snprintf(tgt->iscsi_name, sizeof(tgt->iscsi_name), "%s\n",
                 block->target[index].target_name.byte);
 
        tgt->ipv6_en = ipv6_en;
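
Using sizeof(tgt->iscsi_name) rather than a named constant ties the snprintf() bound to the destination array itself, so the limit can never drift from the buffer's real size. The idiom, standalone:

	#include <stdio.h>

	struct target {
		char name[64];
	};

	static void set_name(struct target *t, const char *src)
	{
		snprintf(t->name, sizeof(t->name), "%s", src);
	}
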
index 89a4999..c873156 100644 (file)
@@ -2141,6 +2141,7 @@ qla24xx_vport_delete(struct fc_vport *fc_vport)
                msleep(1000);
 
        qla24xx_disable_vp(vha);
+       qla2x00_wait_for_sess_deletion(vha);
 
        vha->flags.delete_progress = 1;
 
index f68eb60..2660a48 100644 (file)
@@ -214,6 +214,7 @@ void qla2x00_handle_login_done_event(struct scsi_qla_host *, fc_port_t *,
 int qla24xx_post_gnl_work(struct scsi_qla_host *, fc_port_t *);
 int qla24xx_async_abort_cmd(srb_t *);
 int qla24xx_post_relogin_work(struct scsi_qla_host *vha);
+void qla2x00_wait_for_sess_deletion(scsi_qla_host_t *);
 
 /*
  * Global Functions in qla_mid.c source file.
index 2c35b0b..7a37440 100644 (file)
@@ -3708,6 +3708,10 @@ int qla24xx_async_gpnid(scsi_qla_host_t *vha, port_id_t *id)
        return rval;
 
 done_free_sp:
+       spin_lock_irqsave(&vha->hw->vport_slock, flags);
+       list_del(&sp->elem);
+       spin_unlock_irqrestore(&vha->hw->vport_slock, flags);
+
        if (sp->u.iocb_cmd.u.ctarg.req) {
                dma_free_coherent(&vha->hw->pdev->dev,
                        sizeof(struct ct_sns_pkt),
index db0e327..1b19b95 100644 (file)
@@ -1489,11 +1489,10 @@ qla2x00_async_tm_cmd(fc_port_t *fcport, uint32_t flags, uint32_t lun,
 
        wait_for_completion(&tm_iocb->u.tmf.comp);
 
-       rval = tm_iocb->u.tmf.comp_status == CS_COMPLETE ?
-           QLA_SUCCESS : QLA_FUNCTION_FAILED;
+       rval = tm_iocb->u.tmf.data;
 
-       if ((rval != QLA_SUCCESS) || tm_iocb->u.tmf.data) {
-               ql_dbg(ql_dbg_taskm, vha, 0x8030,
+       if (rval != QLA_SUCCESS) {
+               ql_log(ql_log_warn, vha, 0x8030,
                    "TM IOCB failed (%x).\n", rval);
        }
 
index 37ae0f6..59fd5a9 100644 (file)
@@ -222,6 +222,8 @@ qla2xxx_get_qpair_sp(struct qla_qpair *qpair, fc_port_t *fcport, gfp_t flag)
        sp->fcport = fcport;
        sp->iocbs = 1;
        sp->vha = qpair->vha;
+       INIT_LIST_HEAD(&sp->elem);
+
 done:
        if (!sp)
                QLA_QPAIR_MARK_NOT_BUSY(qpair);
index a91cca5..dd93a22 100644 (file)
@@ -2130,34 +2130,11 @@ __qla2x00_alloc_iocbs(struct qla_qpair *qpair, srb_t *sp)
        req_cnt = 1;
        handle = 0;
 
-       if (!sp)
-               goto skip_cmd_array;
-
-       /* Check for room in outstanding command list. */
-       handle = req->current_outstanding_cmd;
-       for (index = 1; index < req->num_outstanding_cmds; index++) {
-               handle++;
-               if (handle == req->num_outstanding_cmds)
-                       handle = 1;
-               if (!req->outstanding_cmds[handle])
-                       break;
-       }
-       if (index == req->num_outstanding_cmds) {
-               ql_log(ql_log_warn, vha, 0x700b,
-                   "No room on outstanding cmd array.\n");
-               goto queuing_error;
-       }
-
-       /* Prep command array. */
-       req->current_outstanding_cmd = handle;
-       req->outstanding_cmds[handle] = sp;
-       sp->handle = handle;
-
-       /* Adjust entry-counts as needed. */
-       if (sp->type != SRB_SCSI_CMD)
+       if (sp && (sp->type != SRB_SCSI_CMD)) {
+               /* Adjust entry-counts as needed. */
                req_cnt = sp->iocbs;
+       }
 
-skip_cmd_array:
        /* Check for room on request queue. */
        if (req->cnt < req_cnt + 2) {
                if (qpair->use_shadow_reg)
@@ -2183,6 +2160,28 @@ skip_cmd_array:
        if (req->cnt < req_cnt + 2)
                goto queuing_error;
 
+       if (sp) {
+               /* Check for room in outstanding command list. */
+               handle = req->current_outstanding_cmd;
+               for (index = 1; index < req->num_outstanding_cmds; index++) {
+                       handle++;
+                       if (handle == req->num_outstanding_cmds)
+                               handle = 1;
+                       if (!req->outstanding_cmds[handle])
+                               break;
+               }
+               if (index == req->num_outstanding_cmds) {
+                       ql_log(ql_log_warn, vha, 0x700b,
+                           "No room on outstanding cmd array.\n");
+                       goto queuing_error;
+               }
+
+               /* Prep command array. */
+               req->current_outstanding_cmd = handle;
+               req->outstanding_cmds[handle] = sp;
+               sp->handle = handle;
+       }
+
        /* Prep packet */
        req->cnt -= req_cnt;
        pkt = req->ring_ptr;
@@ -2195,6 +2194,8 @@ skip_cmd_array:
                pkt->handle = handle;
        }
 
+       return pkt;
+
 queuing_error:
        qpair->tgt_counters.num_alloc_iocb_failed++;
        return pkt;
index 9fa5a25..7756106 100644 (file)
@@ -631,6 +631,9 @@ qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb)
        unsigned long   flags;
        fc_port_t       *fcport = NULL;
 
+       if (!vha->hw->flags.fw_started)
+               return;
+
        /* Setup to process RIO completion. */
        handle_cnt = 0;
        if (IS_CNA_CAPABLE(ha))
index 7e875f5..f0ec13d 100644 (file)
@@ -4220,6 +4220,9 @@ qla25xx_init_req_que(struct scsi_qla_host *vha, struct req_que *req)
        mbx_cmd_t *mcp = &mc;
        struct qla_hw_data *ha = vha->hw;
 
+       if (!ha->flags.fw_started)
+               return QLA_SUCCESS;
+
        ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d3,
            "Entered %s.\n", __func__);
 
@@ -4289,6 +4292,9 @@ qla25xx_init_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
        mbx_cmd_t *mcp = &mc;
        struct qla_hw_data *ha = vha->hw;
 
+       if (!ha->flags.fw_started)
+               return QLA_SUCCESS;
+
        ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d6,
            "Entered %s.\n", __func__);
 
index f6f0a75..aa727d0 100644 (file)
@@ -152,11 +152,18 @@ int
 qla24xx_disable_vp(scsi_qla_host_t *vha)
 {
        unsigned long flags;
-       int ret;
+       int ret = QLA_SUCCESS;
+       fc_port_t *fcport;
+
+       if (vha->hw->flags.fw_started)
+               ret = qla24xx_control_vp(vha, VCE_COMMAND_DISABLE_VPS_LOGO_ALL);
 
-       ret = qla24xx_control_vp(vha, VCE_COMMAND_DISABLE_VPS_LOGO_ALL);
        atomic_set(&vha->loop_state, LOOP_DOWN);
        atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
+       list_for_each_entry(fcport, &vha->vp_fcports, list)
+               fcport->logout_on_delete = 0;
+
+       qla2x00_mark_all_devices_lost(vha, 0);
 
        /* Remove port id from vp target map */
        spin_lock_irqsave(&vha->hw->hardware_lock, flags);
index 9f309e5..1fbd16c 100644 (file)
@@ -303,6 +303,7 @@ static void qla2x00_free_device(scsi_qla_host_t *);
 static int qla2xxx_map_queues(struct Scsi_Host *shost);
 static void qla2x00_destroy_deferred_work(struct qla_hw_data *);
 
+
 struct scsi_host_template qla2xxx_driver_template = {
        .module                 = THIS_MODULE,
        .name                   = QLA2XXX_DRIVER_NAME,
@@ -1147,7 +1148,7 @@ static inline int test_fcport_count(scsi_qla_host_t *vha)
 * qla2x00_wait_for_sess_deletion can only be called during unload.
 * It relies on the UNLOADING flag to stop device discovery.
  */
-static void
+void
 qla2x00_wait_for_sess_deletion(scsi_qla_host_t *vha)
 {
        qla2x00_mark_all_devices_lost(vha, 0);
@@ -3603,6 +3604,8 @@ qla2x00_remove_one(struct pci_dev *pdev)
 
        base_vha = pci_get_drvdata(pdev);
        ha = base_vha->hw;
+       ql_log(ql_log_info, base_vha, 0xb079,
+           "Removing driver\n");
 
        /* Indicate device removal to prevent future board_disable and wait
         * until any pending board_disable has completed. */
@@ -3625,6 +3628,21 @@ qla2x00_remove_one(struct pci_dev *pdev)
        }
        qla2x00_wait_for_hba_ready(base_vha);
 
+       if (IS_QLA25XX(ha) || IS_QLA2031(ha) || IS_QLA27XX(ha)) {
+               if (ha->flags.fw_started)
+                       qla2x00_abort_isp_cleanup(base_vha);
+       } else if (!IS_QLAFX00(ha)) {
+               if (IS_QLA8031(ha)) {
+                       ql_dbg(ql_dbg_p3p, base_vha, 0xb07e,
+                           "Clearing fcoe driver presence.\n");
+                       if (qla83xx_clear_drv_presence(base_vha) != QLA_SUCCESS)
+                               ql_dbg(ql_dbg_p3p, base_vha, 0xb079,
+                                   "Error while clearing DRV-Presence.\n");
+               }
+
+               qla2x00_try_to_stop_firmware(base_vha);
+       }
+
        qla2x00_wait_for_sess_deletion(base_vha);
 
        /*
@@ -3648,14 +3666,6 @@ qla2x00_remove_one(struct pci_dev *pdev)
 
        qla2x00_delete_all_vps(ha, base_vha);
 
-       if (IS_QLA8031(ha)) {
-               ql_dbg(ql_dbg_p3p, base_vha, 0xb07e,
-                   "Clearing fcoe driver presence.\n");
-               if (qla83xx_clear_drv_presence(base_vha) != QLA_SUCCESS)
-                       ql_dbg(ql_dbg_p3p, base_vha, 0xb079,
-                           "Error while clearing DRV-Presence.\n");
-       }
-
        qla2x00_abort_all_cmds(base_vha, DID_NO_CONNECT << 16);
 
        qla2x00_dfs_remove(base_vha);
@@ -3715,24 +3725,6 @@ qla2x00_free_device(scsi_qla_host_t *vha)
                qla2x00_stop_timer(vha);
 
        qla25xx_delete_queues(vha);
-
-       if (ha->flags.fce_enabled)
-               qla2x00_disable_fce_trace(vha, NULL, NULL);
-
-       if (ha->eft)
-               qla2x00_disable_eft_trace(vha);
-
-       if (IS_QLA25XX(ha) ||  IS_QLA2031(ha) || IS_QLA27XX(ha)) {
-               if (ha->flags.fw_started)
-                       qla2x00_abort_isp_cleanup(vha);
-       } else {
-               if (ha->flags.fw_started) {
-                       /* Stop currently executing firmware. */
-                       qla2x00_try_to_stop_firmware(vha);
-                       ha->flags.fw_started = 0;
-               }
-       }
-
        vha->flags.online = 0;
 
        /* turn-off interrupts on the card */
@@ -6028,8 +6020,9 @@ qla2x00_do_dpc(void *data)
                                set_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags);
                }
 
-               if (test_and_clear_bit(ISP_ABORT_NEEDED,
-                                               &base_vha->dpc_flags)) {
+               if (test_and_clear_bit
+                   (ISP_ABORT_NEEDED, &base_vha->dpc_flags) &&
+                   !test_bit(UNLOADING, &base_vha->dpc_flags)) {
 
                        ql_dbg(ql_dbg_dpc, base_vha, 0x4007,
                            "ISP abort scheduled.\n");
index 04458eb..4499c78 100644 (file)
@@ -1880,6 +1880,9 @@ qla24xx_beacon_off(struct scsi_qla_host *vha)
        if (IS_P3P_TYPE(ha))
                return QLA_SUCCESS;
 
+       if (!ha->flags.fw_started)
+               return QLA_SUCCESS;
+
        ha->beacon_blink_led = 0;
 
        if (IS_QLA2031(ha) || IS_QLA27XX(ha))
index 8932ae8..2715cda 100644 (file)
@@ -296,6 +296,20 @@ enum blk_eh_timer_return scsi_times_out(struct request *req)
                rtn = host->hostt->eh_timed_out(scmd);
 
        if (rtn == BLK_EH_DONE) {
+               /*
+                * For blk-mq, we must set the request state to complete now
+                * before sending the request to the scsi error handler. This
+                * will prevent a use-after-free in the event the LLD manages
+                * to complete the request before the error handler finishes
+                * processing this timed out request.
+                *
+                * If the request was already completed, then the LLD beat the
+                * timeout handler to it and the request was never transferred
+                * to the scsi error handler. In that case we can return
+                * immediately as no further action is required.
+                */
+               if (req->q->mq_ops && !blk_mq_mark_complete(req))
+                       return rtn;
                if (scsi_abort_command(scmd) != SUCCESS) {
                        set_host_byte(scmd, DID_TIME_OUT);
                        scsi_eh_scmd_add(scmd);
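
blk_mq_mark_complete() resolves the race described in the comment by letting exactly one side, timeout handling or the LLD's completion, claim the request. Semantically it is a compare-and-swap on the request state; roughly, as a sketch (the real helper operates on struct request):

	#include <linux/atomic.h>

	enum { RQ_IN_FLIGHT, RQ_COMPLETE };

	/* returns true only for the first caller that flips the state */
	static bool mark_complete(atomic_t *state)
	{
		return atomic_cmpxchg(state, RQ_IN_FLIGHT, RQ_COMPLETE)
		       == RQ_IN_FLIGHT;
	}
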
index cd2fdac..ba9ba0e 100644 (file)
@@ -1741,15 +1741,11 @@ sg_start_req(Sg_request *srp, unsigned char *cmd)
         *
         * With scsi-mq enabled, there are a fixed number of preallocated
         * requests equal in number to shost->can_queue.  If all of the
-        * preallocated requests are already in use, then using GFP_ATOMIC with
-        * blk_get_request() will return -EWOULDBLOCK, whereas using GFP_KERNEL
-        * will cause blk_get_request() to sleep until an active command
-        * completes, freeing up a request.  Neither option is ideal, but
-        * GFP_KERNEL is the better choice to prevent userspace from getting an
-        * unexpected EWOULDBLOCK.
-        *
-        * With scsi-mq disabled, blk_get_request() with GFP_KERNEL usually
-        * does not sleep except under memory pressure.
+        * preallocated requests are already in use, then blk_get_request()
+        * will sleep until an active command completes, freeing up a request.
+        * Although waiting in an asynchronous interface is less than ideal, we
+        * do not want to use BLK_MQ_REQ_NOWAIT here because userspace might
+        * not expect an EWOULDBLOCK from this condition.
         */
        rq = blk_get_request(q, hp->dxfer_direction == SG_DXFER_TO_DEV ?
                        REQ_OP_SCSI_OUT : REQ_OP_SCSI_IN, 0);
@@ -2185,6 +2181,7 @@ sg_add_sfp(Sg_device * sdp)
        write_lock_irqsave(&sdp->sfd_lock, iflags);
        if (atomic_read(&sdp->detaching)) {
                write_unlock_irqrestore(&sdp->sfd_lock, iflags);
+               kfree(sfp);
                return ERR_PTR(-ENODEV);
        }
        list_add_tail(&sfp->sfd_siblings, &sdp->sfds);
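
The added kfree(sfp) plugs a leak on the detaching error path, where the freshly allocated sfp was abandoned before ERR_PTR() was returned. The pattern, generically:

	#include <linux/err.h>
	#include <linux/slab.h>

	struct thing {
		int payload;
	};

	static struct thing *alloc_registered(bool detaching)
	{
		struct thing *t = kzalloc(sizeof(*t), GFP_KERNEL);

		if (!t)
			return ERR_PTR(-ENOMEM);
		if (detaching) {
			kfree(t);       /* the previously missing cleanup */
			return ERR_PTR(-ENODEV);
		}
		return t;
	}
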
index 3f3cb72..d0389b2 100644 (file)
@@ -523,18 +523,26 @@ static int sr_init_command(struct scsi_cmnd *SCpnt)
 static int sr_block_open(struct block_device *bdev, fmode_t mode)
 {
        struct scsi_cd *cd;
+       struct scsi_device *sdev;
        int ret = -ENXIO;
 
+       cd = scsi_cd_get(bdev->bd_disk);
+       if (!cd)
+               goto out;
+
+       sdev = cd->device;
+       scsi_autopm_get_device(sdev);
        check_disk_change(bdev);
 
        mutex_lock(&sr_mutex);
-       cd = scsi_cd_get(bdev->bd_disk);
-       if (cd) {
-               ret = cdrom_open(&cd->cdi, bdev, mode);
-               if (ret)
-                       scsi_cd_put(cd);
-       }
+       ret = cdrom_open(&cd->cdi, bdev, mode);
        mutex_unlock(&sr_mutex);
+
+       scsi_autopm_put_device(sdev);
+       if (ret)
+               scsi_cd_put(cd);
+
+out:
        return ret;
 }
 
@@ -562,6 +570,8 @@ static int sr_block_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
        if (ret)
                goto out;
 
+       scsi_autopm_get_device(sdev);
+
        /*
         * Send SCSI addressing ioctls directly to mid level, send other
         * ioctls to cdrom/block level.
@@ -570,15 +580,18 @@ static int sr_block_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
        case SCSI_IOCTL_GET_IDLUN:
        case SCSI_IOCTL_GET_BUS_NUMBER:
                ret = scsi_ioctl(sdev, cmd, argp);
-               goto out;
+               goto put;
        }
 
        ret = cdrom_ioctl(&cd->cdi, bdev, mode, cmd, arg);
        if (ret != -ENOSYS)
-               goto out;
+               goto put;
 
        ret = scsi_ioctl(sdev, cmd, argp);
 
+put:
+       scsi_autopm_put_device(sdev);
+
 out:
        mutex_unlock(&sr_mutex);
        return ret;
index 777e5f1..0cd947f 100644 (file)
@@ -561,9 +561,14 @@ static void pvscsi_complete_request(struct pvscsi_adapter *adapter,
            (btstat == BTSTAT_SUCCESS ||
             btstat == BTSTAT_LINKED_COMMAND_COMPLETED ||
             btstat == BTSTAT_LINKED_COMMAND_COMPLETED_WITH_FLAG)) {
-               cmd->result = (DID_OK << 16) | sdstat;
-               if (sdstat == SAM_STAT_CHECK_CONDITION && cmd->sense_buffer)
-                       cmd->result |= (DRIVER_SENSE << 24);
+               if (sdstat == SAM_STAT_COMMAND_TERMINATED) {
+                       cmd->result = (DID_RESET << 16);
+               } else {
+                       cmd->result = (DID_OK << 16) | sdstat;
+                       if (sdstat == SAM_STAT_CHECK_CONDITION &&
+                           cmd->sense_buffer)
+                               cmd->result |= (DRIVER_SENSE << 24);
+               }
        } else
                switch (btstat) {
                case BTSTAT_SUCCESS:
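
Mapping SAM_STAT_COMMAND_TERMINATED to DID_RESET tells the midlayer to retry the command rather than surface a bare termination status. For reference, the result word being assembled here packs driver, host and status bytes; a sketch of that layout (the DID_*/DRIVER_* macros come from scsi/scsi.h):

	#include <scsi/scsi.h>

	static u32 scsi_result(u8 host_byte, u8 status_byte, bool sense_valid)
	{
		u32 result = (host_byte << 16) | status_byte;

		if (sense_valid)
			result |= DRIVER_SENSE << 24;   /* sense data accompanies status */
		return result;
	}
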
index a1a0025..d5d33e1 100644 (file)
@@ -402,6 +402,8 @@ static int ashmem_mmap(struct file *file, struct vm_area_struct *vma)
                        fput(asma->file);
                        goto out;
                }
+       } else {
+               vma_set_anonymous(vma);
        }
 
        if (vma->vm_file)
index 0ecffab..abdaf7c 100644 (file)
@@ -1842,15 +1842,15 @@ void hostif_sme_multicast_set(struct ks_wlan_private *priv)
        memset(set_address, 0, NIC_MAX_MCAST_LIST * ETH_ALEN);
 
        if (dev->flags & IFF_PROMISC) {
-               hostif_mib_set_request_bool(priv, LOCAL_MULTICAST_FILTER,
-                                           MCAST_FILTER_PROMISC);
+               hostif_mib_set_request_int(priv, LOCAL_MULTICAST_FILTER,
+                                          MCAST_FILTER_PROMISC);
                goto spin_unlock;
        }
 
        if ((netdev_mc_count(dev) > NIC_MAX_MCAST_LIST) ||
            (dev->flags & IFF_ALLMULTI)) {
-               hostif_mib_set_request_bool(priv, LOCAL_MULTICAST_FILTER,
-                                           MCAST_FILTER_MCASTALL);
+               hostif_mib_set_request_int(priv, LOCAL_MULTICAST_FILTER,
+                                          MCAST_FILTER_MCASTALL);
                goto spin_unlock;
        }
 
@@ -1866,8 +1866,8 @@ void hostif_sme_multicast_set(struct ks_wlan_private *priv)
                                               ETH_ALEN * mc_count);
        } else {
                priv->sme_i.sme_flag |= SME_MULTICAST;
-               hostif_mib_set_request_bool(priv, LOCAL_MULTICAST_FILTER,
-                                           MCAST_FILTER_MCAST);
+               hostif_mib_set_request_int(priv, LOCAL_MULTICAST_FILTER,
+                                          MCAST_FILTER_MCAST);
        }
 
 spin_unlock:
index a3a8342..16478fe 100644 (file)
@@ -11,7 +11,6 @@
  * (at your option) any later version.
  */
 
-#include <asm/cacheflush.h>
 #include <linux/clk.h>
 #include <linux/mm.h>
 #include <linux/pagemap.h>
@@ -24,6 +23,8 @@
 #include <media/v4l2-ioctl.h>
 #include <media/v4l2-mc.h>
 
+#include <asm/cacheflush.h>
+
 #include "iss_video.h"
 #include "iss.h"
 
index 673fdce..ff78327 100644 (file)
@@ -7,7 +7,6 @@ config R8188EU
        select LIB80211
        select LIB80211_CRYPT_WEP
        select LIB80211_CRYPT_CCMP
-       select LIB80211_CRYPT_TKIP
        ---help---
        This option adds support for Realtek RTL8188EU USB devices such as the TP-Link TL-WN725N.
        If built as a module, it will be called r8188eu.
index 05936a4..c6857a5 100644 (file)
@@ -23,7 +23,6 @@
 #include <mon.h>
 #include <wifi.h>
 #include <linux/vmalloc.h>
-#include <net/lib80211.h>
 
 #define ETHERNET_HEADER_SIZE   14      /*  Ethernet Header Length */
 #define LLC_HEADER_SIZE                        6       /*  LLC Header Length */
@@ -221,20 +220,31 @@ u32 rtw_free_uc_swdec_pending_queue(struct adapter *adapter)
 static int recvframe_chkmic(struct adapter *adapter,
                            struct recv_frame *precvframe)
 {
-       int res = _SUCCESS;
-       struct rx_pkt_attrib *prxattrib = &precvframe->attrib;
-       struct sta_info *stainfo = rtw_get_stainfo(&adapter->stapriv, prxattrib->ta);
+       int     i, res = _SUCCESS;
+       u32     datalen;
+       u8      miccode[8];
+       u8      bmic_err = false, brpt_micerror = true;
+       u8      *pframe, *payload, *pframemic;
+       u8      *mickey;
+       struct  sta_info                *stainfo;
+       struct  rx_pkt_attrib   *prxattrib = &precvframe->attrib;
+       struct  security_priv   *psecuritypriv = &adapter->securitypriv;
+
+       struct mlme_ext_priv    *pmlmeext = &adapter->mlmeextpriv;
+       struct mlme_ext_info    *pmlmeinfo = &(pmlmeext->mlmext_info);
+
+       stainfo = rtw_get_stainfo(&adapter->stapriv, &prxattrib->ta[0]);
 
        if (prxattrib->encrypt == _TKIP_) {
+               RT_TRACE(_module_rtl871x_recv_c_, _drv_info_,
+                        ("\n %s: prxattrib->encrypt==_TKIP_\n", __func__));
+               RT_TRACE(_module_rtl871x_recv_c_, _drv_info_,
+                        ("\n %s: da=0x%02x:0x%02x:0x%02x:0x%02x:0x%02x:0x%02x\n",
+                         __func__, prxattrib->ra[0], prxattrib->ra[1], prxattrib->ra[2],
+                         prxattrib->ra[3], prxattrib->ra[4], prxattrib->ra[5]));
+
+               /* calculate mic code */
                if (stainfo) {
-                       int key_idx;
-                       const int iv_len = 8, icv_len = 4, key_length = 32;
-                       struct sk_buff *skb = precvframe->pkt;
-                       u8 key[32], iv[8], icv[4], *pframe = skb->data;
-                       void *crypto_private = NULL;
-                       struct lib80211_crypto_ops *crypto_ops = try_then_request_module(lib80211_get_crypto_ops("TKIP"), "lib80211_crypt_tkip");
-                       struct security_priv *psecuritypriv = &adapter->securitypriv;
-
                        if (IS_MCAST(prxattrib->ra)) {
                                if (!psecuritypriv) {
                                        res = _FAIL;
@@ -243,58 +253,115 @@ static int recvframe_chkmic(struct adapter *adapter,
                                        DBG_88E("\n %s: didn't install group key!!!!!!!!!!\n", __func__);
                                        goto exit;
                                }
-                               key_idx = prxattrib->key_index;
-                               memcpy(key, psecuritypriv->dot118021XGrpKey[key_idx].skey, 16);
-                               memcpy(key + 16, psecuritypriv->dot118021XGrprxmickey[key_idx].skey, 16);
+                               mickey = &psecuritypriv->dot118021XGrprxmickey[prxattrib->key_index].skey[0];
+
+                               RT_TRACE(_module_rtl871x_recv_c_, _drv_info_,
+                                        ("\n %s: bcmc key\n", __func__));
                        } else {
-                               key_idx = 0;
-                               memcpy(key, stainfo->dot118021x_UncstKey.skey, 16);
-                               memcpy(key + 16, stainfo->dot11tkiprxmickey.skey, 16);
+                               mickey = &stainfo->dot11tkiprxmickey.skey[0];
+                               RT_TRACE(_module_rtl871x_recv_c_, _drv_err_,
+                                        ("\n %s: unicast key\n", __func__));
                        }
 
-                       if (!crypto_ops) {
-                               res = _FAIL;
-                               goto exit_lib80211_tkip;
-                       }
+                       /* icv_len included the mic code */
+                       datalen = precvframe->pkt->len-prxattrib->hdrlen -
+                                 prxattrib->iv_len-prxattrib->icv_len-8;
+                       pframe = precvframe->pkt->data;
+                       payload = pframe+prxattrib->hdrlen+prxattrib->iv_len;
 
-                       memcpy(iv, pframe + prxattrib->hdrlen, iv_len);
-                       memcpy(icv, pframe + skb->len - icv_len, icv_len);
-                       memmove(pframe + iv_len, pframe, prxattrib->hdrlen);
+                       RT_TRACE(_module_rtl871x_recv_c_, _drv_info_, ("\n prxattrib->iv_len=%d prxattrib->icv_len=%d\n", prxattrib->iv_len, prxattrib->icv_len));
+                       rtw_seccalctkipmic(mickey, pframe, payload, datalen, &miccode[0],
+                                          (unsigned char)prxattrib->priority); /* care the length of the data */
 
-                       skb_pull(skb, iv_len);
-                       skb_trim(skb, skb->len - icv_len);
+                       pframemic = payload+datalen;
 
-                       crypto_private = crypto_ops->init(key_idx);
-                       if (!crypto_private) {
-                               res = _FAIL;
-                               goto exit_lib80211_tkip;
-                       }
-                       if (crypto_ops->set_key(key, key_length, NULL, crypto_private) < 0) {
-                               res = _FAIL;
-                               goto exit_lib80211_tkip;
-                       }
-                       if (crypto_ops->decrypt_msdu(skb, key_idx, prxattrib->hdrlen, crypto_private)) {
-                               res = _FAIL;
-                               goto exit_lib80211_tkip;
+                       bmic_err = false;
+
+                       for (i = 0; i < 8; i++) {
+                               if (miccode[i] != *(pframemic+i)) {
+                                       RT_TRACE(_module_rtl871x_recv_c_, _drv_err_,
+                                                ("%s: miccode[%d](%02x)!=*(pframemic+%d)(%02x) ",
+                                                 __func__, i, miccode[i], i, *(pframemic + i)));
+                                       bmic_err = true;
+                               }
                        }
 
-                       memmove(pframe, pframe + iv_len, prxattrib->hdrlen);
-                       skb_push(skb, iv_len);
-                       skb_put(skb, icv_len);
+                       if (bmic_err) {
+                               RT_TRACE(_module_rtl871x_recv_c_, _drv_err_,
+                                        ("\n *(pframemic-8)-*(pframemic-1)=0x%02x:0x%02x:0x%02x:0x%02x:0x%02x:0x%02x:0x%02x:0x%02x\n",
+                                        *(pframemic-8), *(pframemic-7), *(pframemic-6),
+                                        *(pframemic-5), *(pframemic-4), *(pframemic-3),
+                                        *(pframemic-2), *(pframemic-1)));
+                               RT_TRACE(_module_rtl871x_recv_c_, _drv_err_,
+                                        ("\n *(pframemic-16)-*(pframemic-9)=0x%02x:0x%02x:0x%02x:0x%02x:0x%02x:0x%02x:0x%02x:0x%02x\n",
+                                        *(pframemic-16), *(pframemic-15), *(pframemic-14),
+                                        *(pframemic-13), *(pframemic-12), *(pframemic-11),
+                                        *(pframemic-10), *(pframemic-9)));
+                               {
+                                       uint i;
 
-                       memcpy(pframe + prxattrib->hdrlen, iv, iv_len);
-                       memcpy(pframe + skb->len - icv_len, icv, icv_len);
+                                       RT_TRACE(_module_rtl871x_recv_c_, _drv_err_,
+                                                ("\n ======dump packet (len=%d)======\n",
+                                                precvframe->pkt->len));
+                                       for (i = 0; i < precvframe->pkt->len; i += 8) {
+                                               RT_TRACE(_module_rtl871x_recv_c_,
+                                                        _drv_err_,
+                                                        ("0x%02x:0x%02x:0x%02x:0x%02x:0x%02x:0x%02x:0x%02x:0x%02x",
+                                                        *(precvframe->pkt->data+i),
+                                                        *(precvframe->pkt->data+i+1),
+                                                        *(precvframe->pkt->data+i+2),
+                                                        *(precvframe->pkt->data+i+3),
+                                                        *(precvframe->pkt->data+i+4),
+                                                        *(precvframe->pkt->data+i+5),
+                                                        *(precvframe->pkt->data+i+6),
+                                                        *(precvframe->pkt->data+i+7)));
+                                       }
+                                       RT_TRACE(_module_rtl871x_recv_c_,
+                                                _drv_err_,
+                                                ("\n ====== dump packet end [len=%d]======\n",
+                                                precvframe->pkt->len));
+                                       RT_TRACE(_module_rtl871x_recv_c_,
+                                                _drv_err_,
+                                                ("\n hdrlen=%d,\n",
+                                                prxattrib->hdrlen));
+                               }
 
-exit_lib80211_tkip:
-                       if (crypto_ops && crypto_private)
-                               crypto_ops->deinit(crypto_private);
+                               RT_TRACE(_module_rtl871x_recv_c_, _drv_err_,
+                                        ("ra=0x%.2x 0x%.2x 0x%.2x 0x%.2x 0x%.2x 0x%.2x psecuritypriv->binstallGrpkey=%d ",
+                                        prxattrib->ra[0], prxattrib->ra[1], prxattrib->ra[2],
+                                        prxattrib->ra[3], prxattrib->ra[4], prxattrib->ra[5], psecuritypriv->binstallGrpkey));
+
+                               /*  double check key_index for some timing issue , */
+                               /*  cannot compare with psecuritypriv->dot118021XGrpKeyid also cause timing issue */
+                               if ((IS_MCAST(prxattrib->ra) == true)  && (prxattrib->key_index != pmlmeinfo->key_index))
+                                       brpt_micerror = false;
+
+                               if ((prxattrib->bdecrypted) && (brpt_micerror)) {
+                                       rtw_handle_tkip_mic_err(adapter, (u8)IS_MCAST(prxattrib->ra));
+                                       RT_TRACE(_module_rtl871x_recv_c_, _drv_err_, (" mic error :prxattrib->bdecrypted=%d ", prxattrib->bdecrypted));
+                                       DBG_88E(" mic error :prxattrib->bdecrypted=%d\n", prxattrib->bdecrypted);
+                               } else {
+                                       RT_TRACE(_module_rtl871x_recv_c_, _drv_err_, (" mic error :prxattrib->bdecrypted=%d ", prxattrib->bdecrypted));
+                                       DBG_88E(" mic error :prxattrib->bdecrypted=%d\n", prxattrib->bdecrypted);
+                               }
+                               res = _FAIL;
+                       } else {
+                               /* mic checked ok */
+                               if ((!psecuritypriv->bcheck_grpkey) && (IS_MCAST(prxattrib->ra))) {
+                                       psecuritypriv->bcheck_grpkey = true;
+                                       RT_TRACE(_module_rtl871x_recv_c_, _drv_err_, ("psecuritypriv->bcheck_grpkey = true"));
+                               }
+                       }
                } else {
                        RT_TRACE(_module_rtl871x_recv_c_, _drv_err_,
                                 ("%s: rtw_get_stainfo==NULL!!!\n", __func__));
                }
+
+               skb_trim(precvframe->pkt, precvframe->pkt->len - 8);
        }
 
 exit:
+
        return res;
 }
 
index bfe0b21..67a2490 100644 (file)
@@ -650,71 +650,71 @@ u32       rtw_tkip_encrypt(struct adapter *padapter, u8 *pxmitframe)
        return res;
 }
 
+/* The hlen doesn't include the IV */
 u32 rtw_tkip_decrypt(struct adapter *padapter, u8 *precvframe)
-{
-       struct rx_pkt_attrib *prxattrib = &((struct recv_frame *)precvframe)->attrib;
-       u32 res = _SUCCESS;
+{                                                                                                                                      /*  exclude ICV */
+       u16 pnl;
+       u32 pnh;
+       u8   rc4key[16];
+       u8   ttkey[16];
+       u8      crc[4];
+       struct arc4context mycontext;
+       int                     length;
+
+       u8      *pframe, *payload, *iv, *prwskey;
+       union pn48 dot11txpn;
+       struct  sta_info                *stainfo;
+       struct  rx_pkt_attrib    *prxattrib = &((struct recv_frame *)precvframe)->attrib;
+       struct  security_priv   *psecuritypriv = &padapter->securitypriv;
+       u32             res = _SUCCESS;
+
+
+       pframe = (unsigned char *)((struct recv_frame *)precvframe)->pkt->data;
 
        /* 4 start to decrypt recvframe */
        if (prxattrib->encrypt == _TKIP_) {
-               struct sta_info *stainfo = rtw_get_stainfo(&padapter->stapriv, prxattrib->ta);
-
+               stainfo = rtw_get_stainfo(&padapter->stapriv, &prxattrib->ta[0]);
                if (stainfo) {
-                       int key_idx;
-                       const int iv_len = 8, icv_len = 4, key_length = 32;
-                       void *crypto_private = NULL;
-                       struct sk_buff *skb = ((struct recv_frame *)precvframe)->pkt;
-                       u8 key[32], iv[8], icv[4], *pframe = skb->data;
-                       struct lib80211_crypto_ops *crypto_ops = try_then_request_module(lib80211_get_crypto_ops("TKIP"), "lib80211_crypt_tkip");
-                       struct security_priv *psecuritypriv = &padapter->securitypriv;
-
                        if (IS_MCAST(prxattrib->ra)) {
                                if (!psecuritypriv->binstallGrpkey) {
                                        res = _FAIL;
                                        DBG_88E("%s:rx bc/mc packets, but didn't install group key!!!!!!!!!!\n", __func__);
                                        goto exit;
                                }
-                               key_idx = prxattrib->key_index;
-                               memcpy(key, psecuritypriv->dot118021XGrpKey[key_idx].skey, 16);
-                               memcpy(key + 16, psecuritypriv->dot118021XGrprxmickey[key_idx].skey, 16);
+                               prwskey = psecuritypriv->dot118021XGrpKey[prxattrib->key_index].skey;
                        } else {
-                               key_idx = 0;
-                               memcpy(key, stainfo->dot118021x_UncstKey.skey, 16);
-                               memcpy(key + 16, stainfo->dot11tkiprxmickey.skey, 16);
+                               RT_TRACE(_module_rtl871x_security_c_, _drv_err_, ("%s: stainfo != NULL!!!\n", __func__));
+                               prwskey = &stainfo->dot118021x_UncstKey.skey[0];
                        }
 
-                       if (!crypto_ops) {
-                               res = _FAIL;
-                               goto exit_lib80211_tkip;
-                       }
+                       iv = pframe+prxattrib->hdrlen;
+                       payload = pframe+prxattrib->iv_len+prxattrib->hdrlen;
+                       length = ((struct recv_frame *)precvframe)->pkt->len-prxattrib->hdrlen-prxattrib->iv_len;
 
-                       memcpy(iv, pframe + prxattrib->hdrlen, iv_len);
-                       memcpy(icv, pframe + skb->len - icv_len, icv_len);
+                       GET_TKIP_PN(iv, dot11txpn);
 
-                       crypto_private = crypto_ops->init(key_idx);
-                       if (!crypto_private) {
-                               res = _FAIL;
-                               goto exit_lib80211_tkip;
-                       }
-                       if (crypto_ops->set_key(key, key_length, NULL, crypto_private) < 0) {
-                               res = _FAIL;
-                               goto exit_lib80211_tkip;
-                       }
-                       if (crypto_ops->decrypt_mpdu(skb, prxattrib->hdrlen, crypto_private)) {
-                               res = _FAIL;
-                               goto exit_lib80211_tkip;
-                       }
+                       pnl = (u16)(dot11txpn.val);
+                       pnh = (u32)(dot11txpn.val>>16);
 
-                       memmove(pframe, pframe + iv_len, prxattrib->hdrlen);
-                       skb_push(skb, iv_len);
-                       skb_put(skb, icv_len);
+                       phase1((u16 *)&ttkey[0], prwskey, &prxattrib->ta[0], pnh);
+                       phase2(&rc4key[0], prwskey, (unsigned short *)&ttkey[0], pnl);
 
-                       memcpy(pframe + prxattrib->hdrlen, iv, iv_len);
-                       memcpy(pframe + skb->len - icv_len, icv, icv_len);
+                       /* 4 decrypt payload, including the ICV */
 
-exit_lib80211_tkip:
-                       if (crypto_ops && crypto_private)
-                               crypto_ops->deinit(crypto_private);
+                       arcfour_init(&mycontext, rc4key, 16);
+                       arcfour_encrypt(&mycontext, payload, payload, length);
+
+                       *((__le32 *)crc) = getcrc32(payload, length-4);
+
+                       if (crc[3] != payload[length-1] ||
+                           crc[2] != payload[length-2] ||
+                           crc[1] != payload[length-3] ||
+                           crc[0] != payload[length-4]) {
+                               RT_TRACE(_module_rtl871x_security_c_, _drv_err_,
+                                        ("rtw_tkip_decrypt: icv error crc (%4ph)!=payload (%4ph)\n",
+                                        &crc, &payload[length-4]));
+                               res = _FAIL;
+                       }
                } else {
                        RT_TRACE(_module_rtl871x_security_c_, _drv_err_, ("rtw_tkip_decrypt: stainfo==NULL!!!\n"));
                        res = _FAIL;
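
The restored software path derives the per-packet RC4 key with phase1()/phase2(), RC4-decrypts the payload plus ICV, then byte-compares the trailing CRC-32. A minimal userspace sketch of just that ICV check, assuming the standard reflected CRC-32 used for the 802.11 ICV, stored little-endian in the payload's last four bytes:

    #include <stdint.h>
    #include <string.h>

    /* reflected CRC-32 (poly 0xEDB88320), the FCS/ICV polynomial */
    static uint32_t crc32_ieee(const uint8_t *buf, size_t len)
    {
            uint32_t crc = 0xFFFFFFFFu;

            while (len--) {
                    crc ^= *buf++;
                    for (int k = 0; k < 8; k++)
                            crc = (crc >> 1) ^ (0xEDB88320u & -(crc & 1u));
            }
            return ~crc;
    }

    /* mirrors the byte-wise compare above */
    static int icv_ok(const uint8_t *payload, size_t length)
    {
            uint32_t crc = crc32_ieee(payload, length - 4);
            uint8_t le[4] = { (uint8_t)crc, (uint8_t)(crc >> 8),
                              (uint8_t)(crc >> 16), (uint8_t)(crc >> 24) };

            return memcmp(le, payload + length - 4, 4) == 0;
    }

    int main(void)
    {
            uint8_t pkt[8] = { 'd', 'a', 't', 'a' };
            uint32_t crc = crc32_ieee(pkt, 4);

            pkt[4] = (uint8_t)crc;
            pkt[5] = (uint8_t)(crc >> 8);
            pkt[6] = (uint8_t)(crc >> 16);
            pkt[7] = (uint8_t)(crc >> 24);
            return icv_ok(pkt, 8) ? 0 : 1;
    }
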
index a61bc41..947c795 100644 (file)
@@ -198,11 +198,15 @@ static ssize_t softsynthx_read(struct file *fp, char __user *buf, size_t count,
        int chars_sent = 0;
        char __user *cp;
        char *init;
+       size_t bytes_per_ch = unicode ? 3 : 1;
        u16 ch;
        int empty;
        unsigned long flags;
        DEFINE_WAIT(wait);
 
+       if (count < bytes_per_ch)
+               return -EINVAL;
+
        spin_lock_irqsave(&speakup_info.spinlock, flags);
        while (1) {
                prepare_to_wait(&speakup_event, &wait, TASK_INTERRUPTIBLE);
@@ -228,7 +232,7 @@ static ssize_t softsynthx_read(struct file *fp, char __user *buf, size_t count,
        init = get_initstring();
 
        /* Keep 3 bytes available for a 16bit UTF-8-encoded character */
-       while (chars_sent <= count - 3) {
+       while (chars_sent <= count - bytes_per_ch) {
                if (speakup_info.flushing) {
                        speakup_info.flushing = 0;
                        ch = '\x18';
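
The new count guard matters because count is a size_t: for a read shorter than bytes_per_ch, "count - bytes_per_ch" wraps instead of going negative, and the copy loop would run. A hypothetical userspace illustration of the wraparound:

    #include <stdio.h>
    #include <stddef.h>

    int main(void)
    {
            size_t count = 2, bytes_per_ch = 3;
            int chars_sent = 0;

            if (count < bytes_per_ch)       /* the added -EINVAL guard */
                    return 1;

            /* never reached with the guard; without it, the subtraction
             * is unsigned and wraps to SIZE_MAX, so the loop would run */
            while ((size_t)chars_sent <= count - bytes_per_ch) {
                    puts("would copy one character");
                    break;
            }
            return 0;
    }
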
index 514986b..25eb389 100644 (file)
@@ -652,6 +652,7 @@ static int cxgbit_set_iso_npdu(struct cxgbit_sock *csk)
        struct iscsi_param *param;
        u32 mrdsl, mbl;
        u32 max_npdu, max_iso_npdu;
+       u32 max_iso_payload;
 
        if (conn->login->leading_connection) {
                param = iscsi_find_param_from_key(MAXBURSTLENGTH,
@@ -670,8 +671,10 @@ static int cxgbit_set_iso_npdu(struct cxgbit_sock *csk)
        mrdsl = conn_ops->MaxRecvDataSegmentLength;
        max_npdu = mbl / mrdsl;
 
-       max_iso_npdu = CXGBIT_MAX_ISO_PAYLOAD /
-                       (ISCSI_HDR_LEN + mrdsl +
+       max_iso_payload = rounddown(CXGBIT_MAX_ISO_PAYLOAD, csk->emss);
+
+       max_iso_npdu = max_iso_payload /
+                      (ISCSI_HDR_LEN + mrdsl +
                        cxgbit_digest_len[csk->submode]);
 
        csk->max_iso_npdu = min(max_npdu, max_iso_npdu);
@@ -741,6 +744,9 @@ static int cxgbit_set_params(struct iscsi_conn *conn)
        if (conn_ops->MaxRecvDataSegmentLength > cdev->mdsl)
                conn_ops->MaxRecvDataSegmentLength = cdev->mdsl;
 
+       if (cxgbit_set_digest(csk))
+               return -1;
+
        if (conn->login->leading_connection) {
                param = iscsi_find_param_from_key(ERRORRECOVERYLEVEL,
                                                  conn->param_list);
@@ -764,7 +770,7 @@ static int cxgbit_set_params(struct iscsi_conn *conn)
                        if (is_t5(cdev->lldi.adapter_type))
                                goto enable_ddp;
                        else
-                               goto enable_digest;
+                               return 0;
                }
 
                if (test_bit(CDEV_ISO_ENABLE, &cdev->flags)) {
@@ -781,10 +787,6 @@ enable_ddp:
                }
        }
 
-enable_digest:
-       if (cxgbit_set_digest(csk))
-               return -1;
-
        return 0;
 }
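
rounddown() trims the ISO payload budget to a whole number of TCP segments before it is divided into PDUs. A hedged sketch of the arithmetic (constants hypothetical):

    #include <stdio.h>

    /* same result as the kernel's rounddown() for positive integers */
    #define rounddown(x, y) (((x) / (y)) * (y))

    int main(void)
    {
            unsigned int budget = 65535;   /* hypothetical payload budget */
            unsigned int emss = 1448;      /* hypothetical effective MSS */

            /* 45 whole segments fit: 65160, none wasted on a partial one */
            printf("%u -> %u\n", budget, rounddown(budget, emss));
            return 0;
    }
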
 
index 785f0ed..ee34e90 100644 (file)
@@ -3,6 +3,7 @@ config USB_CHIPIDEA
        depends on ((USB_EHCI_HCD && USB_GADGET) || (USB_EHCI_HCD && !USB_GADGET) || (!USB_EHCI_HCD && USB_GADGET)) && HAS_DMA
        select EXTCON
        select RESET_CONTROLLER
+       select USB_ULPI_BUS
        help
          Say Y here if your system has a dual role high speed USB
          controller based on ChipIdea silicon IP. It supports:
@@ -38,12 +39,4 @@ config USB_CHIPIDEA_HOST
        help
          Say Y here to enable host controller functionality of the
          ChipIdea driver.
-
-config USB_CHIPIDEA_ULPI
-       bool "ChipIdea ULPI PHY support"
-       depends on USB_ULPI_BUS=y || USB_ULPI_BUS=USB_CHIPIDEA
-       help
-         Say Y here if you have a ULPI PHY attached to your ChipIdea
-         controller.
-
 endif
index e3d5e72..12df94f 100644 (file)
@@ -1,11 +1,10 @@
 # SPDX-License-Identifier: GPL-2.0
 obj-$(CONFIG_USB_CHIPIDEA)             += ci_hdrc.o
 
-ci_hdrc-y                              := core.o otg.o debug.o
+ci_hdrc-y                              := core.o otg.o debug.o ulpi.o
 ci_hdrc-$(CONFIG_USB_CHIPIDEA_UDC)     += udc.o
 ci_hdrc-$(CONFIG_USB_CHIPIDEA_HOST)    += host.o
 ci_hdrc-$(CONFIG_USB_OTG_FSM)          += otg_fsm.o
-ci_hdrc-$(CONFIG_USB_CHIPIDEA_ULPI)    += ulpi.o
 
 # Glue/Bridge layers go here
 
index 0bf244d..6a2cc5c 100644 (file)
@@ -240,10 +240,8 @@ struct ci_hdrc {
 
        struct ci_hdrc_platform_data    *platdata;
        int                             vbus_active;
-#ifdef CONFIG_USB_CHIPIDEA_ULPI
        struct ulpi                     *ulpi;
        struct ulpi_ops                 ulpi_ops;
-#endif
        struct phy                      *phy;
        /* old usb_phy interface */
        struct usb_phy                  *usb_phy;
@@ -426,15 +424,9 @@ static inline bool ci_otg_is_fsm_mode(struct ci_hdrc *ci)
 #endif
 }
 
-#if IS_ENABLED(CONFIG_USB_CHIPIDEA_ULPI)
 int ci_ulpi_init(struct ci_hdrc *ci);
 void ci_ulpi_exit(struct ci_hdrc *ci);
 int ci_ulpi_resume(struct ci_hdrc *ci);
-#else
-static inline int ci_ulpi_init(struct ci_hdrc *ci) { return 0; }
-static inline void ci_ulpi_exit(struct ci_hdrc *ci) { }
-static inline int ci_ulpi_resume(struct ci_hdrc *ci) { return 0; }
-#endif
 
 u32 hw_read_intr_enable(struct ci_hdrc *ci);
 
index 6da42dc..dfec07e 100644 (file)
@@ -95,6 +95,9 @@ int ci_ulpi_resume(struct ci_hdrc *ci)
 {
        int cnt = 100000;
 
+       if (ci->platdata->phy_mode != USBPHY_INTERFACE_MODE_ULPI)
+               return 0;
+
        while (cnt-- > 0) {
                if (hw_read(ci, OP_ULPI_VIEWPORT, ULPI_SYNC_STATE))
                        return 0;
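
With the Kconfig option removed, ulpi.o is always built and the deleted stubs are replaced by this runtime phy_mode test. A generic sketch of the stub-to-runtime-guard pattern (types and names hypothetical):

    #include <stdio.h>

    enum phy_mode { PHY_MODE_UTMI, PHY_MODE_ULPI };

    struct controller {
            enum phy_mode phy_mode;
    };

    /* always compiled in; a cheap no-op unless the PHY really is ULPI */
    static int ulpi_resume(struct controller *c)
    {
            if (c->phy_mode != PHY_MODE_ULPI)
                    return 0;
            puts("would poll the ULPI viewport for sync here");
            return 0;
    }

    int main(void)
    {
            struct controller c = { PHY_MODE_UTMI };

            return ulpi_resume(&c);   /* no-op: hardware never touched */
    }
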
index 998b32d..75c4623 100644 (file)
@@ -1831,6 +1831,9 @@ static const struct usb_device_id acm_ids[] = {
        { USB_DEVICE(0x09d8, 0x0320), /* Elatec GmbH TWN3 */
        .driver_info = NO_UNION_NORMAL, /* has misplaced union descriptor */
        },
+       { USB_DEVICE(0x0ca6, 0xa050), /* Castles VEGA3000 */
+       .driver_info = NO_UNION_NORMAL, /* reports zero length descriptor */
+       },
 
        { USB_DEVICE(0x2912, 0x0001), /* ATOL FPrint */
        .driver_info = CLEAR_HALT_CONDITIONS,
index fcae521..1fb2668 100644 (file)
@@ -1142,10 +1142,14 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type)
 
                if (!udev || udev->state == USB_STATE_NOTATTACHED) {
                        /* Tell hub_wq to disconnect the device or
-                        * check for a new connection
+                        * check for a new connection or an over-current condition.
+                        * Per the USB 2.0 spec, section 11.12.5,
+                        * C_PORT_OVER_CURRENT could be set while
+                        * PORT_OVER_CURRENT is not. So check for either of them.
                         */
                        if (udev || (portstatus & USB_PORT_STAT_CONNECTION) ||
-                           (portstatus & USB_PORT_STAT_OVERCURRENT))
+                           (portstatus & USB_PORT_STAT_OVERCURRENT) ||
+                           (portchange & USB_PORT_STAT_C_OVERCURRENT))
                                set_bit(port1, hub->change_bits);
 
                } else if (portstatus & USB_PORT_STAT_ENABLE) {
index a0f82cc..cefc99a 100644 (file)
@@ -3430,7 +3430,7 @@ static void dwc2_gadget_handle_incomplete_isoc_in(struct dwc2_hsotg *hsotg)
        for (idx = 1; idx < hsotg->num_of_eps; idx++) {
                hs_ep = hsotg->eps_in[idx];
                /* Proceed only unmasked ISOC EPs */
-               if (!hs_ep->isochronous || (BIT(idx) & ~daintmsk))
+               if ((BIT(idx) & ~daintmsk) || !hs_ep->isochronous)
                        continue;
 
                epctrl = dwc2_readl(hsotg->regs + DIEPCTL(idx));
@@ -3476,7 +3476,7 @@ static void dwc2_gadget_handle_incomplete_isoc_out(struct dwc2_hsotg *hsotg)
        for (idx = 1; idx < hsotg->num_of_eps; idx++) {
                hs_ep = hsotg->eps_out[idx];
                /* Proceed only unmasked ISOC EPs */
-               if (!hs_ep->isochronous || (BIT(idx) & ~daintmsk))
+               if ((BIT(idx) & ~daintmsk) || !hs_ep->isochronous)
                        continue;
 
                epctrl = dwc2_readl(hsotg->regs + DOEPCTL(idx));
@@ -3650,7 +3650,7 @@ irq_retry:
                for (idx = 1; idx < hsotg->num_of_eps; idx++) {
                        hs_ep = hsotg->eps_out[idx];
                        /* Proceed only unmasked ISOC EPs */
-                       if (!hs_ep->isochronous || (BIT(idx) & ~daintmsk))
+                       if ((BIT(idx) & ~daintmsk) || !hs_ep->isochronous)
                                continue;
 
                        epctrl = dwc2_readl(hsotg->regs + DOEPCTL(idx));
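
The reordering is not cosmetic: presumably hs_ep can be NULL for endpoints the gadget never configured, and those are exactly the masked ones, so testing the DAINTMSK bit first short-circuits before hs_ep->isochronous is dereferenced. A minimal sketch under that assumption:

    #include <stddef.h>

    struct ep { int isochronous; };

    static int skip_ep(const struct ep *e, unsigned int idx,
                       unsigned int daintmsk)
    {
            /* mask test first: for a masked idx, "e" is never touched */
            return ((1u << idx) & ~daintmsk) || !e->isochronous;
    }

    int main(void)
    {
            unsigned int daintmsk = 0;                 /* all masked */

            return skip_ep(NULL, 1, daintmsk) ? 0 : 1; /* safe */
    }
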
index b1104be..6e2cdd7 100644 (file)
@@ -2665,34 +2665,35 @@ static int dwc2_alloc_split_dma_aligned_buf(struct dwc2_hsotg *hsotg,
 
 #define DWC2_USB_DMA_ALIGN 4
 
-struct dma_aligned_buffer {
-       void *kmalloc_ptr;
-       void *old_xfer_buffer;
-       u8 data[0];
-};
-
 static void dwc2_free_dma_aligned_buffer(struct urb *urb)
 {
-       struct dma_aligned_buffer *temp;
+       void *stored_xfer_buffer;
+       size_t length;
 
        if (!(urb->transfer_flags & URB_ALIGNED_TEMP_BUFFER))
                return;
 
-       temp = container_of(urb->transfer_buffer,
-                           struct dma_aligned_buffer, data);
+       /* Restore urb->transfer_buffer from the end of the allocated area */
+       memcpy(&stored_xfer_buffer, urb->transfer_buffer +
+              urb->transfer_buffer_length, sizeof(urb->transfer_buffer));
 
-       if (usb_urb_dir_in(urb))
-               memcpy(temp->old_xfer_buffer, temp->data,
-                      urb->transfer_buffer_length);
-       urb->transfer_buffer = temp->old_xfer_buffer;
-       kfree(temp->kmalloc_ptr);
+       if (usb_urb_dir_in(urb)) {
+               if (usb_pipeisoc(urb->pipe))
+                       length = urb->transfer_buffer_length;
+               else
+                       length = urb->actual_length;
+
+               memcpy(stored_xfer_buffer, urb->transfer_buffer, length);
+       }
+       kfree(urb->transfer_buffer);
+       urb->transfer_buffer = stored_xfer_buffer;
 
        urb->transfer_flags &= ~URB_ALIGNED_TEMP_BUFFER;
 }
 
 static int dwc2_alloc_dma_aligned_buffer(struct urb *urb, gfp_t mem_flags)
 {
-       struct dma_aligned_buffer *temp, *kmalloc_ptr;
+       void *kmalloc_ptr;
        size_t kmalloc_size;
 
        if (urb->num_sgs || urb->sg ||
@@ -2700,22 +2701,29 @@ static int dwc2_alloc_dma_aligned_buffer(struct urb *urb, gfp_t mem_flags)
            !((uintptr_t)urb->transfer_buffer & (DWC2_USB_DMA_ALIGN - 1)))
                return 0;
 
-       /* Allocate a buffer with enough padding for alignment */
+       /*
+        * Allocate a buffer with enough padding for the original
+        * transfer_buffer pointer. This allocation is guaranteed to be
+        * aligned properly for DMA.
+        */
        kmalloc_size = urb->transfer_buffer_length +
-               sizeof(struct dma_aligned_buffer) + DWC2_USB_DMA_ALIGN - 1;
+               sizeof(urb->transfer_buffer);
 
        kmalloc_ptr = kmalloc(kmalloc_size, mem_flags);
        if (!kmalloc_ptr)
                return -ENOMEM;
 
-       /* Position our struct dma_aligned_buffer such that data is aligned */
-       temp = PTR_ALIGN(kmalloc_ptr + 1, DWC2_USB_DMA_ALIGN) - 1;
-       temp->kmalloc_ptr = kmalloc_ptr;
-       temp->old_xfer_buffer = urb->transfer_buffer;
+       /*
+        * Store the original urb->transfer_buffer pointer at the end of the
+        * allocation so it can be recovered later.
+        */
+       memcpy(kmalloc_ptr + urb->transfer_buffer_length,
+              &urb->transfer_buffer, sizeof(urb->transfer_buffer));
+
        if (usb_urb_dir_out(urb))
-               memcpy(temp->data, urb->transfer_buffer,
+               memcpy(kmalloc_ptr, urb->transfer_buffer,
                       urb->transfer_buffer_length);
-       urb->transfer_buffer = temp->data;
+       urb->transfer_buffer = kmalloc_ptr;
 
        urb->transfer_flags |= URB_ALIGNED_TEMP_BUFFER;
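
Instead of a wrapper struct at the head of the allocation, the rework stashes the original transfer_buffer pointer verbatim after the data area, which keeps the buffer start at kmalloc's natural DMA-safe alignment. A hypothetical userspace sketch of the stash/restore trick:

    #include <stdlib.h>
    #include <string.h>
    #include <stdio.h>

    static void *bounce_alloc(void *orig, size_t len)
    {
            void *buf = malloc(len + sizeof(orig));

            if (!buf)
                    return NULL;
            memcpy(buf, orig, len);                 /* OUT data up front */
            memcpy((char *)buf + len, &orig,
                   sizeof(orig));                   /* stash the pointer */
            return buf;
    }

    static void *bounce_free(void *buf, size_t len)
    {
            void *orig;

            memcpy(&orig, (char *)buf + len,
                   sizeof(orig));                   /* recover the pointer */
            memcpy(orig, buf, len);                 /* IN data copied back */
            free(buf);
            return orig;
    }

    int main(void)
    {
            char data[8] = "payload";
            void *b = bounce_alloc(data, sizeof(data));

            if (!b)
                    return 1;
            printf("restored %p == %p\n",
                   bounce_free(b, sizeof(data)), (void *)data);
            return 0;
    }
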
 
index ed7f05c..8ce10ca 100644 (file)
@@ -1231,7 +1231,10 @@ static void dwc2_hc_nak_intr(struct dwc2_hsotg *hsotg,
         * avoid interrupt storms we'll wait before retrying if we've got
         * several NAKs. If we didn't do this we'd retry directly from the
         * interrupt handler and could end up quickly getting another
-        * interrupt (another NAK), which we'd retry.
+        * interrupt (another NAK), which we'd retry. Note that we do not
+        * delay retries for IN parts of control requests, as those are expected
+        * to complete fairly quickly, and if we delay them we risk confusing
+        * the device and causing it to issue a STALL.
         *
         * Note that in DMA mode software only gets involved to re-send NAKed
         * transfers for split transactions, so we only need to apply this
@@ -1244,7 +1247,9 @@ static void dwc2_hc_nak_intr(struct dwc2_hsotg *hsotg,
                        qtd->error_count = 0;
                qtd->complete_split = 0;
                qtd->num_naks++;
-               qtd->qh->want_wait = qtd->num_naks >= DWC2_NAKS_BEFORE_DELAY;
+               qtd->qh->want_wait = qtd->num_naks >= DWC2_NAKS_BEFORE_DELAY &&
+                               !(chan->ep_type == USB_ENDPOINT_XFER_CONTROL &&
+                                 chan->ep_is_in);
                dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_NAK);
                goto handle_nak_done;
        }
index c77ff50..8efde17 100644 (file)
@@ -973,15 +973,12 @@ static void __dwc3_ep0_do_control_data(struct dwc3 *dwc,
                ret = dwc3_ep0_start_trans(dep);
        } else if (IS_ALIGNED(req->request.length, dep->endpoint.maxpacket) &&
                   req->request.length && req->request.zero) {
-               u32     maxpacket;
 
                ret = usb_gadget_map_request_by_dev(dwc->sysdev,
                                &req->request, dep->number);
                if (ret)
                        return;
 
-               maxpacket = dep->endpoint.maxpacket;
-
                /* prepare normal TRB */
                dwc3_ep0_prepare_one_trb(dep, req->request.dma,
                                         req->request.length,
index d2fa071..b8a1584 100644 (file)
@@ -1819,7 +1819,6 @@ unknown:
                if (cdev->use_os_string && cdev->os_desc_config &&
                    (ctrl->bRequestType & USB_TYPE_VENDOR) &&
                    ctrl->bRequest == cdev->b_vendor_code) {
-                       struct usb_request              *req;
                        struct usb_configuration        *os_desc_cfg;
                        u8                              *buf;
                        int                             interface;
index 33e2030..3ada83d 100644 (file)
@@ -3263,7 +3263,7 @@ static int ffs_func_setup(struct usb_function *f,
        __ffs_event_add(ffs, FUNCTIONFS_SETUP);
        spin_unlock_irqrestore(&ffs->ev.waitq.lock, flags);
 
-       return USB_GADGET_DELAYED_STATUS;
+       return creq->wLength == 0 ? USB_GADGET_DELAYED_STATUS : 0;
 }
 
 static bool ffs_func_req_match(struct usb_function *f,
index d2dc1f0..d582921 100644 (file)
@@ -438,14 +438,14 @@ static struct usb_descriptor_header *hs_audio_desc[] = {
 };
 
 struct cntrl_cur_lay3 {
-       __u32   dCUR;
+       __le32  dCUR;
 };
 
 struct cntrl_range_lay3 {
-       __u16   wNumSubRanges;
-       __u32   dMIN;
-       __u32   dMAX;
-       __u32   dRES;
+       __le16  wNumSubRanges;
+       __le32  dMIN;
+       __le32  dMAX;
+       __le32  dRES;
 } __packed;
 
 static void set_ep_max_packet_size(const struct f_uac2_opts *uac2_opts,
@@ -559,13 +559,13 @@ afunc_bind(struct usb_configuration *cfg, struct usb_function *fn)
        agdev->out_ep = usb_ep_autoconfig(gadget, &fs_epout_desc);
        if (!agdev->out_ep) {
                dev_err(dev, "%s:%d Error!\n", __func__, __LINE__);
-               return ret;
+               return -ENODEV;
        }
 
        agdev->in_ep = usb_ep_autoconfig(gadget, &fs_epin_desc);
        if (!agdev->in_ep) {
                dev_err(dev, "%s:%d Error!\n", __func__, __LINE__);
-               return ret;
+               return -ENODEV;
        }
 
        agdev->in_ep_maxpsize = max_t(u16,
@@ -703,9 +703,9 @@ in_rq_cur(struct usb_function *fn, const struct usb_ctrlrequest *cr)
                memset(&c, 0, sizeof(struct cntrl_cur_lay3));
 
                if (entity_id == USB_IN_CLK_ID)
-                       c.dCUR = p_srate;
+                       c.dCUR = cpu_to_le32(p_srate);
                else if (entity_id == USB_OUT_CLK_ID)
-                       c.dCUR = c_srate;
+                       c.dCUR = cpu_to_le32(c_srate);
 
                value = min_t(unsigned, w_length, sizeof c);
                memcpy(req->buf, &c, value);
@@ -742,15 +742,15 @@ in_rq_range(struct usb_function *fn, const struct usb_ctrlrequest *cr)
 
        if (control_selector == UAC2_CS_CONTROL_SAM_FREQ) {
                if (entity_id == USB_IN_CLK_ID)
-                       r.dMIN = p_srate;
+                       r.dMIN = cpu_to_le32(p_srate);
                else if (entity_id == USB_OUT_CLK_ID)
-                       r.dMIN = c_srate;
+                       r.dMIN = cpu_to_le32(c_srate);
                else
                        return -EOPNOTSUPP;
 
                r.dMAX = r.dMIN;
                r.dRES = 0;
-               r.wNumSubRanges = 1;
+               r.wNumSubRanges = cpu_to_le16(1);
 
                value = min_t(unsigned, w_length, sizeof r);
                memcpy(req->buf, &r, value);
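
These descriptor payloads are little-endian on the wire, so the fields are retyped __le16/__le32 and every store now goes through cpu_to_le16()/cpu_to_le32(). A hedged userspace equivalent of the conversion (helper name hypothetical):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* build the little-endian byte image of v regardless of host order */
    static uint32_t cpu_to_le32_(uint32_t v)
    {
            uint8_t b[4] = { (uint8_t)v, (uint8_t)(v >> 8),
                             (uint8_t)(v >> 16), (uint8_t)(v >> 24) };
            uint32_t le;

            memcpy(&le, b, sizeof(le));
            return le;   /* identity on LE hosts, byte swap on BE hosts */
    }

    int main(void)
    {
            struct { uint32_t dCUR; } c;     /* stand-in for the layout */

            c.dCUR = cpu_to_le32_(48000);    /* hypothetical sample rate */
            printf("first wire byte: 0x%02x\n",
                   ((uint8_t *)&c)[0]);      /* 0x80 on any host */
            return 0;
    }
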
index a72295c..fb5ed97 100644 (file)
@@ -32,9 +32,6 @@ struct uac_req {
 struct uac_rtd_params {
        struct snd_uac_chip *uac; /* parent chip */
        bool ep_enabled; /* if the ep is enabled */
-       /* Size of the ring buffer */
-       size_t dma_bytes;
-       unsigned char *dma_area;
 
        struct snd_pcm_substream *ss;
 
@@ -43,8 +40,6 @@ struct uac_rtd_params {
 
        void *rbuf;
 
-       size_t period_size;
-
        unsigned max_psize;     /* MaxPacketSize of endpoint */
        struct uac_req *ureq;
 
@@ -84,12 +79,12 @@ static const struct snd_pcm_hardware uac_pcm_hardware = {
 static void u_audio_iso_complete(struct usb_ep *ep, struct usb_request *req)
 {
        unsigned pending;
-       unsigned long flags;
+       unsigned long flags, flags2;
        unsigned int hw_ptr;
-       bool update_alsa = false;
        int status = req->status;
        struct uac_req *ur = req->context;
        struct snd_pcm_substream *substream;
+       struct snd_pcm_runtime *runtime;
        struct uac_rtd_params *prm = ur->pp;
        struct snd_uac_chip *uac = prm->uac;
 
@@ -111,6 +106,14 @@ static void u_audio_iso_complete(struct usb_ep *ep, struct usb_request *req)
        if (!substream)
                goto exit;
 
+       snd_pcm_stream_lock_irqsave(substream, flags2);
+
+       runtime = substream->runtime;
+       if (!runtime || !snd_pcm_running(substream)) {
+               snd_pcm_stream_unlock_irqrestore(substream, flags2);
+               goto exit;
+       }
+
        spin_lock_irqsave(&prm->lock, flags);
 
        if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
@@ -137,43 +140,46 @@ static void u_audio_iso_complete(struct usb_ep *ep, struct usb_request *req)
                req->actual = req->length;
        }
 
-       pending = prm->hw_ptr % prm->period_size;
-       pending += req->actual;
-       if (pending >= prm->period_size)
-               update_alsa = true;
-
        hw_ptr = prm->hw_ptr;
-       prm->hw_ptr = (prm->hw_ptr + req->actual) % prm->dma_bytes;
 
        spin_unlock_irqrestore(&prm->lock, flags);
 
        /* Pack USB load in ALSA ring buffer */
-       pending = prm->dma_bytes - hw_ptr;
+       pending = runtime->dma_bytes - hw_ptr;
 
        if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
                if (unlikely(pending < req->actual)) {
-                       memcpy(req->buf, prm->dma_area + hw_ptr, pending);
-                       memcpy(req->buf + pending, prm->dma_area,
+                       memcpy(req->buf, runtime->dma_area + hw_ptr, pending);
+                       memcpy(req->buf + pending, runtime->dma_area,
                               req->actual - pending);
                } else {
-                       memcpy(req->buf, prm->dma_area + hw_ptr, req->actual);
+                       memcpy(req->buf, runtime->dma_area + hw_ptr,
+                              req->actual);
                }
        } else {
                if (unlikely(pending < req->actual)) {
-                       memcpy(prm->dma_area + hw_ptr, req->buf, pending);
-                       memcpy(prm->dma_area, req->buf + pending,
+                       memcpy(runtime->dma_area + hw_ptr, req->buf, pending);
+                       memcpy(runtime->dma_area, req->buf + pending,
                               req->actual - pending);
                } else {
-                       memcpy(prm->dma_area + hw_ptr, req->buf, req->actual);
+                       memcpy(runtime->dma_area + hw_ptr, req->buf,
+                              req->actual);
                }
        }
 
+       spin_lock_irqsave(&prm->lock, flags);
+       /* update hw_ptr after data is copied to memory */
+       prm->hw_ptr = (hw_ptr + req->actual) % runtime->dma_bytes;
+       hw_ptr = prm->hw_ptr;
+       spin_unlock_irqrestore(&prm->lock, flags);
+       snd_pcm_stream_unlock_irqrestore(substream, flags2);
+
+       if ((hw_ptr % snd_pcm_lib_period_bytes(substream)) < req->actual)
+               snd_pcm_period_elapsed(substream);
+
 exit:
        if (usb_ep_queue(ep, req, GFP_ATOMIC))
                dev_err(uac->card->dev, "%d Error!\n", __LINE__);
-
-       if (update_alsa)
-               snd_pcm_period_elapsed(substream);
 }
 
 static int uac_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
@@ -236,40 +242,12 @@ static snd_pcm_uframes_t uac_pcm_pointer(struct snd_pcm_substream *substream)
 static int uac_pcm_hw_params(struct snd_pcm_substream *substream,
                               struct snd_pcm_hw_params *hw_params)
 {
-       struct snd_uac_chip *uac = snd_pcm_substream_chip(substream);
-       struct uac_rtd_params *prm;
-       int err;
-
-       if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
-               prm = &uac->p_prm;
-       else
-               prm = &uac->c_prm;
-
-       err = snd_pcm_lib_malloc_pages(substream,
+       return snd_pcm_lib_malloc_pages(substream,
                                        params_buffer_bytes(hw_params));
-       if (err >= 0) {
-               prm->dma_bytes = substream->runtime->dma_bytes;
-               prm->dma_area = substream->runtime->dma_area;
-               prm->period_size = params_period_bytes(hw_params);
-       }
-
-       return err;
 }
 
 static int uac_pcm_hw_free(struct snd_pcm_substream *substream)
 {
-       struct snd_uac_chip *uac = snd_pcm_substream_chip(substream);
-       struct uac_rtd_params *prm;
-
-       if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
-               prm = &uac->p_prm;
-       else
-               prm = &uac->c_prm;
-
-       prm->dma_area = NULL;
-       prm->dma_bytes = 0;
-       prm->period_size = 0;
-
        return snd_pcm_lib_free_pages(substream);
 }
 
@@ -595,15 +573,15 @@ int g_audio_setup(struct g_audio *g_audio, const char *pcm_name,
        if (err < 0)
                goto snd_fail;
 
-       strcpy(pcm->name, pcm_name);
+       strlcpy(pcm->name, pcm_name, sizeof(pcm->name));
        pcm->private_data = uac;
        uac->pcm = pcm;
 
        snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &uac_pcm_ops);
        snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &uac_pcm_ops);
 
-       strcpy(card->driver, card_name);
-       strcpy(card->shortname, card_name);
+       strlcpy(card->driver, card_name, sizeof(card->driver));
+       strlcpy(card->shortname, card_name, sizeof(card->shortname));
        sprintf(card->longname, "%s %i", card_name, card->dev->id);
 
        snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_CONTINUOUS,
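
With the private period bookkeeping gone, the completion handler infers a period boundary from the updated hw_ptr alone: (hw_ptr % period_bytes) < req->actual holds exactly once per period. A numeric sketch with hypothetical sizes:

    #include <stdio.h>

    int main(void)
    {
            unsigned int period = 192, actual = 48;
            unsigned int dma_bytes = 960, hw_ptr = 0;

            for (int i = 0; i < 12; i++) {
                    hw_ptr = (hw_ptr + actual) % dma_bytes;
                    if ((hw_ptr % period) < actual)  /* boundary crossed */
                            printf("period elapsed at hw_ptr=%u\n", hw_ptr);
            }
            return 0;   /* fires three times: once per 192-byte period */
    }
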
index 20ffb03..e2927fb 100644 (file)
@@ -108,6 +108,13 @@ void ast_vhub_ep0_handle_setup(struct ast_vhub_ep *ep)
        /* Check our state, cancel pending requests if needed */
        if (ep->ep0.state != ep0_state_token) {
                EPDBG(ep, "wrong state\n");
+               ast_vhub_nuke(ep, -EIO);
+
+               /*
+                * Accept the packet regardless; this seems to happen
+                * when stalling a SETUP packet that has an OUT data
+                * phase.
+                */
                ast_vhub_nuke(ep, 0);
                goto stall;
        }
@@ -212,6 +219,8 @@ static void ast_vhub_ep0_do_send(struct ast_vhub_ep *ep,
        if (chunk && req->req.buf)
                memcpy(ep->buf, req->req.buf + req->req.actual, chunk);
 
+       vhub_dma_workaround(ep->buf);
+
        /* Remember chunk size and trigger send */
        reg = VHUB_EP0_SET_TX_LEN(chunk);
        writel(reg, ep->ep0.ctlstat);
@@ -224,7 +233,7 @@ static void ast_vhub_ep0_rx_prime(struct ast_vhub_ep *ep)
        EPVDBG(ep, "rx prime\n");
 
        /* Prime endpoint for receiving data */
-       writel(VHUB_EP0_RX_BUFF_RDY, ep->ep0.ctlstat + AST_VHUB_EP0_CTRL);
+       writel(VHUB_EP0_RX_BUFF_RDY, ep->ep0.ctlstat);
 }
 
 static void ast_vhub_ep0_do_receive(struct ast_vhub_ep *ep, struct ast_vhub_req *req,
index 80c9fea..5939eb1 100644 (file)
@@ -66,11 +66,16 @@ static void ast_vhub_epn_kick(struct ast_vhub_ep *ep, struct ast_vhub_req *req)
        if (!req->req.dma) {
 
                /* For IN transfers, copy data over first */
-               if (ep->epn.is_in)
+               if (ep->epn.is_in) {
                        memcpy(ep->buf, req->req.buf + act, chunk);
+                       vhub_dma_workaround(ep->buf);
+               }
                writel(ep->buf_dma, ep->epn.regs + AST_VHUB_EP_DESC_BASE);
-       } else
+       } else {
+               if (ep->epn.is_in)
+                       vhub_dma_workaround(req->req.buf);
                writel(req->req.dma + act, ep->epn.regs + AST_VHUB_EP_DESC_BASE);
+       }
 
        /* Start DMA */
        req->active = true;
@@ -161,6 +166,7 @@ static inline unsigned int ast_vhub_count_free_descs(struct ast_vhub_ep *ep)
 static void ast_vhub_epn_kick_desc(struct ast_vhub_ep *ep,
                                   struct ast_vhub_req *req)
 {
+       struct ast_vhub_desc *desc = NULL;
        unsigned int act = req->act_count;
        unsigned int len = req->req.length;
        unsigned int chunk;
@@ -177,7 +183,6 @@ static void ast_vhub_epn_kick_desc(struct ast_vhub_ep *ep,
 
        /* While we can create descriptors */
        while (ast_vhub_count_free_descs(ep) && req->last_desc < 0) {
-               struct ast_vhub_desc *desc;
                unsigned int d_num;
 
                /* Grab next free descriptor */
@@ -227,6 +232,9 @@ static void ast_vhub_epn_kick_desc(struct ast_vhub_ep *ep,
                req->act_count = act = act + chunk;
        }
 
+       if (likely(desc))
+               vhub_dma_workaround(desc);
+
        /* Tell HW about new descriptors */
        writel(VHUB_EP_DMA_SET_CPU_WPTR(ep->epn.d_next),
               ep->epn.regs + AST_VHUB_EP_DESC_STATUS);
index 2b04025..4ed03d3 100644 (file)
@@ -462,6 +462,39 @@ enum std_req_rc {
 #define DDBG(d, fmt, ...)      do { } while(0)
 #endif
 
+static inline void vhub_dma_workaround(void *addr)
+{
+       /*
+        * This works around a confirmed HW issue with the Aspeed chip.
+        *
+        * The core uses a different bus to memory than the AHB going to
+        * the USB device controller. Due to the latter having a higher
+        * priority than the core for arbitration on that bus, it's
+        * possible for an MMIO write to the device, followed by a DMA by
+        * the device from memory, to both be performed and serviced before
+        * a previous store to memory has completed.
+        *
+        * Thus the following scenario can happen:
+        *
+        *    - Driver writes to a DMA descriptor (Mbus)
+        *    - Driver writes to the MMIO register to start the DMA (AHB)
+        *    - The gadget sees the second write and sends a read of the
+        *      descriptor to the memory controller (Mbus)
+        *    - The gadget hits memory before the descriptor write
+        *      causing it to read an obsolete value.
+        *
+        * Thankfully the problem is limited to the USB gadget device; other
+        * masters in the SoC all have a lower priority than the core, thus
+        * ensuring that the store by the core arrives first.
+        *
+        * The workaround consists of using a dummy read of the memory before
+        * doing the MMIO writes. This will ensure that the previous writes
+        * have been "pushed out".
+        */
+       mb();
+       (void)__raw_readl((void __iomem *)addr);
+}
+
 /* core.c */
 void ast_vhub_done(struct ast_vhub_ep *ep, struct ast_vhub_req *req,
                   int status);
index a3ecce6..11e25a3 100644 (file)
@@ -832,11 +832,11 @@ static void init_controller(struct r8a66597 *r8a66597)
 
                r8a66597_bset(r8a66597, XCKE, SYSCFG0);
 
-               msleep(3);
+               mdelay(3);
 
                r8a66597_bset(r8a66597, PLLC, SYSCFG0);
 
-               msleep(1);
+               mdelay(1);
 
                r8a66597_bset(r8a66597, SCKE, SYSCFG0);
 
@@ -1190,7 +1190,7 @@ __acquires(r8a66597->lock)
        r8a66597->ep0_req->length = 2;
        /* AV: what happens if we get called again before that gets through? */
        spin_unlock(&r8a66597->lock);
-       r8a66597_queue(r8a66597->gadget.ep0, r8a66597->ep0_req, GFP_KERNEL);
+       r8a66597_queue(r8a66597->gadget.ep0, r8a66597->ep0_req, GFP_ATOMIC);
        spin_lock(&r8a66597->lock);
 }
 
index 2f4850f..68e6132 100644 (file)
@@ -3051,6 +3051,7 @@ static void xhci_endpoint_reset(struct usb_hcd *hcd,
        if (!list_empty(&ep->ring->td_list)) {
                dev_err(&udev->dev, "EP not empty, refuse reset\n");
                spin_unlock_irqrestore(&xhci->lock, flags);
+               xhci_free_command(xhci, cfg_cmd);
                goto cleanup;
        }
        xhci_queue_stop_endpoint(xhci, stop_cmd, udev->slot_id, ep_index, 0);
index 900875f..f7c96d2 100644 (file)
@@ -861,6 +861,7 @@ int usb_otg_start(struct platform_device *pdev)
        if (pdata->init && pdata->init(pdev) != 0)
                return -EINVAL;
 
+#ifdef CONFIG_PPC32
        if (pdata->big_endian_mmio) {
                _fsl_readl = _fsl_readl_be;
                _fsl_writel = _fsl_writel_be;
@@ -868,6 +869,7 @@ int usb_otg_start(struct platform_device *pdev)
                _fsl_readl = _fsl_readl_le;
                _fsl_writel = _fsl_writel_le;
        }
+#endif
 
        /* request irq */
        p_otg->irq = platform_get_irq(pdev, 0);
@@ -958,7 +960,7 @@ int usb_otg_start(struct platform_device *pdev)
 /*
  * state file in sysfs
  */
-static int show_fsl_usb2_otg_state(struct device *dev,
+static ssize_t show_fsl_usb2_otg_state(struct device *dev,
                                   struct device_attribute *attr, char *buf)
 {
        struct otg_fsm *fsm = &fsl_otg_dev->fsm;
index 150f436..d1d2025 100644 (file)
@@ -2140,7 +2140,7 @@ static unsigned int tcpm_pd_select_pps_apdo(struct tcpm_port *port)
                         * PPS APDO. Again skip the first sink PDO as this will
                         * always be 5V 3A.
                         */
-                       for (j = i; j < port->nr_snk_pdo; j++) {
+                       for (j = 1; j < port->nr_snk_pdo; j++) {
                                pdo = port->snk_pdo[j];
 
                                switch (pdo_type(pdo)) {
index a502f1a..ed31145 100644 (file)
@@ -1560,9 +1560,12 @@ int vhost_init_device_iotlb(struct vhost_dev *d, bool enabled)
        d->iotlb = niotlb;
 
        for (i = 0; i < d->nvqs; ++i) {
-               mutex_lock(&d->vqs[i]->mutex);
-               d->vqs[i]->iotlb = niotlb;
-               mutex_unlock(&d->vqs[i]->mutex);
+               struct vhost_virtqueue *vq = d->vqs[i];
+
+               mutex_lock(&vq->mutex);
+               vq->iotlb = niotlb;
+               __vhost_vq_meta_reset(vq);
+               mutex_unlock(&vq->mutex);
        }
 
        vhost_umem_clean(oiotlb);
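
The iotlb swap now also resets each virtqueue's cached metadata under its own mutex, so no lookup derived from the old iotlb survives the swap. A generic userspace sketch of the pattern (types hypothetical):

    #include <pthread.h>
    #include <stddef.h>

    struct vq {
            pthread_mutex_t mutex;
            void *iotlb;
            void *meta_cache;   /* lookups cached from the old iotlb */
    };

    static void swap_iotlb(struct vq *vqs, size_t n, void *niotlb)
    {
            for (size_t i = 0; i < n; i++) {
                    pthread_mutex_lock(&vqs[i].mutex);
                    vqs[i].iotlb = niotlb;
                    vqs[i].meta_cache = NULL;  /* drop stale metadata */
                    pthread_mutex_unlock(&vqs[i].mutex);
            }
    }

    int main(void)
    {
            struct vq vqs[2] = {
                    { PTHREAD_MUTEX_INITIALIZER, NULL, (void *)1 },
                    { PTHREAD_MUTEX_INITIALIZER, NULL, (void *)1 },
            };
            int niotlb;

            swap_iotlb(vqs, 2, &niotlb);
            return vqs[0].meta_cache || vqs[1].meta_cache;  /* both gone */
    }
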
index 46a4484..c6f78d2 100644 (file)
@@ -20,7 +20,7 @@
 #include <drm/drm_connector.h>  /* For DRM_MODE_PANEL_ORIENTATION_* */
 
 static bool request_mem_succeeded = false;
-static bool nowc = false;
+static u64 mem_flags = EFI_MEMORY_WC | EFI_MEMORY_UC;
 
 static struct fb_var_screeninfo efifb_defined = {
        .activate               = FB_ACTIVATE_NOW,
@@ -68,8 +68,12 @@ static int efifb_setcolreg(unsigned regno, unsigned red, unsigned green,
 
 static void efifb_destroy(struct fb_info *info)
 {
-       if (info->screen_base)
-               iounmap(info->screen_base);
+       if (info->screen_base) {
+               if (mem_flags & (EFI_MEMORY_UC | EFI_MEMORY_WC))
+                       iounmap(info->screen_base);
+               else
+                       memunmap(info->screen_base);
+       }
        if (request_mem_succeeded)
                release_mem_region(info->apertures->ranges[0].base,
                                   info->apertures->ranges[0].size);
@@ -104,7 +108,7 @@ static int efifb_setup(char *options)
                        else if (!strncmp(this_opt, "width:", 6))
                                screen_info.lfb_width = simple_strtoul(this_opt+6, NULL, 0);
                        else if (!strcmp(this_opt, "nowc"))
-                               nowc = true;
+                               mem_flags &= ~EFI_MEMORY_WC;
                }
        }
 
@@ -164,6 +168,7 @@ static int efifb_probe(struct platform_device *dev)
        unsigned int size_remap;
        unsigned int size_total;
        char *option = NULL;
+       efi_memory_desc_t md;
 
        if (screen_info.orig_video_isVGA != VIDEO_TYPE_EFI || pci_dev_disabled)
                return -ENODEV;
@@ -272,12 +277,35 @@ static int efifb_probe(struct platform_device *dev)
        info->apertures->ranges[0].base = efifb_fix.smem_start;
        info->apertures->ranges[0].size = size_remap;
 
-       if (nowc)
-               info->screen_base = ioremap(efifb_fix.smem_start, efifb_fix.smem_len);
-       else
-               info->screen_base = ioremap_wc(efifb_fix.smem_start, efifb_fix.smem_len);
+       if (!efi_mem_desc_lookup(efifb_fix.smem_start, &md)) {
+               if ((efifb_fix.smem_start + efifb_fix.smem_len) >
+                   (md.phys_addr + (md.num_pages << EFI_PAGE_SHIFT))) {
+                       pr_err("efifb: video memory @ 0x%lx spans multiple EFI memory regions\n",
+                              efifb_fix.smem_start);
+                       err = -EIO;
+                       goto err_release_fb;
+               }
+               /*
+                * If the UEFI memory map covers the efifb region, we may only
+                * remap it using the attributes the memory map prescribes.
+                */
+               mem_flags |= EFI_MEMORY_WT | EFI_MEMORY_WB;
+               mem_flags &= md.attribute;
+       }
+       if (mem_flags & EFI_MEMORY_WC)
+               info->screen_base = ioremap_wc(efifb_fix.smem_start,
+                                              efifb_fix.smem_len);
+       else if (mem_flags & EFI_MEMORY_UC)
+               info->screen_base = ioremap(efifb_fix.smem_start,
+                                           efifb_fix.smem_len);
+       else if (mem_flags & EFI_MEMORY_WT)
+               info->screen_base = memremap(efifb_fix.smem_start,
+                                            efifb_fix.smem_len, MEMREMAP_WT);
+       else if (mem_flags & EFI_MEMORY_WB)
+               info->screen_base = memremap(efifb_fix.smem_start,
+                                            efifb_fix.smem_len, MEMREMAP_WB);
        if (!info->screen_base) {
-               pr_err("efifb: abort, cannot ioremap video memory 0x%x @ 0x%lx\n",
+               pr_err("efifb: abort, cannot remap video memory 0x%x @ 0x%lx\n",
                        efifb_fix.smem_len, efifb_fix.smem_start);
                err = -EIO;
                goto err_release_fb;
@@ -371,7 +399,10 @@ err_fb_dealoc:
 err_groups:
        sysfs_remove_groups(&dev->dev.kobj, efifb_groups);
 err_unmap:
-       iounmap(info->screen_base);
+       if (mem_flags & (EFI_MEMORY_UC | EFI_MEMORY_WC))
+               iounmap(info->screen_base);
+       else
+               memunmap(info->screen_base);
 err_release_fb:
        framebuffer_release(info);
 err_release_mem:
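
The probe path now intersects the driver's preferred mapping attributes with what the EFI memory map allows, then picks the first usable one. A hedged userspace sketch of that mask-then-select logic (bit values hypothetical):

    #include <stdio.h>

    /* hypothetical stand-ins for the EFI_MEMORY_* attribute bits */
    #define MEM_UC 0x1u
    #define MEM_WC 0x2u
    #define MEM_WT 0x4u
    #define MEM_WB 0x8u

    int main(void)
    {
            unsigned int mem_flags = MEM_WC | MEM_UC;    /* driver default */
            unsigned int md_attribute = MEM_WB | MEM_WT; /* map allows */

            mem_flags |= MEM_WT | MEM_WB;
            mem_flags &= md_attribute;  /* only attributes the map permits */

            if (mem_flags & MEM_WC)
                    puts("ioremap_wc");
            else if (mem_flags & MEM_UC)
                    puts("ioremap");
            else if (mem_flags & MEM_WT)
                    puts("memremap WT");   /* selected in this example */
            else if (mem_flags & MEM_WB)
                    puts("memremap WB");
            else
                    puts("no usable mapping");
            return 0;
    }
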
index 6b237e3..3988c09 100644 (file)
@@ -513,7 +513,9 @@ static int virtballoon_migratepage(struct balloon_dev_info *vb_dev_info,
        tell_host(vb, vb->inflate_vq);
 
        /* balloon's page migration 2nd step -- deflate "page" */
+       spin_lock_irqsave(&vb_dev_info->pages_lock, flags);
        balloon_page_delete(page);
+       spin_unlock_irqrestore(&vb_dev_info->pages_lock, flags);
        vb->num_pfns = VIRTIO_BALLOON_PAGES_PER_PAGE;
        set_page_pfns(vb, vb->pfns, page);
        tell_host(vb, vb->deflate_vq);
index 0dd87aa..aba2541 100644 (file)
@@ -221,7 +221,7 @@ __blkdev_direct_IO_simple(struct kiocb *iocb, struct iov_iter *iter,
 
        ret = bio_iov_iter_get_pages(&bio, iter);
        if (unlikely(ret))
-               return ret;
+               goto out;
        ret = bio.bi_iter.bi_size;
 
        if (iov_iter_rw(iter) == READ) {
@@ -250,12 +250,13 @@ __blkdev_direct_IO_simple(struct kiocb *iocb, struct iov_iter *iter,
                put_page(bvec->bv_page);
        }
 
-       if (vecs != inline_vecs)
-               kfree(vecs);
-
        if (unlikely(bio.bi_status))
                ret = blk_status_to_errno(bio.bi_status);
 
+out:
+       if (vecs != inline_vecs)
+               kfree(vecs);
+
        bio_uninit(&bio);
 
        return ret;
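
The fix routes the early bio_iov_iter_get_pages() failure through the same exit path, so vecs is freed on every return. A generic sketch of the single-exit cleanup idiom:

    #include <stdlib.h>

    static int do_io(int fail_early)
    {
            int ret = 0;
            void *vecs = malloc(64);  /* stands in for the bio vectors */

            if (!vecs)
                    return -1;
            if (fail_early) {
                    ret = -1;
                    goto out;         /* an early return here leaked vecs */
            }
            /* ... submit and wait for the bio here ... */
    out:
            free(vecs);
            return ret;
    }

    int main(void)
    {
            return do_io(1) == -1 ? 0 : 1;  /* error path no longer leaks */
    }
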
index d9f0010..4a717d4 100644 (file)
@@ -218,7 +218,8 @@ static int cachefiles_daemon_add_cache(struct cachefiles_cache *cache)
                           "%s",
                           fsdef->dentry->d_sb->s_id);
 
-       fscache_object_init(&fsdef->fscache, NULL, &cache->cache);
+       fscache_object_init(&fsdef->fscache, &fscache_fsdef_index,
+                           &cache->cache);
 
        ret = fscache_add_cache(&cache->cache, &fsdef->fscache, cache->tag);
        if (ret < 0)
index ab0bbe9..af2b17b 100644 (file)
@@ -186,12 +186,12 @@ try_again:
         * need to wait for it to be destroyed */
 wait_for_old_object:
        trace_cachefiles_wait_active(object, dentry, xobject);
+       clear_bit(CACHEFILES_OBJECT_ACTIVE, &object->flags);
 
        if (fscache_object_is_live(&xobject->fscache)) {
                pr_err("\n");
                pr_err("Error: Unexpected object collision\n");
                cachefiles_printk_object(object, xobject);
-               BUG();
        }
        atomic_inc(&xobject->usage);
        write_unlock(&cache->active_lock);
@@ -248,7 +248,6 @@ wait_for_old_object:
        goto try_again;
 
 requeue:
-       clear_bit(CACHEFILES_OBJECT_ACTIVE, &object->flags);
        cache->cache.ops->put_object(&xobject->fscache, cachefiles_obj_put_wait_timeo);
        _leave(" = -ETIMEDOUT");
        return -ETIMEDOUT;
index 5082c8a..40f7595 100644 (file)
@@ -27,6 +27,7 @@ static int cachefiles_read_waiter(wait_queue_entry_t *wait, unsigned mode,
        struct cachefiles_one_read *monitor =
                container_of(wait, struct cachefiles_one_read, monitor);
        struct cachefiles_object *object;
+       struct fscache_retrieval *op = monitor->op;
        struct wait_bit_key *key = _key;
        struct page *page = wait->private;
 
@@ -51,16 +52,22 @@ static int cachefiles_read_waiter(wait_queue_entry_t *wait, unsigned mode,
        list_del(&wait->entry);
 
        /* move onto the action list and queue for FS-Cache thread pool */
-       ASSERT(monitor->op);
+       ASSERT(op);
 
-       object = container_of(monitor->op->op.object,
-                             struct cachefiles_object, fscache);
+       /* We need to temporarily bump the usage count as we don't own a ref
+        * here; otherwise cachefiles_read_copier() may free the op between the
+        * monitor being enqueued on the op->to_do list and the op getting
+        * enqueued on the work queue.
+        */
+       fscache_get_retrieval(op);
 
+       object = container_of(op->op.object, struct cachefiles_object, fscache);
        spin_lock(&object->work_lock);
-       list_add_tail(&monitor->op_link, &monitor->op->to_do);
+       list_add_tail(&monitor->op_link, &op->to_do);
        spin_unlock(&object->work_lock);
 
-       fscache_enqueue_retrieval(monitor->op);
+       fscache_enqueue_retrieval(op);
+       fscache_put_retrieval(op);
        return 0;
 }
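
The temporary fscache_get_retrieval()/fscache_put_retrieval() pair pins the op across the hand-off so a racing completion cannot free it between the two enqueue steps. A generic userspace sketch of the pin-across-handoff pattern (names hypothetical):

    #include <stdatomic.h>
    #include <stdlib.h>

    struct op {
            atomic_int usage;
            /* ... payload ... */
    };

    static void op_put(struct op *o)
    {
            if (atomic_fetch_sub(&o->usage, 1) == 1)
                    free(o);
    }

    static void handoff(struct op *o)
    {
            atomic_fetch_add(&o->usage, 1);  /* pin: we own no ref here */
            /* ... queue on the to_do list, then enqueue the work item;
             * a racing completion may drop the owner's ref meanwhile ... */
            op_put(o);                       /* release the temporary pin */
    }

    int main(void)
    {
            struct op *o = malloc(sizeof(*o));

            if (!o)
                    return 1;
            atomic_init(&o->usage, 1);       /* the owner's reference */
            handoff(o);
            op_put(o);                       /* owner drops its reference */
            return 0;
    }
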
 
index 0e8e5de..ceb7b49 100644 (file)
@@ -358,14 +358,11 @@ static void dentry_unlink_inode(struct dentry * dentry)
        __releases(dentry->d_inode->i_lock)
 {
        struct inode *inode = dentry->d_inode;
-       bool hashed = !d_unhashed(dentry);
 
-       if (hashed)
-               raw_write_seqcount_begin(&dentry->d_seq);
+       raw_write_seqcount_begin(&dentry->d_seq);
        __d_clear_type_and_inode(dentry);
        hlist_del_init(&dentry->d_u.d_alias);
-       if (hashed)
-               raw_write_seqcount_end(&dentry->d_seq);
+       raw_write_seqcount_end(&dentry->d_seq);
        spin_unlock(&dentry->d_lock);
        spin_unlock(&inode->i_lock);
        if (!inode->i_nlink)
@@ -1932,10 +1929,12 @@ struct dentry *d_make_root(struct inode *root_inode)
 
        if (root_inode) {
                res = d_alloc_anon(root_inode->i_sb);
-               if (res)
+               if (res) {
+                       res->d_flags |= DCACHE_RCUACCESS;
                        d_instantiate(res, root_inode);
-               else
+               } else {
                        iput(root_inode);
+               }
        }
        return res;
 }
index 71fcccc..8c6ab6c 100644 (file)
@@ -86,7 +86,9 @@ static int efivarfs_create(struct inode *dir, struct dentry *dentry,
        /* length of the variable name itself: remove GUID and separator */
        namelen = dentry->d_name.len - EFI_VARIABLE_GUID_LEN - 1;
 
-       uuid_le_to_bin(dentry->d_name.name + namelen + 1, &var->var.VendorGuid);
+       err = guid_parse(dentry->d_name.name + namelen + 1, &var->var.VendorGuid);
+       if (err)
+               goto out;
 
        if (efivar_variable_is_removable(var->var.VendorGuid,
                                         dentry->d_name.name, namelen))
index 72e961a..bdd0eac 100644 (file)
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -293,6 +293,7 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
        bprm->vma = vma = vm_area_alloc(mm);
        if (!vma)
                return -ENOMEM;
+       vma_set_anonymous(vma);
 
        if (down_write_killable(&mm->mmap_sem)) {
                err = -EINTR;
index e68cefe..aa52d87 100644 (file)
@@ -368,6 +368,8 @@ static int ext4_validate_block_bitmap(struct super_block *sb,
                return -EFSCORRUPTED;
 
        ext4_lock_group(sb, block_group);
+       if (buffer_verified(bh))
+               goto verified;
        if (unlikely(!ext4_block_bitmap_csum_verify(sb, block_group,
                        desc, bh))) {
                ext4_unlock_group(sb, block_group);
@@ -386,6 +388,7 @@ static int ext4_validate_block_bitmap(struct super_block *sb,
                return -EFSCORRUPTED;
        }
        set_buffer_verified(bh);
+verified:
        ext4_unlock_group(sb, block_group);
        return 0;
 }
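
The validator (like the matching inode-bitmap one below) now re-tests buffer_verified() after taking the group lock, so a bitmap a racing CPU already verified is not checksummed, or flagged, twice. A generic sketch of the recheck-under-lock idiom:

    #include <pthread.h>
    #include <stdbool.h>

    static pthread_mutex_t group_lock = PTHREAD_MUTEX_INITIALIZER;
    static bool verified;

    static int validate_bitmap(void)
    {
            int ret = 0;

            pthread_mutex_lock(&group_lock);
            if (verified)
                    goto out;         /* another thread won the race */
            /* ... expensive checksum verification would run here ... */
            verified = true;
    out:
            pthread_mutex_unlock(&group_lock);
            return ret;
    }

    int main(void)
    {
            return validate_bitmap() || validate_bitmap();  /* 2nd: no-op */
    }
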
index fb83750..f336cbc 100644 (file)
@@ -90,6 +90,8 @@ static int ext4_validate_inode_bitmap(struct super_block *sb,
                return -EFSCORRUPTED;
 
        ext4_lock_group(sb, block_group);
+       if (buffer_verified(bh))
+               goto verified;
        blk = ext4_inode_bitmap(sb, desc);
        if (!ext4_inode_bitmap_csum_verify(sb, block_group, desc, bh,
                                           EXT4_INODES_PER_GROUP(sb) / 8)) {
@@ -101,6 +103,7 @@ static int ext4_validate_inode_bitmap(struct super_block *sb,
                return -EFSBADCRC;
        }
        set_buffer_verified(bh);
+verified:
        ext4_unlock_group(sb, block_group);
        return 0;
 }
@@ -1385,7 +1388,10 @@ int ext4_init_inode_table(struct super_block *sb, ext4_group_t group,
                            ext4_itable_unused_count(sb, gdp)),
                            sbi->s_inodes_per_block);
 
-       if ((used_blks < 0) || (used_blks > sbi->s_itb_per_group)) {
+       if ((used_blks < 0) || (used_blks > sbi->s_itb_per_group) ||
+           ((group == 0) && ((EXT4_INODES_PER_GROUP(sb) -
+                              ext4_itable_unused_count(sb, gdp)) <
+                             EXT4_FIRST_INO(sb)))) {
                ext4_error(sb, "Something is wrong with group %u: "
                           "used itable blocks: %d; "
                           "itable unused count: %u",
index e55a8bc..3543fe8 100644 (file)
@@ -682,6 +682,10 @@ int ext4_try_to_write_inline_data(struct address_space *mapping,
                goto convert;
        }
 
+       ret = ext4_journal_get_write_access(handle, iloc.bh);
+       if (ret)
+               goto out;
+
        flags |= AOP_FLAG_NOFS;
 
        page = grab_cache_page_write_begin(mapping, 0, flags);
@@ -710,7 +714,7 @@ int ext4_try_to_write_inline_data(struct address_space *mapping,
 out_up_read:
        up_read(&EXT4_I(inode)->xattr_sem);
 out:
-       if (handle)
+       if (handle && (ret != 1))
                ext4_journal_stop(handle);
        brelse(iloc.bh);
        return ret;
@@ -752,6 +756,7 @@ int ext4_write_inline_data_end(struct inode *inode, loff_t pos, unsigned len,
 
        ext4_write_unlock_xattr(inode, &no_expand);
        brelse(iloc.bh);
+       mark_inode_dirty(inode);
 out:
        return copied;
 }
@@ -898,7 +903,6 @@ retry_journal:
                goto out;
        }
 
-
        page = grab_cache_page_write_begin(mapping, 0, flags);
        if (!page) {
                ret = -ENOMEM;
@@ -916,6 +920,9 @@ retry_journal:
                if (ret < 0)
                        goto out_release_page;
        }
+       ret = ext4_journal_get_write_access(handle, iloc.bh);
+       if (ret)
+               goto out_release_page;
 
        up_read(&EXT4_I(inode)->xattr_sem);
        *pagep = page;
@@ -936,7 +943,6 @@ int ext4_da_write_inline_data_end(struct inode *inode, loff_t pos,
                                  unsigned len, unsigned copied,
                                  struct page *page)
 {
-       int i_size_changed = 0;
        int ret;
 
        ret = ext4_write_inline_data_end(inode, pos, len, copied, page);
@@ -954,10 +960,8 @@ int ext4_da_write_inline_data_end(struct inode *inode, loff_t pos,
         * But it's important to update i_size while still holding page lock:
         * page writeout could otherwise come in and zero beyond i_size.
         */
-       if (pos+copied > inode->i_size) {
+       if (pos+copied > inode->i_size)
                i_size_write(inode, pos+copied);
-               i_size_changed = 1;
-       }
        unlock_page(page);
        put_page(page);
 
@@ -967,8 +971,7 @@ int ext4_da_write_inline_data_end(struct inode *inode, loff_t pos,
         * ordering of page lock and transaction start for journaling
         * filesystems.
         */
-       if (i_size_changed)
-               mark_inode_dirty(inode);
+       mark_inode_dirty(inode);
 
        return copied;
 }
index 7d6c100..4efe772 100644 (file)
@@ -1389,9 +1389,10 @@ static int ext4_write_end(struct file *file,
        loff_t old_size = inode->i_size;
        int ret = 0, ret2;
        int i_size_changed = 0;
+       int inline_data = ext4_has_inline_data(inode);
 
        trace_ext4_write_end(inode, pos, len, copied);
-       if (ext4_has_inline_data(inode)) {
+       if (inline_data) {
                ret = ext4_write_inline_data_end(inode, pos, len,
                                                 copied, page);
                if (ret < 0) {
@@ -1419,7 +1420,7 @@ static int ext4_write_end(struct file *file,
         * ordering of page lock and transaction start for journaling
         * filesystems.
         */
-       if (i_size_changed)
+       if (i_size_changed || inline_data)
                ext4_mark_inode_dirty(handle, inode);
 
        if (pos + len > inode->i_size && ext4_can_truncate(inode))
@@ -1493,6 +1494,7 @@ static int ext4_journalled_write_end(struct file *file,
        int partial = 0;
        unsigned from, to;
        int size_changed = 0;
+       int inline_data = ext4_has_inline_data(inode);
 
        trace_ext4_journalled_write_end(inode, pos, len, copied);
        from = pos & (PAGE_SIZE - 1);
@@ -1500,7 +1502,7 @@ static int ext4_journalled_write_end(struct file *file,
 
        BUG_ON(!ext4_handle_valid(handle));
 
-       if (ext4_has_inline_data(inode)) {
+       if (inline_data) {
                ret = ext4_write_inline_data_end(inode, pos, len,
                                                 copied, page);
                if (ret < 0) {
@@ -1531,7 +1533,7 @@ static int ext4_journalled_write_end(struct file *file,
        if (old_size < pos)
                pagecache_isize_extended(inode, old_size, pos);
 
-       if (size_changed) {
+       if (size_changed || inline_data) {
                ret2 = ext4_mark_inode_dirty(handle, inode);
                if (!ret)
                        ret = ret2;
@@ -2028,11 +2030,7 @@ static int __ext4_journalled_writepage(struct page *page,
        }
 
        if (inline_data) {
-               BUFFER_TRACE(inode_bh, "get write access");
-               ret = ext4_journal_get_write_access(handle, inode_bh);
-
-               err = ext4_handle_dirty_metadata(handle, inode, inode_bh);
-
+               ret = ext4_mark_inode_dirty(handle, inode);
        } else {
                ret = ext4_walk_page_buffers(handle, page_bufs, 0, len, NULL,
                                             do_journal_get_write_access);
index 27b9a76..638ad47 100644 (file)
@@ -186,11 +186,8 @@ static int kmmpd(void *data)
                        goto exit_thread;
                }
 
-               if (sb_rdonly(sb)) {
-                       ext4_warning(sb, "kmmpd being stopped since filesystem "
-                                    "has been remounted as readonly.");
-                       goto exit_thread;
-               }
+               if (sb_rdonly(sb))
+                       break;
 
                diff = jiffies - last_update_time;
                if (diff < mmp_update_interval * HZ)
index ba2396a..b7f7922 100644 (file)
@@ -2342,7 +2342,7 @@ static int ext4_check_descriptors(struct super_block *sb,
        struct ext4_sb_info *sbi = EXT4_SB(sb);
        ext4_fsblk_t first_block = le32_to_cpu(sbi->s_es->s_first_data_block);
        ext4_fsblk_t last_block;
-       ext4_fsblk_t last_bg_block = sb_block + ext4_bg_num_gdb(sb, 0) + 1;
+       ext4_fsblk_t last_bg_block = sb_block + ext4_bg_num_gdb(sb, 0);
        ext4_fsblk_t block_bitmap;
        ext4_fsblk_t inode_bitmap;
        ext4_fsblk_t inode_table;
@@ -3141,14 +3141,8 @@ static ext4_group_t ext4_has_uninit_itable(struct super_block *sb)
                if (!gdp)
                        continue;
 
-               if (gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_ZEROED))
-                       continue;
-               if (group != 0)
+               if (!(gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_ZEROED)))
                        break;
-               ext4_error(sb, "Inode table for bg 0 marked as "
-                          "needing zeroing");
-               if (sb_rdonly(sb))
-                       return ngroups;
        }
 
        return group;
@@ -4085,14 +4079,13 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
                        goto failed_mount2;
                }
        }
+       sbi->s_gdb_count = db_count;
        if (!ext4_check_descriptors(sb, logical_sb_block, &first_not_zeroed)) {
                ext4_msg(sb, KERN_ERR, "group descriptors corrupted!");
                ret = -EFSCORRUPTED;
                goto failed_mount2;
        }
 
-       sbi->s_gdb_count = db_count;
-
        timer_setup(&sbi->s_err_report, print_daily_error_info, 0);
 
        /* Register extent status tree shrinker */
@@ -5213,6 +5206,8 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data)
 
                        if (sbi->s_journal)
                                ext4_mark_recovery_complete(sb, es);
+                       if (sbi->s_mmp_tsk)
+                               kthread_stop(sbi->s_mmp_tsk);
                } else {
                        /* Make sure we can mount this feature set readwrite */
                        if (ext4_has_feature_readonly(sb) ||
index c184c5a..cdcb376 100644 (file)
@@ -220,6 +220,7 @@ int fscache_add_cache(struct fscache_cache *cache,
 {
        struct fscache_cache_tag *tag;
 
+       ASSERTCMP(ifsdef->cookie, ==, &fscache_fsdef_index);
        BUG_ON(!cache->ops);
        BUG_ON(!ifsdef);
 
@@ -248,7 +249,6 @@ int fscache_add_cache(struct fscache_cache *cache,
        if (!cache->kobj)
                goto error;
 
-       ifsdef->cookie = &fscache_fsdef_index;
        ifsdef->cache = cache;
        cache->fsdef = ifsdef;
 
index 97137d7..83bfe04 100644 (file)
@@ -516,6 +516,7 @@ static int fscache_alloc_object(struct fscache_cache *cache,
                goto error;
        }
 
+       ASSERTCMP(object->cookie, ==, cookie);
        fscache_stat(&fscache_n_object_alloc);
 
        object->debug_id = atomic_inc_return(&fscache_object_debug_id);
@@ -571,6 +572,8 @@ static int fscache_attach_object(struct fscache_cookie *cookie,
 
        _enter("{%s},{OBJ%x}", cookie->def->name, object->debug_id);
 
+       ASSERTCMP(object->cookie, ==, cookie);
+
        spin_lock(&cookie->lock);
 
        /* there may be multiple initial creations of this object, but we only
@@ -610,9 +613,7 @@ static int fscache_attach_object(struct fscache_cookie *cookie,
                spin_unlock(&cache->object_list_lock);
        }
 
-       /* attach to the cookie */
-       object->cookie = cookie;
-       fscache_cookie_get(cookie, fscache_cookie_get_attach_object);
+       /* Attach to the cookie.  The object already has a ref on it. */
        hlist_add_head(&object->cookie_link, &cookie->backing_objects);
 
        fscache_objlist_add(object);
index 20e0d0a..9edc920 100644 (file)
@@ -327,6 +327,7 @@ void fscache_object_init(struct fscache_object *object,
        object->store_limit_l = 0;
        object->cache = cache;
        object->cookie = cookie;
+       fscache_cookie_get(cookie, fscache_cookie_get_attach_object);
        object->parent = NULL;
 #ifdef CONFIG_FSCACHE_OBJECT_LIST
        RB_CLEAR_NODE(&object->objlist_link);
index e30c597..8d26579 100644 (file)
@@ -70,7 +70,8 @@ void fscache_enqueue_operation(struct fscache_operation *op)
        ASSERT(op->processor != NULL);
        ASSERT(fscache_object_is_available(op->object));
        ASSERTCMP(atomic_read(&op->usage), >, 0);
-       ASSERTCMP(op->state, ==, FSCACHE_OP_ST_IN_PROGRESS);
+       ASSERTIFCMP(op->state != FSCACHE_OP_ST_IN_PROGRESS,
+                   op->state, ==, FSCACHE_OP_ST_CANCELLED);
 
        fscache_stat(&fscache_n_op_enqueue);
        switch (op->flags & FSCACHE_OP_TYPE) {
@@ -499,7 +500,8 @@ void fscache_put_operation(struct fscache_operation *op)
        struct fscache_cache *cache;
 
        _enter("{OBJ%x OP%x,%d}",
-              op->object->debug_id, op->debug_id, atomic_read(&op->usage));
+              op->object ? op->object->debug_id : 0,
+              op->debug_id, atomic_read(&op->usage));
 
        ASSERTCMP(atomic_read(&op->usage), >, 0);
 
index d508c78..40d4c66 100644 (file)
@@ -411,6 +411,7 @@ static void remove_inode_hugepages(struct inode *inode, loff_t lstart,
        bool truncate_op = (lend == LLONG_MAX);
 
        memset(&pseudo_vma, 0, sizeof(struct vm_area_struct));
+       vma_init(&pseudo_vma, current->mm);
        pseudo_vma.vm_flags = (VM_HUGETLB | VM_MAYSHARE | VM_SHARED);
        pagevec_init(&pvec);
        next = start;
@@ -595,6 +596,7 @@ static long hugetlbfs_fallocate(struct file *file, int mode, loff_t offset,
         * as input to create an allocation policy.
         */
        memset(&pseudo_vma, 0, sizeof(struct vm_area_struct));
+       vma_init(&pseudo_vma, mm);
        pseudo_vma.vm_flags = (VM_HUGETLB | VM_MAYSHARE | VM_SHARED);
        pseudo_vma.vm_file = file;
 
index 77397b5..0d0bd88 100644 (file)
@@ -1443,7 +1443,7 @@ iomap_bmap(struct address_space *mapping, sector_t bno,
                const struct iomap_ops *ops)
 {
        struct inode *inode = mapping->host;
-       loff_t pos = bno >> inode->i_blkbits;
+       loff_t pos = bno << inode->i_blkbits;
        unsigned blocksize = i_blocksize(inode);
 
        if (filemap_write_and_wait(mapping))
index 395c4c0..1682a87 100644 (file)
@@ -115,6 +115,13 @@ struct dinode {
                                        dxd_t _dxd;     /* 16: */
                                        union {
                                                __le32 _rdev;   /* 4: */
+                                               /*
+                                                * The fast symlink area
+                                                * is expected to overflow
+                                                * into _inlineea when
+                                                * needed (which will clear
+                                                * INLINEEA).
+                                                */
                                                u8 _fastsymlink[128];
                                        } _u;
                                        u8 _inlineea[128];
index 1f26d19..9940a1e 100644 (file)
@@ -87,6 +87,7 @@ struct jfs_inode_info {
                struct {
                        unchar _unused[16];     /* 16: */
                        dxd_t _dxd;             /* 16: */
+                       /* _inline may overflow into _inline_ea when needed */
                        unchar _inline[128];    /* 128: inline symlink */
                        /* _inline_ea may overlay the last part of
                         * file._xtroot if maxentry = XTROOTINITSLOT
index 1b9264f..f085714 100644 (file)
@@ -967,8 +967,7 @@ static int __init init_jfs_fs(void)
        jfs_inode_cachep =
            kmem_cache_create_usercopy("jfs_ip", sizeof(struct jfs_inode_info),
                        0, SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD|SLAB_ACCOUNT,
-                       offsetof(struct jfs_inode_info, i_inline),
-                       sizeof_field(struct jfs_inode_info, i_inline),
+                       offsetof(struct jfs_inode_info, i_inline), IDATASIZE,
                        init_once);
        if (jfs_inode_cachep == NULL)
                return -ENOMEM;
index 8ddd148..bd2f4c6 100644 (file)
@@ -659,12 +659,21 @@ int __legitimize_mnt(struct vfsmount *bastard, unsigned seq)
                return 0;
        mnt = real_mount(bastard);
        mnt_add_count(mnt, 1);
+       smp_mb();                       // see mntput_no_expire()
        if (likely(!read_seqretry(&mount_lock, seq)))
                return 0;
        if (bastard->mnt_flags & MNT_SYNC_UMOUNT) {
                mnt_add_count(mnt, -1);
                return 1;
        }
+       lock_mount_hash();
+       if (unlikely(bastard->mnt_flags & MNT_DOOMED)) {
+               mnt_add_count(mnt, -1);
+               unlock_mount_hash();
+               return 1;
+       }
+       unlock_mount_hash();
+       /* caller will mntput() */
        return -1;
 }
 
@@ -1195,12 +1204,27 @@ static DECLARE_DELAYED_WORK(delayed_mntput_work, delayed_mntput);
 static void mntput_no_expire(struct mount *mnt)
 {
        rcu_read_lock();
-       mnt_add_count(mnt, -1);
-       if (likely(mnt->mnt_ns)) { /* shouldn't be the last one */
+       if (likely(READ_ONCE(mnt->mnt_ns))) {
+               /*
+                * Since we don't do lock_mount_hash() here,
+                * ->mnt_ns can change under us.  However, if it's
+                * non-NULL, then there's a reference that won't
+                * be dropped until after an RCU delay done after
+                * turning ->mnt_ns NULL.  So if we observe it
+                * non-NULL under rcu_read_lock(), the reference
+                * we are dropping is not the final one.
+                */
+               mnt_add_count(mnt, -1);
                rcu_read_unlock();
                return;
        }
        lock_mount_hash();
+       /*
+        * make sure that if __legitimize_mnt() has not seen us grab
+        * mount_lock, we'll see their refcount increment here.
+        */
+       smp_mb();
+       mnt_add_count(mnt, -1);
        if (mnt_get_count(mnt)) {
                rcu_read_unlock();
                unlock_mount_hash();
index 6dd1468..f6c4ccd 100644 (file)
@@ -6466,34 +6466,34 @@ static void nfs4_lock_done(struct rpc_task *task, void *calldata)
                if (data->arg.new_lock && !data->cancelled) {
                        data->fl.fl_flags &= ~(FL_SLEEP | FL_ACCESS);
                        if (locks_lock_inode_wait(lsp->ls_state->inode, &data->fl) < 0)
-                               break;
+                               goto out_restart;
                }
-
                if (data->arg.new_lock_owner != 0) {
                        nfs_confirm_seqid(&lsp->ls_seqid, 0);
                        nfs4_stateid_copy(&lsp->ls_stateid, &data->res.stateid);
                        set_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags);
-                       goto out_done;
-               } else if (nfs4_update_lock_stateid(lsp, &data->res.stateid))
-                       goto out_done;
-
+               } else if (!nfs4_update_lock_stateid(lsp, &data->res.stateid))
+                       goto out_restart;
                break;
        case -NFS4ERR_BAD_STATEID:
        case -NFS4ERR_OLD_STATEID:
        case -NFS4ERR_STALE_STATEID:
        case -NFS4ERR_EXPIRED:
                if (data->arg.new_lock_owner != 0) {
-                       if (nfs4_stateid_match(&data->arg.open_stateid,
+                       if (!nfs4_stateid_match(&data->arg.open_stateid,
                                                &lsp->ls_state->open_stateid))
-                               goto out_done;
-               } else if (nfs4_stateid_match(&data->arg.lock_stateid,
+                               goto out_restart;
+               } else if (!nfs4_stateid_match(&data->arg.lock_stateid,
                                                &lsp->ls_stateid))
-                               goto out_done;
+                               goto out_restart;
        }
-       if (!data->cancelled)
-               rpc_restart_call_prepare(task);
 out_done:
        dprintk("%s: done, ret = %d!\n", __func__, data->rpc_status);
+       return;
+out_restart:
+       if (!data->cancelled)
+               rpc_restart_call_prepare(task);
+       goto out_done;
 }
 
 static void nfs4_lock_release(void *calldata)
@@ -6502,7 +6502,7 @@ static void nfs4_lock_release(void *calldata)
 
        dprintk("%s: begin!\n", __func__);
        nfs_free_seqid(data->arg.open_seqid);
-       if (data->cancelled) {
+       if (data->cancelled && data->rpc_status == 0) {
                struct rpc_task *task;
                task = nfs4_do_unlck(&data->fl, data->ctx, data->lsp,
                                data->arg.lock_seqid);
index 2751476..f098b9f 100644 (file)
@@ -167,6 +167,8 @@ int squashfs_read_data(struct super_block *sb, u64 index, int length,
        }
 
        if (compressed) {
+               if (!msblk->stream)
+                       goto read_failure;
                length = squashfs_decompress(msblk, bh, b, offset, length,
                        output);
                if (length < 0)
index 23813c0..0839efa 100644 (file)
@@ -350,6 +350,9 @@ int squashfs_read_metadata(struct super_block *sb, void *buffer,
 
        TRACE("Entered squashfs_read_metadata [%llx:%x]\n", *block, *offset);
 
+       if (unlikely(length < 0))
+               return -EIO;
+
        while (length) {
                entry = squashfs_cache_get(sb, msblk->block_cache, *block, 0);
                if (entry->error) {
index 13d8094..f1c1430 100644 (file)
@@ -194,7 +194,11 @@ static long long read_indexes(struct super_block *sb, int n,
                }
 
                for (i = 0; i < blocks; i++) {
-                       int size = le32_to_cpu(blist[i]);
+                       int size = squashfs_block_size(blist[i]);
+                       if (size < 0) {
+                               err = size;
+                               goto failure;
+                       }
                        block += SQUASHFS_COMPRESSED_SIZE_BLOCK(size);
                }
                n -= blocks;
@@ -367,7 +371,24 @@ static int read_blocklist(struct inode *inode, int index, u64 *block)
                        sizeof(size));
        if (res < 0)
                return res;
-       return le32_to_cpu(size);
+       return squashfs_block_size(size);
+}
+
+void squashfs_fill_page(struct page *page, struct squashfs_cache_entry *buffer, int offset, int avail)
+{
+       int copied;
+       void *pageaddr;
+
+       pageaddr = kmap_atomic(page);
+       copied = squashfs_copy_data(pageaddr, buffer, offset, avail);
+       memset(pageaddr + copied, 0, PAGE_SIZE - copied);
+       kunmap_atomic(pageaddr);
+
+       flush_dcache_page(page);
+       if (copied == avail)
+               SetPageUptodate(page);
+       else
+               SetPageError(page);
 }
 
 /* Copy data into page cache  */
@@ -376,7 +397,6 @@ void squashfs_copy_cache(struct page *page, struct squashfs_cache_entry *buffer,
 {
        struct inode *inode = page->mapping->host;
        struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
-       void *pageaddr;
        int i, mask = (1 << (msblk->block_log - PAGE_SHIFT)) - 1;
        int start_index = page->index & ~mask, end_index = start_index | mask;
 
@@ -402,12 +422,7 @@ void squashfs_copy_cache(struct page *page, struct squashfs_cache_entry *buffer,
                if (PageUptodate(push_page))
                        goto skip_page;
 
-               pageaddr = kmap_atomic(push_page);
-               squashfs_copy_data(pageaddr, buffer, offset, avail);
-               memset(pageaddr + avail, 0, PAGE_SIZE - avail);
-               kunmap_atomic(pageaddr);
-               flush_dcache_page(push_page);
-               SetPageUptodate(push_page);
+               squashfs_fill_page(push_page, buffer, offset, avail);
 skip_page:
                unlock_page(push_page);
                if (i != page->index)
@@ -416,10 +431,9 @@ skip_page:
 }
 
 /* Read datablock stored packed inside a fragment (tail-end packed block) */
-static int squashfs_readpage_fragment(struct page *page)
+static int squashfs_readpage_fragment(struct page *page, int expected)
 {
        struct inode *inode = page->mapping->host;
-       struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
        struct squashfs_cache_entry *buffer = squashfs_get_fragment(inode->i_sb,
                squashfs_i(inode)->fragment_block,
                squashfs_i(inode)->fragment_size);
@@ -430,23 +444,16 @@ static int squashfs_readpage_fragment(struct page *page)
                        squashfs_i(inode)->fragment_block,
                        squashfs_i(inode)->fragment_size);
        else
-               squashfs_copy_cache(page, buffer, i_size_read(inode) &
-                       (msblk->block_size - 1),
+               squashfs_copy_cache(page, buffer, expected,
                        squashfs_i(inode)->fragment_offset);
 
        squashfs_cache_put(buffer);
        return res;
 }
 
-static int squashfs_readpage_sparse(struct page *page, int index, int file_end)
+static int squashfs_readpage_sparse(struct page *page, int expected)
 {
-       struct inode *inode = page->mapping->host;
-       struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
-       int bytes = index == file_end ?
-                       (i_size_read(inode) & (msblk->block_size - 1)) :
-                        msblk->block_size;
-
-       squashfs_copy_cache(page, NULL, bytes, 0);
+       squashfs_copy_cache(page, NULL, expected, 0);
        return 0;
 }
 
@@ -456,6 +463,9 @@ static int squashfs_readpage(struct file *file, struct page *page)
        struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
        int index = page->index >> (msblk->block_log - PAGE_SHIFT);
        int file_end = i_size_read(inode) >> msblk->block_log;
+       int expected = index == file_end ?
+                       (i_size_read(inode) & (msblk->block_size - 1)) :
+                        msblk->block_size;
        int res;
        void *pageaddr;
 
@@ -474,11 +484,11 @@ static int squashfs_readpage(struct file *file, struct page *page)
                        goto error_out;
 
                if (bsize == 0)
-                       res = squashfs_readpage_sparse(page, index, file_end);
+                       res = squashfs_readpage_sparse(page, expected);
                else
-                       res = squashfs_readpage_block(page, block, bsize);
+                       res = squashfs_readpage_block(page, block, bsize, expected);
        } else
-               res = squashfs_readpage_fragment(page);
+               res = squashfs_readpage_fragment(page, expected);
 
        if (!res)
                return 0;
index f2310d2..a9ba8d9 100644 (file)
@@ -20,7 +20,7 @@
 #include "squashfs.h"
 
 /* Read separately compressed datablock and memcopy into page cache */
-int squashfs_readpage_block(struct page *page, u64 block, int bsize)
+int squashfs_readpage_block(struct page *page, u64 block, int bsize, int expected)
 {
        struct inode *i = page->mapping->host;
        struct squashfs_cache_entry *buffer = squashfs_get_datablock(i->i_sb,
@@ -31,7 +31,7 @@ int squashfs_readpage_block(struct page *page, u64 block, int bsize)
                ERROR("Unable to read page, block %llx, size %x\n", block,
                        bsize);
        else
-               squashfs_copy_cache(page, buffer, buffer->length, 0);
+               squashfs_copy_cache(page, buffer, expected, 0);
 
        squashfs_cache_put(buffer);
        return res;
index cb485d8..80db1b8 100644 (file)
 #include "page_actor.h"
 
 static int squashfs_read_cache(struct page *target_page, u64 block, int bsize,
-       int pages, struct page **page);
+       int pages, struct page **page, int bytes);
 
 /* Read separately compressed datablock directly into page cache */
-int squashfs_readpage_block(struct page *target_page, u64 block, int bsize)
+int squashfs_readpage_block(struct page *target_page, u64 block, int bsize,
+       int expected)
 
 {
        struct inode *inode = target_page->mapping->host;
@@ -83,7 +84,7 @@ int squashfs_readpage_block(struct page *target_page, u64 block, int bsize)
                 * using an intermediate buffer.
                 */
                res = squashfs_read_cache(target_page, block, bsize, pages,
-                                                               page);
+                                                       page, expected);
                if (res < 0)
                        goto mark_errored;
 
@@ -95,6 +96,11 @@ int squashfs_readpage_block(struct page *target_page, u64 block, int bsize)
        if (res < 0)
                goto mark_errored;
 
+       if (res != expected) {
+               res = -EIO;
+               goto mark_errored;
+       }
+
        /* Last page may have trailing bytes not filled */
        bytes = res % PAGE_SIZE;
        if (bytes) {
@@ -138,13 +144,12 @@ out:
 
 
 static int squashfs_read_cache(struct page *target_page, u64 block, int bsize,
-       int pages, struct page **page)
+       int pages, struct page **page, int bytes)
 {
        struct inode *i = target_page->mapping->host;
        struct squashfs_cache_entry *buffer = squashfs_get_datablock(i->i_sb,
                                                 block, bsize);
-       int bytes = buffer->length, res = buffer->error, n, offset = 0;
-       void *pageaddr;
+       int res = buffer->error, n, offset = 0;
 
        if (res) {
                ERROR("Unable to read page, block %llx, size %x\n", block,
@@ -159,12 +164,7 @@ static int squashfs_read_cache(struct page *target_page, u64 block, int bsize,
                if (page[n] == NULL)
                        continue;
 
-               pageaddr = kmap_atomic(page[n]);
-               squashfs_copy_data(pageaddr, buffer, offset, avail);
-               memset(pageaddr + avail, 0, PAGE_SIZE - avail);
-               kunmap_atomic(pageaddr);
-               flush_dcache_page(page[n]);
-               SetPageUptodate(page[n]);
+               squashfs_fill_page(page[n], buffer, offset, avail);
                unlock_page(page[n]);
                if (page[n] != target_page)
                        put_page(page[n]);
index 0ed6edb..0681fea 100644 (file)
@@ -49,11 +49,16 @@ int squashfs_frag_lookup(struct super_block *sb, unsigned int fragment,
                                u64 *fragment_block)
 {
        struct squashfs_sb_info *msblk = sb->s_fs_info;
-       int block = SQUASHFS_FRAGMENT_INDEX(fragment);
-       int offset = SQUASHFS_FRAGMENT_INDEX_OFFSET(fragment);
-       u64 start_block = le64_to_cpu(msblk->fragment_index[block]);
+       int block, offset, size;
        struct squashfs_fragment_entry fragment_entry;
-       int size;
+       u64 start_block;
+
+       if (fragment >= msblk->fragments)
+               return -EIO;
+       block = SQUASHFS_FRAGMENT_INDEX(fragment);
+       offset = SQUASHFS_FRAGMENT_INDEX_OFFSET(fragment);
+
+       start_block = le64_to_cpu(msblk->fragment_index[block]);
 
        size = squashfs_read_metadata(sb, &fragment_entry, &start_block,
                                        &offset, sizeof(fragment_entry));
@@ -61,9 +66,7 @@ int squashfs_frag_lookup(struct super_block *sb, unsigned int fragment,
                return size;
 
        *fragment_block = le64_to_cpu(fragment_entry.start_block);
-       size = le32_to_cpu(fragment_entry.size);
-
-       return size;
+       return squashfs_block_size(fragment_entry.size);
 }
 
 
index 887d6d2..f89f8a7 100644 (file)
@@ -67,11 +67,12 @@ extern __le64 *squashfs_read_fragment_index_table(struct super_block *,
                                u64, u64, unsigned int);
 
 /* file.c */
+void squashfs_fill_page(struct page *, struct squashfs_cache_entry *, int, int);
 void squashfs_copy_cache(struct page *, struct squashfs_cache_entry *, int,
                                int);
 
 /* file_xxx.c */
-extern int squashfs_readpage_block(struct page *, u64, int);
+extern int squashfs_readpage_block(struct page *, u64, int, int);
 
 /* id.c */
 extern int squashfs_get_id(struct super_block *, unsigned int, unsigned int *);
index 24d12fd..4e6853f 100644 (file)
 
 #define SQUASHFS_COMPRESSED_BLOCK(B)   (!((B) & SQUASHFS_COMPRESSED_BIT_BLOCK))
 
+static inline int squashfs_block_size(__le32 raw)
+{
+       u32 size = le32_to_cpu(raw);
+       return (size >> 25) ? -EIO : size;
+}
+
 /*
  * Inode number ops.  Inodes consist of a compressed block number, and an
  * uncompressed offset within that block
index 1da565c..ef69c31 100644 (file)
@@ -75,6 +75,7 @@ struct squashfs_sb_info {
        unsigned short                          block_log;
        long long                               bytes_used;
        unsigned int                            inodes;
+       unsigned int                            fragments;
        int                                     xattr_ids;
 };
 #endif
index 8a73b97..40e6573 100644 (file)
@@ -175,6 +175,7 @@ static int squashfs_fill_super(struct super_block *sb, void *data, int silent)
        msblk->inode_table = le64_to_cpu(sblk->inode_table_start);
        msblk->directory_table = le64_to_cpu(sblk->directory_table_start);
        msblk->inodes = le32_to_cpu(sblk->inodes);
+       msblk->fragments = le32_to_cpu(sblk->fragments);
        flags = le16_to_cpu(sblk->flags);
 
        TRACE("Found valid superblock on %pg\n", sb->s_bdev);
@@ -185,7 +186,7 @@ static int squashfs_fill_super(struct super_block *sb, void *data, int silent)
        TRACE("Filesystem size %lld bytes\n", msblk->bytes_used);
        TRACE("Block size %d\n", msblk->block_size);
        TRACE("Number of inodes %d\n", msblk->inodes);
-       TRACE("Number of fragments %d\n", le32_to_cpu(sblk->fragments));
+       TRACE("Number of fragments %d\n", msblk->fragments);
        TRACE("Number of ids %d\n", le16_to_cpu(sblk->no_ids));
        TRACE("sblk->inode_table_start %llx\n", msblk->inode_table);
        TRACE("sblk->directory_table_start %llx\n", msblk->directory_table);
@@ -272,7 +273,7 @@ allocate_id_index_table:
        sb->s_export_op = &squashfs_export_ops;
 
 handle_fragments:
-       fragments = le32_to_cpu(sblk->fragments);
+       fragments = msblk->fragments;
        if (fragments == 0)
                goto check_directory_table;
 
index 594d192..bad9cea 100644 (file)
@@ -633,8 +633,10 @@ static void userfaultfd_event_wait_completion(struct userfaultfd_ctx *ctx,
                /* the various vma->vm_userfaultfd_ctx still points to it */
                down_write(&mm->mmap_sem);
                for (vma = mm->mmap; vma; vma = vma->vm_next)
-                       if (vma->vm_userfaultfd_ctx.ctx == release_new_ctx)
+                       if (vma->vm_userfaultfd_ctx.ctx == release_new_ctx) {
                                vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
+                               vma->vm_flags &= ~(VM_UFFD_WP | VM_UFFD_MISSING);
+                       }
                up_write(&mm->mmap_sem);
 
                userfaultfd_ctx_put(release_new_ctx);
index eef4662..75dbdc1 100644 (file)
@@ -223,12 +223,13 @@ xfs_alloc_get_rec(
        error = xfs_btree_get_rec(cur, &rec, stat);
        if (error || !(*stat))
                return error;
-       if (rec->alloc.ar_blockcount == 0)
-               goto out_bad_rec;
 
        *bno = be32_to_cpu(rec->alloc.ar_startblock);
        *len = be32_to_cpu(rec->alloc.ar_blockcount);
 
+       if (*len == 0)
+               goto out_bad_rec;
+
        /* check for valid extent range, including overflow */
        if (!xfs_verify_agbno(mp, agno, *bno))
                goto out_bad_rec;
index 33dc346..30d1d60 100644 (file)
@@ -731,7 +731,8 @@ xfs_inode_validate_extsize(
        if ((hint_flag || inherit_flag) && extsize == 0)
                return __this_address;
 
-       if (!(hint_flag || inherit_flag) && extsize != 0)
+       /* free inodes get flags set to zero but extsize remains */
+       if (mode && !(hint_flag || inherit_flag) && extsize != 0)
                return __this_address;
 
        if (extsize_bytes % blocksize_bytes)
@@ -777,7 +778,8 @@ xfs_inode_validate_cowextsize(
        if (hint_flag && cowextsize == 0)
                return __this_address;
 
-       if (!hint_flag && cowextsize != 0)
+       /* free inodes get flags set to zero but cowextsize remains */
+       if (mode && !hint_flag && cowextsize != 0)
                return __this_address;
 
        if (hint_flag && rt_flag)
index e3147eb..ca3f2c2 100644 (file)
@@ -287,6 +287,20 @@ void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues);
 
 void blk_mq_quiesce_queue_nowait(struct request_queue *q);
 
+/**
+ * blk_mq_mark_complete() - Set request state to complete
+ * @rq: request to set to complete state
+ *
+ * Returns true if request state was successfully set to complete. If
+ * successful, the caller is responsible for seeing this request is ended, as
+ * blk_mq_complete_request will not work again.
+ */
+static inline bool blk_mq_mark_complete(struct request *rq)
+{
+       return cmpxchg(&rq->state, MQ_RQ_IN_FLIGHT, MQ_RQ_COMPLETE) ==
+                       MQ_RQ_IN_FLIGHT;
+}
+
 /*
  * Driver command data is immediately after the request. So subtract request
  * size to get back to the original request, add request size to get the PDU.
index 687b176..f02cee0 100644 (file)
@@ -5,10 +5,10 @@
 #include <uapi/linux/bpfilter.h>
 
 struct sock;
-int bpfilter_ip_set_sockopt(struct sock *sk, int optname, char *optval,
+int bpfilter_ip_set_sockopt(struct sock *sk, int optname, char __user *optval,
                            unsigned int optlen);
-int bpfilter_ip_get_sockopt(struct sock *sk, int optname, char *optval,
-                           int *optlen);
+int bpfilter_ip_get_sockopt(struct sock *sk, int optname, char __user *optval,
+                           int __user *optlen);
 extern int (*bpfilter_process_sockopt)(struct sock *sk, int optname,
                                       char __user *optval,
                                       unsigned int optlen, bool is_set);
index a97a63e..3233fbe 100644 (file)
@@ -30,7 +30,7 @@ struct cpu {
 };
 
 extern void boot_cpu_init(void);
-extern void boot_cpu_state_init(void);
+extern void boot_cpu_hotplug_init(void);
 extern void cpu_init(void);
 extern void trap_init(void);
 
index e6c0448..31c865d 100644 (file)
@@ -124,7 +124,7 @@ static inline void delayacct_blkio_start(void)
 
 static inline void delayacct_blkio_end(struct task_struct *p)
 {
-       if (current->delays)
+       if (p->delays)
                __delayacct_blkio_end(p);
        delayacct_clear_flag(DELAYACCT_PF_BLKIO);
 }
index 56add82..401e4b2 100644 (file)
@@ -894,6 +894,16 @@ typedef struct _efi_file_handle {
        void *flush;
 } efi_file_handle_t;
 
+typedef struct {
+       u64 revision;
+       u32 open_volume;
+} efi_file_io_interface_32_t;
+
+typedef struct {
+       u64 revision;
+       u64 open_volume;
+} efi_file_io_interface_64_t;
+
 typedef struct _efi_file_io_interface {
        u64 revision;
        int (*open_volume)(struct _efi_file_io_interface *,
@@ -988,14 +998,12 @@ extern void efi_memmap_walk (efi_freemem_callback_t callback, void *arg);
 extern void efi_gettimeofday (struct timespec64 *ts);
 extern void efi_enter_virtual_mode (void);     /* switch EFI to virtual mode, if possible */
 #ifdef CONFIG_X86
-extern void efi_late_init(void);
 extern void efi_free_boot_services(void);
 extern efi_status_t efi_query_variable_store(u32 attributes,
                                             unsigned long size,
                                             bool nonblocking);
 extern void efi_find_mirror(void);
 #else
-static inline void efi_late_init(void) {}
 static inline void efi_free_boot_services(void) {}
 
 static inline efi_status_t efi_query_variable_store(u32 attributes,
@@ -1651,4 +1659,7 @@ struct linux_efi_tpm_eventlog {
 
 extern int efi_tpm_eventlog_init(void);
 
+/* Workqueue to queue EFI Runtime Services */
+extern struct workqueue_struct *efi_rts_wq;
+
 #endif /* _LINUX_EFI_H */
index 7094718..ffcc772 100644 (file)
@@ -11,6 +11,7 @@
 
 #include <linux/fcntl.h>
 #include <linux/wait.h>
+#include <linux/err.h>
 
 /*
  * CAREFUL: Check include/uapi/asm-generic/fcntl.h when defining
index cbb872c..9d2ea3e 100644 (file)
@@ -73,6 +73,7 @@
 #define GICD_TYPER_MBIS                        (1U << 16)
 
 #define GICD_TYPER_ID_BITS(typer)      ((((typer) >> 19) & 0x1f) + 1)
+#define GICD_TYPER_NUM_LPIS(typer)     ((((typer) >> 11) & 0x1f) + 1)
 #define GICD_TYPER_IRQS(typer)         ((((typer) & 0x1f) + 1) * 32)
 
 #define GICD_IROUTER_SPI_MODE_ONE      (0U << 31)
@@ -576,8 +577,8 @@ struct rdists {
                phys_addr_t     phys_base;
        } __percpu              *rdist;
        struct page             *prop_page;
-       int                     id_bits;
        u64                     flags;
+       u32                     gicd_typer;
        bool                    has_vlpis;
        bool                    has_direct_lpi;
 };
index 80cbb7f..8395792 100644 (file)
@@ -358,6 +358,7 @@ struct mlx5_frag_buf_ctrl {
        struct mlx5_frag_buf    frag_buf;
        u32                     sz_m1;
        u32                     frag_sz_m1;
+       u32                     strides_offset;
        u8                      log_sz;
        u8                      log_stride;
        u8                      log_frag_strides;
@@ -983,14 +984,22 @@ static inline u32 mlx5_base_mkey(const u32 key)
        return key & 0xffffff00u;
 }
 
-static inline void mlx5_fill_fbc(u8 log_stride, u8 log_sz,
-                                struct mlx5_frag_buf_ctrl *fbc)
+static inline void mlx5_fill_fbc_offset(u8 log_stride, u8 log_sz,
+                                       u32 strides_offset,
+                                       struct mlx5_frag_buf_ctrl *fbc)
 {
        fbc->log_stride = log_stride;
        fbc->log_sz     = log_sz;
        fbc->sz_m1      = (1 << fbc->log_sz) - 1;
        fbc->log_frag_strides = PAGE_SHIFT - fbc->log_stride;
        fbc->frag_sz_m1 = (1 << fbc->log_frag_strides) - 1;
+       fbc->strides_offset = strides_offset;
+}
+
+static inline void mlx5_fill_fbc(u8 log_stride, u8 log_sz,
+                                struct mlx5_frag_buf_ctrl *fbc)
+{
+       mlx5_fill_fbc_offset(log_stride, log_sz, 0, fbc);
 }
 
 static inline void mlx5_core_init_cq_frag_buf(struct mlx5_frag_buf_ctrl *fbc,
@@ -1004,7 +1013,10 @@ static inline void mlx5_core_init_cq_frag_buf(struct mlx5_frag_buf_ctrl *fbc,
 static inline void *mlx5_frag_buf_get_wqe(struct mlx5_frag_buf_ctrl *fbc,
                                          u32 ix)
 {
-       unsigned int frag = (ix >> fbc->log_frag_strides);
+       unsigned int frag;
+
+       ix  += fbc->strides_offset;
+       frag = ix >> fbc->log_frag_strides;
 
        return fbc->frag_buf.frags[frag].buf +
                ((fbc->frag_sz_m1 & ix) << fbc->log_stride);
index d3a3842..68a5121 100644 (file)
@@ -452,6 +452,23 @@ struct vm_operations_struct {
                                          unsigned long addr);
 };
 
+static inline void vma_init(struct vm_area_struct *vma, struct mm_struct *mm)
+{
+       static const struct vm_operations_struct dummy_vm_ops = {};
+
+       vma->vm_mm = mm;
+       vma->vm_ops = &dummy_vm_ops;
+       INIT_LIST_HEAD(&vma->anon_vma_chain);
+}
+
+static inline void vma_set_anonymous(struct vm_area_struct *vma)
+{
+       vma->vm_ops = NULL;
+}
+
+/* flush_tlb_range() takes a vma, not a mm, and can care about flags */
+#define TLB_FLUSH_VMA(mm,flags) { .vm_mm = (mm), .vm_flags = (flags) }
+
 struct mmu_gather;
 struct inode;
 
index abd5d5e..c133ccf 100644 (file)
@@ -368,7 +368,6 @@ struct pci_dev {
        unsigned int    transparent:1;          /* Subtractive decode bridge */
        unsigned int    multifunction:1;        /* Multi-function device */
 
-       unsigned int    is_added:1;
        unsigned int    is_busmaster:1;         /* Is busmaster */
        unsigned int    no_msi:1;               /* May not use MSI */
        unsigned int    no_64bit_msi:1;         /* May only use 32-bit MSIs */
index 1fa1288..87f6db4 100644 (file)
@@ -1130,6 +1130,7 @@ extern void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct
 extern struct perf_callchain_entry *
 get_perf_callchain(struct pt_regs *regs, u32 init_nr, bool kernel, bool user,
                   u32 max_stack, bool crosstask, bool add_mark);
+extern struct perf_callchain_entry *perf_callchain(struct perf_event *event, struct pt_regs *regs);
 extern int get_callchain_buffers(int max_stack);
 extern void put_callchain_buffers(void);
 
index 36df6cc..4786c22 100644 (file)
@@ -396,7 +396,16 @@ static inline void list_splice_tail_init_rcu(struct list_head *list,
  * @member:    the name of the list_head within the struct.
  *
  * Continue to iterate over list of given type, continuing after
- * the current position.
+ * the current position, which must have been in the list when the RCU read
+ * lock was taken.
+ * This would typically require either that you obtained the node from a
+ * previous walk of the list in the same RCU read-side critical section, or
+ * that you held some sort of non-RCU reference (such as a reference count)
+ * to keep the node alive *and* in the list.
+ *
+ * This iterator is similar to list_for_each_entry_from_rcu() except
+ * this starts after the given position and that one starts at the given
+ * position.
  */
 #define list_for_each_entry_continue_rcu(pos, head, member)            \
        for (pos = list_entry_rcu(pos->member.next, typeof(*pos), member); \
@@ -411,6 +420,14 @@ static inline void list_splice_tail_init_rcu(struct list_head *list,
  *
  * Iterate over the tail of a list starting from a given position,
  * which must have been in the list when the RCU read lock was taken.
+ * This would typically require either that you obtained the node from a
+ * previous walk of the list in the same RCU read-side critical section, or
+ * that you held some sort of non-RCU reference (such as a reference count)
+ * to keep the node alive *and* in the list.
+ *
+ * This iterator is similar to list_for_each_entry_continue_rcu() except
+ * this starts from the given position and that one starts from the position
+ * after the given position.
  */
 #define list_for_each_entry_from_rcu(pos, head, member)                        \
        for (; &(pos)->member != (head);                                        \
index 65163aa..75e5b39 100644 (file)
@@ -64,7 +64,6 @@ void rcu_barrier_tasks(void);
 
 void __rcu_read_lock(void);
 void __rcu_read_unlock(void);
-void rcu_read_unlock_special(struct task_struct *t);
 void synchronize_rcu(void);
 
 /*
@@ -159,11 +158,11 @@ static inline void rcu_init_nohz(void) { }
        } while (0)
 
 /*
- * Note a voluntary context switch for RCU-tasks benefit.  This is a
- * macro rather than an inline function to avoid #include hell.
+ * Note a quasi-voluntary context switch for RCU-tasks' benefit.
+ * This is a macro rather than an inline function to avoid #include hell.
  */
 #ifdef CONFIG_TASKS_RCU
-#define rcu_note_voluntary_context_switch_lite(t) \
+#define rcu_tasks_qs(t) \
        do { \
                if (READ_ONCE((t)->rcu_tasks_holdout)) \
                        WRITE_ONCE((t)->rcu_tasks_holdout, false); \
@@ -171,14 +170,14 @@ static inline void rcu_init_nohz(void) { }
 #define rcu_note_voluntary_context_switch(t) \
        do { \
                rcu_all_qs(); \
-               rcu_note_voluntary_context_switch_lite(t); \
+               rcu_tasks_qs(t); \
        } while (0)
 void call_rcu_tasks(struct rcu_head *head, rcu_callback_t func);
 void synchronize_rcu_tasks(void);
 void exit_tasks_rcu_start(void);
 void exit_tasks_rcu_finish(void);
 #else /* #ifdef CONFIG_TASKS_RCU */
-#define rcu_note_voluntary_context_switch_lite(t)      do { } while (0)
+#define rcu_tasks_qs(t)        do { } while (0)
 #define rcu_note_voluntary_context_switch(t)           rcu_all_qs()
 #define call_rcu_tasks call_rcu_sched
 #define synchronize_rcu_tasks synchronize_sched
@@ -195,8 +194,8 @@ static inline void exit_tasks_rcu_finish(void) { }
  */
 #define cond_resched_tasks_rcu_qs() \
 do { \
-       if (!cond_resched()) \
-               rcu_note_voluntary_context_switch_lite(current); \
+       rcu_tasks_qs(current); \
+       cond_resched(); \
 } while (0)
 
 /*
@@ -567,8 +566,8 @@ static inline void rcu_preempt_sleep_check(void) { }
  * This is simply an identity function, but it documents where a pointer
  * is handed off from RCU to some other synchronization mechanism, for
  * example, reference counting or locking.  In C11, it would map to
- * kill_dependency().  It could be used as follows:
- * ``
+ * kill_dependency().  It could be used as follows::
+ *
  *     rcu_read_lock();
  *     p = rcu_dereference(gp);
  *     long_lived = is_long_lived(p);
@@ -579,7 +578,6 @@ static inline void rcu_preempt_sleep_check(void) { }
  *                     p = rcu_pointer_handoff(p);
  *     }
  *     rcu_read_unlock();
- *``
  */
 #define rcu_pointer_handoff(p) (p)
 
index 7b3c82e..8d9a0ea 100644 (file)
@@ -93,7 +93,7 @@ static inline void kfree_call_rcu(struct rcu_head *head,
 #define rcu_note_context_switch(preempt) \
        do { \
                rcu_sched_qs(); \
-               rcu_note_voluntary_context_switch_lite(current); \
+               rcu_tasks_qs(current); \
        } while (0)
 
 static inline int rcu_needs_cpu(u64 basemono, u64 *nextevt)
index b72ebdf..003d09a 100644 (file)
@@ -165,6 +165,7 @@ void ring_buffer_record_enable(struct ring_buffer *buffer);
 void ring_buffer_record_off(struct ring_buffer *buffer);
 void ring_buffer_record_on(struct ring_buffer *buffer);
 int ring_buffer_record_is_on(struct ring_buffer *buffer);
+int ring_buffer_record_is_set_on(struct ring_buffer *buffer);
 void ring_buffer_record_disable_cpu(struct ring_buffer *buffer, int cpu);
 void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu);
 
index 1b92a28..6fd615a 100644 (file)
@@ -106,7 +106,14 @@ static inline int rt_mutex_is_locked(struct rt_mutex *lock)
 extern void __rt_mutex_init(struct rt_mutex *lock, const char *name, struct lock_class_key *key);
 extern void rt_mutex_destroy(struct rt_mutex *lock);
 
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+extern void rt_mutex_lock_nested(struct rt_mutex *lock, unsigned int subclass);
+#define rt_mutex_lock(lock) rt_mutex_lock_nested(lock, 0)
+#else
 extern void rt_mutex_lock(struct rt_mutex *lock);
+#define rt_mutex_lock_nested(lock, subclass) rt_mutex_lock(lock)
+#endif
+
 extern int rt_mutex_lock_interruptible(struct rt_mutex *lock);
 extern int rt_mutex_timed_lock(struct rt_mutex *lock,
                               struct hrtimer_sleeper *timeout);
index 91494d7..3e72a29 100644 (file)
@@ -195,6 +195,16 @@ static inline int srcu_read_lock(struct srcu_struct *sp) __acquires(sp)
        return retval;
 }
 
+/* Used by tracing, cannot be traced and cannot invoke lockdep. */
+static inline notrace int
+srcu_read_lock_notrace(struct srcu_struct *sp) __acquires(sp)
+{
+       int retval;
+
+       retval = __srcu_read_lock(sp);
+       return retval;
+}
+
 /**
  * srcu_read_unlock - unregister a old reader from an SRCU-protected structure.
  * @sp: srcu_struct in which to unregister the old reader.
@@ -209,6 +219,13 @@ static inline void srcu_read_unlock(struct srcu_struct *sp, int idx)
        __srcu_read_unlock(sp, idx);
 }
 
+/* Used by tracing, cannot be traced and cannot call lockdep. */
+static inline notrace void
+srcu_read_unlock_notrace(struct srcu_struct *sp, int idx) __releases(sp)
+{
+       __srcu_read_unlock(sp, idx);
+}
+
 /**
  * smp_mb__after_srcu_read_unlock - ensure full ordering after srcu_read_unlock
  *
index 6627286..61dfd93 100644 (file)
@@ -64,6 +64,8 @@ struct torture_random_state {
        long trs_count;
 };
 #define DEFINE_TORTURE_RANDOM(name) struct torture_random_state name = { 0, 0 }
+#define DEFINE_TORTURE_RANDOM_PERCPU(name) \
+       DEFINE_PER_CPU(struct torture_random_state, name)
 unsigned long torture_random(struct torture_random_state *trsp);
 
 /* Task shuffler, which causes CPUs to occasionally go idle. */
@@ -79,7 +81,7 @@ void stutter_wait(const char *title);
 int torture_stutter_init(int s);
 
 /* Initialization and cleanup. */
-bool torture_init_begin(char *ttype, bool v);
+bool torture_init_begin(char *ttype, int v);
 void torture_init_end(void);
 bool torture_cleanup_begin(void);
 void torture_cleanup_end(void);
index 9324ac2..43913ae 100644 (file)
@@ -64,7 +64,8 @@ struct vsock_sock {
        struct list_head pending_links;
        struct list_head accept_queue;
        bool rejected;
-       struct delayed_work dwork;
+       struct delayed_work connect_work;
+       struct delayed_work pending_work;
        struct delayed_work close_work;
        bool close_work_scheduled;
        u32 peer_shutdown;
@@ -77,7 +78,6 @@ struct vsock_sock {
 
 s64 vsock_stream_has_data(struct vsock_sock *vsk);
 s64 vsock_stream_has_space(struct vsock_sock *vsk);
-void vsock_pending_work(struct work_struct *work);
 struct sock *__vsock_create(struct net *net,
                            struct socket *sock,
                            struct sock *parent,
index 5fbfe61..1beb3ea 100644 (file)
@@ -5835,10 +5835,11 @@ void cfg80211_mgmt_tx_status(struct wireless_dev *wdev, u64 cookie,
 /**
  * cfg80211_rx_control_port - notification about a received control port frame
  * @dev: The device the frame matched to
- * @buf: control port frame
- * @len: length of the frame data
- * @addr: The peer from which the frame was received
- * @proto: frame protocol, typically PAE or Pre-authentication
+ * @skb: The skb with the control port frame.  It is assumed that the skb
+ *     is 802.3 formatted (with 802.3 header).  The skb can be non-linear.
+ *     This function does not take ownership of the skb, so the caller is
+ *     responsible for any cleanup.  The caller must also ensure that
+ *     skb->protocol is set appropriately.
  * @unencrypted: Whether the frame was received unencrypted
  *
  * This function is used to inform userspace about a received control port
@@ -5851,8 +5852,7 @@ void cfg80211_mgmt_tx_status(struct wireless_dev *wdev, u64 cookie,
  * Return: %true if the frame was passed to userspace
  */
 bool cfg80211_rx_control_port(struct net_device *dev,
-                             const u8 *buf, size_t len,
-                             const u8 *addr, u16 proto, bool unencrypted);
+                             struct sk_buff *skb, bool unencrypted);
 
 /**
  * cfg80211_cqm_rssi_notify - connection quality monitoring rssi event
index 71b9043..3d49305 100644 (file)
@@ -281,6 +281,11 @@ static inline void fib6_info_hold(struct fib6_info *f6i)
        atomic_inc(&f6i->fib6_ref);
 }
 
+static inline bool fib6_info_hold_safe(struct fib6_info *f6i)
+{
+       return atomic_inc_not_zero(&f6i->fib6_ref);
+}
+
 static inline void fib6_info_release(struct fib6_info *f6i)
 {
        if (f6i && atomic_dec_and_test(&f6i->fib6_ref))
index dc35f25..890a873 100644 (file)
@@ -116,6 +116,11 @@ static inline void llc_sap_hold(struct llc_sap *sap)
        refcount_inc(&sap->refcnt);
 }
 
+static inline bool llc_sap_hold_safe(struct llc_sap *sap)
+{
+       return refcount_inc_not_zero(&sap->refcnt);
+}
+
 void llc_sap_close(struct llc_sap *sap);
 
 static inline void llc_sap_put(struct llc_sap *sap)
index 08c005c..dc417ef 100644 (file)
@@ -150,6 +150,7 @@ static inline void nft_data_debug(const struct nft_data *data)
  *     @portid: netlink portID of the original message
  *     @seq: netlink sequence number
  *     @family: protocol family
+ *     @level: depth of the chains
  *     @report: notify via unicast netlink message
  */
 struct nft_ctx {
@@ -160,6 +161,7 @@ struct nft_ctx {
        u32                             portid;
        u32                             seq;
        u8                              family;
+       u8                              level;
        bool                            report;
 };
 
@@ -865,7 +867,6 @@ enum nft_chain_flags {
  *     @table: table that this chain belongs to
  *     @handle: chain handle
  *     @use: number of jump references to this chain
- *     @level: length of longest path to this chain
  *     @flags: bitmask of enum nft_chain_flags
  *     @name: name of the chain
  */
@@ -878,7 +879,6 @@ struct nft_chain {
        struct nft_table                *table;
        u64                             handle;
        u32                             use;
-       u16                             level;
        u8                              flags:6,
                                        genmask:2;
        char                            *name;
@@ -1124,7 +1124,6 @@ struct nft_flowtable {
        u32                             genmask:2,
                                        use:30;
        u64                             handle;
-       char                            *dev_name[NFT_FLOWTABLE_DEVICE_MAX];
        /* runtime data below here */
        struct nf_hook_ops              *ops ____cacheline_aligned;
        struct nf_flowtable             data;
index 3482d13..cd3ecda 100644 (file)
@@ -342,6 +342,7 @@ ssize_t tcp_splice_read(struct socket *sk, loff_t *ppos,
                        struct pipe_inode_info *pipe, size_t len,
                        unsigned int flags);
 
+void tcp_enter_quickack_mode(struct sock *sk, unsigned int max_quickacks);
 static inline void tcp_dec_quickack_mode(struct sock *sk,
                                         const unsigned int pkts)
 {
@@ -539,6 +540,7 @@ void tcp_send_fin(struct sock *sk);
 void tcp_send_active_reset(struct sock *sk, gfp_t priority);
 int tcp_send_synack(struct sock *);
 void tcp_push_one(struct sock *, unsigned int mss_now);
+void __tcp_send_ack(struct sock *sk, u32 rcv_nxt);
 void tcp_send_ack(struct sock *sk);
 void tcp_send_delayed_ack(struct sock *sk);
 void tcp_send_loss_probe(struct sock *sk);
@@ -838,6 +840,11 @@ static inline void bpf_compute_data_end_sk_skb(struct sk_buff *skb)
  * as TCP moves IP6CB into a different location in skb->cb[]
  */
 static inline int tcp_v6_iif(const struct sk_buff *skb)
+{
+       return TCP_SKB_CB(skb)->header.h6.iif;
+}
+
+static inline int tcp_v6_iif_l3_slave(const struct sk_buff *skb)
 {
        bool l3_slave = ipv6_l3mdev_skb(TCP_SKB_CB(skb)->header.h6.flags);
 
index 5936aac..a8d07fe 100644 (file)
@@ -52,6 +52,7 @@ TRACE_EVENT(rcu_utilization,
  *     "cpuqs": CPU passes through a quiescent state.
  *     "cpuonl": CPU comes online.
  *     "cpuofl": CPU goes offline.
+ *     "cpuofl-bgp": CPU goes offline while blocking a grace period.
  *     "reqwait": GP kthread sleeps waiting for grace-period request.
  *     "reqwaitsig": GP kthread awakened by signal from reqwait state.
  *     "fqswait": GP kthread waiting until time to force quiescent states.
@@ -63,24 +64,24 @@ TRACE_EVENT(rcu_utilization,
  */
 TRACE_EVENT(rcu_grace_period,
 
-       TP_PROTO(const char *rcuname, unsigned long gpnum, const char *gpevent),
+       TP_PROTO(const char *rcuname, unsigned long gp_seq, const char *gpevent),
 
-       TP_ARGS(rcuname, gpnum, gpevent),
+       TP_ARGS(rcuname, gp_seq, gpevent),
 
        TP_STRUCT__entry(
                __field(const char *, rcuname)
-               __field(unsigned long, gpnum)
+               __field(unsigned long, gp_seq)
                __field(const char *, gpevent)
        ),
 
        TP_fast_assign(
                __entry->rcuname = rcuname;
-               __entry->gpnum = gpnum;
+               __entry->gp_seq = gp_seq;
                __entry->gpevent = gpevent;
        ),
 
        TP_printk("%s %lu %s",
-                 __entry->rcuname, __entry->gpnum, __entry->gpevent)
+                 __entry->rcuname, __entry->gp_seq, __entry->gpevent)
 );
 
 /*
@@ -90,8 +91,8 @@ TRACE_EVENT(rcu_grace_period,
  *
  * "Startleaf": Request a grace period based on leaf-node data.
  * "Prestarted": Someone beat us to the request
- * "Startedleaf": Leaf-node start proved sufficient.
- * "Startedleafroot": Leaf-node start proved sufficient after checking root.
+ * "Startedleaf": Leaf node marked for future GP.
+ * "Startedleafroot": All nodes from leaf to root marked for future GP.
  * "Startedroot": Requested a nocb grace period based on root-node data.
  * "NoGPkthread": The RCU grace-period kthread has not yet started.
  * "StartWait": Start waiting for the requested grace period.
@@ -102,17 +103,16 @@ TRACE_EVENT(rcu_grace_period,
  */
 TRACE_EVENT(rcu_future_grace_period,
 
-       TP_PROTO(const char *rcuname, unsigned long gpnum, unsigned long completed,
-                unsigned long c, u8 level, int grplo, int grphi,
+       TP_PROTO(const char *rcuname, unsigned long gp_seq,
+                unsigned long gp_seq_req, u8 level, int grplo, int grphi,
                 const char *gpevent),
 
-       TP_ARGS(rcuname, gpnum, completed, c, level, grplo, grphi, gpevent),
+       TP_ARGS(rcuname, gp_seq, gp_seq_req, level, grplo, grphi, gpevent),
 
        TP_STRUCT__entry(
                __field(const char *, rcuname)
-               __field(unsigned long, gpnum)
-               __field(unsigned long, completed)
-               __field(unsigned long, c)
+               __field(unsigned long, gp_seq)
+               __field(unsigned long, gp_seq_req)
                __field(u8, level)
                __field(int, grplo)
                __field(int, grphi)
@@ -121,19 +121,17 @@ TRACE_EVENT(rcu_future_grace_period,
 
        TP_fast_assign(
                __entry->rcuname = rcuname;
-               __entry->gpnum = gpnum;
-               __entry->completed = completed;
-               __entry->c = c;
+               __entry->gp_seq = gp_seq;
+               __entry->gp_seq_req = gp_seq_req;
                __entry->level = level;
                __entry->grplo = grplo;
                __entry->grphi = grphi;
                __entry->gpevent = gpevent;
        ),
 
-       TP_printk("%s %lu %lu %lu %u %d %d %s",
-                 __entry->rcuname, __entry->gpnum, __entry->completed,
-                 __entry->c, __entry->level, __entry->grplo, __entry->grphi,
-                 __entry->gpevent)
+       TP_printk("%s %lu %lu %u %d %d %s",
+                 __entry->rcuname, __entry->gp_seq, __entry->gp_seq_req, __entry->level,
+                 __entry->grplo, __entry->grphi, __entry->gpevent)
 );
 
 /*
@@ -145,14 +143,14 @@ TRACE_EVENT(rcu_future_grace_period,
  */
 TRACE_EVENT(rcu_grace_period_init,
 
-       TP_PROTO(const char *rcuname, unsigned long gpnum, u8 level,
+       TP_PROTO(const char *rcuname, unsigned long gp_seq, u8 level,
                 int grplo, int grphi, unsigned long qsmask),
 
-       TP_ARGS(rcuname, gpnum, level, grplo, grphi, qsmask),
+       TP_ARGS(rcuname, gp_seq, level, grplo, grphi, qsmask),
 
        TP_STRUCT__entry(
                __field(const char *, rcuname)
-               __field(unsigned long, gpnum)
+               __field(unsigned long, gp_seq)
                __field(u8, level)
                __field(int, grplo)
                __field(int, grphi)
@@ -161,7 +159,7 @@ TRACE_EVENT(rcu_grace_period_init,
 
        TP_fast_assign(
                __entry->rcuname = rcuname;
-               __entry->gpnum = gpnum;
+               __entry->gp_seq = gp_seq;
                __entry->level = level;
                __entry->grplo = grplo;
                __entry->grphi = grphi;
@@ -169,7 +167,7 @@ TRACE_EVENT(rcu_grace_period_init,
        ),
 
        TP_printk("%s %lu %u %d %d %lx",
-                 __entry->rcuname, __entry->gpnum, __entry->level,
+                 __entry->rcuname, __entry->gp_seq, __entry->level,
                  __entry->grplo, __entry->grphi, __entry->qsmask)
 );
 
@@ -301,24 +299,24 @@ TRACE_EVENT(rcu_nocb_wake,
  */
 TRACE_EVENT(rcu_preempt_task,
 
-       TP_PROTO(const char *rcuname, int pid, unsigned long gpnum),
+       TP_PROTO(const char *rcuname, int pid, unsigned long gp_seq),
 
-       TP_ARGS(rcuname, pid, gpnum),
+       TP_ARGS(rcuname, pid, gp_seq),
 
        TP_STRUCT__entry(
                __field(const char *, rcuname)
-               __field(unsigned long, gpnum)
+               __field(unsigned long, gp_seq)
                __field(int, pid)
        ),
 
        TP_fast_assign(
                __entry->rcuname = rcuname;
-               __entry->gpnum = gpnum;
+               __entry->gp_seq = gp_seq;
                __entry->pid = pid;
        ),
 
        TP_printk("%s %lu %d",
-                 __entry->rcuname, __entry->gpnum, __entry->pid)
+                 __entry->rcuname, __entry->gp_seq, __entry->pid)
 );
 
 /*
@@ -328,23 +326,23 @@ TRACE_EVENT(rcu_preempt_task,
  */
 TRACE_EVENT(rcu_unlock_preempted_task,
 
-       TP_PROTO(const char *rcuname, unsigned long gpnum, int pid),
+       TP_PROTO(const char *rcuname, unsigned long gp_seq, int pid),
 
-       TP_ARGS(rcuname, gpnum, pid),
+       TP_ARGS(rcuname, gp_seq, pid),
 
        TP_STRUCT__entry(
                __field(const char *, rcuname)
-               __field(unsigned long, gpnum)
+               __field(unsigned long, gp_seq)
                __field(int, pid)
        ),
 
        TP_fast_assign(
                __entry->rcuname = rcuname;
-               __entry->gpnum = gpnum;
+               __entry->gp_seq = gp_seq;
                __entry->pid = pid;
        ),
 
-       TP_printk("%s %lu %d", __entry->rcuname, __entry->gpnum, __entry->pid)
+       TP_printk("%s %lu %d", __entry->rcuname, __entry->gp_seq, __entry->pid)
 );
 
 /*
@@ -357,15 +355,15 @@ TRACE_EVENT(rcu_unlock_preempted_task,
  */
 TRACE_EVENT(rcu_quiescent_state_report,
 
-       TP_PROTO(const char *rcuname, unsigned long gpnum,
+       TP_PROTO(const char *rcuname, unsigned long gp_seq,
                 unsigned long mask, unsigned long qsmask,
                 u8 level, int grplo, int grphi, int gp_tasks),
 
-       TP_ARGS(rcuname, gpnum, mask, qsmask, level, grplo, grphi, gp_tasks),
+       TP_ARGS(rcuname, gp_seq, mask, qsmask, level, grplo, grphi, gp_tasks),
 
        TP_STRUCT__entry(
                __field(const char *, rcuname)
-               __field(unsigned long, gpnum)
+               __field(unsigned long, gp_seq)
                __field(unsigned long, mask)
                __field(unsigned long, qsmask)
                __field(u8, level)
@@ -376,7 +374,7 @@ TRACE_EVENT(rcu_quiescent_state_report,
 
        TP_fast_assign(
                __entry->rcuname = rcuname;
-               __entry->gpnum = gpnum;
+               __entry->gp_seq = gp_seq;
                __entry->mask = mask;
                __entry->qsmask = qsmask;
                __entry->level = level;
@@ -386,41 +384,41 @@ TRACE_EVENT(rcu_quiescent_state_report,
        ),
 
        TP_printk("%s %lu %lx>%lx %u %d %d %u",
-                 __entry->rcuname, __entry->gpnum,
+                 __entry->rcuname, __entry->gp_seq,
                  __entry->mask, __entry->qsmask, __entry->level,
                  __entry->grplo, __entry->grphi, __entry->gp_tasks)
 );
 
 /*
  * Tracepoint for quiescent states detected by force_quiescent_state().
- * These trace events include the type of RCU, the grace-period number that
- * was blocked by the CPU, the CPU itself, and the type of quiescent state,
- * which can be "dti" for dyntick-idle mode, "ofl" for CPU offline, "kick"
- * when kicking a CPU that has been in dyntick-idle mode for too long, or
- * "rqc" if the CPU got a quiescent state via its rcu_qs_ctr.
+ * These trace events include the type of RCU, the grace-period number
+ * that was blocked by the CPU, the CPU itself, and the type of quiescent
+ * state, which can be "dti" for dyntick-idle mode, "kick" when kicking
+ * a CPU that has been in dyntick-idle mode for too long, or "rqc" if the
+ * CPU got a quiescent state via its rcu_qs_ctr.
  */
 TRACE_EVENT(rcu_fqs,
 
-       TP_PROTO(const char *rcuname, unsigned long gpnum, int cpu, const char *qsevent),
+       TP_PROTO(const char *rcuname, unsigned long gp_seq, int cpu, const char *qsevent),
 
-       TP_ARGS(rcuname, gpnum, cpu, qsevent),
+       TP_ARGS(rcuname, gp_seq, cpu, qsevent),
 
        TP_STRUCT__entry(
                __field(const char *, rcuname)
-               __field(unsigned long, gpnum)
+               __field(unsigned long, gp_seq)
                __field(int, cpu)
                __field(const char *, qsevent)
        ),
 
        TP_fast_assign(
                __entry->rcuname = rcuname;
-               __entry->gpnum = gpnum;
+               __entry->gp_seq = gp_seq;
                __entry->cpu = cpu;
                __entry->qsevent = qsevent;
        ),
 
        TP_printk("%s %lu %d %s",
-                 __entry->rcuname, __entry->gpnum,
+                 __entry->rcuname, __entry->gp_seq,
                  __entry->cpu, __entry->qsevent)
 );
 
@@ -753,23 +751,23 @@ TRACE_EVENT(rcu_barrier,
 
 #else /* #ifdef CONFIG_RCU_TRACE */
 
-#define trace_rcu_grace_period(rcuname, gpnum, gpevent) do { } while (0)
-#define trace_rcu_future_grace_period(rcuname, gpnum, completed, c, \
+#define trace_rcu_grace_period(rcuname, gp_seq, gpevent) do { } while (0)
+#define trace_rcu_future_grace_period(rcuname, gp_seq, gp_seq_req, \
                                      level, grplo, grphi, event) \
                                      do { } while (0)
-#define trace_rcu_grace_period_init(rcuname, gpnum, level, grplo, grphi, \
+#define trace_rcu_grace_period_init(rcuname, gp_seq, level, grplo, grphi, \
                                    qsmask) do { } while (0)
 #define trace_rcu_exp_grace_period(rcuname, gqseq, gpevent) \
        do { } while (0)
 #define trace_rcu_exp_funnel_lock(rcuname, level, grplo, grphi, gpevent) \
        do { } while (0)
 #define trace_rcu_nocb_wake(rcuname, cpu, reason) do { } while (0)
-#define trace_rcu_preempt_task(rcuname, pid, gpnum) do { } while (0)
-#define trace_rcu_unlock_preempted_task(rcuname, gpnum, pid) do { } while (0)
-#define trace_rcu_quiescent_state_report(rcuname, gpnum, mask, qsmask, level, \
+#define trace_rcu_preempt_task(rcuname, pid, gp_seq) do { } while (0)
+#define trace_rcu_unlock_preempted_task(rcuname, gp_seq, pid) do { } while (0)
+#define trace_rcu_quiescent_state_report(rcuname, gp_seq, mask, qsmask, level, \
                                         grplo, grphi, gp_tasks) do { } \
        while (0)
-#define trace_rcu_fqs(rcuname, gpnum, cpu, qsevent) do { } while (0)
+#define trace_rcu_fqs(rcuname, gp_seq, cpu, qsevent) do { } while (0)
 #define trace_rcu_dyntick(polarity, oldnesting, newnesting, dyntick) do { } while (0)
 #define trace_rcu_callback(rcuname, rhp, qlen_lazy, qlen) do { } while (0)
 #define trace_rcu_kfree_callback(rcuname, rhp, offset, qlen_lazy, qlen) \
index 0b5ddbe..972265f 100644 (file)
@@ -76,7 +76,7 @@ struct btf_type {
  */
 #define BTF_INT_ENCODING(VAL)  (((VAL) & 0x0f000000) >> 24)
 #define BTF_INT_OFFSET(VAL)    (((VAL  & 0x00ff0000)) >> 16)
-#define BTF_INT_BITS(VAL)      ((VAL)  & 0x0000ffff)
+#define BTF_INT_BITS(VAL)      ((VAL)  & 0x000000ff)
 
 /* Attributes stored in the BTF_INT_ENCODING */
 #define BTF_INT_SIGNED (1 << 0)
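
The int_data word packs three fields: encoding flags in bits 24-27, the bit offset in bits 16-23, and the bit width in the low bits. Since the kernel at this point rejects BTF ints wider than 64 bits, the width fits in a single byte, and the old 0xffff mask would let garbage in bits 8-15 slip through validation. A runnable userspace sketch of the unpacking (the sample value is illustrative):

    #include <stdio.h>
    #include <stdint.h>

    #define BTF_INT_ENCODING(VAL)  (((VAL) & 0x0f000000) >> 24)
    #define BTF_INT_OFFSET(VAL)    (((VAL) & 0x00ff0000) >> 16)
    #define BTF_INT_BITS(VAL)      ((VAL) & 0x000000ff)

    int main(void)
    {
            /* signed (encoding 0x1), bit offset 0, 32 bits wide */
            uint32_t int_data = (1U << 24) | (0U << 16) | 32;

            printf("encoding=%u offset=%u bits=%u\n",
                   BTF_INT_ENCODING(int_data), BTF_INT_OFFSET(int_data),
                   BTF_INT_BITS(int_data));
            return 0;
    }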
index b8e288a..eeb787b 100644 (file)
@@ -143,6 +143,8 @@ enum perf_event_sample_format {
        PERF_SAMPLE_PHYS_ADDR                   = 1U << 19,
 
        PERF_SAMPLE_MAX = 1U << 20,             /* non-ABI */
+
+       __PERF_SAMPLE_CALLCHAIN_EARLY           = 1ULL << 63,
 };
 
 /*
index 3b4ada1..5e13c54 100644 (file)
@@ -561,8 +561,8 @@ asmlinkage __visible void __init start_kernel(void)
        setup_command_line(command_line);
        setup_nr_cpu_ids();
        setup_per_cpu_areas();
-       boot_cpu_state_init();
        smp_prepare_boot_cpu(); /* arch-specific boot-cpu hooks */
+       boot_cpu_hotplug_init();
 
        build_all_zonelists(NULL);
        page_alloc_init();
index 5af1943..76e95e4 100644 (file)
--- a/ipc/sem.c
+++ b/ipc/sem.c
@@ -2118,7 +2118,7 @@ static long do_semtimedop(int semid, struct sembuf __user *tsops,
        }
 
        do {
-               queue.status = -EINTR;
+               WRITE_ONCE(queue.status, -EINTR);
                queue.sleeper = current;
 
                __set_current_state(TASK_INTERRUPTIBLE);
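
queue.status is stored by a waker and re-read locklessly by the sleeper after it wakes, so the plain assignment was a data race that the compiler could legally tear or reorder. A sketch of how the annotation pairs up across the two sides (simplified, not a literal excerpt from do_semtimedop()):

    WRITE_ONCE(queue.status, -EINTR);   /* sleeper: publish "not woken yet" */
    queue.sleeper = current;
    __set_current_state(TASK_INTERRUPTIBLE);
    /* ... sleep; a concurrent waker may store its result into queue.status ... */
    error = READ_ONCE(queue.status);    /* sleeper: pairs with the waker's store */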
index 051a3e1..fefa00d 100644 (file)
--- a/ipc/shm.c
+++ b/ipc/shm.c
@@ -427,6 +427,17 @@ static int shm_split(struct vm_area_struct *vma, unsigned long addr)
        return 0;
 }
 
+static unsigned long shm_pagesize(struct vm_area_struct *vma)
+{
+       struct file *file = vma->vm_file;
+       struct shm_file_data *sfd = shm_file_data(file);
+
+       if (sfd->vm_ops->pagesize)
+               return sfd->vm_ops->pagesize(vma);
+
+       return PAGE_SIZE;
+}
+
 #ifdef CONFIG_NUMA
 static int shm_set_policy(struct vm_area_struct *vma, struct mempolicy *new)
 {
@@ -554,6 +565,7 @@ static const struct vm_operations_struct shm_vm_ops = {
        .close  = shm_close,    /* callback for when the vm-area is released */
        .fault  = shm_fault,
        .split  = shm_split,
+       .pagesize = shm_pagesize,
 #if defined(CONFIG_NUMA)
        .set_policy = shm_set_policy,
        .get_policy = shm_get_policy,
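
The new .pagesize hook lets generic mm code ask a VMA for the true page size of its backing store; shm just forwards the question to the underlying file's vm_ops and falls back to PAGE_SIZE. For a hugetlb-backed segment, the forwarded-to hook plausibly reduces to something like the following sketch (huge_page_size() and hstate_vma() are existing hugetlb helpers, but the exact mm/hugetlb.c implementation may differ):

    static unsigned long hugetlb_vm_op_pagesize(struct vm_area_struct *vma)
    {
            /* e.g. 2 MiB or 1 GiB, depending on the hstate backing the VMA */
            return huge_page_size(hstate_vma(vma));
    }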
index ceb1c45..80d672a 100644 (file)
@@ -1279,8 +1279,12 @@ static void show_special(struct audit_context *context, int *call_panic)
                break;
        case AUDIT_KERN_MODULE:
                audit_log_format(ab, "name=");
-               audit_log_untrustedstring(ab, context->module.name);
-               kfree(context->module.name);
+               if (context->module.name) {
+                       audit_log_untrustedstring(ab, context->module.name);
+                       kfree(context->module.name);
+               } else
+                       audit_log_format(ab, "(null)");
+
                break;
        }
        audit_log_end(ab);
@@ -2411,8 +2415,9 @@ void __audit_log_kern_module(char *name)
 {
        struct audit_context *context = audit_context();
 
-       context->module.name = kmalloc(strlen(name) + 1, GFP_KERNEL);
-       strcpy(context->module.name, name);
+       context->module.name = kstrdup(name, GFP_KERNEL);
+       if (!context->module.name)
+               audit_log_lost("out of memory in __audit_log_kern_module");
        context->type = AUDIT_KERN_MODULE;
 }
 
index 544e58f..2aa55d0 100644 (file)
@@ -378,7 +378,7 @@ static int array_map_check_btf(const struct bpf_map *map, const struct btf *btf,
                return -EINVAL;
 
        value_type = btf_type_id_size(btf, &btf_value_id, &value_size);
-       if (!value_type || value_size > map->value_size)
+       if (!value_type || value_size != map->value_size)
                return -EINVAL;
 
        return 0;
index e016ac3..2590700 100644 (file)
@@ -450,7 +450,7 @@ static const struct btf_type *btf_type_by_id(const struct btf *btf, u32 type_id)
  */
 static bool btf_type_int_is_regular(const struct btf_type *t)
 {
-       u16 nr_bits, nr_bytes;
+       u8 nr_bits, nr_bytes;
        u32 int_data;
 
        int_data = btf_type_int(t);
@@ -993,12 +993,16 @@ static void btf_int_bits_seq_show(const struct btf *btf,
 {
        u16 left_shift_bits, right_shift_bits;
        u32 int_data = btf_type_int(t);
-       u16 nr_bits = BTF_INT_BITS(int_data);
-       u16 total_bits_offset;
-       u16 nr_copy_bytes;
-       u16 nr_copy_bits;
+       u8 nr_bits = BTF_INT_BITS(int_data);
+       u8 total_bits_offset;
+       u8 nr_copy_bytes;
+       u8 nr_copy_bits;
        u64 print_num;
 
+       /*
+        * bits_offset is at most 7.
+        * BTF_INT_OFFSET() cannot exceed 64 bits.
+        */
        total_bits_offset = bits_offset + BTF_INT_OFFSET(int_data);
        data += BITS_ROUNDDOWN_BYTES(total_bits_offset);
        bits_offset = BITS_PER_BYTE_MASKED(total_bits_offset);
@@ -1028,7 +1032,7 @@ static void btf_int_seq_show(const struct btf *btf, const struct btf_type *t,
        u32 int_data = btf_type_int(t);
        u8 encoding = BTF_INT_ENCODING(int_data);
        bool sign = encoding & BTF_INT_SIGNED;
-       u32 nr_bits = BTF_INT_BITS(int_data);
+       u8 nr_bits = BTF_INT_BITS(int_data);
 
        if (bits_offset || BTF_INT_OFFSET(int_data) ||
            BITS_PER_BYTE_MASKED(nr_bits)) {
@@ -1515,9 +1519,9 @@ static s32 btf_struct_check_meta(struct btf_verifier_env *env,
 {
        bool is_union = BTF_INFO_KIND(t->info) == BTF_KIND_UNION;
        const struct btf_member *member;
+       u32 meta_needed, last_offset;
        struct btf *btf = env->btf;
        u32 struct_size = t->size;
-       u32 meta_needed;
        u16 i;
 
        meta_needed = btf_type_vlen(t) * sizeof(*member);
@@ -1530,6 +1534,7 @@ static s32 btf_struct_check_meta(struct btf_verifier_env *env,
 
        btf_verifier_log_type(env, t, NULL);
 
+       last_offset = 0;
        for_each_member(i, t, member) {
                if (!btf_name_offset_valid(btf, member->name_off)) {
                        btf_verifier_log_member(env, t, member,
@@ -1551,6 +1556,16 @@ static s32 btf_struct_check_meta(struct btf_verifier_env *env,
                        return -EINVAL;
                }
 
+               /*
+                * ">" instead of ">=" because the last member could be
+                * "char a[0];"
+                */
+               if (last_offset > member->offset) {
+                       btf_verifier_log_member(env, t, member,
+                                               "Invalid member bits_offset");
+                       return -EINVAL;
+               }
+
                if (BITS_ROUNDUP_BYTES(member->offset) > struct_size) {
                        btf_verifier_log_member(env, t, member,
                                                "Memmber bits_offset exceeds its struct size");
@@ -1558,6 +1573,7 @@ static s32 btf_struct_check_meta(struct btf_verifier_env *env,
                }
 
                btf_verifier_log_member(env, t, member, NULL);
+               last_offset = member->offset;
        }
 
        return meta_needed;
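
The new monotonicity check rejects members whose bits_offset moves backwards, while the strict ">" keeps equal successive offsets legal, as zero-sized members occupy no bits. An illustrative layout (assuming a 64-bit long) that must still validate:

    struct ends_with_flex {
            unsigned long cnt;      /* bits_offset   0              */
            char tag[0];            /* bits_offset  64              */
            char data[0];           /* bits_offset  64, same as tag */
    };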
index e0918d1..46f5f29 100644 (file)
@@ -69,7 +69,7 @@ struct bpf_cpu_map {
 };
 
 static int bq_flush_to_queue(struct bpf_cpu_map_entry *rcpu,
-                            struct xdp_bulk_queue *bq);
+                            struct xdp_bulk_queue *bq, bool in_napi_ctx);
 
 static u64 cpu_map_bitmap_size(const union bpf_attr *attr)
 {
@@ -375,7 +375,7 @@ static void __cpu_map_entry_free(struct rcu_head *rcu)
                struct xdp_bulk_queue *bq = per_cpu_ptr(rcpu->bulkq, cpu);
 
                /* No concurrent bq_enqueue can run at this point */
-               bq_flush_to_queue(rcpu, bq);
+               bq_flush_to_queue(rcpu, bq, false);
        }
        free_percpu(rcpu->bulkq);
        /* Cannot kthread_stop() here, last put free rcpu resources */
@@ -558,7 +558,7 @@ const struct bpf_map_ops cpu_map_ops = {
 };
 
 static int bq_flush_to_queue(struct bpf_cpu_map_entry *rcpu,
-                            struct xdp_bulk_queue *bq)
+                            struct xdp_bulk_queue *bq, bool in_napi_ctx)
 {
        unsigned int processed = 0, drops = 0;
        const int to_cpu = rcpu->cpu;
@@ -578,7 +578,10 @@ static int bq_flush_to_queue(struct bpf_cpu_map_entry *rcpu,
                err = __ptr_ring_produce(q, xdpf);
                if (err) {
                        drops++;
-                       xdp_return_frame_rx_napi(xdpf);
+                       if (likely(in_napi_ctx))
+                               xdp_return_frame_rx_napi(xdpf);
+                       else
+                               xdp_return_frame(xdpf);
                }
                processed++;
        }
@@ -598,7 +601,7 @@ static int bq_enqueue(struct bpf_cpu_map_entry *rcpu, struct xdp_frame *xdpf)
        struct xdp_bulk_queue *bq = this_cpu_ptr(rcpu->bulkq);
 
        if (unlikely(bq->count == CPU_MAP_BULK_SIZE))
-               bq_flush_to_queue(rcpu, bq);
+               bq_flush_to_queue(rcpu, bq, true);
 
        /* Notice, xdp_buff/page MUST be queued here, long enough for
         * the driver code invoking us to finish, due to driver
@@ -661,7 +664,7 @@ void __cpu_map_flush(struct bpf_map *map)
 
                /* Flush all frames in bulkq to real queue */
                bq = this_cpu_ptr(rcpu->bulkq);
-               bq_flush_to_queue(rcpu, bq);
+               bq_flush_to_queue(rcpu, bq, true);
 
                /* If already running, costs spin_lock_irqsave + smp_mb */
                wake_up_process(rcpu->kthread);
index d361fc1..750d45e 100644 (file)
@@ -217,7 +217,8 @@ void __dev_map_insert_ctx(struct bpf_map *map, u32 bit)
 }
 
 static int bq_xmit_all(struct bpf_dtab_netdev *obj,
-                      struct xdp_bulk_queue *bq, u32 flags)
+                      struct xdp_bulk_queue *bq, u32 flags,
+                      bool in_napi_ctx)
 {
        struct net_device *dev = obj->dev;
        int sent = 0, drops = 0, err = 0;
@@ -254,7 +255,10 @@ error:
                struct xdp_frame *xdpf = bq->q[i];
 
                /* RX path under NAPI protection, can return frames faster */
-               xdp_return_frame_rx_napi(xdpf);
+               if (likely(in_napi_ctx))
+                       xdp_return_frame_rx_napi(xdpf);
+               else
+                       xdp_return_frame(xdpf);
                drops++;
        }
        goto out;
@@ -286,7 +290,7 @@ void __dev_map_flush(struct bpf_map *map)
                __clear_bit(bit, bitmap);
 
                bq = this_cpu_ptr(dev->bulkq);
-               bq_xmit_all(dev, bq, XDP_XMIT_FLUSH);
+               bq_xmit_all(dev, bq, XDP_XMIT_FLUSH, true);
        }
 }
 
@@ -316,7 +320,7 @@ static int bq_enqueue(struct bpf_dtab_netdev *obj, struct xdp_frame *xdpf,
        struct xdp_bulk_queue *bq = this_cpu_ptr(obj->bulkq);
 
        if (unlikely(bq->count == DEV_MAP_BULK_SIZE))
-               bq_xmit_all(obj, bq, 0);
+               bq_xmit_all(obj, bq, 0, true);
 
        /* Ingress dev_rx will be the same for all xdp_frame's in
         * bulk_queue, because bq stored per-CPU and must be flushed
@@ -385,7 +389,7 @@ static void dev_map_flush_old(struct bpf_dtab_netdev *dev)
                        __clear_bit(dev->bit, bitmap);
 
                        bq = per_cpu_ptr(dev->bulkq, cpu);
-                       bq_xmit_all(dev, bq, XDP_XMIT_FLUSH);
+                       bq_xmit_all(dev, bq, XDP_XMIT_FLUSH, false);
                }
        }
 }
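
cpumap and devmap now follow the same convention: callers tell the flush helper whether they run under NAPI, and only then may the lockless rx-napi return path be used; the teardown paths (__cpu_map_entry_free(), dev_map_flush_old()) pass false and take the generic path. The shared pattern, as a sketch rather than a literal kernel helper:

    static void return_xdp_frame(struct xdp_frame *xdpf, bool in_napi_ctx)
    {
            if (likely(in_napi_ctx))
                    xdp_return_frame_rx_napi(xdpf); /* NAPI softirq context only */
            else
                    xdp_return_frame(xdpf);         /* safe from any context */
    }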
index 98fb793..c4d75c5 100644 (file)
@@ -1048,12 +1048,12 @@ static int bpf_tcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
        timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
 
        while (msg_data_left(msg)) {
-               struct sk_msg_buff *m;
+               struct sk_msg_buff *m = NULL;
                bool enospc = false;
                int copy;
 
                if (sk->sk_err) {
-                       err = sk->sk_err;
+                       err = -sk->sk_err;
                        goto out_err;
                }
 
@@ -1116,8 +1116,11 @@ wait_for_sndbuf:
                set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
 wait_for_memory:
                err = sk_stream_wait_memory(sk, &timeo);
-               if (err)
+               if (err) {
+                       if (m && m != psock->cork)
+                               free_start_sg(sk, m);
                        goto out_err;
+               }
        }
 out_err:
        if (err < 0)
index 191097c..15be70a 100644 (file)
@@ -2015,7 +2015,7 @@ void __init boot_cpu_init(void)
 /*
  * Must be called _AFTER_ setting up the per_cpu areas
  */
-void __init boot_cpu_state_init(void)
+void __init boot_cpu_hotplug_init(void)
 {
        per_cpu_ptr(&cpuhp_state, smp_processor_id())->state = CPUHP_ONLINE;
 }
index 8f0434a..eec2d5f 100644 (file)
@@ -6343,7 +6343,7 @@ static u64 perf_virt_to_phys(u64 virt)
 
 static struct perf_callchain_entry __empty_callchain = { .nr = 0, };
 
-static struct perf_callchain_entry *
+struct perf_callchain_entry *
 perf_callchain(struct perf_event *event, struct pt_regs *regs)
 {
        bool kernel = !event->attr.exclude_callchain_kernel;
@@ -6382,7 +6382,9 @@ void perf_prepare_sample(struct perf_event_header *header,
        if (sample_type & PERF_SAMPLE_CALLCHAIN) {
                int size = 1;
 
-               data->callchain = perf_callchain(event, regs);
+               if (!(sample_type & __PERF_SAMPLE_CALLCHAIN_EARLY))
+                       data->callchain = perf_callchain(event, regs);
+
                size += data->callchain->nr;
 
                header->size += size * sizeof(u64);
@@ -7335,6 +7337,10 @@ static bool perf_addr_filter_match(struct perf_addr_filter *filter,
                                     struct file *file, unsigned long offset,
                                     unsigned long size)
 {
+       /* d_inode(NULL) won't be equal to any mapped user-space file */
+       if (!filter->path.dentry)
+               return false;
+
        if (d_inode(filter->path.dentry) != file_inode(file))
                return false;
 
index a191c05..1b27bab 100644 (file)
@@ -312,10 +312,8 @@ struct vm_area_struct *vm_area_alloc(struct mm_struct *mm)
 {
        struct vm_area_struct *vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
 
-       if (vma) {
-               vma->vm_mm = mm;
-               INIT_LIST_HEAD(&vma->anon_vma_chain);
-       }
+       if (vma)
+               vma_init(vma, mm);
        return vma;
 }
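
vm_area_alloc() now delegates to the new vma_init() helper so that every allocation site initializes a VMA the same way. A minimal sketch, assuming the 4.18-era definition (the always-valid dummy vm_ops detail may differ):

    static inline void vma_init(struct vm_area_struct *vma, struct mm_struct *mm)
    {
            static const struct vm_operations_struct dummy_vm_ops = {};

            vma->vm_mm = mm;
            vma->vm_ops = &dummy_vm_ops;    /* callers may rely on vm_ops != NULL */
            INIT_LIST_HEAD(&vma->anon_vma_chain);
    }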
 
index c6766f3..5f3e2ba 100644 (file)
@@ -134,7 +134,6 @@ config GENERIC_IRQ_DEBUGFS
 endmenu
 
 config GENERIC_IRQ_MULTI_HANDLER
-       depends on !MULTI_IRQ_HANDLER
        bool
        help
          Allow specifying the low level IRQ handler at run time.
index afc7f90..578d0e5 100644 (file)
@@ -443,6 +443,7 @@ static void free_desc(unsigned int irq)
         * We free the descriptor, masks and stat fields via RCU. That
         * allows demultiplex interrupts to do rcu based management of
         * the child interrupts.
+        * This also allows us to use rcu in kstat_irqs_usr().
         */
        call_rcu(&desc->rcu, delayed_free_desc);
 }
@@ -928,17 +929,17 @@ unsigned int kstat_irqs(unsigned int irq)
  * kstat_irqs_usr - Get the statistics for an interrupt
  * @irq:       The interrupt number
  *
- * Returns the sum of interrupt counts on all cpus since boot for
- * @irq. Contrary to kstat_irqs() this can be called from any
- * preemptible context. It's protected against concurrent removal of
- * an interrupt descriptor when sparse irqs are enabled.
+ * Returns the sum of interrupt counts on all cpus since boot for @irq.
+ * Contrary to kstat_irqs() this can be called from any context.
+ * It uses RCU since a concurrent removal of an interrupt descriptor
+ * must observe an RCU grace period before delayed_free_desc()/irq_kobj_release().
  */
 unsigned int kstat_irqs_usr(unsigned int irq)
 {
        unsigned int sum;
 
-       irq_lock_sparse();
+       rcu_read_lock();
        sum = kstat_irqs(irq);
-       irq_unlock_sparse();
+       rcu_read_unlock();
        return sum;
 }
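
The conversion relies on the lifetime rule noted in free_desc() above: descriptors are only freed via call_rcu(), so a reader that looked one up inside an RCU read-side critical section can keep using it until rcu_read_unlock(). Schematically (simplified; the real free path also releases masks and kobjects):

    /* updater: unpublish the descriptor, then free it after a grace period */
    static void delayed_free_desc(struct rcu_head *rhp)
    {
            kfree(container_of(rhp, struct irq_desc, rcu));
    }
    ...
    call_rcu(&desc->rcu, delayed_free_desc);

    /* reader: the descriptor cannot be freed while this section is active */
    rcu_read_lock();
    sum = kstat_irqs(irq);      /* looks up and walks the descriptor */
    rcu_read_unlock();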
index daeabd7..fb86146 100644 (file)
@@ -790,9 +790,19 @@ static irqreturn_t irq_forced_secondary_handler(int irq, void *dev_id)
 
 static int irq_wait_for_interrupt(struct irqaction *action)
 {
-       set_current_state(TASK_INTERRUPTIBLE);
+       for (;;) {
+               set_current_state(TASK_INTERRUPTIBLE);
 
-       while (!kthread_should_stop()) {
+               if (kthread_should_stop()) {
+                       /* may need to run one last time */
+                       if (test_and_clear_bit(IRQTF_RUNTHREAD,
+                                              &action->thread_flags)) {
+                               __set_current_state(TASK_RUNNING);
+                               return 0;
+                       }
+                       __set_current_state(TASK_RUNNING);
+                       return -1;
+               }
 
                if (test_and_clear_bit(IRQTF_RUNTHREAD,
                                       &action->thread_flags)) {
@@ -800,10 +810,7 @@ static int irq_wait_for_interrupt(struct irqaction *action)
                        return 0;
                }
                schedule();
-               set_current_state(TASK_INTERRUPTIBLE);
        }
-       __set_current_state(TASK_RUNNING);
-       return -1;
 }
 
 /*
@@ -1024,11 +1031,8 @@ static int irq_thread(void *data)
        /*
         * This is the regular exit path. __free_irq() is stopping the
         * thread via kthread_stop() after calling
-        * synchronize_irq(). So neither IRQTF_RUNTHREAD nor the
-        * oneshot mask bit can be set. We cannot verify that as we
-        * cannot touch the oneshot mask at this point anymore as
-        * __setup_irq() might have given out currents thread_mask
-        * again.
+        * synchronize_hardirq(). So neither IRQTF_RUNTHREAD nor the
+        * oneshot mask bit can be set.
         */
        task_work_cancel(current, irq_thread_dtor);
        return 0;
@@ -1068,6 +1072,13 @@ static int irq_setup_forced_threading(struct irqaction *new)
        if (new->flags & (IRQF_NO_THREAD | IRQF_PERCPU | IRQF_ONESHOT))
                return 0;
 
+       /*
+        * No further action required for interrupts which are already
+        * requested as threaded interrupts
+        */
+       if (new->handler == irq_default_primary_handler)
+               return 0;
+
        new->flags |= IRQF_ONESHOT;
 
        /*
@@ -1075,7 +1086,7 @@ static int irq_setup_forced_threading(struct irqaction *new)
         * thread handler. We force thread them as well by creating a
         * secondary action.
         */
-       if (new->handler != irq_default_primary_handler && new->thread_fn) {
+       if (new->handler && new->thread_fn) {
                /* Allocate the secondary action */
                new->secondary = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
                if (!new->secondary)
@@ -1244,8 +1255,10 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
 
        /*
         * Protects against a concurrent __free_irq() call which might wait
-        * for synchronize_irq() to complete without holding the optional
-        * chip bus lock and desc->lock.
+        * for synchronize_hardirq() to complete without holding the optional
+        * chip bus lock and desc->lock. Also protects against handing out
+        * a recycled oneshot thread_mask bit while it's still in use by
+        * its previous owner.
         */
        mutex_lock(&desc->request_mutex);
 
@@ -1564,9 +1577,6 @@ static struct irqaction *__free_irq(struct irq_desc *desc, void *dev_id)
 
        WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq);
 
-       if (!desc)
-               return NULL;
-
        mutex_lock(&desc->request_mutex);
        chip_bus_lock(desc);
        raw_spin_lock_irqsave(&desc->lock, flags);
@@ -1613,11 +1623,11 @@ static struct irqaction *__free_irq(struct irq_desc *desc, void *dev_id)
        /*
         * Drop bus_lock here so the changes which were done in the chip
         * callbacks above are synced out to the irq chips which hang
-        * behind a slow bus (I2C, SPI) before calling synchronize_irq().
+        * behind a slow bus (I2C, SPI) before calling synchronize_hardirq().
         *
         * Aside of that the bus_lock can also be taken from the threaded
         * handler in irq_finalize_oneshot() which results in a deadlock
-        * because synchronize_irq() would wait forever for the thread to
+        * because kthread_stop() would wait forever for the thread to
         * complete, which is blocked on the bus lock.
         *
         * The still held desc->request_mutex() protects against a
@@ -1629,7 +1639,7 @@ static struct irqaction *__free_irq(struct irq_desc *desc, void *dev_id)
        unregister_handler_proc(irq, action);
 
        /* Make sure it's not being used on another CPU: */
-       synchronize_irq(irq);
+       synchronize_hardirq(irq);
 
 #ifdef CONFIG_DEBUG_SHIRQ
        /*
@@ -1638,7 +1648,7 @@ static struct irqaction *__free_irq(struct irq_desc *desc, void *dev_id)
         * is so by doing an extra call to the handler ....
         *
         * ( We do this after actually deregistering it, to make sure that a
-        *   'real' IRQ doesn't run in parallel with our fake. )
+        *   'real' IRQ doesn't run in parallel with our fake. )
         */
        if (action->flags & IRQF_SHARED) {
                local_irq_save(flags);
@@ -1647,6 +1657,12 @@ static struct irqaction *__free_irq(struct irq_desc *desc, void *dev_id)
        }
 #endif
 
+       /*
+        * The action has already been removed above, but the thread writes
+        * its oneshot mask bit when it completes. However, request_mutex is
+        * held across this, which prevents __setup_irq() from handing out
+        * the same bit to a newly requested action.
+        */
        if (action->thread) {
                kthread_stop(action->thread);
                put_task_struct(action->thread);
index 37eda10..da9addb 100644 (file)
@@ -475,22 +475,24 @@ int show_interrupts(struct seq_file *p, void *v)
                seq_putc(p, '\n');
        }
 
-       irq_lock_sparse();
+       rcu_read_lock();
        desc = irq_to_desc(i);
        if (!desc)
                goto outsparse;
 
-       raw_spin_lock_irqsave(&desc->lock, flags);
-       for_each_online_cpu(j)
-               any_count |= kstat_irqs_cpu(i, j);
-       action = desc->action;
-       if ((!action || irq_desc_is_chained(desc)) && !any_count)
-               goto out;
+       if (desc->kstat_irqs)
+               for_each_online_cpu(j)
+                       any_count |= *per_cpu_ptr(desc->kstat_irqs, j);
+
+       if ((!desc->action || irq_desc_is_chained(desc)) && !any_count)
+               goto outsparse;
 
        seq_printf(p, "%*d: ", prec, i);
        for_each_online_cpu(j)
-               seq_printf(p, "%10u ", kstat_irqs_cpu(i, j));
+               seq_printf(p, "%10u ", desc->kstat_irqs ?
+                                       *per_cpu_ptr(desc->kstat_irqs, j) : 0);
 
+       raw_spin_lock_irqsave(&desc->lock, flags);
        if (desc->irq_data.chip) {
                if (desc->irq_data.chip->irq_print_chip)
                        desc->irq_data.chip->irq_print_chip(&desc->irq_data, p);
@@ -511,6 +513,7 @@ int show_interrupts(struct seq_file *p, void *v)
        if (desc->name)
                seq_printf(p, "-%-8s", desc->name);
 
+       action = desc->action;
        if (action) {
                seq_printf(p, "  %s", action->name);
                while ((action = action->next) != NULL)
@@ -518,10 +521,9 @@ int show_interrupts(struct seq_file *p, void *v)
        }
 
        seq_putc(p, '\n');
-out:
        raw_spin_unlock_irqrestore(&desc->lock, flags);
 outsparse:
-       irq_unlock_sparse();
+       rcu_read_unlock();
        return 0;
 }
 #endif
index 11b591e..087d18d 100644 (file)
@@ -325,8 +325,14 @@ struct task_struct *__kthread_create_on_node(int (*threadfn)(void *data),
        task = create->result;
        if (!IS_ERR(task)) {
                static const struct sched_param param = { .sched_priority = 0 };
+               char name[TASK_COMM_LEN];
 
-               vsnprintf(task->comm, sizeof(task->comm), namefmt, args);
+               /*
+                * task is already visible to other tasks, so updating
+                * COMM must be protected.
+                */
+               vsnprintf(name, sizeof(name), namefmt, args);
+               set_task_comm(task, name);
                /*
                 * root may have changed our (kthreadd's) priority or CPU mask.
                 * The kernel thread should not inherit these properties.
index 8402b33..57bef4f 100644 (file)
@@ -21,6 +21,9 @@
  *          Davidlohr Bueso <dave@stgolabs.net>
  *     Based on kernel/rcu/torture.c.
  */
+
+#define pr_fmt(fmt) fmt
+
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/kthread.h>
@@ -57,7 +60,7 @@ torture_param(int, shutdown_secs, 0, "Shutdown time (j), <= zero to disable.");
 torture_param(int, stat_interval, 60,
             "Number of seconds between stats printk()s");
 torture_param(int, stutter, 5, "Number of jiffies to run/halt test, 0=disable");
-torture_param(bool, verbose, true,
+torture_param(int, verbose, 1,
             "Enable verbose debugging printk()s");
 
 static char *torture_type = "spin_lock";
index 4f014be..2823d41 100644 (file)
@@ -1465,6 +1465,29 @@ rt_mutex_fastunlock(struct rt_mutex *lock,
                rt_mutex_postunlock(&wake_q);
 }
 
+static inline void __rt_mutex_lock(struct rt_mutex *lock, unsigned int subclass)
+{
+       might_sleep();
+
+       mutex_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
+       rt_mutex_fastlock(lock, TASK_UNINTERRUPTIBLE, rt_mutex_slowlock);
+}
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+/**
+ * rt_mutex_lock_nested - lock a rt_mutex
+ *
+ * @lock: the rt_mutex to be locked
+ * @subclass: the lockdep subclass
+ */
+void __sched rt_mutex_lock_nested(struct rt_mutex *lock, unsigned int subclass)
+{
+       __rt_mutex_lock(lock, subclass);
+}
+EXPORT_SYMBOL_GPL(rt_mutex_lock_nested);
+#endif
+
+#ifndef CONFIG_DEBUG_LOCK_ALLOC
 /**
  * rt_mutex_lock - lock a rt_mutex
  *
@@ -1472,12 +1495,10 @@ rt_mutex_fastunlock(struct rt_mutex *lock,
  */
 void __sched rt_mutex_lock(struct rt_mutex *lock)
 {
-       might_sleep();
-
-       mutex_acquire(&lock->dep_map, 0, 0, _RET_IP_);
-       rt_mutex_fastlock(lock, TASK_UNINTERRUPTIBLE, rt_mutex_slowlock);
+       __rt_mutex_lock(lock, 0);
 }
 EXPORT_SYMBOL_GPL(rt_mutex_lock);
+#endif
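
With lockdep enabled, rt_mutex_lock() is now just the subclass-0 flavor of the nested variant. A hypothetical caller that legitimately nests two rt_mutexes of the same lock class annotates the inner acquisition with a subclass, conventionally SINGLE_DEPTH_NESTING, so lockdep does not report a false recursive deadlock (parent/child are illustrative names):

    rt_mutex_lock(&parent->lock);
    rt_mutex_lock_nested(&child->lock, SINGLE_DEPTH_NESTING);
    /* ... operate on both objects ... */
    rt_mutex_unlock(&child->lock);
    rt_mutex_unlock(&parent->lock);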
 
 /**
  * rt_mutex_lock_interruptible - lock a rt_mutex interruptible
index 5857267..3828336 100644 (file)
@@ -176,10 +176,27 @@ void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap)
        unsigned long pfn, pgoff, order;
        pgprot_t pgprot = PAGE_KERNEL;
        int error, nid, is_ram;
+       struct dev_pagemap *conflict_pgmap;
 
        align_start = res->start & ~(SECTION_SIZE - 1);
        align_size = ALIGN(res->start + resource_size(res), SECTION_SIZE)
                - align_start;
+       align_end = align_start + align_size - 1;
+
+       conflict_pgmap = get_dev_pagemap(PHYS_PFN(align_start), NULL);
+       if (conflict_pgmap) {
+               dev_WARN(dev, "Conflicting mapping in same section\n");
+               put_dev_pagemap(conflict_pgmap);
+               return ERR_PTR(-ENOMEM);
+       }
+
+       conflict_pgmap = get_dev_pagemap(PHYS_PFN(align_end), NULL);
+       if (conflict_pgmap) {
+               dev_WARN(dev, "Conflicting mapping in same section\n");
+               put_dev_pagemap(conflict_pgmap);
+               return ERR_PTR(-ENOMEM);
+       }
+
        is_ram = region_intersects(align_start, align_size,
                IORESOURCE_SYSTEM_RAM, IORES_DESC_NONE);
 
@@ -199,7 +216,6 @@ void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap)
 
        mutex_lock(&pgmap_lock);
        error = 0;
-       align_end = align_start + align_size - 1;
 
        foreach_order_pgoff(res, order, pgoff) {
                error = __radix_tree_insert(&pgmap_radix,
@@ -305,7 +321,7 @@ EXPORT_SYMBOL_GPL(get_dev_pagemap);
 
 #ifdef CONFIG_DEV_PAGEMAP_OPS
 DEFINE_STATIC_KEY_FALSE(devmap_managed_key);
-EXPORT_SYMBOL_GPL(devmap_managed_key);
+EXPORT_SYMBOL(devmap_managed_key);
 static atomic_t devmap_enable;
 
 /*
@@ -346,5 +362,5 @@ void __put_devmap_managed_page(struct page *page)
        } else if (!count)
                __put_page(page);
 }
-EXPORT_SYMBOL_GPL(__put_devmap_managed_page);
+EXPORT_SYMBOL(__put_devmap_managed_page);
 #endif /* CONFIG_DEV_PAGEMAP_OPS */
index 40cea67..4d04683 100644 (file)
@@ -91,7 +91,17 @@ static inline void rcu_seq_end(unsigned long *sp)
        WRITE_ONCE(*sp, rcu_seq_endval(sp));
 }
 
-/* Take a snapshot of the update side's sequence number. */
+/*
+ * rcu_seq_snap - Take a snapshot of the update side's sequence number.
+ *
+ * This function returns the earliest value of the grace-period sequence number
+ * that will indicate that a full grace period has elapsed since the current
+ * time.  Once the grace-period sequence number has reached this value, it will
+ * be safe to invoke all callbacks that have been registered prior to the
+ * current time. This value is the current grace-period number plus two to the
+ * power of the number of low-order bits reserved for state, then rounded up to
+ * the next value in which the state bits are all zero.
+ */
 static inline unsigned long rcu_seq_snap(unsigned long *sp)
 {
        unsigned long s;
@@ -107,6 +117,15 @@ static inline unsigned long rcu_seq_current(unsigned long *sp)
        return READ_ONCE(*sp);
 }
 
+/*
+ * Given a snapshot from rcu_seq_snap(), determine whether or not the
+ * corresponding update-side operation has started.
+ */
+static inline bool rcu_seq_started(unsigned long *sp, unsigned long s)
+{
+       return ULONG_CMP_LT((s - 1) & ~RCU_SEQ_STATE_MASK, READ_ONCE(*sp));
+}
+
 /*
  * Given a snapshot from rcu_seq_snap(), determine whether or not a
  * full update-side operation has occurred.
@@ -116,6 +135,45 @@ static inline bool rcu_seq_done(unsigned long *sp, unsigned long s)
        return ULONG_CMP_GE(READ_ONCE(*sp), s);
 }
 
+/*
+ * Has a grace period completed since the time the old gp_seq was collected?
+ */
+static inline bool rcu_seq_completed_gp(unsigned long old, unsigned long new)
+{
+       return ULONG_CMP_LT(old, new & ~RCU_SEQ_STATE_MASK);
+}
+
+/*
+ * Has a grace period started since the time the old gp_seq was collected?
+ */
+static inline bool rcu_seq_new_gp(unsigned long old, unsigned long new)
+{
+       return ULONG_CMP_LT((old + RCU_SEQ_STATE_MASK) & ~RCU_SEQ_STATE_MASK,
+                           new);
+}
+
+/*
+ * Roughly how many full grace periods have elapsed between the collection
+ * of the two specified grace periods?
+ */
+static inline unsigned long rcu_seq_diff(unsigned long new, unsigned long old)
+{
+       unsigned long rnd_diff;
+
+       if (old == new)
+               return 0;
+       /*
+        * Compute the number of grace periods (still shifted up), plus
+        * one if either of new and old is not an exact grace period.
+        */
+       rnd_diff = (new & ~RCU_SEQ_STATE_MASK) -
+                  ((old + RCU_SEQ_STATE_MASK) & ~RCU_SEQ_STATE_MASK) +
+                  ((new & RCU_SEQ_STATE_MASK) || (old & RCU_SEQ_STATE_MASK));
+       if (ULONG_CMP_GE(RCU_SEQ_STATE_MASK, rnd_diff))
+               return 1; /* Definitely no grace period has elapsed. */
+       return ((rnd_diff - RCU_SEQ_STATE_MASK - 1) >> RCU_SEQ_CTR_SHIFT) + 2;
+}
+
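
The gp_seq arithmetic is easier to see with concrete numbers. A runnable userspace sketch of the rounding that rcu_seq_snap()'s comment above describes, assuming RCU_SEQ_CTR_SHIFT == 2 (so the low two bits carry state and RCU_SEQ_STATE_MASK == 3):

    #include <stdio.h>

    #define RCU_SEQ_CTR_SHIFT  2
    #define RCU_SEQ_STATE_MASK ((1UL << RCU_SEQ_CTR_SHIFT) - 1)

    /* round up to the next state-bits-all-zero value one full GP away */
    static unsigned long seq_snap(unsigned long s)
    {
            return (s + 2 * RCU_SEQ_STATE_MASK + 1) & ~RCU_SEQ_STATE_MASK;
    }

    int main(void)
    {
            unsigned long idle    = 5UL << RCU_SEQ_CTR_SHIFT;       /* counter 5, idle */
            unsigned long running = (5UL << RCU_SEQ_CTR_SHIFT) | 1; /* counter 5, GP running */

            /* idle: one full GP from now ends at counter 6 (0x18); running:
             * the current GP's remainder cannot be trusted, so wait for
             * counter 7 (0x1c) */
            printf("snap(idle)=%#lx snap(running)=%#lx\n",
                   seq_snap(idle), seq_snap(running));
            return 0;
    }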
 /*
  * debug_rcu_head_queue()/debug_rcu_head_unqueue() are used internally
  * by call_rcu() and rcu callback execution, and are therefore not part of the
@@ -276,6 +334,9 @@ static inline void rcu_init_levelspread(int *levelspread, const int *levelcnt)
 /* Is this rcu_node a leaf? */
 #define rcu_is_leaf_node(rnp) ((rnp)->level == rcu_num_lvls - 1)
 
+/* Is this rcu_node the last leaf? */
+#define rcu_is_last_leaf_node(rsp, rnp) ((rnp) == &(rsp)->node[rcu_num_nodes - 1])
+
 /*
  * Do a full breadth-first scan of the rcu_node structures for the
  * specified rcu_state structure.
@@ -405,8 +466,7 @@ enum rcutorture_type {
 
 #if defined(CONFIG_TREE_RCU) || defined(CONFIG_PREEMPT_RCU)
 void rcutorture_get_gp_data(enum rcutorture_type test_type, int *flags,
-                           unsigned long *gpnum, unsigned long *completed);
-void rcutorture_record_test_transition(void);
+                           unsigned long *gp_seq);
 void rcutorture_record_progress(unsigned long vernum);
 void do_trace_rcu_torture_read(const char *rcutorturename,
                               struct rcu_head *rhp,
@@ -415,15 +475,11 @@ void do_trace_rcu_torture_read(const char *rcutorturename,
                               unsigned long c);
 #else
 static inline void rcutorture_get_gp_data(enum rcutorture_type test_type,
-                                         int *flags,
-                                         unsigned long *gpnum,
-                                         unsigned long *completed)
+                                         int *flags, unsigned long *gp_seq)
 {
        *flags = 0;
-       *gpnum = 0;
-       *completed = 0;
+       *gp_seq = 0;
 }
-static inline void rcutorture_record_test_transition(void) { }
 static inline void rcutorture_record_progress(unsigned long vernum) { }
 #ifdef CONFIG_RCU_TRACE
 void do_trace_rcu_torture_read(const char *rcutorturename,
@@ -441,31 +497,26 @@ void do_trace_rcu_torture_read(const char *rcutorturename,
 
 static inline void srcutorture_get_gp_data(enum rcutorture_type test_type,
                                           struct srcu_struct *sp, int *flags,
-                                          unsigned long *gpnum,
-                                          unsigned long *completed)
+                                          unsigned long *gp_seq)
 {
        if (test_type != SRCU_FLAVOR)
                return;
        *flags = 0;
-       *completed = sp->srcu_idx;
-       *gpnum = *completed;
+       *gp_seq = sp->srcu_idx;
 }
 
 #elif defined(CONFIG_TREE_SRCU)
 
 void srcutorture_get_gp_data(enum rcutorture_type test_type,
                             struct srcu_struct *sp, int *flags,
-                            unsigned long *gpnum, unsigned long *completed);
+                            unsigned long *gp_seq);
 
 #endif
 
 #ifdef CONFIG_TINY_RCU
-static inline unsigned long rcu_batches_started(void) { return 0; }
-static inline unsigned long rcu_batches_started_bh(void) { return 0; }
-static inline unsigned long rcu_batches_started_sched(void) { return 0; }
-static inline unsigned long rcu_batches_completed(void) { return 0; }
-static inline unsigned long rcu_batches_completed_bh(void) { return 0; }
-static inline unsigned long rcu_batches_completed_sched(void) { return 0; }
+static inline unsigned long rcu_get_gp_seq(void) { return 0; }
+static inline unsigned long rcu_bh_get_gp_seq(void) { return 0; }
+static inline unsigned long rcu_sched_get_gp_seq(void) { return 0; }
 static inline unsigned long rcu_exp_batches_completed(void) { return 0; }
 static inline unsigned long rcu_exp_batches_completed_sched(void) { return 0; }
 static inline unsigned long
@@ -474,19 +525,16 @@ static inline void rcu_force_quiescent_state(void) { }
 static inline void rcu_bh_force_quiescent_state(void) { }
 static inline void rcu_sched_force_quiescent_state(void) { }
 static inline void show_rcu_gp_kthreads(void) { }
+static inline int rcu_get_gp_kthreads_prio(void) { return 0; }
 #else /* #ifdef CONFIG_TINY_RCU */
-extern unsigned long rcutorture_testseq;
-extern unsigned long rcutorture_vernum;
-unsigned long rcu_batches_started(void);
-unsigned long rcu_batches_started_bh(void);
-unsigned long rcu_batches_started_sched(void);
-unsigned long rcu_batches_completed(void);
-unsigned long rcu_batches_completed_bh(void);
-unsigned long rcu_batches_completed_sched(void);
+unsigned long rcu_get_gp_seq(void);
+unsigned long rcu_bh_get_gp_seq(void);
+unsigned long rcu_sched_get_gp_seq(void);
 unsigned long rcu_exp_batches_completed(void);
 unsigned long rcu_exp_batches_completed_sched(void);
 unsigned long srcu_batches_completed(struct srcu_struct *sp);
 void show_rcu_gp_kthreads(void);
+int rcu_get_gp_kthreads_prio(void);
 void rcu_force_quiescent_state(void);
 void rcu_bh_force_quiescent_state(void);
 void rcu_sched_force_quiescent_state(void);
index e232846..3424452 100644 (file)
@@ -19,6 +19,9 @@
  *
  * Authors: Paul E. McKenney <paulmck@us.ibm.com>
  */
+
+#define pr_fmt(fmt) fmt
+
 #include <linux/types.h>
 #include <linux/kernel.h>
 #include <linux/init.h>
@@ -88,7 +91,7 @@ torture_param(int, nreaders, -1, "Number of RCU reader threads");
 torture_param(int, nwriters, -1, "Number of RCU updater threads");
 torture_param(bool, shutdown, !IS_ENABLED(MODULE),
              "Shutdown at end of performance tests.");
-torture_param(bool, verbose, true, "Enable verbose debugging printk()s");
+torture_param(int, verbose, 1, "Enable verbose debugging printk()s");
 torture_param(int, writer_holdoff, 0, "Holdoff (us) between GPs, zero to disable");
 
 static char *perf_type = "rcu";
@@ -135,8 +138,8 @@ struct rcu_perf_ops {
        void (*cleanup)(void);
        int (*readlock)(void);
        void (*readunlock)(int idx);
-       unsigned long (*started)(void);
-       unsigned long (*completed)(void);
+       unsigned long (*get_gp_seq)(void);
+       unsigned long (*gp_diff)(unsigned long new, unsigned long old);
        unsigned long (*exp_completed)(void);
        void (*async)(struct rcu_head *head, rcu_callback_t func);
        void (*gp_barrier)(void);
@@ -176,8 +179,8 @@ static struct rcu_perf_ops rcu_ops = {
        .init           = rcu_sync_perf_init,
        .readlock       = rcu_perf_read_lock,
        .readunlock     = rcu_perf_read_unlock,
-       .started        = rcu_batches_started,
-       .completed      = rcu_batches_completed,
+       .get_gp_seq     = rcu_get_gp_seq,
+       .gp_diff        = rcu_seq_diff,
        .exp_completed  = rcu_exp_batches_completed,
        .async          = call_rcu,
        .gp_barrier     = rcu_barrier,
@@ -206,8 +209,8 @@ static struct rcu_perf_ops rcu_bh_ops = {
        .init           = rcu_sync_perf_init,
        .readlock       = rcu_bh_perf_read_lock,
        .readunlock     = rcu_bh_perf_read_unlock,
-       .started        = rcu_batches_started_bh,
-       .completed      = rcu_batches_completed_bh,
+       .get_gp_seq     = rcu_bh_get_gp_seq,
+       .gp_diff        = rcu_seq_diff,
        .exp_completed  = rcu_exp_batches_completed_sched,
        .async          = call_rcu_bh,
        .gp_barrier     = rcu_barrier_bh,
@@ -263,8 +266,8 @@ static struct rcu_perf_ops srcu_ops = {
        .init           = rcu_sync_perf_init,
        .readlock       = srcu_perf_read_lock,
        .readunlock     = srcu_perf_read_unlock,
-       .started        = NULL,
-       .completed      = srcu_perf_completed,
+       .get_gp_seq     = srcu_perf_completed,
+       .gp_diff        = rcu_seq_diff,
        .exp_completed  = srcu_perf_completed,
        .async          = srcu_call_rcu,
        .gp_barrier     = srcu_rcu_barrier,
@@ -292,8 +295,8 @@ static struct rcu_perf_ops srcud_ops = {
        .cleanup        = srcu_sync_perf_cleanup,
        .readlock       = srcu_perf_read_lock,
        .readunlock     = srcu_perf_read_unlock,
-       .started        = NULL,
-       .completed      = srcu_perf_completed,
+       .get_gp_seq     = srcu_perf_completed,
+       .gp_diff        = rcu_seq_diff,
        .exp_completed  = srcu_perf_completed,
        .async          = srcu_call_rcu,
        .gp_barrier     = srcu_rcu_barrier,
@@ -322,8 +325,8 @@ static struct rcu_perf_ops sched_ops = {
        .init           = rcu_sync_perf_init,
        .readlock       = sched_perf_read_lock,
        .readunlock     = sched_perf_read_unlock,
-       .started        = rcu_batches_started_sched,
-       .completed      = rcu_batches_completed_sched,
+       .get_gp_seq     = rcu_sched_get_gp_seq,
+       .gp_diff        = rcu_seq_diff,
        .exp_completed  = rcu_exp_batches_completed_sched,
        .async          = call_rcu_sched,
        .gp_barrier     = rcu_barrier_sched,
@@ -350,8 +353,8 @@ static struct rcu_perf_ops tasks_ops = {
        .init           = rcu_sync_perf_init,
        .readlock       = tasks_perf_read_lock,
        .readunlock     = tasks_perf_read_unlock,
-       .started        = rcu_no_completed,
-       .completed      = rcu_no_completed,
+       .get_gp_seq     = rcu_no_completed,
+       .gp_diff        = rcu_seq_diff,
        .async          = call_rcu_tasks,
        .gp_barrier     = rcu_barrier_tasks,
        .sync           = synchronize_rcu_tasks,
@@ -359,9 +362,11 @@ static struct rcu_perf_ops tasks_ops = {
        .name           = "tasks"
 };
 
-static bool __maybe_unused torturing_tasks(void)
+static unsigned long rcuperf_seq_diff(unsigned long new, unsigned long old)
 {
-       return cur_ops == &tasks_ops;
+       if (!cur_ops->gp_diff)
+               return new - old;
+       return cur_ops->gp_diff(new, old);
 }
 
 /*
@@ -444,8 +449,7 @@ rcu_perf_writer(void *arg)
                        b_rcu_perf_writer_started =
                                cur_ops->exp_completed() / 2;
                } else {
-                       b_rcu_perf_writer_started =
-                               cur_ops->completed();
+                       b_rcu_perf_writer_started = cur_ops->get_gp_seq();
                }
        }
 
@@ -502,7 +506,7 @@ retry:
                                                cur_ops->exp_completed() / 2;
                                } else {
                                        b_rcu_perf_writer_finished =
-                                               cur_ops->completed();
+                                               cur_ops->get_gp_seq();
                                }
                                if (shutdown) {
                                        smp_mb(); /* Assign before wake. */
@@ -527,7 +531,7 @@ retry:
        return 0;
 }
 
-static inline void
+static void
 rcu_perf_print_module_parms(struct rcu_perf_ops *cur_ops, const char *tag)
 {
        pr_alert("%s" PERF_FLAG
@@ -582,8 +586,8 @@ rcu_perf_cleanup(void)
                         t_rcu_perf_writer_finished -
                         t_rcu_perf_writer_started,
                         ngps,
-                        b_rcu_perf_writer_finished -
-                        b_rcu_perf_writer_started);
+                        rcuperf_seq_diff(b_rcu_perf_writer_finished,
+                                         b_rcu_perf_writer_started));
                for (i = 0; i < nrealwriters; i++) {
                        if (!writer_durations)
                                break;
@@ -671,12 +675,11 @@ rcu_perf_init(void)
                        break;
        }
        if (i == ARRAY_SIZE(perf_ops)) {
-               pr_alert("rcu-perf: invalid perf type: \"%s\"\n",
-                        perf_type);
+               pr_alert("rcu-perf: invalid perf type: \"%s\"\n", perf_type);
                pr_alert("rcu-perf types:");
                for (i = 0; i < ARRAY_SIZE(perf_ops); i++)
-                       pr_alert(" %s", perf_ops[i]->name);
-               pr_alert("\n");
+                       pr_cont(" %s", perf_ops[i]->name);
+               pr_cont("\n");
                firsterr = -EINVAL;
                goto unwind;
        }
index 42fcb7f..c596c6f 100644 (file)
@@ -22,6 +22,9 @@
  *
  * See also:  Documentation/RCU/torture.txt
  */
+
+#define pr_fmt(fmt) fmt
+
 #include <linux/types.h>
 #include <linux/kernel.h>
 #include <linux/init.h>
@@ -52,6 +55,7 @@
 #include <linux/torture.h>
 #include <linux/vmalloc.h>
 #include <linux/sched/debug.h>
+#include <linux/sched/sysctl.h>
 
 #include "rcu.h"
 
@@ -59,6 +63,19 @@ MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Paul E. McKenney <paulmck@us.ibm.com> and Josh Triplett <josh@joshtriplett.org>");
 
 
+/* Bits for ->extendables field, extendables param, and related definitions. */
+#define RCUTORTURE_RDR_SHIFT    8      /* Put SRCU index in upper bits. */
+#define RCUTORTURE_RDR_MASK     ((1 << RCUTORTURE_RDR_SHIFT) - 1)
+#define RCUTORTURE_RDR_BH       0x1    /* Extend readers by disabling bh. */
+#define RCUTORTURE_RDR_IRQ      0x2    /*  ... disabling interrupts. */
+#define RCUTORTURE_RDR_PREEMPT  0x4    /*  ... disabling preemption. */
+#define RCUTORTURE_RDR_RCU      0x8    /*  ... entering another RCU reader. */
+#define RCUTORTURE_RDR_NBITS    4      /* Number of bits defined above. */
+#define RCUTORTURE_MAX_EXTEND   (RCUTORTURE_RDR_BH | RCUTORTURE_RDR_IRQ | \
+                                 RCUTORTURE_RDR_PREEMPT)
+#define RCUTORTURE_RDR_MAX_LOOPS 0x7   /* Maximum reader extensions. */
+                                       /* Must be power of two minus one. */
+
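
The extendables word, and the module parameter of the same name declared below, is a plain bitmask over the RCUTORTURE_RDR_* values. For example, a hypothetical run that lets readers extend themselves by disabling bh and preemption but never irqs would use 0x5, i.e. rcutorture.extendables=5 on the kernel command line:

    int mask = RCUTORTURE_RDR_BH | RCUTORTURE_RDR_PREEMPT;  /* 0x1 | 0x4 == 0x5 */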
 torture_param(int, cbflood_inter_holdoff, HZ,
              "Holdoff between floods (jiffies)");
 torture_param(int, cbflood_intra_holdoff, 1,
@@ -66,6 +83,8 @@ torture_param(int, cbflood_intra_holdoff, 1,
 torture_param(int, cbflood_n_burst, 3, "# bursts in flood, zero to disable");
 torture_param(int, cbflood_n_per_burst, 20000,
              "# callbacks per burst in flood");
+torture_param(int, extendables, RCUTORTURE_MAX_EXTEND,
+             "Extend readers by disabling bh (1), irqs (2), or preempt (4)");
 torture_param(int, fqs_duration, 0,
              "Duration of fqs bursts (us), 0 to disable");
 torture_param(int, fqs_holdoff, 0, "Holdoff time within fqs bursts (us)");
@@ -84,7 +103,7 @@ torture_param(int, object_debug, 0,
             "Enable debug-object double call_rcu() testing");
 torture_param(int, onoff_holdoff, 0, "Time after boot before CPU hotplugs (s)");
 torture_param(int, onoff_interval, 0,
-            "Time between CPU hotplugs (s), 0=disable");
+            "Time between CPU hotplugs (jiffies), 0=disable");
 torture_param(int, shuffle_interval, 3, "Number of seconds between shuffles");
 torture_param(int, shutdown_secs, 0, "Shutdown time (s), <= zero to disable.");
 torture_param(int, stall_cpu, 0, "Stall duration (s), zero to disable.");
@@ -101,7 +120,7 @@ torture_param(int, test_boost_interval, 7,
             "Interval between boost tests, seconds.");
 torture_param(bool, test_no_idle_hz, true,
             "Test support for tickless idle CPUs");
-torture_param(bool, verbose, true,
+torture_param(int, verbose, 1,
             "Enable verbose debugging printk()s");
 
 static char *torture_type = "rcu";
@@ -148,9 +167,9 @@ static long n_rcu_torture_boost_ktrerror;
 static long n_rcu_torture_boost_rterror;
 static long n_rcu_torture_boost_failure;
 static long n_rcu_torture_boosts;
-static long n_rcu_torture_timers;
+static atomic_long_t n_rcu_torture_timers;
 static long n_barrier_attempts;
-static long n_barrier_successes;
+static long n_barrier_successes; /* did rcu_barrier test succeed? */
 static atomic_long_t n_cbfloods;
 static struct list_head rcu_torture_removed;
 
@@ -261,8 +280,8 @@ struct rcu_torture_ops {
        int (*readlock)(void);
        void (*read_delay)(struct torture_random_state *rrsp);
        void (*readunlock)(int idx);
-       unsigned long (*started)(void);
-       unsigned long (*completed)(void);
+       unsigned long (*get_gp_seq)(void);
+       unsigned long (*gp_diff)(unsigned long new, unsigned long old);
        void (*deferred_free)(struct rcu_torture *p);
        void (*sync)(void);
        void (*exp_sync)(void);
@@ -274,6 +293,8 @@ struct rcu_torture_ops {
        void (*stats)(void);
        int irq_capable;
        int can_boost;
+       int extendables;
+       int ext_irq_conflict;
        const char *name;
 };
 
@@ -302,10 +323,10 @@ static void rcu_read_delay(struct torture_random_state *rrsp)
         * force_quiescent_state. */
 
        if (!(torture_random(rrsp) % (nrealreaders * 2000 * longdelay_ms))) {
-               started = cur_ops->completed();
+               started = cur_ops->get_gp_seq();
                ts = rcu_trace_clock_local();
                mdelay(longdelay_ms);
-               completed = cur_ops->completed();
+               completed = cur_ops->get_gp_seq();
                do_trace_rcu_torture_read(cur_ops->name, NULL, ts,
                                          started, completed);
        }
@@ -397,8 +418,8 @@ static struct rcu_torture_ops rcu_ops = {
        .readlock       = rcu_torture_read_lock,
        .read_delay     = rcu_read_delay,
        .readunlock     = rcu_torture_read_unlock,
-       .started        = rcu_batches_started,
-       .completed      = rcu_batches_completed,
+       .get_gp_seq     = rcu_get_gp_seq,
+       .gp_diff        = rcu_seq_diff,
        .deferred_free  = rcu_torture_deferred_free,
        .sync           = synchronize_rcu,
        .exp_sync       = synchronize_rcu_expedited,
@@ -439,8 +460,8 @@ static struct rcu_torture_ops rcu_bh_ops = {
        .readlock       = rcu_bh_torture_read_lock,
        .read_delay     = rcu_read_delay,  /* just reuse rcu's version. */
        .readunlock     = rcu_bh_torture_read_unlock,
-       .started        = rcu_batches_started_bh,
-       .completed      = rcu_batches_completed_bh,
+       .get_gp_seq     = rcu_bh_get_gp_seq,
+       .gp_diff        = rcu_seq_diff,
        .deferred_free  = rcu_bh_torture_deferred_free,
        .sync           = synchronize_rcu_bh,
        .exp_sync       = synchronize_rcu_bh_expedited,
@@ -449,6 +470,8 @@ static struct rcu_torture_ops rcu_bh_ops = {
        .fqs            = rcu_bh_force_quiescent_state,
        .stats          = NULL,
        .irq_capable    = 1,
+       .extendables    = (RCUTORTURE_RDR_BH | RCUTORTURE_RDR_IRQ),
+       .ext_irq_conflict = RCUTORTURE_RDR_RCU,
        .name           = "rcu_bh"
 };
 
@@ -483,8 +506,7 @@ static struct rcu_torture_ops rcu_busted_ops = {
        .readlock       = rcu_torture_read_lock,
        .read_delay     = rcu_read_delay,  /* just reuse rcu's version. */
        .readunlock     = rcu_torture_read_unlock,
-       .started        = rcu_no_completed,
-       .completed      = rcu_no_completed,
+       .get_gp_seq     = rcu_no_completed,
        .deferred_free  = rcu_busted_torture_deferred_free,
        .sync           = synchronize_rcu_busted,
        .exp_sync       = synchronize_rcu_busted,
@@ -572,8 +594,7 @@ static struct rcu_torture_ops srcu_ops = {
        .readlock       = srcu_torture_read_lock,
        .read_delay     = srcu_read_delay,
        .readunlock     = srcu_torture_read_unlock,
-       .started        = NULL,
-       .completed      = srcu_torture_completed,
+       .get_gp_seq     = srcu_torture_completed,
        .deferred_free  = srcu_torture_deferred_free,
        .sync           = srcu_torture_synchronize,
        .exp_sync       = srcu_torture_synchronize_expedited,
@@ -610,8 +631,7 @@ static struct rcu_torture_ops srcud_ops = {
        .readlock       = srcu_torture_read_lock,
        .read_delay     = srcu_read_delay,
        .readunlock     = srcu_torture_read_unlock,
-       .started        = NULL,
-       .completed      = srcu_torture_completed,
+       .get_gp_seq     = srcu_torture_completed,
        .deferred_free  = srcu_torture_deferred_free,
        .sync           = srcu_torture_synchronize,
        .exp_sync       = srcu_torture_synchronize_expedited,
@@ -622,6 +642,26 @@ static struct rcu_torture_ops srcud_ops = {
        .name           = "srcud"
 };
 
+/* As above, but broken due to inappropriate reader extension. */
+static struct rcu_torture_ops busted_srcud_ops = {
+       .ttype          = SRCU_FLAVOR,
+       .init           = srcu_torture_init,
+       .cleanup        = srcu_torture_cleanup,
+       .readlock       = srcu_torture_read_lock,
+       .read_delay     = rcu_read_delay,
+       .readunlock     = srcu_torture_read_unlock,
+       .get_gp_seq     = srcu_torture_completed,
+       .deferred_free  = srcu_torture_deferred_free,
+       .sync           = srcu_torture_synchronize,
+       .exp_sync       = srcu_torture_synchronize_expedited,
+       .call           = srcu_torture_call,
+       .cb_barrier     = srcu_torture_barrier,
+       .stats          = srcu_torture_stats,
+       .irq_capable    = 1,
+       .extendables    = RCUTORTURE_MAX_EXTEND,
+       .name           = "busted_srcud"
+};
+
 /*
  * Definitions for sched torture testing.
  */
@@ -648,8 +688,8 @@ static struct rcu_torture_ops sched_ops = {
        .readlock       = sched_torture_read_lock,
        .read_delay     = rcu_read_delay,  /* just reuse rcu's version. */
        .readunlock     = sched_torture_read_unlock,
-       .started        = rcu_batches_started_sched,
-       .completed      = rcu_batches_completed_sched,
+       .get_gp_seq     = rcu_sched_get_gp_seq,
+       .gp_diff        = rcu_seq_diff,
        .deferred_free  = rcu_sched_torture_deferred_free,
        .sync           = synchronize_sched,
        .exp_sync       = synchronize_sched_expedited,
@@ -660,6 +700,7 @@ static struct rcu_torture_ops sched_ops = {
        .fqs            = rcu_sched_force_quiescent_state,
        .stats          = NULL,
        .irq_capable    = 1,
+       .extendables    = RCUTORTURE_MAX_EXTEND,
        .name           = "sched"
 };
 
@@ -687,8 +728,7 @@ static struct rcu_torture_ops tasks_ops = {
        .readlock       = tasks_torture_read_lock,
        .read_delay     = rcu_read_delay,  /* just reuse rcu's version. */
        .readunlock     = tasks_torture_read_unlock,
-       .started        = rcu_no_completed,
-       .completed      = rcu_no_completed,
+       .get_gp_seq     = rcu_no_completed,
        .deferred_free  = rcu_tasks_torture_deferred_free,
        .sync           = synchronize_rcu_tasks,
        .exp_sync       = synchronize_rcu_tasks,
@@ -700,6 +740,13 @@ static struct rcu_torture_ops tasks_ops = {
        .name           = "tasks"
 };
 
+static unsigned long rcutorture_seq_diff(unsigned long new, unsigned long old)
+{
+       if (!cur_ops->gp_diff)
+               return new - old;
+       return cur_ops->gp_diff(new, old);
+}
+
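+/*
+ * Editorial sketch, not part of the patch: the ->gp_diff hook exists
+ * because gp_seq values pack grace-period state into their low bits
+ * (RCU_SEQ_CTR_SHIFT of them, 2 in kernel/rcu/rcu.h), so raw
+ * subtraction would count state transitions as whole grace periods.
+ * Roughly:
+ *
+ *   diff = (new >> RCU_SEQ_CTR_SHIFT) - (old >> RCU_SEQ_CTR_SHIFT);
+ *
+ * which is approximately what rcu_seq_diff() computes for the tree
+ * flavors hooked up above.
+ */
+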
 static bool __maybe_unused torturing_tasks(void)
 {
        return cur_ops == &tasks_ops;
@@ -726,6 +773,44 @@ static void rcu_torture_boost_cb(struct rcu_head *head)
        smp_store_release(&rbip->inflight, 0);
 }
 
+static int old_rt_runtime = -1;
+
+static void rcu_torture_disable_rt_throttle(void)
+{
+       /*
+        * Disable RT throttling so that rcutorture's boost threads don't get
+        * throttled. Only possible if rcutorture is built-in; otherwise, the
+        * user should do this manually by setting the sched_rt_period_us and
+        * sched_rt_runtime sysctls.
+        */
+       if (!IS_BUILTIN(CONFIG_RCU_TORTURE_TEST) || old_rt_runtime != -1)
+               return;
+
+       old_rt_runtime = sysctl_sched_rt_runtime;
+       sysctl_sched_rt_runtime = -1;
+}
+
+static void rcu_torture_enable_rt_throttle(void)
+{
+       if (!IS_BUILTIN(CONFIG_RCU_TORTURE_TEST) || old_rt_runtime == -1)
+               return;
+
+       sysctl_sched_rt_runtime = old_rt_runtime;
+       old_rt_runtime = -1;
+}
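+
+/*
+ * Editorial sketch, not part of the patch: for a modular rcutorture
+ * build, the manual equivalent mentioned above would be something like
+ *
+ *   echo -1 > /proc/sys/kernel/sched_rt_runtime_us
+ *
+ * from userspace, restoring the old value once testing is done.
+ */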
+
+static bool rcu_torture_boost_failed(unsigned long start, unsigned long end)
+{
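+       /* Editorial note: failure means the callback needed more than the
+        * boost duration minus HZ/2, i.e. half a second of margin. */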
+       if (end - start > test_boost_duration * HZ - HZ / 2) {
+               VERBOSE_TOROUT_STRING("rcu_torture_boost boosting failed");
+               n_rcu_torture_boost_failure++;
+
+               return true; /* failed */
+       }
+
+       return false; /* passed */
+}
+
 static int rcu_torture_boost(void *arg)
 {
        unsigned long call_rcu_time;
@@ -746,6 +831,21 @@ static int rcu_torture_boost(void *arg)
        init_rcu_head_on_stack(&rbi.rcu);
        /* Each pass through the following loop does one boost-test cycle. */
        do {
+               /* Track whether the test already failed in this test interval. */
+               bool failed = false;
+
+               /* Increment n_rcu_torture_boosts once per boost-test */
+               while (!kthread_should_stop()) {
+                       if (mutex_trylock(&boost_mutex)) {
+                               n_rcu_torture_boosts++;
+                               mutex_unlock(&boost_mutex);
+                               break;
+                       }
+                       schedule_timeout_uninterruptible(1);
+               }
+               if (kthread_should_stop())
+                       goto checkwait;
+
                /* Wait for the next test interval. */
                oldstarttime = boost_starttime;
                while (ULONG_CMP_LT(jiffies, oldstarttime)) {
@@ -764,11 +864,10 @@ static int rcu_torture_boost(void *arg)
                                /* RCU core before ->inflight = 1. */
                                smp_store_release(&rbi.inflight, 1);
                                call_rcu(&rbi.rcu, rcu_torture_boost_cb);
-                               if (jiffies - call_rcu_time >
-                                        test_boost_duration * HZ - HZ / 2) {
-                                       VERBOSE_TOROUT_STRING("rcu_torture_boost boosting failed");
-                                       n_rcu_torture_boost_failure++;
-                               }
+                               /* Check if the boost test failed */
+                               failed = failed ||
+                                        rcu_torture_boost_failed(call_rcu_time,
+                                                                jiffies);
                                call_rcu_time = jiffies;
                        }
                        stutter_wait("rcu_torture_boost");
@@ -776,6 +875,14 @@ static int rcu_torture_boost(void *arg)
                                goto checkwait;
                }
 
+               /*
+                * If boost never happened, then inflight will always be 1; in
+                * that case the boost check would never have happened in the
+                * above loop, so do another one here.
+                */
+               if (!failed && smp_load_acquire(&rbi.inflight))
+                       rcu_torture_boost_failed(call_rcu_time, jiffies);
+
                /*
                 * Set the start time of the next test interval.
                 * Yes, this is vulnerable to long delays, but such
@@ -788,7 +895,6 @@ static int rcu_torture_boost(void *arg)
                        if (mutex_trylock(&boost_mutex)) {
                                boost_starttime = jiffies +
                                                  test_boost_interval * HZ;
-                               n_rcu_torture_boosts++;
                                mutex_unlock(&boost_mutex);
                                break;
                        }
@@ -1010,7 +1116,7 @@ rcu_torture_writer(void *arg)
                                break;
                        }
                }
-               rcutorture_record_progress(++rcu_torture_current_version);
+               rcu_torture_current_version++;
                /* Cycle through nesting levels of rcu_expedite_gp() calls. */
                if (can_expedite &&
                    !(torture_random(&rand) & 0xff & (!!expediting - 1))) {
@@ -1084,27 +1190,133 @@ static void rcu_torture_timer_cb(struct rcu_head *rhp)
 }
 
 /*
- * RCU torture reader from timer handler.  Dereferences rcu_torture_current,
- * incrementing the corresponding element of the pipeline array.  The
- * counter in the element should never be greater than 1, otherwise, the
- * RCU implementation is broken.
+ * Do one extension of an RCU read-side critical section using the
+ * current reader state in readstate (set to zero for initial entry
+ * to extended critical section), setting the new state as specified by
+ * newstate (set to zero for final exit from extended critical section)
+ * and using the random-number-generator state in trsp.  If this is
+ * neither the beginning nor the end of the critical section and if
+ * there was actually a change, do a ->read_delay().
  */
-static void rcu_torture_timer(struct timer_list *unused)
+static void rcutorture_one_extend(int *readstate, int newstate,
+                                 struct torture_random_state *trsp)
+{
+       int idxnew = -1;
+       int idxold = *readstate;
+       int statesnew = ~*readstate & newstate;
+       int statesold = *readstate & ~newstate;
+
+       WARN_ON_ONCE(idxold < 0);
+       WARN_ON_ONCE((idxold >> RCUTORTURE_RDR_SHIFT) > 1);
+
+       /* First, put new protection in place to avoid critical-section gap. */
+       if (statesnew & RCUTORTURE_RDR_BH)
+               local_bh_disable();
+       if (statesnew & RCUTORTURE_RDR_IRQ)
+               local_irq_disable();
+       if (statesnew & RCUTORTURE_RDR_PREEMPT)
+               preempt_disable();
+       if (statesnew & RCUTORTURE_RDR_RCU)
+               idxnew = cur_ops->readlock() << RCUTORTURE_RDR_SHIFT;
+
+       /* Next, remove old protection, irq first due to bh conflict. */
+       if (statesold & RCUTORTURE_RDR_IRQ)
+               local_irq_enable();
+       if (statesold & RCUTORTURE_RDR_BH)
+               local_bh_enable();
+       if (statesold & RCUTORTURE_RDR_PREEMPT)
+               preempt_enable();
+       if (statesold & RCUTORTURE_RDR_RCU)
+               cur_ops->readunlock(idxold >> RCUTORTURE_RDR_SHIFT);
+
+       /* Delay if neither beginning nor end and there was a change. */
+       if ((statesnew || statesold) && *readstate && newstate)
+               cur_ops->read_delay(trsp);
+
+       /* Update the reader state. */
+       if (idxnew == -1)
+               idxnew = idxold & ~RCUTORTURE_RDR_MASK;
+       WARN_ON_ONCE(idxnew < 0);
+       WARN_ON_ONCE((idxnew >> RCUTORTURE_RDR_SHIFT) > 1);
+       *readstate = idxnew | newstate;
+       WARN_ON_ONCE((*readstate >> RCUTORTURE_RDR_SHIFT) < 0);
+       WARN_ON_ONCE((*readstate >> RCUTORTURE_RDR_SHIFT) > 1);
+}
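+
+/*
+ * Editorial note on the readstate layout, as inferred from the shifts
+ * above: bits below RCUTORTURE_RDR_SHIFT hold the RCUTORTURE_RDR_*
+ * protection flags, and the single bit at RCUTORTURE_RDR_SHIFT holds
+ * the index returned by ->readlock() (0 or 1, e.g. the SRCU index),
+ * hence the WARN_ON_ONCE() checks that the shifted value never
+ * exceeds 1.
+ */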
+
+/* Return the biggest extendables mask given current RCU and boot parameters. */
+static int rcutorture_extend_mask_max(void)
+{
+       int mask;
+
+       WARN_ON_ONCE(extendables & ~RCUTORTURE_MAX_EXTEND);
+       mask = extendables & RCUTORTURE_MAX_EXTEND & cur_ops->extendables;
+       mask = mask | RCUTORTURE_RDR_RCU;
+       return mask;
+}
+
+/* Return a random protection state mask, but with at least one bit set. */
+static int
+rcutorture_extend_mask(int oldmask, struct torture_random_state *trsp)
+{
+       int mask = rcutorture_extend_mask_max();
+       unsigned long randmask1 = torture_random(trsp) >> 8;
+       unsigned long randmask2 = randmask1 >> 1;
+
+       WARN_ON_ONCE(mask >> RCUTORTURE_RDR_SHIFT);
+       /* Half the time lots of bits, half the time only one bit. */
+       if (randmask1 & 0x1)
+               mask = mask & randmask2;
+       else
+               mask = mask & (1 << (randmask2 % RCUTORTURE_RDR_NBITS));
+       if ((mask & RCUTORTURE_RDR_IRQ) &&
+           !(mask & RCUTORTURE_RDR_BH) &&
+           (oldmask & RCUTORTURE_RDR_BH))
+               mask |= RCUTORTURE_RDR_BH; /* Can't enable bh w/irq disabled. */
+       if ((mask & RCUTORTURE_RDR_IRQ) &&
+           !(mask & cur_ops->ext_irq_conflict) &&
+           (oldmask & cur_ops->ext_irq_conflict))
+               mask |= cur_ops->ext_irq_conflict; /* Or if readers object. */
+       return mask ?: RCUTORTURE_RDR_RCU;
+}
+
+/*
+ * Do a randomly selected number of extensions of an existing RCU read-side
+ * critical section.
+ */
+static void rcutorture_loop_extend(int *readstate,
+                                  struct torture_random_state *trsp)
+{
+       int i;
+       int mask = rcutorture_extend_mask_max();
+
+       WARN_ON_ONCE(!*readstate); /* -Existing- RCU read-side critsect! */
+       if (!((mask - 1) & mask))
+               return;  /* Single bit set, so current RCU flavor is not extendable. */
+       i = (torture_random(trsp) >> 3) & RCUTORTURE_RDR_MAX_LOOPS;
+       while (i--) {
+               mask = rcutorture_extend_mask(*readstate, trsp);
+               rcutorture_one_extend(readstate, mask, trsp);
+       }
+}
+
+/*
+ * Do one read-side critical section, returning false if there was
+ * no data to read.  Can be invoked both from process context and
+ * from a timer handler.
+ */
+static bool rcu_torture_one_read(struct torture_random_state *trsp)
 {
-       int idx;
        unsigned long started;
        unsigned long completed;
-       static DEFINE_TORTURE_RANDOM(rand);
-       static DEFINE_SPINLOCK(rand_lock);
+       int newstate;
        struct rcu_torture *p;
        int pipe_count;
+       int readstate = 0;
        unsigned long long ts;
 
-       idx = cur_ops->readlock();
-       if (cur_ops->started)
-               started = cur_ops->started();
-       else
-               started = cur_ops->completed();
+       newstate = rcutorture_extend_mask(readstate, trsp);
+       rcutorture_one_extend(&readstate, newstate, trsp);
+       started = cur_ops->get_gp_seq();
        ts = rcu_trace_clock_local();
        p = rcu_dereference_check(rcu_torture_current,
                                  rcu_read_lock_bh_held() ||
@@ -1112,39 +1324,50 @@ static void rcu_torture_timer(struct timer_list *unused)
                                  srcu_read_lock_held(srcu_ctlp) ||
                                  torturing_tasks());
        if (p == NULL) {
-               /* Leave because rcu_torture_writer is not yet underway */
-               cur_ops->readunlock(idx);
-               return;
+               /* Wait for rcu_torture_writer to get underway */
+               rcutorture_one_extend(&readstate, 0, trsp);
+               return false;
        }
        if (p->rtort_mbtest == 0)
                atomic_inc(&n_rcu_torture_mberror);
-       spin_lock(&rand_lock);
-       cur_ops->read_delay(&rand);
-       n_rcu_torture_timers++;
-       spin_unlock(&rand_lock);
+       rcutorture_loop_extend(&readstate, trsp);
        preempt_disable();
        pipe_count = p->rtort_pipe_count;
        if (pipe_count > RCU_TORTURE_PIPE_LEN) {
                /* Should not happen, but... */
                pipe_count = RCU_TORTURE_PIPE_LEN;
        }
-       completed = cur_ops->completed();
+       completed = cur_ops->get_gp_seq();
        if (pipe_count > 1) {
-               do_trace_rcu_torture_read(cur_ops->name, &p->rtort_rcu, ts,
-                                         started, completed);
+               do_trace_rcu_torture_read(cur_ops->name, &p->rtort_rcu,
+                                         ts, started, completed);
                rcu_ftrace_dump(DUMP_ALL);
        }
        __this_cpu_inc(rcu_torture_count[pipe_count]);
-       completed = completed - started;
-       if (cur_ops->started)
-               completed++;
+       completed = rcutorture_seq_diff(completed, started);
        if (completed > RCU_TORTURE_PIPE_LEN) {
                /* Should not happen, but... */
                completed = RCU_TORTURE_PIPE_LEN;
        }
        __this_cpu_inc(rcu_torture_batch[completed]);
        preempt_enable();
-       cur_ops->readunlock(idx);
+       rcutorture_one_extend(&readstate, 0, trsp);
+       WARN_ON_ONCE(readstate & RCUTORTURE_RDR_MASK);
+       return true;
+}
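+
+/*
+ * Editorial note: rcu_torture_one_read() consolidates the read-side
+ * logic that was previously duplicated between rcu_torture_timer()
+ * and rcu_torture_reader(); both now call it, as the hunks below show.
+ */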
+
+static DEFINE_TORTURE_RANDOM_PERCPU(rcu_torture_timer_rand);
+
+/*
+ * RCU torture reader from timer handler.  Dereferences rcu_torture_current,
+ * incrementing the corresponding element of the pipeline array.  The
+ * counter in the element should never be greater than 1, otherwise, the
+ * RCU implementation is broken.
+ */
+static void rcu_torture_timer(struct timer_list *unused)
+{
+       atomic_long_inc(&n_rcu_torture_timers);
+       (void)rcu_torture_one_read(this_cpu_ptr(&rcu_torture_timer_rand));
 
        /* Test call_rcu() invocation from interrupt handler. */
        if (cur_ops->call) {
@@ -1164,14 +1387,8 @@ static void rcu_torture_timer(struct timer_list *unused)
 static int
 rcu_torture_reader(void *arg)
 {
-       unsigned long started;
-       unsigned long completed;
-       int idx;
        DEFINE_TORTURE_RANDOM(rand);
-       struct rcu_torture *p;
-       int pipe_count;
        struct timer_list t;
-       unsigned long long ts;
 
        VERBOSE_TOROUT_STRING("rcu_torture_reader task started");
        set_user_nice(current, MAX_NICE);
@@ -1183,49 +1400,8 @@ rcu_torture_reader(void *arg)
                        if (!timer_pending(&t))
                                mod_timer(&t, jiffies + 1);
                }
-               idx = cur_ops->readlock();
-               if (cur_ops->started)
-                       started = cur_ops->started();
-               else
-                       started = cur_ops->completed();
-               ts = rcu_trace_clock_local();
-               p = rcu_dereference_check(rcu_torture_current,
-                                         rcu_read_lock_bh_held() ||
-                                         rcu_read_lock_sched_held() ||
-                                         srcu_read_lock_held(srcu_ctlp) ||
-                                         torturing_tasks());
-               if (p == NULL) {
-                       /* Wait for rcu_torture_writer to get underway */
-                       cur_ops->readunlock(idx);
+               if (!rcu_torture_one_read(&rand))
                        schedule_timeout_interruptible(HZ);
-                       continue;
-               }
-               if (p->rtort_mbtest == 0)
-                       atomic_inc(&n_rcu_torture_mberror);
-               cur_ops->read_delay(&rand);
-               preempt_disable();
-               pipe_count = p->rtort_pipe_count;
-               if (pipe_count > RCU_TORTURE_PIPE_LEN) {
-                       /* Should not happen, but... */
-                       pipe_count = RCU_TORTURE_PIPE_LEN;
-               }
-               completed = cur_ops->completed();
-               if (pipe_count > 1) {
-                       do_trace_rcu_torture_read(cur_ops->name, &p->rtort_rcu,
-                                                 ts, started, completed);
-                       rcu_ftrace_dump(DUMP_ALL);
-               }
-               __this_cpu_inc(rcu_torture_count[pipe_count]);
-               completed = completed - started;
-               if (cur_ops->started)
-                       completed++;
-               if (completed > RCU_TORTURE_PIPE_LEN) {
-                       /* Should not happen, but... */
-                       completed = RCU_TORTURE_PIPE_LEN;
-               }
-               __this_cpu_inc(rcu_torture_batch[completed]);
-               preempt_enable();
-               cur_ops->readunlock(idx);
                stutter_wait("rcu_torture_reader");
        } while (!torture_must_stop());
        if (irqreader && cur_ops->irq_capable) {
@@ -1282,7 +1458,7 @@ rcu_torture_stats_print(void)
        pr_cont("rtbf: %ld rtb: %ld nt: %ld ",
                n_rcu_torture_boost_failure,
                n_rcu_torture_boosts,
-               n_rcu_torture_timers);
+               atomic_long_read(&n_rcu_torture_timers));
        torture_onoff_stats();
        pr_cont("barrier: %ld/%ld:%ld ",
                n_barrier_successes,
@@ -1324,18 +1500,16 @@ rcu_torture_stats_print(void)
        if (rtcv_snap == rcu_torture_current_version &&
            rcu_torture_current != NULL) {
                int __maybe_unused flags = 0;
-               unsigned long __maybe_unused gpnum = 0;
-               unsigned long __maybe_unused completed = 0;
+               unsigned long __maybe_unused gp_seq = 0;
 
                rcutorture_get_gp_data(cur_ops->ttype,
-                                      &flags, &gpnum, &completed);
+                                      &flags, &gp_seq);
                srcutorture_get_gp_data(cur_ops->ttype, srcu_ctlp,
-                                       &flags, &gpnum, &completed);
+                                       &flags, &gp_seq);
                wtp = READ_ONCE(writer_task);
-               pr_alert("??? Writer stall state %s(%d) g%lu c%lu f%#x ->state %#lx cpu %d\n",
+               pr_alert("??? Writer stall state %s(%d) g%lu f%#x ->state %#lx cpu %d\n",
                         rcu_torture_writer_state_getname(),
-                        rcu_torture_writer_state,
-                        gpnum, completed, flags,
+                        rcu_torture_writer_state, gp_seq, flags,
                         wtp == NULL ? ~0UL : wtp->state,
                         wtp == NULL ? -1 : (int)task_cpu(wtp));
                if (!splatted && wtp) {
@@ -1365,7 +1539,7 @@ rcu_torture_stats(void *arg)
        return 0;
 }
 
-static inline void
+static void
 rcu_torture_print_module_parms(struct rcu_torture_ops *cur_ops, const char *tag)
 {
        pr_alert("%s" TORTURE_FLAG
@@ -1397,6 +1571,7 @@ static int rcutorture_booster_cleanup(unsigned int cpu)
        mutex_lock(&boost_mutex);
        t = boost_tasks[cpu];
        boost_tasks[cpu] = NULL;
+       rcu_torture_enable_rt_throttle();
        mutex_unlock(&boost_mutex);
 
        /* This must be outside of the mutex, otherwise deadlock! */
@@ -1413,6 +1588,7 @@ static int rcutorture_booster_init(unsigned int cpu)
 
        /* Don't allow time recalculation while creating a new task. */
        mutex_lock(&boost_mutex);
+       rcu_torture_disable_rt_throttle();
        VERBOSE_TOROUT_STRING("Creating rcu_torture_boost task");
        boost_tasks[cpu] = kthread_create_on_node(rcu_torture_boost, NULL,
                                                  cpu_to_node(cpu),
@@ -1446,7 +1622,7 @@ static int rcu_torture_stall(void *args)
                VERBOSE_TOROUT_STRING("rcu_torture_stall end holdoff");
        }
        if (!kthread_should_stop()) {
-               stop_at = get_seconds() + stall_cpu;
+               stop_at = ktime_get_seconds() + stall_cpu;
                /* RCU CPU stall is expected behavior in following code. */
                rcu_read_lock();
                if (stall_cpu_irqsoff)
@@ -1455,7 +1631,8 @@ static int rcu_torture_stall(void *args)
                        preempt_disable();
                pr_alert("rcu_torture_stall start on CPU %d.\n",
                         smp_processor_id());
-               while (ULONG_CMP_LT(get_seconds(), stop_at))
+               while (ULONG_CMP_LT((unsigned long)ktime_get_seconds(),
+                                   stop_at))
                        continue;  /* Induce RCU CPU stall warning. */
                if (stall_cpu_irqsoff)
                        local_irq_enable();
@@ -1546,8 +1723,9 @@ static int rcu_torture_barrier(void *arg)
                               atomic_read(&barrier_cbs_invoked),
                               n_barrier_cbs);
                        WARN_ON_ONCE(1);
+               } else {
+                       n_barrier_successes++;
                }
-               n_barrier_successes++;
                schedule_timeout_interruptible(HZ / 10);
        } while (!torture_must_stop());
        torture_kthread_stopping("rcu_torture_barrier");
@@ -1610,17 +1788,39 @@ static void rcu_torture_barrier_cleanup(void)
        }
 }
 
+static bool rcu_torture_can_boost(void)
+{
+       static int boost_warn_once;
+       int prio;
+
+       if (!(test_boost == 1 && cur_ops->can_boost) && test_boost != 2)
+               return false;
+
+       prio = rcu_get_gp_kthreads_prio();
+       if (!prio)
+               return false;
+
+       if (prio < 2) {
+               if (boost_warn_once == 1)
+                       return false;
+
+               pr_alert("%s: WARN: RCU kthread priority too low to test boosting.  Skipping RCU boost test. Try passing rcutree.kthread_prio > 1 on the kernel command line.\n", KBUILD_MODNAME);
+               boost_warn_once = 1;
+               return false;
+       }
+
+       return true;
+}
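+
+/*
+ * Editorial example, per the message above: booting with
+ * "rcutree.kthread_prio=2" (or higher) gives the RCU grace-period
+ * kthreads enough priority for the boost test to be meaningful.
+ */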
+
 static enum cpuhp_state rcutor_hp;
 
 static void
 rcu_torture_cleanup(void)
 {
        int flags = 0;
-       unsigned long gpnum = 0;
-       unsigned long completed = 0;
+       unsigned long gp_seq = 0;
        int i;
 
-       rcutorture_record_test_transition();
        if (torture_cleanup_begin()) {
                if (cur_ops->cb_barrier != NULL)
                        cur_ops->cb_barrier();
@@ -1648,17 +1848,15 @@ rcu_torture_cleanup(void)
                fakewriter_tasks = NULL;
        }
 
-       rcutorture_get_gp_data(cur_ops->ttype, &flags, &gpnum, &completed);
-       srcutorture_get_gp_data(cur_ops->ttype, srcu_ctlp,
-                               &flags, &gpnum, &completed);
-       pr_alert("%s:  End-test grace-period state: g%lu c%lu f%#x\n",
-                cur_ops->name, gpnum, completed, flags);
+       rcutorture_get_gp_data(cur_ops->ttype, &flags, &gp_seq);
+       srcutorture_get_gp_data(cur_ops->ttype, srcu_ctlp, &flags, &gp_seq);
+       pr_alert("%s:  End-test grace-period state: g%lu f%#x\n",
+                cur_ops->name, gp_seq, flags);
        torture_stop_kthread(rcu_torture_stats, stats_task);
        torture_stop_kthread(rcu_torture_fqs, fqs_task);
        for (i = 0; i < ncbflooders; i++)
                torture_stop_kthread(rcu_torture_cbflood, cbflood_task[i]);
-       if ((test_boost == 1 && cur_ops->can_boost) ||
-           test_boost == 2)
+       if (rcu_torture_can_boost())
                cpuhp_remove_state(rcutor_hp);
 
        /*
@@ -1746,7 +1944,7 @@ rcu_torture_init(void)
        int firsterr = 0;
        static struct rcu_torture_ops *torture_ops[] = {
                &rcu_ops, &rcu_bh_ops, &rcu_busted_ops, &srcu_ops, &srcud_ops,
-               &sched_ops, &tasks_ops,
+               &busted_srcud_ops, &sched_ops, &tasks_ops,
        };
 
        if (!torture_init_begin(torture_type, verbose))
@@ -1763,8 +1961,8 @@ rcu_torture_init(void)
                         torture_type);
                pr_alert("rcu-torture types:");
                for (i = 0; i < ARRAY_SIZE(torture_ops); i++)
-                       pr_alert(" %s", torture_ops[i]->name);
-               pr_alert("\n");
+                       pr_cont(" %s", torture_ops[i]->name);
+               pr_cont("\n");
                firsterr = -EINVAL;
                goto unwind;
        }
@@ -1882,8 +2080,7 @@ rcu_torture_init(void)
                test_boost_interval = 1;
        if (test_boost_duration < 2)
                test_boost_duration = 2;
-       if ((test_boost == 1 && cur_ops->can_boost) ||
-           test_boost == 2) {
+       if (rcu_torture_can_boost()) {
 
                boost_starttime = jiffies + test_boost_interval * HZ;
 
@@ -1897,7 +2094,7 @@ rcu_torture_init(void)
        firsterr = torture_shutdown_init(shutdown_secs, rcu_torture_cleanup);
        if (firsterr)
                goto unwind;
-       firsterr = torture_onoff_init(onoff_holdoff * HZ, onoff_interval * HZ);
+       firsterr = torture_onoff_init(onoff_holdoff * HZ, onoff_interval);
        if (firsterr)
                goto unwind;
        firsterr = rcu_torture_stall_init();
@@ -1926,7 +2123,6 @@ rcu_torture_init(void)
                                goto unwind;
                }
        }
-       rcutorture_record_test_transition();
        torture_init_end();
        return 0;
 
index b4123d7..6c9866a 100644
@@ -26,6 +26,8 @@
  *
  */
 
+#define pr_fmt(fmt) "rcu: " fmt
+
 #include <linux/export.h>
 #include <linux/mutex.h>
 #include <linux/percpu.h>
@@ -390,7 +392,8 @@ void _cleanup_srcu_struct(struct srcu_struct *sp, bool quiesced)
                }
        if (WARN_ON(rcu_seq_state(READ_ONCE(sp->srcu_gp_seq)) != SRCU_STATE_IDLE) ||
            WARN_ON(srcu_readers_active(sp))) {
-               pr_info("%s: Active srcu_struct %p state: %d\n", __func__, sp, rcu_seq_state(READ_ONCE(sp->srcu_gp_seq)));
+               pr_info("%s: Active srcu_struct %p state: %d\n",
+                       __func__, sp, rcu_seq_state(READ_ONCE(sp->srcu_gp_seq)));
                return; /* Caller forgot to stop doing call_srcu()? */
        }
        free_percpu(sp->sda);
@@ -641,6 +644,9 @@ static void srcu_funnel_exp_start(struct srcu_struct *sp, struct srcu_node *snp,
  * period s.  Losers must either ensure that their desired grace-period
  * number is recorded on at least their leaf srcu_node structure, or they
  * must take steps to invoke their own callbacks.
+ *
+ * Note that this function also does the work of srcu_funnel_exp_start(),
+ * in some cases by directly invoking it.
  */
 static void srcu_funnel_gp_start(struct srcu_struct *sp, struct srcu_data *sdp,
                                 unsigned long s, bool do_norm)
@@ -823,17 +829,17 @@ static void srcu_leak_callback(struct rcu_head *rhp)
  * more than one CPU, this means that when "func()" is invoked, each CPU
  * is guaranteed to have executed a full memory barrier since the end of
  * its last corresponding SRCU read-side critical section whose beginning
- * preceded the call to call_rcu().  It also means that each CPU executing
+ * preceded the call to call_srcu().  It also means that each CPU executing
  * an SRCU read-side critical section that continues beyond the start of
- * "func()" must have executed a memory barrier after the call_rcu()
+ * "func()" must have executed a memory barrier after the call_srcu()
  * but before the beginning of that SRCU read-side critical section.
  * Note that these guarantees include CPUs that are offline, idle, or
  * executing in user mode, as well as CPUs that are executing in the kernel.
  *
- * Furthermore, if CPU A invoked call_rcu() and CPU B invoked the
+ * Furthermore, if CPU A invoked call_srcu() and CPU B invoked the
  * resulting SRCU callback function "func()", then both CPU A and CPU
  * B are guaranteed to execute a full memory barrier during the time
- * interval between the call to call_rcu() and the invocation of "func()".
+ * interval between the call to call_srcu() and the invocation of "func()".
  * This guarantee applies even if CPU A and CPU B are the same CPU (but
  * again only if the system has more than one CPU).
  *
@@ -1246,13 +1252,12 @@ static void process_srcu(struct work_struct *work)
 
 void srcutorture_get_gp_data(enum rcutorture_type test_type,
                             struct srcu_struct *sp, int *flags,
-                            unsigned long *gpnum, unsigned long *completed)
+                            unsigned long *gp_seq)
 {
        if (test_type != SRCU_FLAVOR)
                return;
        *flags = 0;
-       *completed = rcu_seq_ctr(sp->srcu_gp_seq);
-       *gpnum = rcu_seq_ctr(sp->srcu_gp_seq_needed);
+       *gp_seq = rcu_seq_current(&sp->srcu_gp_seq);
 }
 EXPORT_SYMBOL_GPL(srcutorture_get_gp_data);
 
@@ -1263,16 +1268,17 @@ void srcu_torture_stats_print(struct srcu_struct *sp, char *tt, char *tf)
        unsigned long s0 = 0, s1 = 0;
 
        idx = sp->srcu_idx & 0x1;
-       pr_alert("%s%s Tree SRCU per-CPU(idx=%d):", tt, tf, idx);
+       pr_alert("%s%s Tree SRCU g%ld per-CPU(idx=%d):",
+                tt, tf, rcu_seq_current(&sp->srcu_gp_seq), idx);
        for_each_possible_cpu(cpu) {
                unsigned long l0, l1;
                unsigned long u0, u1;
                long c0, c1;
-               struct srcu_data *counts;
+               struct srcu_data *sdp;
 
-               counts = per_cpu_ptr(sp->sda, cpu);
-               u0 = counts->srcu_unlock_count[!idx];
-               u1 = counts->srcu_unlock_count[idx];
+               sdp = per_cpu_ptr(sp->sda, cpu);
+               u0 = sdp->srcu_unlock_count[!idx];
+               u1 = sdp->srcu_unlock_count[idx];
 
                /*
                 * Make sure that a lock is always counted if the corresponding
@@ -1280,12 +1286,13 @@ void srcu_torture_stats_print(struct srcu_struct *sp, char *tt, char *tf)
                 */
                smp_rmb();
 
-               l0 = counts->srcu_lock_count[!idx];
-               l1 = counts->srcu_lock_count[idx];
+               l0 = sdp->srcu_lock_count[!idx];
+               l1 = sdp->srcu_lock_count[idx];
 
                c0 = l0 - u0;
                c1 = l1 - u1;
-               pr_cont(" %d(%ld,%ld)", cpu, c0, c1);
+               pr_cont(" %d(%ld,%ld %1p)",
+                       cpu, c0, c1, rcu_segcblist_head(&sdp->srcu_cblist));
                s0 += c0;
                s1 += c1;
        }
index a64eee0..befc932 100644
@@ -122,10 +122,8 @@ void rcu_check_callbacks(int user)
 {
        if (user)
                rcu_sched_qs();
-       else if (!in_softirq())
+       if (user || !in_softirq())
                rcu_bh_qs();
-       if (user)
-               rcu_note_voluntary_context_switch(current);
 }
 
 /*
index 91f888d..0b760c1 100644
@@ -27,6 +27,9 @@
  * For detailed explanation of Read-Copy Update mechanism see -
  *     Documentation/RCU
  */
+
+#define pr_fmt(fmt) "rcu: " fmt
+
 #include <linux/types.h>
 #include <linux/kernel.h>
 #include <linux/init.h>
@@ -95,13 +98,13 @@ struct rcu_state sname##_state = { \
        .rda = &sname##_data, \
        .call = cr, \
        .gp_state = RCU_GP_IDLE, \
-       .gpnum = 0UL - 300UL, \
-       .completed = 0UL - 300UL, \
+       .gp_seq = (0UL - 300UL) << RCU_SEQ_CTR_SHIFT, \
        .barrier_mutex = __MUTEX_INITIALIZER(sname##_state.barrier_mutex), \
        .name = RCU_STATE_NAME(sname), \
        .abbr = sabbr, \
        .exp_mutex = __MUTEX_INITIALIZER(sname##_state.exp_mutex), \
        .exp_wake_mutex = __MUTEX_INITIALIZER(sname##_state.exp_wake_mutex), \
+       .ofl_lock = __SPIN_LOCK_UNLOCKED(sname##_state.ofl_lock), \
 }
 
 RCU_STATE_INITIALIZER(rcu_sched, 's', call_rcu_sched);
@@ -155,6 +158,9 @@ EXPORT_SYMBOL_GPL(rcu_scheduler_active);
  */
 static int rcu_scheduler_fully_active __read_mostly;
 
+static void
+rcu_report_qs_rnp(unsigned long mask, struct rcu_state *rsp,
+                 struct rcu_node *rnp, unsigned long gps, unsigned long flags);
 static void rcu_init_new_rnp(struct rcu_node *rnp_leaf);
 static void rcu_cleanup_dead_rnp(struct rcu_node *rnp_leaf);
 static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu);
@@ -177,6 +183,13 @@ module_param(gp_init_delay, int, 0444);
 static int gp_cleanup_delay;
 module_param(gp_cleanup_delay, int, 0444);
 
+/* Retrieve the RCU kthread priority for rcutorture. */
+int rcu_get_gp_kthreads_prio(void)
+{
+       return kthread_prio;
+}
+EXPORT_SYMBOL_GPL(rcu_get_gp_kthreads_prio);
+
 /*
  * Number of grace periods between delays, normalized by the duration of
  * the delay.  The longer the delay, the more the grace periods between
@@ -188,18 +201,6 @@ module_param(gp_cleanup_delay, int, 0444);
  */
 #define PER_RCU_NODE_PERIOD 3  /* Number of grace periods between delays. */
 
-/*
- * Track the rcutorture test sequence number and the update version
- * number within a given test.  The rcutorture_testseq is incremented
- * on every rcutorture module load and unload, so has an odd value
- * when a test is running.  The rcutorture_vernum is set to zero
- * when rcutorture starts and is incremented on each rcutorture update.
- * These variables enable correlating rcutorture output with the
- * RCU tracing information.
- */
-unsigned long rcutorture_testseq;
-unsigned long rcutorture_vernum;
-
 /*
  * Compute the mask of online CPUs for the specified rcu_node structure.
  * This will not be stable unless the rcu_node structure's ->lock is
@@ -218,7 +219,7 @@ unsigned long rcu_rnp_online_cpus(struct rcu_node *rnp)
  */
 static int rcu_gp_in_progress(struct rcu_state *rsp)
 {
-       return READ_ONCE(rsp->completed) != READ_ONCE(rsp->gpnum);
+       return rcu_seq_state(rcu_seq_current(&rsp->gp_seq));
 }
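+
+/*
+ * Editorial sketch of the gp_seq encoding that replaces the old
+ * gpnum/completed pair (mirroring kernel/rcu/rcu.h): the low
+ * RCU_SEQ_CTR_SHIFT bits hold the grace-period state and the rest a
+ * single counter, so "a grace period is in progress" reduces to "the
+ * state bits are nonzero", as tested above.
+ */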
 
 /*
@@ -233,7 +234,7 @@ void rcu_sched_qs(void)
        if (!__this_cpu_read(rcu_sched_data.cpu_no_qs.s))
                return;
        trace_rcu_grace_period(TPS("rcu_sched"),
-                              __this_cpu_read(rcu_sched_data.gpnum),
+                              __this_cpu_read(rcu_sched_data.gp_seq),
                               TPS("cpuqs"));
        __this_cpu_write(rcu_sched_data.cpu_no_qs.b.norm, false);
        if (!__this_cpu_read(rcu_sched_data.cpu_no_qs.b.exp))
@@ -248,7 +249,7 @@ void rcu_bh_qs(void)
        RCU_LOCKDEP_WARN(preemptible(), "rcu_bh_qs() invoked with preemption enabled!!!");
        if (__this_cpu_read(rcu_bh_data.cpu_no_qs.s)) {
                trace_rcu_grace_period(TPS("rcu_bh"),
-                                      __this_cpu_read(rcu_bh_data.gpnum),
+                                      __this_cpu_read(rcu_bh_data.gp_seq),
                                       TPS("cpuqs"));
                __this_cpu_write(rcu_bh_data.cpu_no_qs.b.norm, false);
        }
@@ -379,20 +380,6 @@ static bool rcu_dynticks_in_eqs_since(struct rcu_dynticks *rdtp, int snap)
        return snap != rcu_dynticks_snap(rdtp);
 }
 
-/*
- * Do a double-increment of the ->dynticks counter to emulate a
- * momentary idle-CPU quiescent state.
- */
-static void rcu_dynticks_momentary_idle(void)
-{
-       struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
-       int special = atomic_add_return(2 * RCU_DYNTICK_CTRL_CTR,
-                                       &rdtp->dynticks);
-
-       /* It is illegal to call this from idle state. */
-       WARN_ON_ONCE(!(special & RCU_DYNTICK_CTRL_CTR));
-}
-
 /*
  * Set the special (bottom) bit of the specified CPU so that it
  * will take special action (such as flushing its TLB) on the
@@ -424,12 +411,17 @@ bool rcu_eqs_special_set(int cpu)
  *
  * We inform the RCU core by emulating a zero-duration dyntick-idle period.
  *
- * The caller must have disabled interrupts.
+ * The caller must have disabled interrupts and must not be idle.
  */
 static void rcu_momentary_dyntick_idle(void)
 {
+       struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
+       int special;
+
        raw_cpu_write(rcu_dynticks.rcu_need_heavy_qs, false);
-       rcu_dynticks_momentary_idle();
+       special = atomic_add_return(2 * RCU_DYNTICK_CTRL_CTR, &rdtp->dynticks);
+       /* It is illegal to call this from idle state. */
+       WARN_ON_ONCE(!(special & RCU_DYNTICK_CTRL_CTR));
 }
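+
+/*
+ * Editorial note: adding 2 * RCU_DYNTICK_CTRL_CTR advances the
+ * ->dynticks counter by two transitions, i.e. an idle-entry/idle-exit
+ * pair, which is what makes this a zero-duration dyntick-idle period.
+ */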
 
 /*
@@ -451,7 +443,7 @@ void rcu_note_context_switch(bool preempt)
                rcu_momentary_dyntick_idle();
        this_cpu_inc(rcu_dynticks.rcu_qs_ctr);
        if (!preempt)
-               rcu_note_voluntary_context_switch_lite(current);
+               rcu_tasks_qs(current);
 out:
        trace_rcu_utilization(TPS("End context switch"));
        barrier(); /* Avoid RCU read-side critical sections leaking up. */
@@ -513,8 +505,38 @@ static ulong jiffies_till_first_fqs = ULONG_MAX;
 static ulong jiffies_till_next_fqs = ULONG_MAX;
 static bool rcu_kick_kthreads;
 
-module_param(jiffies_till_first_fqs, ulong, 0644);
-module_param(jiffies_till_next_fqs, ulong, 0644);
+static int param_set_first_fqs_jiffies(const char *val, const struct kernel_param *kp)
+{
+       ulong j;
+       int ret = kstrtoul(val, 0, &j);
+
+       if (!ret)
+               WRITE_ONCE(*(ulong *)kp->arg, (j > HZ) ? HZ : j);
+       return ret;
+}
+
+static int param_set_next_fqs_jiffies(const char *val, const struct kernel_param *kp)
+{
+       ulong j;
+       int ret = kstrtoul(val, 0, &j);
+
+       if (!ret)
+               WRITE_ONCE(*(ulong *)kp->arg, (j > HZ) ? HZ : (j ?: 1));
+       return ret;
+}
+
+static struct kernel_param_ops first_fqs_jiffies_ops = {
+       .set = param_set_first_fqs_jiffies,
+       .get = param_get_ulong,
+};
+
+static struct kernel_param_ops next_fqs_jiffies_ops = {
+       .set = param_set_next_fqs_jiffies,
+       .get = param_get_ulong,
+};
+
+module_param_cb(jiffies_till_first_fqs, &first_fqs_jiffies_ops, &jiffies_till_first_fqs, 0644);
+module_param_cb(jiffies_till_next_fqs, &next_fqs_jiffies_ops, &jiffies_till_next_fqs, 0644);
 module_param(rcu_kick_kthreads, bool, 0644);
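+
+/*
+ * Editorial sketch of the observable behavior: with the callbacks
+ * above, an out-of-range write such as
+ *
+ *   echo 100000 > /sys/module/rcutree/parameters/jiffies_till_first_fqs
+ *
+ * is clamped to HZ, and jiffies_till_next_fqs is additionally floored
+ * at 1.  The sysfs path assumes the usual "rcutree" parameter prefix.
+ */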
 
 /*
@@ -529,58 +551,31 @@ static void force_quiescent_state(struct rcu_state *rsp);
 static int rcu_pending(void);
 
 /*
- * Return the number of RCU batches started thus far for debug & stats.
+ * Return the number of RCU GPs completed thus far for debug & stats.
  */
-unsigned long rcu_batches_started(void)
+unsigned long rcu_get_gp_seq(void)
 {
-       return rcu_state_p->gpnum;
+       return READ_ONCE(rcu_state_p->gp_seq);
 }
-EXPORT_SYMBOL_GPL(rcu_batches_started);
+EXPORT_SYMBOL_GPL(rcu_get_gp_seq);
 
 /*
- * Return the number of RCU-sched batches started thus far for debug & stats.
+ * Return the number of RCU-sched GPs completed thus far for debug & stats.
  */
-unsigned long rcu_batches_started_sched(void)
+unsigned long rcu_sched_get_gp_seq(void)
 {
-       return rcu_sched_state.gpnum;
+       return READ_ONCE(rcu_sched_state.gp_seq);
 }
-EXPORT_SYMBOL_GPL(rcu_batches_started_sched);
+EXPORT_SYMBOL_GPL(rcu_sched_get_gp_seq);
 
 /*
- * Return the number of RCU BH batches started thus far for debug & stats.
+ * Return the number of RCU-bh GPs completed thus far for debug & stats.
  */
-unsigned long rcu_batches_started_bh(void)
+unsigned long rcu_bh_get_gp_seq(void)
 {
-       return rcu_bh_state.gpnum;
+       return READ_ONCE(rcu_bh_state.gp_seq);
 }
-EXPORT_SYMBOL_GPL(rcu_batches_started_bh);
-
-/*
- * Return the number of RCU batches completed thus far for debug & stats.
- */
-unsigned long rcu_batches_completed(void)
-{
-       return rcu_state_p->completed;
-}
-EXPORT_SYMBOL_GPL(rcu_batches_completed);
-
-/*
- * Return the number of RCU-sched batches completed thus far for debug & stats.
- */
-unsigned long rcu_batches_completed_sched(void)
-{
-       return rcu_sched_state.completed;
-}
-EXPORT_SYMBOL_GPL(rcu_batches_completed_sched);
-
-/*
- * Return the number of RCU BH batches completed thus far for debug & stats.
- */
-unsigned long rcu_batches_completed_bh(void)
-{
-       return rcu_bh_state.completed;
-}
-EXPORT_SYMBOL_GPL(rcu_batches_completed_bh);
+EXPORT_SYMBOL_GPL(rcu_bh_get_gp_seq);
 
 /*
  * Return the number of RCU expedited batches completed thus far for
@@ -636,35 +631,42 @@ EXPORT_SYMBOL_GPL(rcu_sched_force_quiescent_state);
  */
 void show_rcu_gp_kthreads(void)
 {
+       int cpu;
+       struct rcu_data *rdp;
+       struct rcu_node *rnp;
        struct rcu_state *rsp;
 
        for_each_rcu_flavor(rsp) {
                pr_info("%s: wait state: %d ->state: %#lx\n",
                        rsp->name, rsp->gp_state, rsp->gp_kthread->state);
+               rcu_for_each_node_breadth_first(rsp, rnp) {
+                       if (ULONG_CMP_GE(rsp->gp_seq, rnp->gp_seq_needed))
+                               continue;
+                       pr_info("\trcu_node %d:%d ->gp_seq %lu ->gp_seq_needed %lu\n",
+                               rnp->grplo, rnp->grphi, rnp->gp_seq,
+                               rnp->gp_seq_needed);
+                       if (!rcu_is_leaf_node(rnp))
+                               continue;
+                       for_each_leaf_node_possible_cpu(rnp, cpu) {
+                               rdp = per_cpu_ptr(rsp->rda, cpu);
+                               if (rdp->gpwrap ||
+                                   ULONG_CMP_GE(rsp->gp_seq,
+                                                rdp->gp_seq_needed))
+                                       continue;
+                               pr_info("\tcpu %d ->gp_seq_needed %lu\n",
+                                       cpu, rdp->gp_seq_needed);
+                       }
+               }
                /* sched_show_task(rsp->gp_kthread); */
        }
 }
 EXPORT_SYMBOL_GPL(show_rcu_gp_kthreads);
 
-/*
- * Record the number of times rcutorture tests have been initiated and
- * terminated.  This information allows the debugfs tracing stats to be
- * correlated to the rcutorture messages, even when the rcutorture module
- * is being repeatedly loaded and unloaded.  In other words, we cannot
- * store this state in rcutorture itself.
- */
-void rcutorture_record_test_transition(void)
-{
-       rcutorture_testseq++;
-       rcutorture_vernum = 0;
-}
-EXPORT_SYMBOL_GPL(rcutorture_record_test_transition);
-
 /*
  * Send along grace-period-related data for rcutorture diagnostics.
  */
 void rcutorture_get_gp_data(enum rcutorture_type test_type, int *flags,
-                           unsigned long *gpnum, unsigned long *completed)
+                           unsigned long *gp_seq)
 {
        struct rcu_state *rsp = NULL;
 
@@ -684,22 +686,10 @@ void rcutorture_get_gp_data(enum rcutorture_type test_type, int *flags,
        if (rsp == NULL)
                return;
        *flags = READ_ONCE(rsp->gp_flags);
-       *gpnum = READ_ONCE(rsp->gpnum);
-       *completed = READ_ONCE(rsp->completed);
+       *gp_seq = rcu_seq_current(&rsp->gp_seq);
 }
 EXPORT_SYMBOL_GPL(rcutorture_get_gp_data);
 
-/*
- * Record the number of writer passes through the current rcutorture test.
- * This is also used to correlate debugfs tracing stats with the rcutorture
- * messages.
- */
-void rcutorture_record_progress(unsigned long vernum)
-{
-       rcutorture_vernum++;
-}
-EXPORT_SYMBOL_GPL(rcutorture_record_progress);
-
 /*
  * Return the root node of the specified rcu_state structure.
  */
@@ -1059,41 +1049,41 @@ void rcu_request_urgent_qs_task(struct task_struct *t)
 #if defined(CONFIG_PROVE_RCU) && defined(CONFIG_HOTPLUG_CPU)
 
 /*
- * Is the current CPU online?  Disable preemption to avoid false positives
- * that could otherwise happen due to the current CPU number being sampled,
- * this task being preempted, its old CPU being taken offline, resuming
- * on some other CPU, then determining that its old CPU is now offline.
- * It is OK to use RCU on an offline processor during initial boot, hence
- * the check for rcu_scheduler_fully_active.  Note also that it is OK
- * for a CPU coming online to use RCU for one jiffy prior to marking itself
- * online in the cpu_online_mask.  Similarly, it is OK for a CPU going
- * offline to continue to use RCU for one jiffy after marking itself
- * offline in the cpu_online_mask.  This leniency is necessary given the
- * non-atomic nature of the online and offline processing, for example,
- * the fact that a CPU enters the scheduler after completing the teardown
- * of the CPU.
+ * Is the current CPU online as far as RCU is concerned?
  *
- * This is also why RCU internally marks CPUs online during in the
- * preparation phase and offline after the CPU has been taken down.
+ * Disable preemption to avoid false positives that could otherwise
+ * happen due to the current CPU number being sampled, this task being
+ * preempted, its old CPU being taken offline, resuming on some other CPU,
+ * then determining that its old CPU is now offline.  Because there are
+ * multiple flavors of RCU, and because this function can be called in the
+ * midst of updating the flavors while a given CPU is coming online or
+ * going offline, it is necessary to check all flavors.  If any flavor
+ * believes that the given CPU is online, it is considered to be online.
  *
- * Disable checking if in an NMI handler because we cannot safely report
- * errors from NMI handlers anyway.
+ * Disable checking if in an NMI handler because we cannot safely
+ * report errors from NMI handlers anyway.  In addition, it is OK to use
+ * RCU on an offline processor during initial boot, hence the check for
+ * rcu_scheduler_fully_active.
  */
 bool rcu_lockdep_current_cpu_online(void)
 {
        struct rcu_data *rdp;
        struct rcu_node *rnp;
-       bool ret;
+       struct rcu_state *rsp;
 
-       if (in_nmi())
+       if (in_nmi() || !rcu_scheduler_fully_active)
                return true;
        preempt_disable();
-       rdp = this_cpu_ptr(&rcu_sched_data);
-       rnp = rdp->mynode;
-       ret = (rdp->grpmask & rcu_rnp_online_cpus(rnp)) ||
-             !rcu_scheduler_fully_active;
+       for_each_rcu_flavor(rsp) {
+               rdp = this_cpu_ptr(rsp->rda);
+               rnp = rdp->mynode;
+               if (rdp->grpmask & rcu_rnp_online_cpus(rnp)) {
+                       preempt_enable();
+                       return true;
+               }
+       }
        preempt_enable();
-       return ret;
+       return false;
 }
 EXPORT_SYMBOL_GPL(rcu_lockdep_current_cpu_online);
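+
+/*
+ * Editorial sketch of typical use, assuming the standard
+ * RCU_LOCKDEP_WARN() helper:
+ *
+ *   RCU_LOCKDEP_WARN(!rcu_lockdep_current_cpu_online(),
+ *                    "RCU used on offline CPU");
+ *
+ * With the change above, such a check stays quiet as long as any
+ * flavor still considers the CPU online.
+ */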
 
@@ -1115,17 +1105,18 @@ static int rcu_is_cpu_rrupt_from_idle(void)
 /*
  * We are reporting a quiescent state on behalf of some other CPU, so
  * it is our responsibility to check for and handle potential overflow
- * of the rcu_node ->gpnum counter with respect to the rcu_data counters.
+ * of the rcu_node ->gp_seq counter with respect to the rcu_data counters.
  * After all, the CPU might be in deep idle state, and thus executing no
  * code whatsoever.
  */
 static void rcu_gpnum_ovf(struct rcu_node *rnp, struct rcu_data *rdp)
 {
        raw_lockdep_assert_held_rcu_node(rnp);
-       if (ULONG_CMP_LT(READ_ONCE(rdp->gpnum) + ULONG_MAX / 4, rnp->gpnum))
+       if (ULONG_CMP_LT(rcu_seq_current(&rdp->gp_seq) + ULONG_MAX / 4,
+                        rnp->gp_seq))
                WRITE_ONCE(rdp->gpwrap, true);
-       if (ULONG_CMP_LT(rdp->rcu_iw_gpnum + ULONG_MAX / 4, rnp->gpnum))
-               rdp->rcu_iw_gpnum = rnp->gpnum + ULONG_MAX / 4;
+       if (ULONG_CMP_LT(rdp->rcu_iw_gp_seq + ULONG_MAX / 4, rnp->gp_seq))
+               rdp->rcu_iw_gp_seq = rnp->gp_seq + ULONG_MAX / 4;
 }
 
 /*
@@ -1137,7 +1128,7 @@ static int dyntick_save_progress_counter(struct rcu_data *rdp)
 {
        rdp->dynticks_snap = rcu_dynticks_snap(rdp->dynticks);
        if (rcu_dynticks_in_eqs(rdp->dynticks_snap)) {
-               trace_rcu_fqs(rdp->rsp->name, rdp->gpnum, rdp->cpu, TPS("dti"));
+               trace_rcu_fqs(rdp->rsp->name, rdp->gp_seq, rdp->cpu, TPS("dti"));
                rcu_gpnum_ovf(rdp->mynode, rdp);
                return 1;
        }
@@ -1159,7 +1150,7 @@ static void rcu_iw_handler(struct irq_work *iwp)
        rnp = rdp->mynode;
        raw_spin_lock_rcu_node(rnp);
        if (!WARN_ON_ONCE(!rdp->rcu_iw_pending)) {
-               rdp->rcu_iw_gpnum = rnp->gpnum;
+               rdp->rcu_iw_gp_seq = rnp->gp_seq;
                rdp->rcu_iw_pending = false;
        }
        raw_spin_unlock_rcu_node(rnp);
@@ -1187,7 +1178,7 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
         * of the current RCU grace period.
         */
        if (rcu_dynticks_in_eqs_since(rdp->dynticks, rdp->dynticks_snap)) {
-               trace_rcu_fqs(rdp->rsp->name, rdp->gpnum, rdp->cpu, TPS("dti"));
+               trace_rcu_fqs(rdp->rsp->name, rdp->gp_seq, rdp->cpu, TPS("dti"));
                rdp->dynticks_fqs++;
                rcu_gpnum_ovf(rnp, rdp);
                return 1;
@@ -1203,8 +1194,8 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
        ruqp = per_cpu_ptr(&rcu_dynticks.rcu_urgent_qs, rdp->cpu);
        if (time_after(jiffies, rdp->rsp->gp_start + jtsq) &&
            READ_ONCE(rdp->rcu_qs_ctr_snap) != per_cpu(rcu_dynticks.rcu_qs_ctr, rdp->cpu) &&
-           READ_ONCE(rdp->gpnum) == rnp->gpnum && !rdp->gpwrap) {
-               trace_rcu_fqs(rdp->rsp->name, rdp->gpnum, rdp->cpu, TPS("rqc"));
+           rcu_seq_current(&rdp->gp_seq) == rnp->gp_seq && !rdp->gpwrap) {
+               trace_rcu_fqs(rdp->rsp->name, rdp->gp_seq, rdp->cpu, TPS("rqc"));
                rcu_gpnum_ovf(rnp, rdp);
                return 1;
        } else if (time_after(jiffies, rdp->rsp->gp_start + jtsq)) {
@@ -1212,12 +1203,25 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
                smp_store_release(ruqp, true);
        }
 
-       /* Check for the CPU being offline. */
-       if (!(rdp->grpmask & rcu_rnp_online_cpus(rnp))) {
-               trace_rcu_fqs(rdp->rsp->name, rdp->gpnum, rdp->cpu, TPS("ofl"));
-               rdp->offline_fqs++;
-               rcu_gpnum_ovf(rnp, rdp);
-               return 1;
+       /* If waiting too long on an offline CPU, complain. */
+       if (!(rdp->grpmask & rcu_rnp_online_cpus(rnp)) &&
+           time_after(jiffies, rdp->rsp->gp_start + HZ)) {
+               bool onl;
+               struct rcu_node *rnp1;
+
+               WARN_ON(1);  /* Offline CPUs are supposed to report QS! */
+               pr_info("%s: grp: %d-%d level: %d ->gp_seq %ld ->completedqs %ld\n",
+                       __func__, rnp->grplo, rnp->grphi, rnp->level,
+                       (long)rnp->gp_seq, (long)rnp->completedqs);
+               for (rnp1 = rnp; rnp1; rnp1 = rnp1->parent)
+                       pr_info("%s: %d:%d ->qsmask %#lx ->qsmaskinit %#lx ->qsmaskinitnext %#lx ->rcu_gp_init_mask %#lx\n",
+                               __func__, rnp1->grplo, rnp1->grphi, rnp1->qsmask, rnp1->qsmaskinit, rnp1->qsmaskinitnext, rnp1->rcu_gp_init_mask);
+               onl = !!(rdp->grpmask & rcu_rnp_online_cpus(rnp));
+               pr_info("%s %d: %c online: %ld(%d) offline: %ld(%d)\n",
+                       __func__, rdp->cpu, ".o"[onl],
+                       (long)rdp->rcu_onl_gp_seq, rdp->rcu_onl_gp_flags,
+                       (long)rdp->rcu_ofl_gp_seq, rdp->rcu_ofl_gp_flags);
+               return 1; /* Break things loose after complaining. */
        }
 
        /*
@@ -1256,11 +1260,11 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
        if (jiffies - rdp->rsp->gp_start > rcu_jiffies_till_stall_check() / 2) {
                resched_cpu(rdp->cpu);
                if (IS_ENABLED(CONFIG_IRQ_WORK) &&
-                   !rdp->rcu_iw_pending && rdp->rcu_iw_gpnum != rnp->gpnum &&
+                   !rdp->rcu_iw_pending && rdp->rcu_iw_gp_seq != rnp->gp_seq &&
                    (rnp->ffmask & rdp->grpmask)) {
                        init_irq_work(&rdp->rcu_iw, rcu_iw_handler);
                        rdp->rcu_iw_pending = true;
-                       rdp->rcu_iw_gpnum = rnp->gpnum;
+                       rdp->rcu_iw_gp_seq = rnp->gp_seq;
                        irq_work_queue_on(&rdp->rcu_iw, rdp->cpu);
                }
        }
@@ -1274,9 +1278,9 @@ static void record_gp_stall_check_time(struct rcu_state *rsp)
        unsigned long j1;
 
        rsp->gp_start = j;
-       smp_wmb(); /* Record start time before stall time. */
        j1 = rcu_jiffies_till_stall_check();
-       WRITE_ONCE(rsp->jiffies_stall, j + j1);
+       /* Record ->gp_start before ->jiffies_stall. */
+       smp_store_release(&rsp->jiffies_stall, j + j1); /* ^^^ */
        rsp->jiffies_resched = j + j1 / 2;
        rsp->n_force_qs_gpstart = READ_ONCE(rsp->n_force_qs);
 }
@@ -1302,9 +1306,9 @@ static void rcu_check_gp_kthread_starvation(struct rcu_state *rsp)
        j = jiffies;
        gpa = READ_ONCE(rsp->gp_activity);
        if (j - gpa > 2 * HZ) {
-               pr_err("%s kthread starved for %ld jiffies! g%lu c%lu f%#x %s(%d) ->state=%#lx ->cpu=%d\n",
+               pr_err("%s kthread starved for %ld jiffies! g%ld f%#x %s(%d) ->state=%#lx ->cpu=%d\n",
                       rsp->name, j - gpa,
-                      rsp->gpnum, rsp->completed,
+                      (long)rcu_seq_current(&rsp->gp_seq),
                       rsp->gp_flags,
                       gp_state_getname(rsp->gp_state), rsp->gp_state,
                       rsp->gp_kthread ? rsp->gp_kthread->state : ~0,
@@ -1359,16 +1363,15 @@ static void rcu_stall_kick_kthreads(struct rcu_state *rsp)
        }
 }
 
-static inline void panic_on_rcu_stall(void)
+static void panic_on_rcu_stall(void)
 {
        if (sysctl_panic_on_rcu_stall)
                panic("RCU Stall\n");
 }
 
-static void print_other_cpu_stall(struct rcu_state *rsp, unsigned long gpnum)
+static void print_other_cpu_stall(struct rcu_state *rsp, unsigned long gp_seq)
 {
        int cpu;
-       long delta;
        unsigned long flags;
        unsigned long gpa;
        unsigned long j;
@@ -1381,25 +1384,12 @@ static void print_other_cpu_stall(struct rcu_state *rsp, unsigned long gpnum)
        if (rcu_cpu_stall_suppress)
                return;
 
-       /* Only let one CPU complain about others per time interval. */
-
-       raw_spin_lock_irqsave_rcu_node(rnp, flags);
-       delta = jiffies - READ_ONCE(rsp->jiffies_stall);
-       if (delta < RCU_STALL_RAT_DELAY || !rcu_gp_in_progress(rsp)) {
-               raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
-               return;
-       }
-       WRITE_ONCE(rsp->jiffies_stall,
-                  jiffies + 3 * rcu_jiffies_till_stall_check() + 3);
-       raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
-
        /*
         * OK, time to rat on our buddy...
         * See Documentation/RCU/stallwarn.txt for info on how to debug
         * RCU CPU stall warnings.
         */
-       pr_err("INFO: %s detected stalls on CPUs/tasks:",
-              rsp->name);
+       pr_err("INFO: %s detected stalls on CPUs/tasks:", rsp->name);
        print_cpu_stall_info_begin();
        rcu_for_each_leaf_node(rsp, rnp) {
                raw_spin_lock_irqsave_rcu_node(rnp, flags);
@@ -1418,17 +1408,16 @@ static void print_other_cpu_stall(struct rcu_state *rsp, unsigned long gpnum)
        for_each_possible_cpu(cpu)
                totqlen += rcu_segcblist_n_cbs(&per_cpu_ptr(rsp->rda,
                                                            cpu)->cblist);
-       pr_cont("(detected by %d, t=%ld jiffies, g=%ld, c=%ld, q=%lu)\n",
+       pr_cont("(detected by %d, t=%ld jiffies, g=%ld, q=%lu)\n",
               smp_processor_id(), (long)(jiffies - rsp->gp_start),
-              (long)rsp->gpnum, (long)rsp->completed, totqlen);
+              (long)rcu_seq_current(&rsp->gp_seq), totqlen);
        if (ndetected) {
                rcu_dump_cpu_stacks(rsp);
 
                /* Complain about tasks blocking the grace period. */
                rcu_print_detail_task_stall(rsp);
        } else {
-               if (READ_ONCE(rsp->gpnum) != gpnum ||
-                   READ_ONCE(rsp->completed) == gpnum) {
+               if (rcu_seq_current(&rsp->gp_seq) != gp_seq) {
                        pr_err("INFO: Stall ended before state dump start\n");
                } else {
                        j = jiffies;
@@ -1441,6 +1430,10 @@ static void print_other_cpu_stall(struct rcu_state *rsp, unsigned long gpnum)
                        sched_show_task(current);
                }
        }
+       /* Rewrite if needed in case of slow consoles. */
+       if (ULONG_CMP_GE(jiffies, READ_ONCE(rsp->jiffies_stall)))
+               WRITE_ONCE(rsp->jiffies_stall,
+                          jiffies + 3 * rcu_jiffies_till_stall_check() + 3);
 
        rcu_check_gp_kthread_starvation(rsp);
 
@@ -1476,15 +1469,16 @@ static void print_cpu_stall(struct rcu_state *rsp)
        for_each_possible_cpu(cpu)
                totqlen += rcu_segcblist_n_cbs(&per_cpu_ptr(rsp->rda,
                                                            cpu)->cblist);
-       pr_cont(" (t=%lu jiffies g=%ld c=%ld q=%lu)\n",
+       pr_cont(" (t=%lu jiffies g=%ld q=%lu)\n",
                jiffies - rsp->gp_start,
-               (long)rsp->gpnum, (long)rsp->completed, totqlen);
+               (long)rcu_seq_current(&rsp->gp_seq), totqlen);
 
        rcu_check_gp_kthread_starvation(rsp);
 
        rcu_dump_cpu_stacks(rsp);
 
        raw_spin_lock_irqsave_rcu_node(rnp, flags);
+       /* Rewrite if needed in case of slow consoles. */
        if (ULONG_CMP_GE(jiffies, READ_ONCE(rsp->jiffies_stall)))
                WRITE_ONCE(rsp->jiffies_stall,
                           jiffies + 3 * rcu_jiffies_till_stall_check() + 3);
@@ -1504,10 +1498,11 @@ static void print_cpu_stall(struct rcu_state *rsp)
 
 static void check_cpu_stall(struct rcu_state *rsp, struct rcu_data *rdp)
 {
-       unsigned long completed;
-       unsigned long gpnum;
+       unsigned long gs1;
+       unsigned long gs2;
        unsigned long gps;
        unsigned long j;
+       unsigned long jn;
        unsigned long js;
        struct rcu_node *rnp;
 
@@ -1520,43 +1515,46 @@ static void check_cpu_stall(struct rcu_state *rsp, struct rcu_data *rdp)
        /*
         * Lots of memory barriers to reject false positives.
         *
-        * The idea is to pick up rsp->gpnum, then rsp->jiffies_stall,
-        * then rsp->gp_start, and finally rsp->completed.  These values
-        * are updated in the opposite order with memory barriers (or
-        * equivalent) during grace-period initialization and cleanup.
-        * Now, a false positive can occur if we get an new value of
-        * rsp->gp_start and a old value of rsp->jiffies_stall.  But given
-        * the memory barriers, the only way that this can happen is if one
-        * grace period ends and another starts between these two fetches.
-        * Detect this by comparing rsp->completed with the previous fetch
-        * from rsp->gpnum.
+        * The idea is to pick up rsp->gp_seq, then rsp->jiffies_stall,
+        * then rsp->gp_start, and finally another copy of rsp->gp_seq.
+        * These values are updated in the opposite order with memory
+        * barriers (or equivalent) during grace-period initialization
+        * and cleanup.  Now, a false positive can occur if we get a new
+        * value of rsp->gp_start and an old value of rsp->jiffies_stall.
+        * But given the memory barriers, the only way that this can happen
+        * is if one grace period ends and another starts between these
+        * two fetches.  This is detected by comparing the second fetch
+        * of rsp->gp_seq with the previous fetch from rsp->gp_seq.
         *
         * Given this check, comparisons of jiffies, rsp->jiffies_stall,
         * and rsp->gp_start suffice to forestall false positives.
         */
-       gpnum = READ_ONCE(rsp->gpnum);
-       smp_rmb(); /* Pick up ->gpnum first... */
+       gs1 = READ_ONCE(rsp->gp_seq);
+       smp_rmb(); /* Pick up ->gp_seq first... */
        js = READ_ONCE(rsp->jiffies_stall);
        smp_rmb(); /* ...then ->jiffies_stall before the rest... */
        gps = READ_ONCE(rsp->gp_start);
-       smp_rmb(); /* ...and finally ->gp_start before ->completed. */
-       completed = READ_ONCE(rsp->completed);
-       if (ULONG_CMP_GE(completed, gpnum) ||
+       smp_rmb(); /* ...and finally ->gp_start before ->gp_seq again. */
+       gs2 = READ_ONCE(rsp->gp_seq);
+       if (gs1 != gs2 ||
            ULONG_CMP_LT(j, js) ||
            ULONG_CMP_GE(gps, js))
                return; /* No stall or GP completed since entering function. */
        rnp = rdp->mynode;
+       jn = jiffies + 3 * rcu_jiffies_till_stall_check() + 3;
        if (rcu_gp_in_progress(rsp) &&
-           (READ_ONCE(rnp->qsmask) & rdp->grpmask)) {
+           (READ_ONCE(rnp->qsmask) & rdp->grpmask) &&
+           cmpxchg(&rsp->jiffies_stall, js, jn) == js) {
 
                /* We haven't checked in, so go dump stack. */
                print_cpu_stall(rsp);
 
        } else if (rcu_gp_in_progress(rsp) &&
-                  ULONG_CMP_GE(j, js + RCU_STALL_RAT_DELAY)) {
+                  ULONG_CMP_GE(j, js + RCU_STALL_RAT_DELAY) &&
+                  cmpxchg(&rsp->jiffies_stall, js, jn) == js) {
 
                /* They had a few time units to dump stack, so complain. */
-               print_other_cpu_stall(rsp, gpnum);
+               print_other_cpu_stall(rsp, gs2);
        }
 }
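
The two cmpxchg() calls above are what serialize reporting: every CPU that notices the stall sampled the same js, but only the one whose cmpxchg() succeeds gets to print, and its update pushes the next report roughly three stall periods out. The claim step, condensed into a sketch (hypothetical function name):

	/* Returns true for exactly one of the racing CPUs. */
	static bool claim_stall_report(struct rcu_state *rsp, unsigned long js)
	{
		unsigned long jn = jiffies + 3 * rcu_jiffies_till_stall_check() + 3;

		/* Succeeds only if ->jiffies_stall still holds the sampled js. */
		return cmpxchg(&rsp->jiffies_stall, js, jn) == js;
	}
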
 
@@ -1577,123 +1575,99 @@ void rcu_cpu_stall_reset(void)
                WRITE_ONCE(rsp->jiffies_stall, jiffies + ULONG_MAX / 2);
 }
 
-/*
- * Determine the value that ->completed will have at the end of the
- * next subsequent grace period.  This is used to tag callbacks so that
- * a CPU can invoke callbacks in a timely fashion even if that CPU has
- * been dyntick-idle for an extended period with callbacks under the
- * influence of RCU_FAST_NO_HZ.
- *
- * The caller must hold rnp->lock with interrupts disabled.
- */
-static unsigned long rcu_cbs_completed(struct rcu_state *rsp,
-                                      struct rcu_node *rnp)
-{
-       raw_lockdep_assert_held_rcu_node(rnp);
-
-       /*
-        * If RCU is idle, we just wait for the next grace period.
-        * But we can only be sure that RCU is idle if we are looking
-        * at the root rcu_node structure -- otherwise, a new grace
-        * period might have started, but just not yet gotten around
-        * to initializing the current non-root rcu_node structure.
-        */
-       if (rcu_get_root(rsp) == rnp && rnp->gpnum == rnp->completed)
-               return rnp->completed + 1;
-
-       /*
-        * If the current rcu_node structure believes that RCU is
-        * idle, and if the rcu_state structure does not yet reflect
-        * the start of a new grace period, then the next grace period
-        * will suffice.  The memory barrier is needed to accurately
-        * sample the rsp->gpnum, and pairs with the second lock
-        * acquisition in rcu_gp_init(), which is augmented with
-        * smp_mb__after_unlock_lock() for this purpose.
-        */
-       if (rnp->gpnum == rnp->completed) {
-               smp_mb(); /* See above block comment. */
-               if (READ_ONCE(rsp->gpnum) == rnp->completed)
-                       return rnp->completed + 1;
-       }
-
-       /*
-        * Otherwise, wait for a possible partial grace period and
-        * then the subsequent full grace period.
-        */
-       return rnp->completed + 2;
-}
-
 /* Trace-event wrapper function for trace_rcu_future_grace_period.  */
 static void trace_rcu_this_gp(struct rcu_node *rnp, struct rcu_data *rdp,
-                             unsigned long c, const char *s)
+                             unsigned long gp_seq_req, const char *s)
 {
-       trace_rcu_future_grace_period(rdp->rsp->name, rnp->gpnum,
-                                     rnp->completed, c, rnp->level,
-                                     rnp->grplo, rnp->grphi, s);
+       trace_rcu_future_grace_period(rdp->rsp->name, rnp->gp_seq, gp_seq_req,
+                                     rnp->level, rnp->grplo, rnp->grphi, s);
 }
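
Most of this patch is mechanical substitution of the old ->gpnum/->completed pair by the single ->gp_seq counter, whose bottom RCU_SEQ_CTR_SHIFT bits encode grace-period state and whose upper bits count grace periods. The helpers live in kernel/rcu/rcu.h; a condensed sketch of them (WARN_ON_ONCE sanity checks elided):

	#define RCU_SEQ_CTR_SHIFT	2
	#define RCU_SEQ_STATE_MASK	((1 << RCU_SEQ_CTR_SHIFT) - 1)

	/* Bottom bits: phase of the current GP; zero means idle. */
	static inline unsigned long rcu_seq_state(unsigned long s)
	{
		return s & RCU_SEQ_STATE_MASK;
	}

	/* Start a GP: ...00 (idle) becomes ...01 (in progress). */
	static inline void rcu_seq_start(unsigned long *sp)
	{
		WRITE_ONCE(*sp, *sp + 1);
		smp_mb(); /* GP initialization comes after counter increment. */
	}

	/* End a GP: round up to the next ...00 value. */
	static inline void rcu_seq_end(unsigned long *sp)
	{
		smp_mb(); /* GP work comes before counter increment. */
		WRITE_ONCE(*sp, (*sp | RCU_SEQ_STATE_MASK) + 1);
	}

	/*
	 * Snapshot: the ->gp_seq value marking the end of a full GP that
	 * begins after this call.  Worked example: *sp == 0b1001 (a GP is
	 * in progress); that GP ends at 0b1100, so the snapshot is 0b10000,
	 * the end of the following full grace period.
	 */
	static inline unsigned long rcu_seq_snap(unsigned long *sp)
	{
		unsigned long s;

		s = (READ_ONCE(*sp) + 2 * RCU_SEQ_STATE_MASK + 1) &
		    ~RCU_SEQ_STATE_MASK;
		smp_mb(); /* Above access must not bleed into critical section. */
		return s;
	}
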
 
 /*
+ * rcu_start_this_gp - Request the start of a particular grace period
+ * @rnp_start: The leaf node of the CPU from which to start.
+ * @rdp: The rcu_data corresponding to the CPU from which to start.
+ * @gp_seq_req: The gp_seq of the grace period to start.
+ *
  * Start the specified grace period, as needed to handle newly arrived
  * callbacks.  The required future grace periods are recorded in each
- * rcu_node structure's ->need_future_gp[] field.  Returns true if there
+ * rcu_node structure's ->gp_seq_needed field.  Returns true if there
  * is reason to awaken the grace-period kthread.
  *
  * The caller must hold the specified rcu_node structure's ->lock, which
  * is why the caller is responsible for waking the grace-period kthread.
+ *
+ * Returns true if the GP thread needs to be awakened, else false.
  */
-static bool rcu_start_this_gp(struct rcu_node *rnp, struct rcu_data *rdp,
-                             unsigned long c)
+static bool rcu_start_this_gp(struct rcu_node *rnp_start, struct rcu_data *rdp,
+                             unsigned long gp_seq_req)
 {
        bool ret = false;
        struct rcu_state *rsp = rdp->rsp;
-       struct rcu_node *rnp_root;
+       struct rcu_node *rnp;
 
        /*
         * Use funnel locking to either acquire the root rcu_node
         * structure's lock or bail out if the need for this grace period
-        * has already been recorded -- or has already started.  If there
-        * is already a grace period in progress in a non-leaf node, no
-        * recording is needed because the end of the grace period will
-        * scan the leaf rcu_node structures.  Note that rnp->lock must
-        * not be released.
+        * has already been recorded -- or if that grace period has in
+        * fact already started.  If there is already a grace period in
+        * progress in a non-leaf node, no recording is needed because the
+        * end of the grace period will scan the leaf rcu_node structures.
+        * Note that rnp_start->lock must not be released.
         */
-       raw_lockdep_assert_held_rcu_node(rnp);
-       trace_rcu_this_gp(rnp, rdp, c, TPS("Startleaf"));
-       for (rnp_root = rnp; 1; rnp_root = rnp_root->parent) {
-               if (rnp_root != rnp)
-                       raw_spin_lock_rcu_node(rnp_root);
-               WARN_ON_ONCE(ULONG_CMP_LT(rnp_root->gpnum +
-                                         need_future_gp_mask(), c));
-               if (need_future_gp_element(rnp_root, c) ||
-                   ULONG_CMP_GE(rnp_root->gpnum, c) ||
-                   (rnp != rnp_root &&
-                    rnp_root->gpnum != rnp_root->completed)) {
-                       trace_rcu_this_gp(rnp_root, rdp, c, TPS("Prestarted"));
+       raw_lockdep_assert_held_rcu_node(rnp_start);
+       trace_rcu_this_gp(rnp_start, rdp, gp_seq_req, TPS("Startleaf"));
+       for (rnp = rnp_start; 1; rnp = rnp->parent) {
+               if (rnp != rnp_start)
+                       raw_spin_lock_rcu_node(rnp);
+               if (ULONG_CMP_GE(rnp->gp_seq_needed, gp_seq_req) ||
+                   rcu_seq_started(&rnp->gp_seq, gp_seq_req) ||
+                   (rnp != rnp_start &&
+                    rcu_seq_state(rcu_seq_current(&rnp->gp_seq)))) {
+                       trace_rcu_this_gp(rnp, rdp, gp_seq_req,
+                                         TPS("Prestarted"));
                        goto unlock_out;
                }
-               need_future_gp_element(rnp_root, c) = true;
-               if (rnp_root != rnp && rnp_root->parent != NULL)
-                       raw_spin_unlock_rcu_node(rnp_root);
-               if (!rnp_root->parent)
+               rnp->gp_seq_needed = gp_seq_req;
+               if (rcu_seq_state(rcu_seq_current(&rnp->gp_seq))) {
+                       /*
+                        * We just marked the leaf or internal node, and a
+                        * grace period is in progress, which means that
+                        * rcu_gp_cleanup() will see the marking.  Bail to
+                        * reduce contention.
+                        */
+                       trace_rcu_this_gp(rnp_start, rdp, gp_seq_req,
+                                         TPS("Startedleaf"));
+                       goto unlock_out;
+               }
+               if (rnp != rnp_start && rnp->parent != NULL)
+                       raw_spin_unlock_rcu_node(rnp);
+               if (!rnp->parent)
                        break;  /* At root, and perhaps also leaf. */
        }
 
        /* If GP already in progress, just leave, otherwise start one. */
-       if (rnp_root->gpnum != rnp_root->completed) {
-               trace_rcu_this_gp(rnp_root, rdp, c, TPS("Startedleafroot"));
+       if (rcu_gp_in_progress(rsp)) {
+               trace_rcu_this_gp(rnp, rdp, gp_seq_req, TPS("Startedleafroot"));
                goto unlock_out;
        }
-       trace_rcu_this_gp(rnp_root, rdp, c, TPS("Startedroot"));
+       trace_rcu_this_gp(rnp, rdp, gp_seq_req, TPS("Startedroot"));
        WRITE_ONCE(rsp->gp_flags, rsp->gp_flags | RCU_GP_FLAG_INIT);
+       rsp->gp_req_activity = jiffies;
        if (!rsp->gp_kthread) {
-               trace_rcu_this_gp(rnp_root, rdp, c, TPS("NoGPkthread"));
+               trace_rcu_this_gp(rnp, rdp, gp_seq_req, TPS("NoGPkthread"));
                goto unlock_out;
        }
-       trace_rcu_grace_period(rsp->name, READ_ONCE(rsp->gpnum), TPS("newreq"));
+       trace_rcu_grace_period(rsp->name, READ_ONCE(rsp->gp_seq), TPS("newreq"));
        ret = true;  /* Caller must wake GP kthread. */
 unlock_out:
-       if (rnp != rnp_root)
-               raw_spin_unlock_rcu_node(rnp_root);
+       /* Push furthest requested GP to leaf node and rcu_data structure. */
+       if (ULONG_CMP_LT(gp_seq_req, rnp->gp_seq_needed)) {
+               rnp_start->gp_seq_needed = rnp->gp_seq_needed;
+               rdp->gp_seq_needed = rnp->gp_seq_needed;
+       }
+       if (rnp != rnp_start)
+               raw_spin_unlock_rcu_node(rnp);
        return ret;
 }
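
The rewritten loop above is funnel locking: each requester walks leaf-to-root holding at most one intermediate ->lock at a time, and drops out as soon as some ancestor already records a request at least as new, so only the first requester for a given grace period ever touches the root. Stripped to its skeleton (hypothetical function name; the caller holds the leaf's ->lock, and the root's ->lock is deliberately still held when true is returned, mirroring unlock_out above):

	static bool funnel_record_request(struct rcu_node *start, unsigned long req)
	{
		struct rcu_node *rnp;

		for (rnp = start; ; rnp = rnp->parent) {
			if (rnp != start)
				raw_spin_lock_rcu_node(rnp);
			if (ULONG_CMP_GE(rnp->gp_seq_needed, req)) {
				/* Already requested; bail before the root. */
				if (rnp != start)
					raw_spin_unlock_rcu_node(rnp);
				return false;
			}
			rnp->gp_seq_needed = req;
			if (rnp != start && rnp->parent)
				raw_spin_unlock_rcu_node(rnp);
			if (!rnp->parent)
				return true; /* At root, its ->lock still held. */
		}
	}
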
 
@@ -1703,13 +1677,13 @@ unlock_out:
  */
 static bool rcu_future_gp_cleanup(struct rcu_state *rsp, struct rcu_node *rnp)
 {
-       unsigned long c = rnp->completed;
        bool needmore;
        struct rcu_data *rdp = this_cpu_ptr(rsp->rda);
 
-       need_future_gp_element(rnp, c) = false;
-       needmore = need_any_future_gp(rnp);
-       trace_rcu_this_gp(rnp, rdp, c,
+       needmore = ULONG_CMP_LT(rnp->gp_seq, rnp->gp_seq_needed);
+       if (!needmore)
+               rnp->gp_seq_needed = rnp->gp_seq; /* Avoid counter wrap. */
+       trace_rcu_this_gp(rnp, rdp, rnp->gp_seq,
                          needmore ? TPS("CleanupMore") : TPS("Cleanup"));
        return needmore;
 }
@@ -1731,21 +1705,21 @@ static void rcu_gp_kthread_wake(struct rcu_state *rsp)
 }
 
 /*
- * If there is room, assign a ->completed number to any callbacks on
- * this CPU that have not already been assigned.  Also accelerate any
- * callbacks that were previously assigned a ->completed number that has
- * since proven to be too conservative, which can happen if callbacks get
- * assigned a ->completed number while RCU is idle, but with reference to
- * a non-root rcu_node structure.  This function is idempotent, so it does
- * not hurt to call it repeatedly.  Returns an flag saying that we should
- * awaken the RCU grace-period kthread.
+ * If there is room, assign a ->gp_seq number to any callbacks on this
+ * CPU that have not already been assigned.  Also accelerate any callbacks
+ * that were previously assigned a ->gp_seq number that has since proven
+ * to be too conservative, which can happen if callbacks get assigned a
+ * ->gp_seq number while RCU is idle, but with reference to a non-root
+ * rcu_node structure.  This function is idempotent, so it does not hurt
+ * to call it repeatedly.  Returns a flag saying that we should awaken
+ * the RCU grace-period kthread.
  *
  * The caller must hold rnp->lock with interrupts disabled.
  */
 static bool rcu_accelerate_cbs(struct rcu_state *rsp, struct rcu_node *rnp,
                               struct rcu_data *rdp)
 {
-       unsigned long c;
+       unsigned long gp_seq_req;
        bool ret = false;
 
        raw_lockdep_assert_held_rcu_node(rnp);
@@ -1764,22 +1738,50 @@ static bool rcu_accelerate_cbs(struct rcu_state *rsp, struct rcu_node *rnp,
         * accelerating callback invocation to an earlier grace-period
         * number.
         */
-       c = rcu_cbs_completed(rsp, rnp);
-       if (rcu_segcblist_accelerate(&rdp->cblist, c))
-               ret = rcu_start_this_gp(rnp, rdp, c);
+       gp_seq_req = rcu_seq_snap(&rsp->gp_seq);
+       if (rcu_segcblist_accelerate(&rdp->cblist, gp_seq_req))
+               ret = rcu_start_this_gp(rnp, rdp, gp_seq_req);
 
        /* Trace depending on how much we were able to accelerate. */
        if (rcu_segcblist_restempty(&rdp->cblist, RCU_WAIT_TAIL))
-               trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("AccWaitCB"));
+               trace_rcu_grace_period(rsp->name, rdp->gp_seq, TPS("AccWaitCB"));
        else
-               trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("AccReadyCB"));
+               trace_rcu_grace_period(rsp->name, rdp->gp_seq, TPS("AccReadyCB"));
        return ret;
 }
 
+/*
+ * Similar to rcu_accelerate_cbs(), but does not require that the leaf
+ * rcu_node structure's ->lock be held.  It consults the cached value
+ * of ->gp_seq_needed in the rcu_data structure, and if that indicates
+ * that a new grace-period request should be made, invokes rcu_accelerate_cbs()
+ * while holding the leaf rcu_node structure's ->lock.
+ */
+static void rcu_accelerate_cbs_unlocked(struct rcu_state *rsp,
+                                       struct rcu_node *rnp,
+                                       struct rcu_data *rdp)
+{
+       unsigned long c;
+       bool needwake;
+
+       lockdep_assert_irqs_disabled();
+       c = rcu_seq_snap(&rsp->gp_seq);
+       if (!rdp->gpwrap && ULONG_CMP_GE(rdp->gp_seq_needed, c)) {
+               /* Old request still live, so mark recent callbacks. */
+               (void)rcu_segcblist_accelerate(&rdp->cblist, c);
+               return;
+       }
+       raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */
+       needwake = rcu_accelerate_cbs(rsp, rnp, rdp);
+       raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */
+       if (needwake)
+               rcu_gp_kthread_wake(rsp);
+}
+
 /*
  * Move any callbacks whose grace period has completed to the
  * RCU_DONE_TAIL sublist, then compact the remaining sublists and
- * assign ->completed numbers to any callbacks in the RCU_NEXT_TAIL
+ * assign ->gp_seq numbers to any callbacks in the RCU_NEXT_TAIL
  * sublist.  This function is idempotent, so it does not hurt to
  * invoke it repeatedly.  As long as it is not invoked -too- often...
  * Returns true if the RCU grace-period kthread needs to be awakened.
@@ -1796,10 +1798,10 @@ static bool rcu_advance_cbs(struct rcu_state *rsp, struct rcu_node *rnp,
                return false;
 
        /*
-        * Find all callbacks whose ->completed numbers indicate that they
+        * Find all callbacks whose ->gp_seq numbers indicate that they
         * are ready to invoke, and put them into the RCU_DONE_TAIL sublist.
         */
-       rcu_segcblist_advance(&rdp->cblist, rnp->completed);
+       rcu_segcblist_advance(&rdp->cblist, rnp->gp_seq);
 
        /* Classify any remaining callbacks. */
        return rcu_accelerate_cbs(rsp, rnp, rdp);
@@ -1819,39 +1821,38 @@ static bool __note_gp_changes(struct rcu_state *rsp, struct rcu_node *rnp,
 
        raw_lockdep_assert_held_rcu_node(rnp);
 
-       /* Handle the ends of any preceding grace periods first. */
-       if (rdp->completed == rnp->completed &&
-           !unlikely(READ_ONCE(rdp->gpwrap))) {
-
-               /* No grace period end, so just accelerate recent callbacks. */
-               ret = rcu_accelerate_cbs(rsp, rnp, rdp);
+       if (rdp->gp_seq == rnp->gp_seq)
+               return false; /* Nothing to do. */
 
+       /* Handle the ends of any preceding grace periods first. */
+       if (rcu_seq_completed_gp(rdp->gp_seq, rnp->gp_seq) ||
+           unlikely(READ_ONCE(rdp->gpwrap))) {
+               ret = rcu_advance_cbs(rsp, rnp, rdp); /* Advance callbacks. */
+               trace_rcu_grace_period(rsp->name, rdp->gp_seq, TPS("cpuend"));
        } else {
-
-               /* Advance callbacks. */
-               ret = rcu_advance_cbs(rsp, rnp, rdp);
-
-               /* Remember that we saw this grace-period completion. */
-               rdp->completed = rnp->completed;
-               trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("cpuend"));
+               ret = rcu_accelerate_cbs(rsp, rnp, rdp); /* Recent callbacks. */
        }
 
-       if (rdp->gpnum != rnp->gpnum || unlikely(READ_ONCE(rdp->gpwrap))) {
+       /* Now handle the beginnings of any new-to-this-CPU grace periods. */
+       if (rcu_seq_new_gp(rdp->gp_seq, rnp->gp_seq) ||
+           unlikely(READ_ONCE(rdp->gpwrap))) {
                /*
                 * If the current grace period is waiting for this CPU,
                 * set up to detect a quiescent state, otherwise don't
                 * go looking for one.
                 */
-               rdp->gpnum = rnp->gpnum;
-               trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("cpustart"));
+               trace_rcu_grace_period(rsp->name, rnp->gp_seq, TPS("cpustart"));
                need_gp = !!(rnp->qsmask & rdp->grpmask);
                rdp->cpu_no_qs.b.norm = need_gp;
                rdp->rcu_qs_ctr_snap = __this_cpu_read(rcu_dynticks.rcu_qs_ctr);
                rdp->core_needs_qs = need_gp;
                zero_cpu_stall_ticks(rdp);
-               WRITE_ONCE(rdp->gpwrap, false);
-               rcu_gpnum_ovf(rnp, rdp);
        }
+       rdp->gp_seq = rnp->gp_seq;  /* Remember new grace-period state. */
+       if (ULONG_CMP_GE(rnp->gp_seq_needed, rdp->gp_seq_needed) || rdp->gpwrap)
+               rdp->gp_seq_needed = rnp->gp_seq_needed;
+       WRITE_ONCE(rdp->gpwrap, false);
+       rcu_gpnum_ovf(rnp, rdp);
        return ret;
 }
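
__note_gp_changes() now tells "a GP I was tracking has ended" apart from "a GP I had not seen has begun" with two predicates on the same counter, also from kernel/rcu/rcu.h. A condensed sketch with a worked example:

	/*
	 * Worked example: rdp->gp_seq == 8 (idle snapshot).  A GP starts
	 * (rnp->gp_seq == 9): completed_gp() is false, new_gp() is true.
	 * That GP later ends (rnp->gp_seq == 12): completed_gp() is true.
	 */

	/* Has the GP that 'old' was collected under since completed? */
	static inline bool rcu_seq_completed_gp(unsigned long old, unsigned long new)
	{
		return ULONG_CMP_LT(old, new & ~RCU_SEQ_STATE_MASK);
	}

	/* Has a GP unseen at 'old' since started? */
	static inline bool rcu_seq_new_gp(unsigned long old, unsigned long new)
	{
		return ULONG_CMP_LT((old + RCU_SEQ_STATE_MASK) & ~RCU_SEQ_STATE_MASK,
				    new);
	}
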
 
@@ -1863,8 +1864,7 @@ static void note_gp_changes(struct rcu_state *rsp, struct rcu_data *rdp)
 
        local_irq_save(flags);
        rnp = rdp->mynode;
-       if ((rdp->gpnum == READ_ONCE(rnp->gpnum) &&
-            rdp->completed == READ_ONCE(rnp->completed) &&
+       if ((rdp->gp_seq == rcu_seq_current(&rnp->gp_seq) &&
             !unlikely(READ_ONCE(rdp->gpwrap))) || /* w/out lock. */
            !raw_spin_trylock_rcu_node(rnp)) { /* irqs already off, so later. */
                local_irq_restore(flags);
@@ -1879,7 +1879,8 @@ static void note_gp_changes(struct rcu_state *rsp, struct rcu_data *rdp)
 static void rcu_gp_slow(struct rcu_state *rsp, int delay)
 {
        if (delay > 0 &&
-           !(rsp->gpnum % (rcu_num_nodes * PER_RCU_NODE_PERIOD * delay)))
+           !(rcu_seq_ctr(rsp->gp_seq) %
+             (rcu_num_nodes * PER_RCU_NODE_PERIOD * delay)))
                schedule_timeout_uninterruptible(delay);
 }
 
@@ -1888,7 +1889,9 @@ static void rcu_gp_slow(struct rcu_state *rsp, int delay)
  */
 static bool rcu_gp_init(struct rcu_state *rsp)
 {
+       unsigned long flags;
        unsigned long oldmask;
+       unsigned long mask;
        struct rcu_data *rdp;
        struct rcu_node *rnp = rcu_get_root(rsp);
 
@@ -1912,9 +1915,9 @@ static bool rcu_gp_init(struct rcu_state *rsp)
 
        /* Advance to a new grace period and initialize state. */
        record_gp_stall_check_time(rsp);
-       /* Record GP times before starting GP, hence smp_store_release(). */
-       smp_store_release(&rsp->gpnum, rsp->gpnum + 1);
-       trace_rcu_grace_period(rsp->name, rsp->gpnum, TPS("start"));
+       /* Record GP times before starting GP, hence rcu_seq_start(). */
+       rcu_seq_start(&rsp->gp_seq);
+       trace_rcu_grace_period(rsp->name, rsp->gp_seq, TPS("start"));
        raw_spin_unlock_irq_rcu_node(rnp);
 
        /*
@@ -1923,13 +1926,15 @@ static bool rcu_gp_init(struct rcu_state *rsp)
         * for subsequent online CPUs, and that quiescent-state forcing
         * will handle subsequent offline CPUs.
         */
+       rsp->gp_state = RCU_GP_ONOFF;
        rcu_for_each_leaf_node(rsp, rnp) {
-               rcu_gp_slow(rsp, gp_preinit_delay);
+               spin_lock(&rsp->ofl_lock);
                raw_spin_lock_irq_rcu_node(rnp);
                if (rnp->qsmaskinit == rnp->qsmaskinitnext &&
                    !rnp->wait_blkd_tasks) {
                        /* Nothing to do on this leaf rcu_node structure. */
                        raw_spin_unlock_irq_rcu_node(rnp);
+                       spin_unlock(&rsp->ofl_lock);
                        continue;
                }
 
@@ -1939,12 +1944,14 @@ static bool rcu_gp_init(struct rcu_state *rsp)
 
                /* If zero-ness of ->qsmaskinit changed, propagate up tree. */
                if (!oldmask != !rnp->qsmaskinit) {
-                       if (!oldmask) /* First online CPU for this rcu_node. */
-                               rcu_init_new_rnp(rnp);
-                       else if (rcu_preempt_has_tasks(rnp)) /* blocked tasks */
-                               rnp->wait_blkd_tasks = true;
-                       else /* Last offline CPU and can propagate. */
+                       if (!oldmask) { /* First online CPU for rcu_node. */
+                               if (!rnp->wait_blkd_tasks) /* Ever offline? */
+                                       rcu_init_new_rnp(rnp);
+                       } else if (rcu_preempt_has_tasks(rnp)) {
+                               rnp->wait_blkd_tasks = true; /* blocked tasks */
+                       } else { /* Last offline CPU and can propagate. */
                                rcu_cleanup_dead_rnp(rnp);
+                       }
                }
 
                /*
@@ -1953,18 +1960,19 @@ static bool rcu_gp_init(struct rcu_state *rsp)
                 * still offline, propagate up the rcu_node tree and
                 * clear ->wait_blkd_tasks.  Otherwise, if one of this
                 * rcu_node structure's CPUs has since come back online,
-                * simply clear ->wait_blkd_tasks (but rcu_cleanup_dead_rnp()
-                * checks for this, so just call it unconditionally).
+                * simply clear ->wait_blkd_tasks.
                 */
                if (rnp->wait_blkd_tasks &&
-                   (!rcu_preempt_has_tasks(rnp) ||
-                    rnp->qsmaskinit)) {
+                   (!rcu_preempt_has_tasks(rnp) || rnp->qsmaskinit)) {
                        rnp->wait_blkd_tasks = false;
-                       rcu_cleanup_dead_rnp(rnp);
+                       if (!rnp->qsmaskinit)
+                               rcu_cleanup_dead_rnp(rnp);
                }
 
                raw_spin_unlock_irq_rcu_node(rnp);
+               spin_unlock(&rsp->ofl_lock);
        }
+       rcu_gp_slow(rsp, gp_preinit_delay); /* Races with CPU hotplug. */
 
        /*
         * Set the quiescent-state-needed bits in all the rcu_node
@@ -1978,22 +1986,27 @@ static bool rcu_gp_init(struct rcu_state *rsp)
         * The grace period cannot complete until the initialization
         * process finishes, because this kthread handles both.
         */
+       rsp->gp_state = RCU_GP_INIT;
        rcu_for_each_node_breadth_first(rsp, rnp) {
                rcu_gp_slow(rsp, gp_init_delay);
-               raw_spin_lock_irq_rcu_node(rnp);
+               raw_spin_lock_irqsave_rcu_node(rnp, flags);
                rdp = this_cpu_ptr(rsp->rda);
-               rcu_preempt_check_blocked_tasks(rnp);
+               rcu_preempt_check_blocked_tasks(rsp, rnp);
                rnp->qsmask = rnp->qsmaskinit;
-               WRITE_ONCE(rnp->gpnum, rsp->gpnum);
-               if (WARN_ON_ONCE(rnp->completed != rsp->completed))
-                       WRITE_ONCE(rnp->completed, rsp->completed);
+               WRITE_ONCE(rnp->gp_seq, rsp->gp_seq);
                if (rnp == rdp->mynode)
                        (void)__note_gp_changes(rsp, rnp, rdp);
                rcu_preempt_boost_start_gp(rnp);
-               trace_rcu_grace_period_init(rsp->name, rnp->gpnum,
+               trace_rcu_grace_period_init(rsp->name, rnp->gp_seq,
                                            rnp->level, rnp->grplo,
                                            rnp->grphi, rnp->qsmask);
-               raw_spin_unlock_irq_rcu_node(rnp);
+               /* Quiescent states for tasks on any now-offline CPUs. */
+               mask = rnp->qsmask & ~rnp->qsmaskinitnext;
+               rnp->rcu_gp_init_mask = mask;
+               if ((mask || rnp->wait_blkd_tasks) && rcu_is_leaf_node(rnp))
+                       rcu_report_qs_rnp(mask, rsp, rnp, rnp->gp_seq, flags);
+               else
+                       raw_spin_unlock_irq_rcu_node(rnp);
                cond_resched_tasks_rcu_qs();
                WRITE_ONCE(rsp->gp_activity, jiffies);
        }
@@ -2053,6 +2066,7 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
 {
        unsigned long gp_duration;
        bool needgp = false;
+       unsigned long new_gp_seq;
        struct rcu_data *rdp;
        struct rcu_node *rnp = rcu_get_root(rsp);
        struct swait_queue_head *sq;
@@ -2074,19 +2088,22 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
        raw_spin_unlock_irq_rcu_node(rnp);
 
        /*
-        * Propagate new ->completed value to rcu_node structures so
-        * that other CPUs don't have to wait until the start of the next
-        * grace period to process their callbacks.  This also avoids
-        * some nasty RCU grace-period initialization races by forcing
-        * the end of the current grace period to be completely recorded in
-        * all of the rcu_node structures before the beginning of the next
-        * grace period is recorded in any of the rcu_node structures.
+        * Propagate new ->gp_seq value to rcu_node structures so that
+        * other CPUs don't have to wait until the start of the next grace
+        * period to process their callbacks.  This also avoids some nasty
+        * RCU grace-period initialization races by forcing the end of
+        * the current grace period to be completely recorded in all of
+        * the rcu_node structures before the beginning of the next grace
+        * period is recorded in any of the rcu_node structures.
         */
+       new_gp_seq = rsp->gp_seq;
+       rcu_seq_end(&new_gp_seq);
        rcu_for_each_node_breadth_first(rsp, rnp) {
                raw_spin_lock_irq_rcu_node(rnp);
-               WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp));
+               if (WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp)))
+                       dump_blkd_tasks(rsp, rnp, 10);
                WARN_ON_ONCE(rnp->qsmask);
-               WRITE_ONCE(rnp->completed, rsp->gpnum);
+               WRITE_ONCE(rnp->gp_seq, new_gp_seq);
                rdp = this_cpu_ptr(rsp->rda);
                if (rnp == rdp->mynode)
                        needgp = __note_gp_changes(rsp, rnp, rdp) || needgp;
@@ -2100,26 +2117,28 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
                rcu_gp_slow(rsp, gp_cleanup_delay);
        }
        rnp = rcu_get_root(rsp);
-       raw_spin_lock_irq_rcu_node(rnp); /* Order GP before ->completed update. */
+       raw_spin_lock_irq_rcu_node(rnp); /* GP before rsp->gp_seq update. */
 
        /* Declare grace period done. */
-       WRITE_ONCE(rsp->completed, rsp->gpnum);
-       trace_rcu_grace_period(rsp->name, rsp->completed, TPS("end"));
+       rcu_seq_end(&rsp->gp_seq);
+       trace_rcu_grace_period(rsp->name, rsp->gp_seq, TPS("end"));
        rsp->gp_state = RCU_GP_IDLE;
        /* Check for GP requests since above loop. */
        rdp = this_cpu_ptr(rsp->rda);
-       if (need_any_future_gp(rnp)) {
-               trace_rcu_this_gp(rnp, rdp, rsp->completed - 1,
+       if (!needgp && ULONG_CMP_LT(rnp->gp_seq, rnp->gp_seq_needed)) {
+               trace_rcu_this_gp(rnp, rdp, rnp->gp_seq_needed,
                                  TPS("CleanupMore"));
                needgp = true;
        }
        /* Advance CBs to reduce false positives below. */
        if (!rcu_accelerate_cbs(rsp, rnp, rdp) && needgp) {
                WRITE_ONCE(rsp->gp_flags, RCU_GP_FLAG_INIT);
-               trace_rcu_grace_period(rsp->name, READ_ONCE(rsp->gpnum),
+               rsp->gp_req_activity = jiffies;
+               trace_rcu_grace_period(rsp->name, READ_ONCE(rsp->gp_seq),
                                       TPS("newreq"));
+       } else {
+               WRITE_ONCE(rsp->gp_flags, rsp->gp_flags & RCU_GP_FLAG_INIT);
        }
-       WRITE_ONCE(rsp->gp_flags, rsp->gp_flags & RCU_GP_FLAG_INIT);
        raw_spin_unlock_irq_rcu_node(rnp);
 }
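
The new_gp_seq dance in the cleanup path above is worth spelling out: the completed value is computed on a local copy and pushed to every rcu_node before rsp->gp_seq itself advances, so no CPU can observe the next grace period beginning before the old one's end is fully recorded. In bit terms (condensed from the hunk above, locking elided):

	new_gp_seq = rsp->gp_seq;	/* e.g. 0b1001: GP in progress.     */
	rcu_seq_end(&new_gp_seq);	/* local copy -> 0b1100: completed. */
	rcu_for_each_node_breadth_first(rsp, rnp)
		WRITE_ONCE(rnp->gp_seq, new_gp_seq); /* all nodes see the end... */
	rcu_seq_end(&rsp->gp_seq);	/* ...before rcu_state records it.  */
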
 
@@ -2141,7 +2160,7 @@ static int __noreturn rcu_gp_kthread(void *arg)
                /* Handle grace-period start. */
                for (;;) {
                        trace_rcu_grace_period(rsp->name,
-                                              READ_ONCE(rsp->gpnum),
+                                              READ_ONCE(rsp->gp_seq),
                                               TPS("reqwait"));
                        rsp->gp_state = RCU_GP_WAIT_GPS;
                        swait_event_idle_exclusive(rsp->gp_wq, READ_ONCE(rsp->gp_flags) &
@@ -2154,17 +2173,13 @@ static int __noreturn rcu_gp_kthread(void *arg)
                        WRITE_ONCE(rsp->gp_activity, jiffies);
                        WARN_ON(signal_pending(current));
                        trace_rcu_grace_period(rsp->name,
-                                              READ_ONCE(rsp->gpnum),
+                                              READ_ONCE(rsp->gp_seq),
                                               TPS("reqwaitsig"));
                }
 
                /* Handle quiescent-state forcing. */
                first_gp_fqs = true;
                j = jiffies_till_first_fqs;
-               if (j > HZ) {
-                       j = HZ;
-                       jiffies_till_first_fqs = HZ;
-               }
                ret = 0;
                for (;;) {
                        if (!ret) {
@@ -2173,7 +2188,7 @@ static int __noreturn rcu_gp_kthread(void *arg)
                                           jiffies + 3 * j);
                        }
                        trace_rcu_grace_period(rsp->name,
-                                              READ_ONCE(rsp->gpnum),
+                                              READ_ONCE(rsp->gp_seq),
                                               TPS("fqswait"));
                        rsp->gp_state = RCU_GP_WAIT_FQS;
                        ret = swait_event_idle_timeout_exclusive(rsp->gp_wq,
@@ -2188,31 +2203,24 @@ static int __noreturn rcu_gp_kthread(void *arg)
                        if (ULONG_CMP_GE(jiffies, rsp->jiffies_force_qs) ||
                            (gf & RCU_GP_FLAG_FQS)) {
                                trace_rcu_grace_period(rsp->name,
-                                                      READ_ONCE(rsp->gpnum),
+                                                      READ_ONCE(rsp->gp_seq),
                                                       TPS("fqsstart"));
                                rcu_gp_fqs(rsp, first_gp_fqs);
                                first_gp_fqs = false;
                                trace_rcu_grace_period(rsp->name,
-                                                      READ_ONCE(rsp->gpnum),
+                                                      READ_ONCE(rsp->gp_seq),
                                                       TPS("fqsend"));
                                cond_resched_tasks_rcu_qs();
                                WRITE_ONCE(rsp->gp_activity, jiffies);
                                ret = 0; /* Force full wait till next FQS. */
                                j = jiffies_till_next_fqs;
-                               if (j > HZ) {
-                                       j = HZ;
-                                       jiffies_till_next_fqs = HZ;
-                               } else if (j < 1) {
-                                       j = 1;
-                                       jiffies_till_next_fqs = 1;
-                               }
                        } else {
                                /* Deal with stray signal. */
                                cond_resched_tasks_rcu_qs();
                                WRITE_ONCE(rsp->gp_activity, jiffies);
                                WARN_ON(signal_pending(current));
                                trace_rcu_grace_period(rsp->name,
-                                                      READ_ONCE(rsp->gpnum),
+                                                      READ_ONCE(rsp->gp_seq),
                                                       TPS("fqswaitsig"));
                                ret = 1; /* Keep old FQS timing. */
                                j = jiffies;
@@ -2256,8 +2264,12 @@ static void rcu_report_qs_rsp(struct rcu_state *rsp, unsigned long flags)
  * must be represented by the same rcu_node structure (which need not be a
  * leaf rcu_node structure, though it often will be).  The gps parameter
  * is the grace-period snapshot, which means that the quiescent states
- * are valid only if rnp->gpnum is equal to gps.  That structure's lock
+ * are valid only if rnp->gp_seq is equal to gps.  That structure's lock
  * must be held upon entry, and it is released before return.
+ *
+ * As a special case, if mask is zero, the bit-already-cleared check is
+ * disabled.  This allows propagating quiescent state due to resumed tasks
+ * during grace-period initialization.
  */
 static void
 rcu_report_qs_rnp(unsigned long mask, struct rcu_state *rsp,
@@ -2271,7 +2283,7 @@ rcu_report_qs_rnp(unsigned long mask, struct rcu_state *rsp,
 
        /* Walk up the rcu_node hierarchy. */
        for (;;) {
-               if (!(rnp->qsmask & mask) || rnp->gpnum != gps) {
+               if ((!(rnp->qsmask & mask) && mask) || rnp->gp_seq != gps) {
 
                        /*
                         * Our bit has already been cleared, or the
@@ -2284,7 +2296,7 @@ rcu_report_qs_rnp(unsigned long mask, struct rcu_state *rsp,
                WARN_ON_ONCE(!rcu_is_leaf_node(rnp) &&
                             rcu_preempt_blocked_readers_cgp(rnp));
                rnp->qsmask &= ~mask;
-               trace_rcu_quiescent_state_report(rsp->name, rnp->gpnum,
+               trace_rcu_quiescent_state_report(rsp->name, rnp->gp_seq,
                                                 mask, rnp->qsmask, rnp->level,
                                                 rnp->grplo, rnp->grphi,
                                                 !!rnp->gp_tasks);
@@ -2294,6 +2306,7 @@ rcu_report_qs_rnp(unsigned long mask, struct rcu_state *rsp,
                        raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
                        return;
                }
+               rnp->completedqs = rnp->gp_seq;
                mask = rnp->grpmask;
                if (rnp->parent == NULL) {
 
@@ -2323,8 +2336,9 @@ rcu_report_qs_rnp(unsigned long mask, struct rcu_state *rsp,
  * irqs disabled, and this lock is released upon return, but irqs remain
  * disabled.
  */
-static void rcu_report_unblock_qs_rnp(struct rcu_state *rsp,
-                                     struct rcu_node *rnp, unsigned long flags)
+static void __maybe_unused
+rcu_report_unblock_qs_rnp(struct rcu_state *rsp,
+                         struct rcu_node *rnp, unsigned long flags)
        __releases(rnp->lock)
 {
        unsigned long gps;
@@ -2332,12 +2346,15 @@ static void rcu_report_unblock_qs_rnp(struct rcu_state *rsp,
        struct rcu_node *rnp_p;
 
        raw_lockdep_assert_held_rcu_node(rnp);
-       if (rcu_state_p == &rcu_sched_state || rsp != rcu_state_p ||
-           rnp->qsmask != 0 || rcu_preempt_blocked_readers_cgp(rnp)) {
+       if (WARN_ON_ONCE(rcu_state_p == &rcu_sched_state) ||
+           WARN_ON_ONCE(rsp != rcu_state_p) ||
+           WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp)) ||
+           rnp->qsmask != 0) {
                raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
                return;  /* Still need more quiescent states! */
        }
 
+       rnp->completedqs = rnp->gp_seq;
        rnp_p = rnp->parent;
        if (rnp_p == NULL) {
                /*
@@ -2348,8 +2365,8 @@ static void rcu_report_unblock_qs_rnp(struct rcu_state *rsp,
                return;
        }
 
-       /* Report up the rest of the hierarchy, tracking current ->gpnum. */
-       gps = rnp->gpnum;
+       /* Report up the rest of the hierarchy, tracking current ->gp_seq. */
+       gps = rnp->gp_seq;
        mask = rnp->grpmask;
        raw_spin_unlock_rcu_node(rnp);  /* irqs remain disabled. */
        raw_spin_lock_rcu_node(rnp_p);  /* irqs already disabled. */
@@ -2370,8 +2387,8 @@ rcu_report_qs_rdp(int cpu, struct rcu_state *rsp, struct rcu_data *rdp)
 
        rnp = rdp->mynode;
        raw_spin_lock_irqsave_rcu_node(rnp, flags);
-       if (rdp->cpu_no_qs.b.norm || rdp->gpnum != rnp->gpnum ||
-           rnp->completed == rnp->gpnum || rdp->gpwrap) {
+       if (rdp->cpu_no_qs.b.norm || rdp->gp_seq != rnp->gp_seq ||
+           rdp->gpwrap) {
 
                /*
                 * The grace period in which this quiescent state was
@@ -2396,7 +2413,7 @@ rcu_report_qs_rdp(int cpu, struct rcu_state *rsp, struct rcu_data *rdp)
                 */
                needwake = rcu_accelerate_cbs(rsp, rnp, rdp);
 
-               rcu_report_qs_rnp(mask, rsp, rnp, rnp->gpnum, flags);
+               rcu_report_qs_rnp(mask, rsp, rnp, rnp->gp_seq, flags);
                /* ^^^ Released rnp->lock */
                if (needwake)
                        rcu_gp_kthread_wake(rsp);
@@ -2441,17 +2458,16 @@ rcu_check_quiescent_state(struct rcu_state *rsp, struct rcu_data *rdp)
  */
 static void rcu_cleanup_dying_cpu(struct rcu_state *rsp)
 {
-       RCU_TRACE(unsigned long mask;)
+       RCU_TRACE(bool blkd;)
        RCU_TRACE(struct rcu_data *rdp = this_cpu_ptr(rsp->rda);)
        RCU_TRACE(struct rcu_node *rnp = rdp->mynode;)
 
        if (!IS_ENABLED(CONFIG_HOTPLUG_CPU))
                return;
 
-       RCU_TRACE(mask = rdp->grpmask;)
-       trace_rcu_grace_period(rsp->name,
-                              rnp->gpnum + 1 - !!(rnp->qsmask & mask),
-                              TPS("cpuofl"));
+       RCU_TRACE(blkd = !!(rnp->qsmask & rdp->grpmask);)
+       trace_rcu_grace_period(rsp->name, rnp->gp_seq,
+                              blkd ? TPS("cpuofl") : TPS("cpuofl-bgp"));
 }
 
 /*
@@ -2463,7 +2479,7 @@ static void rcu_cleanup_dying_cpu(struct rcu_state *rsp)
  * This function therefore goes up the tree of rcu_node structures,
  * clearing the corresponding bits in the ->qsmaskinit fields.  Note that
  * the leaf rcu_node structure's ->qsmaskinit field has already been
- * updated
+ * updated.
  *
  * This function does check that the specified rcu_node structure has
  * all CPUs offline and no blocked tasks, so it is OK to invoke it
@@ -2476,9 +2492,10 @@ static void rcu_cleanup_dead_rnp(struct rcu_node *rnp_leaf)
        long mask;
        struct rcu_node *rnp = rnp_leaf;
 
-       raw_lockdep_assert_held_rcu_node(rnp);
+       raw_lockdep_assert_held_rcu_node(rnp_leaf);
        if (!IS_ENABLED(CONFIG_HOTPLUG_CPU) ||
-           rnp->qsmaskinit || rcu_preempt_has_tasks(rnp))
+           WARN_ON_ONCE(rnp_leaf->qsmaskinit) ||
+           WARN_ON_ONCE(rcu_preempt_has_tasks(rnp_leaf)))
                return;
        for (;;) {
                mask = rnp->grpmask;
@@ -2487,7 +2504,8 @@ static void rcu_cleanup_dead_rnp(struct rcu_node *rnp_leaf)
                        break;
                raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */
                rnp->qsmaskinit &= ~mask;
-               rnp->qsmask &= ~mask;
+               /* Between grace periods, so better already be zero! */
+               WARN_ON_ONCE(rnp->qsmask);
                if (rnp->qsmaskinit) {
                        raw_spin_unlock_rcu_node(rnp);
                        /* irqs remain disabled. */
@@ -2630,6 +2648,7 @@ void rcu_check_callbacks(int user)
 
                rcu_sched_qs();
                rcu_bh_qs();
+               rcu_note_voluntary_context_switch(current);
 
        } else if (!in_softirq()) {
 
@@ -2645,8 +2664,7 @@ void rcu_check_callbacks(int user)
        rcu_preempt_check_callbacks();
        if (rcu_pending())
                invoke_rcu_core();
-       if (user)
-               rcu_note_voluntary_context_switch(current);
+
        trace_rcu_utilization(TPS("End scheduler-tick"));
 }
 
@@ -2681,17 +2699,8 @@ static void force_qs_rnp(struct rcu_state *rsp, int (*f)(struct rcu_data *rsp))
                                /* rcu_initiate_boost() releases rnp->lock */
                                continue;
                        }
-                       if (rnp->parent &&
-                           (rnp->parent->qsmask & rnp->grpmask)) {
-                               /*
-                                * Race between grace-period
-                                * initialization and task exiting RCU
-                                * read-side critical section: Report.
-                                */
-                               rcu_report_unblock_qs_rnp(rsp, rnp, flags);
-                               /* rcu_report_unblock_qs_rnp() rlses ->lock */
-                               continue;
-                       }
+                       raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
+                       continue;
                }
                for_each_leaf_node_possible_cpu(rnp, cpu) {
                        unsigned long bit = leaf_node_cpu_bit(rnp, cpu);
@@ -2701,8 +2710,8 @@ static void force_qs_rnp(struct rcu_state *rsp, int (*f)(struct rcu_data *rsp))
                        }
                }
                if (mask != 0) {
-                       /* Idle/offline CPUs, report (releases rnp->lock. */
-                       rcu_report_qs_rnp(mask, rsp, rnp, rnp->gpnum, flags);
+                       /* Idle/offline CPUs, report (releases rnp->lock). */
+                       rcu_report_qs_rnp(mask, rsp, rnp, rnp->gp_seq, flags);
                } else {
                        /* Nothing to do here, so just drop the lock. */
                        raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
@@ -2746,6 +2755,65 @@ static void force_quiescent_state(struct rcu_state *rsp)
        rcu_gp_kthread_wake(rsp);
 }
 
+/*
+ * This function checks for grace-period requests that fail to motivate
+ * RCU to come out of its idle mode.
+ */
+static void
+rcu_check_gp_start_stall(struct rcu_state *rsp, struct rcu_node *rnp,
+                        struct rcu_data *rdp)
+{
+       const unsigned long gpssdelay = rcu_jiffies_till_stall_check() * HZ;
+       unsigned long flags;
+       unsigned long j;
+       struct rcu_node *rnp_root = rcu_get_root(rsp);
+       static atomic_t warned = ATOMIC_INIT(0);
+
+       if (!IS_ENABLED(CONFIG_PROVE_RCU) || rcu_gp_in_progress(rsp) ||
+           ULONG_CMP_GE(rnp_root->gp_seq, rnp_root->gp_seq_needed))
+               return;
+       j = jiffies; /* Expensive access, and in common case don't get here. */
+       if (time_before(j, READ_ONCE(rsp->gp_req_activity) + gpssdelay) ||
+           time_before(j, READ_ONCE(rsp->gp_activity) + gpssdelay) ||
+           atomic_read(&warned))
+               return;
+
+       raw_spin_lock_irqsave_rcu_node(rnp, flags);
+       j = jiffies;
+       if (rcu_gp_in_progress(rsp) ||
+           ULONG_CMP_GE(rnp_root->gp_seq, rnp_root->gp_seq_needed) ||
+           time_before(j, READ_ONCE(rsp->gp_req_activity) + gpssdelay) ||
+           time_before(j, READ_ONCE(rsp->gp_activity) + gpssdelay) ||
+           atomic_read(&warned)) {
+               raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
+               return;
+       }
+       /* Hold onto the leaf lock to make others see warned==1. */
+
+       if (rnp_root != rnp)
+               raw_spin_lock_rcu_node(rnp_root); /* irqs already disabled. */
+       j = jiffies;
+       if (rcu_gp_in_progress(rsp) ||
+           ULONG_CMP_GE(rnp_root->gp_seq, rnp_root->gp_seq_needed) ||
+           time_before(j, rsp->gp_req_activity + gpssdelay) ||
+           time_before(j, rsp->gp_activity + gpssdelay) ||
+           atomic_xchg(&warned, 1)) {
+               raw_spin_unlock_rcu_node(rnp_root); /* irqs remain disabled. */
+               raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
+               return;
+       }
+       pr_alert("%s: g%ld->%ld gar:%lu ga:%lu f%#x gs:%d %s->state:%#lx\n",
+                __func__, (long)READ_ONCE(rsp->gp_seq),
+                (long)READ_ONCE(rnp_root->gp_seq_needed),
+                j - rsp->gp_req_activity, j - rsp->gp_activity,
+                rsp->gp_flags, rsp->gp_state, rsp->name,
+                rsp->gp_kthread ? rsp->gp_kthread->state : 0x1ffffL);
+       WARN_ON(1);
+       if (rnp_root != rnp)
+               raw_spin_unlock_rcu_node(rnp_root);
+       raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
+}
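
rcu_check_gp_start_stall() above is a three-stage filter: a cheap unlocked test, a recheck under the leaf ->lock, and a final recheck under the root ->lock, with atomic_xchg() as the last gate so at most one CPU ever emits the diagnostic. The gating idiom, reduced to a sketch (hypothetical names):

	static atomic_t warned_sketch;

	static bool claim_one_shot_warning(bool (*still_stalled)(void))
	{
		if (atomic_read(&warned_sketch) || !still_stalled())
			return false;	/* Cheap unlocked filter. */
		/* ...take locks; conditions may have changed meanwhile... */
		if (!still_stalled() || atomic_xchg(&warned_sketch, 1))
			return false;	/* State moved on, or another CPU won. */
		return true;		/* Exactly one caller ever gets here. */
	}
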
+
 /*
  * This does the RCU core processing work for the specified rcu_state
  * and rcu_data structures.  This may be called only from the CPU to
@@ -2755,9 +2823,8 @@ static void
 __rcu_process_callbacks(struct rcu_state *rsp)
 {
        unsigned long flags;
-       bool needwake;
        struct rcu_data *rdp = raw_cpu_ptr(rsp->rda);
-       struct rcu_node *rnp;
+       struct rcu_node *rnp = rdp->mynode;
 
        WARN_ON_ONCE(!rdp->beenonline);
 
@@ -2768,18 +2835,13 @@ __rcu_process_callbacks(struct rcu_state *rsp)
        if (!rcu_gp_in_progress(rsp) &&
            rcu_segcblist_is_enabled(&rdp->cblist)) {
                local_irq_save(flags);
-               if (rcu_segcblist_restempty(&rdp->cblist, RCU_NEXT_READY_TAIL)) {
-                       local_irq_restore(flags);
-               } else {
-                       rnp = rdp->mynode;
-                       raw_spin_lock_rcu_node(rnp); /* irqs disabled. */
-                       needwake = rcu_accelerate_cbs(rsp, rnp, rdp);
-                       raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
-                       if (needwake)
-                               rcu_gp_kthread_wake(rsp);
-               }
+               if (!rcu_segcblist_restempty(&rdp->cblist, RCU_NEXT_READY_TAIL))
+                       rcu_accelerate_cbs_unlocked(rsp, rnp, rdp);
+               local_irq_restore(flags);
        }
 
+       rcu_check_gp_start_stall(rsp, rnp, rdp);
+
        /* If there are callbacks ready, invoke them. */
        if (rcu_segcblist_ready_cbs(&rdp->cblist))
                invoke_rcu_callbacks(rsp, rdp);
@@ -2833,8 +2895,6 @@ static void invoke_rcu_core(void)
 static void __call_rcu_core(struct rcu_state *rsp, struct rcu_data *rdp,
                            struct rcu_head *head, unsigned long flags)
 {
-       bool needwake;
-
        /*
         * If called from an extended quiescent state, invoke the RCU
         * core in order to force a re-evaluation of RCU's idleness.
@@ -2861,13 +2921,7 @@ static void __call_rcu_core(struct rcu_state *rsp, struct rcu_data *rdp,
 
                /* Start a new grace period if one not already started. */
                if (!rcu_gp_in_progress(rsp)) {
-                       struct rcu_node *rnp = rdp->mynode;
-
-                       raw_spin_lock_rcu_node(rnp);
-                       needwake = rcu_accelerate_cbs(rsp, rnp, rdp);
-                       raw_spin_unlock_rcu_node(rnp);
-                       if (needwake)
-                               rcu_gp_kthread_wake(rsp);
+                       rcu_accelerate_cbs_unlocked(rsp, rdp->mynode, rdp);
                } else {
                        /* Give the grace period a kick. */
                        rdp->blimit = LONG_MAX;
@@ -3037,7 +3091,7 @@ EXPORT_SYMBOL_GPL(kfree_call_rcu);
  * when there was in fact only one the whole time, as this just adds
  * some overhead: RCU still operates correctly.
  */
-static inline int rcu_blocking_is_gp(void)
+static int rcu_blocking_is_gp(void)
 {
        int ret;
 
@@ -3136,16 +3190,10 @@ unsigned long get_state_synchronize_rcu(void)
 {
        /*
         * Any prior manipulation of RCU-protected data must happen
-        * before the load from ->gpnum.
+        * before the load from ->gp_seq.
         */
        smp_mb();  /* ^^^ */
-
-       /*
-        * Make sure this load happens before the purportedly
-        * time-consuming work between get_state_synchronize_rcu()
-        * and cond_synchronize_rcu().
-        */
-       return smp_load_acquire(&rcu_state_p->gpnum);
+       return rcu_seq_snap(&rcu_state_p->gp_seq);
 }
 EXPORT_SYMBOL_GPL(get_state_synchronize_rcu);
 
@@ -3165,15 +3213,10 @@ EXPORT_SYMBOL_GPL(get_state_synchronize_rcu);
  */
 void cond_synchronize_rcu(unsigned long oldstate)
 {
-       unsigned long newstate;
-
-       /*
-        * Ensure that this load happens before any RCU-destructive
-        * actions the caller might carry out after we return.
-        */
-       newstate = smp_load_acquire(&rcu_state_p->completed);
-       if (ULONG_CMP_GE(oldstate, newstate))
+       if (!rcu_seq_done(&rcu_state_p->gp_seq, oldstate))
                synchronize_rcu();
+       else
+               smp_mb(); /* Ensure GP ends before subsequent accesses. */
 }
 EXPORT_SYMBOL_GPL(cond_synchronize_rcu);
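
get_state_synchronize_rcu() and cond_synchronize_rcu() remain a polling pair after the conversion: the snapshot is now rcu_seq_snap() of ->gp_seq and the completion test is rcu_seq_done(). A hedged usage sketch (struct foo and the helpers around it are hypothetical; the object must already be unreachable to new readers before the snapshot is taken):

	static void deferred_free_example(struct foo *unlinked)
	{
		unsigned long gp_state;

		gp_state = get_state_synchronize_rcu(); /* Snapshot ->gp_seq. */
		do_other_slow_work();			/* A GP may elapse here. */
		cond_synchronize_rcu(gp_state);		/* No-op if one already
							 * did, else it blocks in
							 * synchronize_rcu(). */
		kfree(unlinked);  /* No pre-snapshot reader can still see it. */
	}
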
 
@@ -3188,16 +3231,10 @@ unsigned long get_state_synchronize_sched(void)
 {
        /*
         * Any prior manipulation of RCU-protected data must happen
-        * before the load from ->gpnum.
+        * before the load from ->gp_seq.
         */
        smp_mb();  /* ^^^ */
-
-       /*
-        * Make sure this load happens before the purportedly
-        * time-consuming work between get_state_synchronize_sched()
-        * and cond_synchronize_sched().
-        */
-       return smp_load_acquire(&rcu_sched_state.gpnum);
+       return rcu_seq_snap(&rcu_sched_state.gp_seq);
 }
 EXPORT_SYMBOL_GPL(get_state_synchronize_sched);
 
@@ -3217,15 +3254,10 @@ EXPORT_SYMBOL_GPL(get_state_synchronize_sched);
  */
 void cond_synchronize_sched(unsigned long oldstate)
 {
-       unsigned long newstate;
-
-       /*
-        * Ensure that this load happens before any RCU-destructive
-        * actions the caller might carry out after we return.
-        */
-       newstate = smp_load_acquire(&rcu_sched_state.completed);
-       if (ULONG_CMP_GE(oldstate, newstate))
+       if (!rcu_seq_done(&rcu_sched_state.gp_seq, oldstate))
                synchronize_sched();
+       else
+               smp_mb(); /* Ensure GP ends before subsequent accesses. */
 }
 EXPORT_SYMBOL_GPL(cond_synchronize_sched);
 
@@ -3261,12 +3293,8 @@ static int __rcu_pending(struct rcu_state *rsp, struct rcu_data *rdp)
            !rcu_segcblist_restempty(&rdp->cblist, RCU_NEXT_READY_TAIL))
                return 1;
 
-       /* Has another RCU grace period completed?  */
-       if (READ_ONCE(rnp->completed) != rdp->completed) /* outside lock */
-               return 1;
-
-       /* Has a new RCU grace period started? */
-       if (READ_ONCE(rnp->gpnum) != rdp->gpnum ||
+       /* Has an RCU grace period completed or started? */
+       if (rcu_seq_current(&rnp->gp_seq) != rdp->gp_seq ||
            unlikely(READ_ONCE(rdp->gpwrap))) /* outside lock */
                return 1;
 
@@ -3298,7 +3326,7 @@ static int rcu_pending(void)
  * non-NULL, store an indication of whether all callbacks are lazy.
  * (If there are no callbacks, all of them are deemed to be lazy.)
  */
-static bool __maybe_unused rcu_cpu_has_callbacks(bool *all_lazy)
+static bool rcu_cpu_has_callbacks(bool *all_lazy)
 {
        bool al = true;
        bool hc = false;
@@ -3484,17 +3512,22 @@ EXPORT_SYMBOL_GPL(rcu_barrier_sched);
 static void rcu_init_new_rnp(struct rcu_node *rnp_leaf)
 {
        long mask;
+       long oldmask;
        struct rcu_node *rnp = rnp_leaf;
 
-       raw_lockdep_assert_held_rcu_node(rnp);
+       raw_lockdep_assert_held_rcu_node(rnp_leaf);
+       WARN_ON_ONCE(rnp->wait_blkd_tasks);
        for (;;) {
                mask = rnp->grpmask;
                rnp = rnp->parent;
                if (rnp == NULL)
                        return;
                raw_spin_lock_rcu_node(rnp); /* Interrupts already disabled. */
+               oldmask = rnp->qsmaskinit;
                rnp->qsmaskinit |= mask;
                raw_spin_unlock_rcu_node(rnp); /* Interrupts remain disabled. */
+               if (oldmask)
+                       return;
        }
 }
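
The new oldmask check lets rcu_init_new_rnp() stop climbing as soon as it reaches an ancestor whose ->qsmaskinit already had bits set: that ancestor is already linked into the tree, and so is everything above it. A hedged standalone model of this propagate-until-already-initialized walk (the field names here are invented for the sketch, not the rcu_node layout):

#include <stdio.h>

struct node {
        struct node *parent;
        unsigned long grpmask;   /* this node's bit in parent->init_mask */
        unsigned long init_mask; /* which children are live */
};

/* Mark 'leaf' live in each ancestor, stopping at the first ancestor
 * that already had a live child: everything above it is set up. */
static void propagate_new_leaf(struct node *leaf)
{
        unsigned long mask, oldmask;
        struct node *n = leaf;

        for (;;) {
                mask = n->grpmask;
                n = n->parent;
                if (!n)
                        return;
                oldmask = n->init_mask;
                n->init_mask |= mask;
                if (oldmask)    /* ancestor already initialized */
                        return;
        }
}

int main(void)
{
        struct node root  = { 0, 0, 0 };
        struct node mid   = { &root, 0x1, 0 };
        struct node leaf1 = { &mid, 0x8, 0 };
        struct node leaf2 = { &mid, 0x10, 0 };

        propagate_new_leaf(&leaf1);  /* climbs all the way to the root */
        propagate_new_leaf(&leaf2);  /* stops early at 'mid' */
        printf("mid=%#lx root=%#lx\n", mid.init_mask, root.init_mask);
        return 0;
}
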
 
@@ -3511,6 +3544,10 @@ rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp)
        rdp->dynticks = &per_cpu(rcu_dynticks, cpu);
        WARN_ON_ONCE(rdp->dynticks->dynticks_nesting != 1);
        WARN_ON_ONCE(rcu_dynticks_in_eqs(rcu_dynticks_snap(rdp->dynticks)));
+       rdp->rcu_ofl_gp_seq = rsp->gp_seq;
+       rdp->rcu_ofl_gp_flags = RCU_GP_CLEANED;
+       rdp->rcu_onl_gp_seq = rsp->gp_seq;
+       rdp->rcu_onl_gp_flags = RCU_GP_CLEANED;
        rdp->cpu = cpu;
        rdp->rsp = rsp;
        rcu_boot_init_nocb_percpu_data(rdp);
@@ -3518,9 +3555,9 @@ rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp)
 
 /*
  * Initialize a CPU's per-CPU RCU data.  Note that only one online or
- * offline event can be happening at a given time.  Note also that we
- * can accept some slop in the rsp->completed access due to the fact
- * that this CPU cannot possibly have any RCU callbacks in flight yet.
+ * offline event can be happening at a given time.  Note also that we can
+ * accept some slop in the rsp->gp_seq access due to the fact that this
+ * CPU cannot possibly have any RCU callbacks in flight yet.
  */
 static void
 rcu_init_percpu_data(int cpu, struct rcu_state *rsp)
@@ -3549,14 +3586,14 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp)
        rnp = rdp->mynode;
        raw_spin_lock_rcu_node(rnp);            /* irqs already disabled. */
        rdp->beenonline = true;  /* We have now been online. */
-       rdp->gpnum = rnp->completed; /* Make CPU later note any new GP. */
-       rdp->completed = rnp->completed;
+       rdp->gp_seq = rnp->gp_seq;
+       rdp->gp_seq_needed = rnp->gp_seq;
        rdp->cpu_no_qs.b.norm = true;
        rdp->rcu_qs_ctr_snap = per_cpu(rcu_dynticks.rcu_qs_ctr, cpu);
        rdp->core_needs_qs = false;
        rdp->rcu_iw_pending = false;
-       rdp->rcu_iw_gpnum = rnp->gpnum - 1;
-       trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("cpuonl"));
+       rdp->rcu_iw_gp_seq = rnp->gp_seq - 1;
+       trace_rcu_grace_period(rsp->name, rdp->gp_seq, TPS("cpuonl"));
        raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 }
 
@@ -3705,7 +3742,15 @@ void rcu_cpu_starting(unsigned int cpu)
                nbits = bitmap_weight(&oldmask, BITS_PER_LONG);
                /* Allow lockless access for expedited grace periods. */
                smp_store_release(&rsp->ncpus, rsp->ncpus + nbits); /* ^^^ */
-               raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
+               rcu_gpnum_ovf(rnp, rdp); /* Offline-induced counter wrap? */
+               rdp->rcu_onl_gp_seq = READ_ONCE(rsp->gp_seq);
+               rdp->rcu_onl_gp_flags = READ_ONCE(rsp->gp_flags);
+               if (rnp->qsmask & mask) { /* RCU waiting on incoming CPU? */
+                       /* Report QS -after- changing ->qsmaskinitnext! */
+                       rcu_report_qs_rnp(mask, rsp, rnp, rnp->gp_seq, flags);
+               } else {
+                       raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
+               }
        }
        smp_mb(); /* Ensure RCU read-side usage follows above initialization. */
 }
@@ -3713,7 +3758,7 @@ void rcu_cpu_starting(unsigned int cpu)
 #ifdef CONFIG_HOTPLUG_CPU
 /*
  * The CPU is exiting the idle loop into the arch_cpu_idle_dead()
- * function.  We now remove it from the rcu_node tree's ->qsmaskinit
+ * function.  We now remove it from the rcu_node tree's ->qsmaskinitnext
  * bit masks.
  */
 static void rcu_cleanup_dying_idle_cpu(int cpu, struct rcu_state *rsp)
@@ -3725,9 +3770,18 @@ static void rcu_cleanup_dying_idle_cpu(int cpu, struct rcu_state *rsp)
 
        /* Remove outgoing CPU from mask in the leaf rcu_node structure. */
        mask = rdp->grpmask;
+       spin_lock(&rsp->ofl_lock);
        raw_spin_lock_irqsave_rcu_node(rnp, flags); /* Enforce GP memory-order guarantee. */
+       rdp->rcu_ofl_gp_seq = READ_ONCE(rsp->gp_seq);
+       rdp->rcu_ofl_gp_flags = READ_ONCE(rsp->gp_flags);
+       if (rnp->qsmask & mask) { /* RCU waiting on outgoing CPU? */
+               /* Report quiescent state -before- changing ->qsmaskinitnext! */
+               rcu_report_qs_rnp(mask, rsp, rnp, rnp->gp_seq, flags);
+               raw_spin_lock_irqsave_rcu_node(rnp, flags);
+       }
        rnp->qsmaskinitnext &= ~mask;
        raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
+       spin_unlock(&rsp->ofl_lock);
 }
 
 /*
@@ -3839,12 +3893,16 @@ static int __init rcu_spawn_gp_kthread(void)
        struct task_struct *t;
 
        /* Force priority into range. */
-       if (IS_ENABLED(CONFIG_RCU_BOOST) && kthread_prio < 1)
+       if (IS_ENABLED(CONFIG_RCU_BOOST) && kthread_prio < 2
+           && IS_BUILTIN(CONFIG_RCU_TORTURE_TEST))
+               kthread_prio = 2;
+       else if (IS_ENABLED(CONFIG_RCU_BOOST) && kthread_prio < 1)
                kthread_prio = 1;
        else if (kthread_prio < 0)
                kthread_prio = 0;
        else if (kthread_prio > 99)
                kthread_prio = 99;
+
        if (kthread_prio != kthread_prio_in)
                pr_alert("rcu_spawn_gp_kthread(): Limited prio to %d from %d\n",
                         kthread_prio, kthread_prio_in);
@@ -3928,8 +3986,9 @@ static void __init rcu_init_one(struct rcu_state *rsp)
                        raw_spin_lock_init(&rnp->fqslock);
                        lockdep_set_class_and_name(&rnp->fqslock,
                                                   &rcu_fqs_class[i], fqs[i]);
-                       rnp->gpnum = rsp->gpnum;
-                       rnp->completed = rsp->completed;
+                       rnp->gp_seq = rsp->gp_seq;
+                       rnp->gp_seq_needed = rsp->gp_seq;
+                       rnp->completedqs = rsp->gp_seq;
                        rnp->qsmask = 0;
                        rnp->qsmaskinit = 0;
                        rnp->grplo = j * cpustride;
@@ -3997,7 +4056,7 @@ static void __init rcu_init_geometry(void)
        if (rcu_fanout_leaf == RCU_FANOUT_LEAF &&
            nr_cpu_ids == NR_CPUS)
                return;
-       pr_info("RCU: Adjusting geometry for rcu_fanout_leaf=%d, nr_cpu_ids=%u\n",
+       pr_info("Adjusting geometry for rcu_fanout_leaf=%d, nr_cpu_ids=%u\n",
                rcu_fanout_leaf, nr_cpu_ids);
 
        /*
index 78e051d..4e74df7 100644 (file)
@@ -81,18 +81,16 @@ struct rcu_node {
        raw_spinlock_t __private lock;  /* Root rcu_node's lock protects */
                                        /*  some rcu_state fields as well as */
                                        /*  following. */
-       unsigned long gpnum;    /* Current grace period for this node. */
-                               /*  This will either be equal to or one */
-                               /*  behind the root rcu_node's gpnum. */
-       unsigned long completed; /* Last GP completed for this node. */
-                               /*  This will either be equal to or one */
-                               /*  behind the root rcu_node's gpnum. */
+       unsigned long gp_seq;   /* Track rsp->rcu_gp_seq. */
+       unsigned long gp_seq_needed; /* Track rsp->rcu_gp_seq_needed. */
+       unsigned long completedqs; /* All QSes done for this node. */
        unsigned long qsmask;   /* CPUs or groups that need to switch in */
                                /*  order for current grace period to proceed.*/
                                /*  In leaf rcu_node, each bit corresponds to */
                                /*  an rcu_data structure, otherwise, each */
                                /*  bit corresponds to a child rcu_node */
                                /*  structure. */
+       unsigned long rcu_gp_init_mask; /* Mask of offline CPUs at GP init. */
        unsigned long qsmaskinit;
                                /* Per-GP initial value for qsmask. */
                                /*  Initialized from ->qsmaskinitnext at the */
@@ -158,7 +156,6 @@ struct rcu_node {
        struct swait_queue_head nocb_gp_wq[2];
                                /* Place for rcu_nocb_kthread() to wait GP. */
 #endif /* #ifdef CONFIG_RCU_NOCB_CPU */
-       u8 need_future_gp[4];   /* Counts of upcoming GP requests. */
        raw_spinlock_t fqslock ____cacheline_internodealigned_in_smp;
 
        spinlock_t exp_lock ____cacheline_internodealigned_in_smp;
@@ -168,22 +165,6 @@ struct rcu_node {
        bool exp_need_flush;    /* Need to flush workitem? */
 } ____cacheline_internodealigned_in_smp;
 
-/* Accessors for ->need_future_gp[] array. */
-#define need_future_gp_mask() \
-       (ARRAY_SIZE(((struct rcu_node *)NULL)->need_future_gp) - 1)
-#define need_future_gp_element(rnp, c) \
-       ((rnp)->need_future_gp[(c) & need_future_gp_mask()])
-#define need_any_future_gp(rnp)                                                \
-({                                                                     \
-       int __i;                                                        \
-       bool __nonzero = false;                                         \
-                                                                       \
-       for (__i = 0; __i < ARRAY_SIZE((rnp)->need_future_gp); __i++)   \
-               __nonzero = __nonzero ||                                \
-                           READ_ONCE((rnp)->need_future_gp[__i]);      \
-       __nonzero;                                                      \
-})
-
 /*
  * Bitmasks in an rcu_node cover the interval [grplo, grphi] of CPU IDs, and
  * are indexed relative to this interval rather than the global CPU ID space.
@@ -206,16 +187,14 @@ union rcu_noqs {
 /* Per-CPU data for read-copy update. */
 struct rcu_data {
        /* 1) quiescent-state and grace-period handling : */
-       unsigned long   completed;      /* Track rsp->completed gp number */
-                                       /*  in order to detect GP end. */
-       unsigned long   gpnum;          /* Highest gp number that this CPU */
-                                       /*  is aware of having started. */
+       unsigned long   gp_seq;         /* Track rsp->rcu_gp_seq counter. */
+       unsigned long   gp_seq_needed;  /* Track rsp->rcu_gp_seq_needed ctr. */
        unsigned long   rcu_qs_ctr_snap;/* Snapshot of rcu_qs_ctr to check */
                                        /*  for rcu_all_qs() invocations. */
        union rcu_noqs  cpu_no_qs;      /* No QSes yet for this CPU. */
        bool            core_needs_qs;  /* Core waits for quiesc state. */
        bool            beenonline;     /* CPU online at least once. */
-       bool            gpwrap;         /* Possible gpnum/completed wrap. */
+       bool            gpwrap;         /* Possible ->gp_seq wrap. */
        struct rcu_node *mynode;        /* This CPU's leaf of hierarchy */
        unsigned long grpmask;          /* Mask to apply to leaf qsmask. */
        unsigned long   ticks_this_gp;  /* The number of scheduling-clock */
@@ -239,7 +218,6 @@ struct rcu_data {
 
        /* 4) reasons this CPU needed to be kicked by force_quiescent_state */
        unsigned long dynticks_fqs;     /* Kicked due to dynticks idle. */
-       unsigned long offline_fqs;      /* Kicked due to being offline. */
        unsigned long cond_resched_completed;
                                        /* Grace period that needs help */
                                        /*  from cond_resched(). */
@@ -278,12 +256,16 @@ struct rcu_data {
                                        /* Leader CPU takes GP-end wakeups. */
 #endif /* #ifdef CONFIG_RCU_NOCB_CPU */
 
-       /* 7) RCU CPU stall data. */
+       /* 7) Diagnostic data, including RCU CPU stall warnings. */
        unsigned int softirq_snap;      /* Snapshot of softirq activity. */
        /* ->rcu_iw* fields protected by leaf rcu_node ->lock. */
        struct irq_work rcu_iw;         /* Check for non-irq activity. */
        bool rcu_iw_pending;            /* Is ->rcu_iw pending? */
-       unsigned long rcu_iw_gpnum;     /* ->gpnum associated with ->rcu_iw. */
+       unsigned long rcu_iw_gp_seq;    /* ->gp_seq associated with ->rcu_iw. */
+       unsigned long rcu_ofl_gp_seq;   /* ->gp_seq at last offline. */
+       short rcu_ofl_gp_flags;         /* ->gp_flags at last offline. */
+       unsigned long rcu_onl_gp_seq;   /* ->gp_seq at last online. */
+       short rcu_onl_gp_flags;         /* ->gp_flags at last online. */
 
        int cpu;
        struct rcu_state *rsp;
@@ -340,8 +322,7 @@ struct rcu_state {
 
        u8      boost ____cacheline_internodealigned_in_smp;
                                                /* Subject to priority boost. */
-       unsigned long gpnum;                    /* Current gp number. */
-       unsigned long completed;                /* # of last completed gp. */
+       unsigned long gp_seq;                   /* Grace-period sequence #. */
        struct task_struct *gp_kthread;         /* Task for grace periods. */
        struct swait_queue_head gp_wq;          /* Where GP task waits. */
        short gp_flags;                         /* Commands for GP task. */
@@ -373,6 +354,8 @@ struct rcu_state {
                                                /*  but in jiffies. */
        unsigned long gp_activity;              /* Time of last GP kthread */
                                                /*  activity in jiffies. */
+       unsigned long gp_req_activity;          /* Time of last GP request */
+                                               /*  in jiffies. */
        unsigned long jiffies_stall;            /* Time at which to check */
                                                /*  for CPU stalls. */
        unsigned long jiffies_resched;          /* Time at which to resched */
@@ -384,6 +367,10 @@ struct rcu_state {
        const char *name;                       /* Name of structure. */
        char abbr;                              /* Abbreviated name. */
        struct list_head flavors;               /* List of RCU flavors. */
+
+       spinlock_t ofl_lock ____cacheline_internodealigned_in_smp;
+                                               /* Synchronize offline with */
+                                               /*  GP pre-initialization. */
 };
 
 /* Values for rcu_state structure's gp_flags field. */
@@ -394,16 +381,20 @@ struct rcu_state {
 #define RCU_GP_IDLE     0      /* Initial state and no GP in progress. */
 #define RCU_GP_WAIT_GPS  1     /* Wait for grace-period start. */
 #define RCU_GP_DONE_GPS  2     /* Wait done for grace-period start. */
-#define RCU_GP_WAIT_FQS  3     /* Wait for force-quiescent-state time. */
-#define RCU_GP_DOING_FQS 4     /* Wait done for force-quiescent-state time. */
-#define RCU_GP_CLEANUP   5     /* Grace-period cleanup started. */
-#define RCU_GP_CLEANED   6     /* Grace-period cleanup complete. */
+#define RCU_GP_ONOFF     3     /* Grace-period initialization hotplug. */
+#define RCU_GP_INIT      4     /* Grace-period initialization. */
+#define RCU_GP_WAIT_FQS  5     /* Wait for force-quiescent-state time. */
+#define RCU_GP_DOING_FQS 6     /* Wait done for force-quiescent-state time. */
+#define RCU_GP_CLEANUP   7     /* Grace-period cleanup started. */
+#define RCU_GP_CLEANED   8     /* Grace-period cleanup complete. */
 
 #ifndef RCU_TREE_NONCORE
 static const char * const gp_state_names[] = {
        "RCU_GP_IDLE",
        "RCU_GP_WAIT_GPS",
        "RCU_GP_DONE_GPS",
+       "RCU_GP_ONOFF",
+       "RCU_GP_INIT",
        "RCU_GP_WAIT_FQS",
        "RCU_GP_DOING_FQS",
        "RCU_GP_CLEANUP",
@@ -449,10 +440,13 @@ static bool rcu_preempt_has_tasks(struct rcu_node *rnp);
 static void rcu_print_detail_task_stall(struct rcu_state *rsp);
 static int rcu_print_task_stall(struct rcu_node *rnp);
 static int rcu_print_task_exp_stall(struct rcu_node *rnp);
-static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp);
+static void rcu_preempt_check_blocked_tasks(struct rcu_state *rsp,
+                                           struct rcu_node *rnp);
 static void rcu_preempt_check_callbacks(void);
 void call_rcu(struct rcu_head *head, rcu_callback_t func);
 static void __init __rcu_init_preempt(void);
+static void dump_blkd_tasks(struct rcu_state *rsp, struct rcu_node *rnp,
+                           int ncheck);
 static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags);
 static void rcu_preempt_boost_start_gp(struct rcu_node *rnp);
 static void invoke_rcu_callbacks_kthread(void);
@@ -489,7 +483,6 @@ static void __init rcu_spawn_nocb_kthreads(void);
 #ifdef CONFIG_RCU_NOCB_CPU
 static void __init rcu_organize_nocb_kthreads(struct rcu_state *rsp);
 #endif /* #ifdef CONFIG_RCU_NOCB_CPU */
-static void __maybe_unused rcu_kick_nohz_cpu(int cpu);
 static bool init_nocb_callback_list(struct rcu_data *rdp);
 static void rcu_bind_gp_kthread(void);
 static bool rcu_nohz_full_cpu(struct rcu_state *rsp);
index d428cc1..0b2c2ad 100644 (file)
@@ -472,6 +472,7 @@ retry_ipi:
 static void sync_rcu_exp_select_cpus(struct rcu_state *rsp,
                                     smp_call_func_t func)
 {
+       int cpu;
        struct rcu_node *rnp;
 
        trace_rcu_exp_grace_period(rsp->name, rcu_exp_gp_seq_endval(rsp), TPS("reset"));
@@ -486,13 +487,20 @@ static void sync_rcu_exp_select_cpus(struct rcu_state *rsp,
                rnp->rew.rew_func = func;
                rnp->rew.rew_rsp = rsp;
                if (!READ_ONCE(rcu_par_gp_wq) ||
-                   rcu_scheduler_active != RCU_SCHEDULER_RUNNING) {
-                       /* No workqueues yet. */
+                   rcu_scheduler_active != RCU_SCHEDULER_RUNNING ||
+                   rcu_is_last_leaf_node(rsp, rnp)) {
+                       /* No workqueues yet or last leaf, do direct call. */
                        sync_rcu_exp_select_node_cpus(&rnp->rew.rew_work);
                        continue;
                }
                INIT_WORK(&rnp->rew.rew_work, sync_rcu_exp_select_node_cpus);
-               queue_work_on(rnp->grplo, rcu_par_gp_wq, &rnp->rew.rew_work);
+               preempt_disable();
+               cpu = cpumask_next(rnp->grplo - 1, cpu_online_mask);
+               /* If all offline, queue the work on an unbound CPU. */
+               if (unlikely(cpu > rnp->grphi))
+                       cpu = WORK_CPU_UNBOUND;
+               queue_work_on(cpu, rcu_par_gp_wq, &rnp->rew.rew_work);
+               preempt_enable();
                rnp->exp_need_flush = true;
        }
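
Instead of blindly queueing the per-leaf work on rnp->grplo, which may be offline, the loop now scans the leaf's CPU span for the first online CPU (cpumask_next(grplo - 1, ...) returns the first set bit at or above grplo) and falls back to an unbound worker when the whole leaf is offline. A simplified sketch of that selection, with a plain bitmask standing in for cpu_online_mask:

#include <stdio.h>

#define WORK_CPU_UNBOUND (-1)

/* First set bit >= start in 'mask', or nbits if none. */
static int mask_next(int start, unsigned long mask, int nbits)
{
        int cpu;

        for (cpu = start; cpu < nbits; cpu++)
                if (mask & (1UL << cpu))
                        return cpu;
        return nbits;
}

/* Pick a CPU in the leaf span [grplo, grphi] to queue the work on. */
static int pick_work_cpu(int grplo, int grphi, unsigned long online)
{
        int cpu = mask_next(grplo, online, 8 * (int)sizeof(online));

        if (cpu > grphi)          /* whole leaf offline: unbound worker */
                return WORK_CPU_UNBOUND;
        return cpu;
}

int main(void)
{
        printf("%d\n", pick_work_cpu(0, 3, 1UL << 2)); /* 2 */
        printf("%d\n", pick_work_cpu(0, 3, 1UL << 5)); /* -1 (unbound) */
        return 0;
}
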
 
index ad53d13..a97c20e 100644 (file)
@@ -74,8 +74,8 @@ static void __init rcu_bootup_announce_oddness(void)
                pr_info("\tRCU event tracing is enabled.\n");
        if ((IS_ENABLED(CONFIG_64BIT) && RCU_FANOUT != 64) ||
            (!IS_ENABLED(CONFIG_64BIT) && RCU_FANOUT != 32))
-               pr_info("\tCONFIG_RCU_FANOUT set to non-default value of %d\n",
-                      RCU_FANOUT);
+               pr_info("\tCONFIG_RCU_FANOUT set to non-default value of %d.\n",
+                       RCU_FANOUT);
        if (rcu_fanout_exact)
                pr_info("\tHierarchical RCU autobalancing is disabled.\n");
        if (IS_ENABLED(CONFIG_RCU_FAST_NO_HZ))
@@ -88,11 +88,13 @@ static void __init rcu_bootup_announce_oddness(void)
                pr_info("\tBuild-time adjustment of leaf fanout to %d.\n",
                        RCU_FANOUT_LEAF);
        if (rcu_fanout_leaf != RCU_FANOUT_LEAF)
-               pr_info("\tBoot-time adjustment of leaf fanout to %d.\n", rcu_fanout_leaf);
+               pr_info("\tBoot-time adjustment of leaf fanout to %d.\n",
+                       rcu_fanout_leaf);
        if (nr_cpu_ids != NR_CPUS)
                pr_info("\tRCU restricting CPUs from NR_CPUS=%d to nr_cpu_ids=%u.\n", NR_CPUS, nr_cpu_ids);
 #ifdef CONFIG_RCU_BOOST
-       pr_info("\tRCU priority boosting: priority %d delay %d ms.\n", kthread_prio, CONFIG_RCU_BOOST_DELAY);
+       pr_info("\tRCU priority boosting: priority %d delay %d ms.\n",
+               kthread_prio, CONFIG_RCU_BOOST_DELAY);
 #endif
        if (blimit != DEFAULT_RCU_BLIMIT)
                pr_info("\tBoot-time adjustment of callback invocation limit to %ld.\n", blimit);
@@ -127,6 +129,7 @@ static struct rcu_data __percpu *const rcu_data_p = &rcu_preempt_data;
 
 static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp,
                               bool wake);
+static void rcu_read_unlock_special(struct task_struct *t);
 
 /*
  * Tell them what RCU they are running.
@@ -183,6 +186,9 @@ static void rcu_preempt_ctxt_queue(struct rcu_node *rnp, struct rcu_data *rdp)
        raw_lockdep_assert_held_rcu_node(rnp);
        WARN_ON_ONCE(rdp->mynode != rnp);
        WARN_ON_ONCE(!rcu_is_leaf_node(rnp));
+       /* RCU better not be waiting on newly onlined CPUs! */
+       WARN_ON_ONCE(rnp->qsmaskinitnext & ~rnp->qsmaskinit & rnp->qsmask &
+                    rdp->grpmask);
 
        /*
         * Decide where to queue the newly blocked task.  In theory,
@@ -260,8 +266,10 @@ static void rcu_preempt_ctxt_queue(struct rcu_node *rnp, struct rcu_data *rdp)
         * ->exp_tasks pointers, respectively, to reference the newly
         * blocked tasks.
         */
-       if (!rnp->gp_tasks && (blkd_state & RCU_GP_BLKD))
+       if (!rnp->gp_tasks && (blkd_state & RCU_GP_BLKD)) {
                rnp->gp_tasks = &t->rcu_node_entry;
+               WARN_ON_ONCE(rnp->completedqs == rnp->gp_seq);
+       }
        if (!rnp->exp_tasks && (blkd_state & RCU_EXP_BLKD))
                rnp->exp_tasks = &t->rcu_node_entry;
        WARN_ON_ONCE(!(blkd_state & RCU_GP_BLKD) !=
@@ -286,20 +294,24 @@ static void rcu_preempt_ctxt_queue(struct rcu_node *rnp, struct rcu_data *rdp)
 }
 
 /*
- * Record a preemptible-RCU quiescent state for the specified CPU.  Note
- * that this just means that the task currently running on the CPU is
- * not in a quiescent state.  There might be any number of tasks blocked
- * while in an RCU read-side critical section.
+ * Record a preemptible-RCU quiescent state for the specified CPU.
+ * Note that this does not necessarily mean that the task currently running
+ * on the CPU is in a quiescent state:  Instead, it means that the current
+ * grace period need not wait on any RCU read-side critical section that
+ * starts later on this CPU.  It also means that if the current task is
+ * in an RCU read-side critical section, it has already added itself to
+ * some leaf rcu_node structure's ->blkd_tasks list.  In addition to the
+ * current task, there might be any number of other tasks blocked while
+ * in an RCU read-side critical section.
  *
- * As with the other rcu_*_qs() functions, callers to this function
- * must disable preemption.
+ * Callers to this function must disable preemption.
  */
 static void rcu_preempt_qs(void)
 {
        RCU_LOCKDEP_WARN(preemptible(), "rcu_preempt_qs() invoked with preemption enabled!!!\n");
        if (__this_cpu_read(rcu_data_p->cpu_no_qs.s)) {
                trace_rcu_grace_period(TPS("rcu_preempt"),
-                                      __this_cpu_read(rcu_data_p->gpnum),
+                                      __this_cpu_read(rcu_data_p->gp_seq),
                                       TPS("cpuqs"));
                __this_cpu_write(rcu_data_p->cpu_no_qs.b.norm, false);
                barrier(); /* Coordinate with rcu_preempt_check_callbacks(). */
@@ -348,8 +360,8 @@ static void rcu_preempt_note_context_switch(bool preempt)
                trace_rcu_preempt_task(rdp->rsp->name,
                                       t->pid,
                                       (rnp->qsmask & rdp->grpmask)
-                                      ? rnp->gpnum
-                                      : rnp->gpnum + 1);
+                                      ? rnp->gp_seq
+                                      : rcu_seq_snap(&rnp->gp_seq));
                rcu_preempt_ctxt_queue(rnp, rdp);
        } else if (t->rcu_read_lock_nesting < 0 &&
                   t->rcu_read_unlock_special.s) {
@@ -456,7 +468,7 @@ static bool rcu_preempt_has_tasks(struct rcu_node *rnp)
  * notify RCU core processing or task having blocked during the RCU
  * read-side critical section.
  */
-void rcu_read_unlock_special(struct task_struct *t)
+static void rcu_read_unlock_special(struct task_struct *t)
 {
        bool empty_exp;
        bool empty_norm;
@@ -535,13 +547,15 @@ void rcu_read_unlock_special(struct task_struct *t)
                WARN_ON_ONCE(rnp != t->rcu_blocked_node);
                WARN_ON_ONCE(!rcu_is_leaf_node(rnp));
                empty_norm = !rcu_preempt_blocked_readers_cgp(rnp);
+               WARN_ON_ONCE(rnp->completedqs == rnp->gp_seq &&
+                            (!empty_norm || rnp->qsmask));
                empty_exp = sync_rcu_preempt_exp_done(rnp);
                smp_mb(); /* ensure expedited fastpath sees end of RCU c-s. */
                np = rcu_next_node_entry(t, rnp);
                list_del_init(&t->rcu_node_entry);
                t->rcu_blocked_node = NULL;
                trace_rcu_unlock_preempted_task(TPS("rcu_preempt"),
-                                               rnp->gpnum, t->pid);
+                                               rnp->gp_seq, t->pid);
                if (&t->rcu_node_entry == rnp->gp_tasks)
                        rnp->gp_tasks = np;
                if (&t->rcu_node_entry == rnp->exp_tasks)
@@ -562,7 +576,7 @@ void rcu_read_unlock_special(struct task_struct *t)
                empty_exp_now = sync_rcu_preempt_exp_done(rnp);
                if (!empty_norm && !rcu_preempt_blocked_readers_cgp(rnp)) {
                        trace_rcu_quiescent_state_report(TPS("preempt_rcu"),
-                                                        rnp->gpnum,
+                                                        rnp->gp_seq,
                                                         0, rnp->qsmask,
                                                         rnp->level,
                                                         rnp->grplo,
@@ -686,24 +700,27 @@ static int rcu_print_task_exp_stall(struct rcu_node *rnp)
  * Check that the list of blocked tasks for the newly completed grace
  * period is in fact empty.  It is a serious bug to complete a grace
  * period that still has RCU readers blocked!  This function must be
- * invoked -before- updating this rnp's ->gpnum, and the rnp's ->lock
+ * invoked -before- updating this rnp's ->gp_seq, and the rnp's ->lock
  * must be held by the caller.
  *
  * Also, if there are blocked tasks on the list, they automatically
  * block the newly created grace period, so set up ->gp_tasks accordingly.
  */
-static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp)
+static void
+rcu_preempt_check_blocked_tasks(struct rcu_state *rsp, struct rcu_node *rnp)
 {
        struct task_struct *t;
 
        RCU_LOCKDEP_WARN(preemptible(), "rcu_preempt_check_blocked_tasks() invoked with preemption enabled!!!\n");
-       WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp));
-       if (rcu_preempt_has_tasks(rnp)) {
+       if (WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp)))
+               dump_blkd_tasks(rsp, rnp, 10);
+       if (rcu_preempt_has_tasks(rnp) &&
+           (rnp->qsmaskinit || rnp->wait_blkd_tasks)) {
                rnp->gp_tasks = rnp->blkd_tasks.next;
                t = container_of(rnp->gp_tasks, struct task_struct,
                                 rcu_node_entry);
                trace_rcu_unlock_preempted_task(TPS("rcu_preempt-GPS"),
-                                               rnp->gpnum, t->pid);
+                                               rnp->gp_seq, t->pid);
        }
        WARN_ON_ONCE(rnp->qsmask);
 }
@@ -717,6 +734,7 @@ static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp)
  */
 static void rcu_preempt_check_callbacks(void)
 {
+       struct rcu_state *rsp = &rcu_preempt_state;
        struct task_struct *t = current;
 
        if (t->rcu_read_lock_nesting == 0) {
@@ -725,7 +743,9 @@ static void rcu_preempt_check_callbacks(void)
        }
        if (t->rcu_read_lock_nesting > 0 &&
            __this_cpu_read(rcu_data_p->core_needs_qs) &&
-           __this_cpu_read(rcu_data_p->cpu_no_qs.b.norm))
+           __this_cpu_read(rcu_data_p->cpu_no_qs.b.norm) &&
+           !t->rcu_read_unlock_special.b.need_qs &&
+           time_after(jiffies, rsp->gp_start + HZ))
                t->rcu_read_unlock_special.b.need_qs = true;
 }
 
@@ -841,6 +861,47 @@ void exit_rcu(void)
        __rcu_read_unlock();
 }
 
+/*
+ * Dump the blocked-tasks state, but limit the list dump to the
+ * specified number of elements.
+ */
+static void
+dump_blkd_tasks(struct rcu_state *rsp, struct rcu_node *rnp, int ncheck)
+{
+       int cpu;
+       int i;
+       struct list_head *lhp;
+       bool onl;
+       struct rcu_data *rdp;
+       struct rcu_node *rnp1;
+
+       raw_lockdep_assert_held_rcu_node(rnp);
+       pr_info("%s: grp: %d-%d level: %d ->gp_seq %ld ->completedqs %ld\n",
+               __func__, rnp->grplo, rnp->grphi, rnp->level,
+               (long)rnp->gp_seq, (long)rnp->completedqs);
+       for (rnp1 = rnp; rnp1; rnp1 = rnp1->parent)
+               pr_info("%s: %d:%d ->qsmask %#lx ->qsmaskinit %#lx ->qsmaskinitnext %#lx\n",
+                       __func__, rnp1->grplo, rnp1->grphi, rnp1->qsmask, rnp1->qsmaskinit, rnp1->qsmaskinitnext);
+       pr_info("%s: ->gp_tasks %p ->boost_tasks %p ->exp_tasks %p\n",
+               __func__, rnp->gp_tasks, rnp->boost_tasks, rnp->exp_tasks);
+       pr_info("%s: ->blkd_tasks", __func__);
+       i = 0;
+       list_for_each(lhp, &rnp->blkd_tasks) {
+               pr_cont(" %p", lhp);
+               if (++i >= 10)
+                       break;
+       }
+       pr_cont("\n");
+       for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++) {
+               rdp = per_cpu_ptr(rsp->rda, cpu);
+               onl = !!(rdp->grpmask & rcu_rnp_online_cpus(rnp));
+               pr_info("\t%d: %c online: %ld(%d) offline: %ld(%d)\n",
+                       cpu, ".o"[onl],
+                       (long)rdp->rcu_onl_gp_seq, rdp->rcu_onl_gp_flags,
+                       (long)rdp->rcu_ofl_gp_seq, rdp->rcu_ofl_gp_flags);
+       }
+}
+
 #else /* #ifdef CONFIG_PREEMPT_RCU */
 
 static struct rcu_state *const rcu_state_p = &rcu_sched_state;
@@ -911,7 +972,8 @@ static int rcu_print_task_exp_stall(struct rcu_node *rnp)
  * so there is no need to check for blocked tasks.  So check only for
  * bogus qsmask values.
  */
-static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp)
+static void
+rcu_preempt_check_blocked_tasks(struct rcu_state *rsp, struct rcu_node *rnp)
 {
        WARN_ON_ONCE(rnp->qsmask);
 }
@@ -949,6 +1011,15 @@ void exit_rcu(void)
 {
 }
 
+/*
+ * Dump the guaranteed-empty blocked-tasks state.  Trust but verify.
+ */
+static void
+dump_blkd_tasks(struct rcu_state *rsp, struct rcu_node *rnp, int ncheck)
+{
+       WARN_ON_ONCE(!list_empty(&rnp->blkd_tasks));
+}
+
 #endif /* #else #ifdef CONFIG_PREEMPT_RCU */
 
 #ifdef CONFIG_RCU_BOOST
@@ -1433,7 +1504,8 @@ static bool __maybe_unused rcu_try_advance_all_cbs(void)
                 * completed since we last checked and there are
                 * callbacks not yet ready to invoke.
                 */
-               if ((rdp->completed != rnp->completed ||
+               if ((rcu_seq_completed_gp(rdp->gp_seq,
+                                         rcu_seq_current(&rnp->gp_seq)) ||
                     unlikely(READ_ONCE(rdp->gpwrap))) &&
                    rcu_segcblist_pend_cbs(&rdp->cblist))
                        note_gp_changes(rsp, rdp);
@@ -1720,16 +1792,16 @@ static void print_cpu_stall_info(struct rcu_state *rsp, int cpu)
         */
        touch_nmi_watchdog();
 
-       if (rsp->gpnum == rdp->gpnum) {
+       ticks_value = rcu_seq_ctr(rsp->gp_seq - rdp->gp_seq);
+       if (ticks_value) {
+               ticks_title = "GPs behind";
+       } else {
                ticks_title = "ticks this GP";
                ticks_value = rdp->ticks_this_gp;
-       } else {
-               ticks_title = "GPs behind";
-               ticks_value = rsp->gpnum - rdp->gpnum;
        }
        print_cpu_stall_fast_no_hz(fast_no_hz, cpu);
-       delta = rdp->mynode->gpnum - rdp->rcu_iw_gpnum;
-       pr_err("\t%d-%c%c%c%c: (%lu %s) idle=%03x/%ld/%ld softirq=%u/%u fqs=%ld %s\n",
+       delta = rcu_seq_ctr(rdp->mynode->gp_seq - rdp->rcu_iw_gp_seq);
+       pr_err("\t%d-%c%c%c%c: (%lu %s) idle=%03x/%ld/%#lx softirq=%u/%u fqs=%ld %s\n",
               cpu,
               "O."[!!cpu_online(cpu)],
               "o."[!!(rdp->grpmask & rdp->mynode->qsmaskinit)],
@@ -1817,7 +1889,7 @@ static void rcu_nocb_gp_cleanup(struct swait_queue_head *sq)
 
 static struct swait_queue_head *rcu_nocb_gp_get(struct rcu_node *rnp)
 {
-       return &rnp->nocb_gp_wq[rnp->completed & 0x1];
+       return &rnp->nocb_gp_wq[rcu_seq_ctr(rnp->gp_seq) & 0x1];
 }
 
 static void rcu_init_one_nocb(struct rcu_node *rnp)
@@ -2069,12 +2141,17 @@ static void rcu_nocb_wait_gp(struct rcu_data *rdp)
        bool needwake;
        struct rcu_node *rnp = rdp->mynode;
 
-       raw_spin_lock_irqsave_rcu_node(rnp, flags);
-       c = rcu_cbs_completed(rdp->rsp, rnp);
-       needwake = rcu_start_this_gp(rnp, rdp, c);
-       raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
-       if (needwake)
-               rcu_gp_kthread_wake(rdp->rsp);
+       local_irq_save(flags);
+       c = rcu_seq_snap(&rdp->rsp->gp_seq);
+       if (!rdp->gpwrap && ULONG_CMP_GE(rdp->gp_seq_needed, c)) {
+               local_irq_restore(flags);
+       } else {
+               raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */
+               needwake = rcu_start_this_gp(rnp, rdp, c);
+               raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
+               if (needwake)
+                       rcu_gp_kthread_wake(rdp->rsp);
+       }
 
        /*
         * Wait for the grace period.  Do so interruptibly to avoid messing
@@ -2083,8 +2160,8 @@ static void rcu_nocb_wait_gp(struct rcu_data *rdp)
        trace_rcu_this_gp(rnp, rdp, c, TPS("StartWait"));
        for (;;) {
                swait_event_interruptible_exclusive(
-                       rnp->nocb_gp_wq[c & 0x1],
-                       (d = ULONG_CMP_GE(READ_ONCE(rnp->completed), c)));
+                       rnp->nocb_gp_wq[rcu_seq_ctr(c) & 0x1],
+                       (d = rcu_seq_done(&rnp->gp_seq, c)));
                if (likely(d))
                        break;
                WARN_ON(signal_pending(current));
@@ -2568,23 +2645,6 @@ static bool init_nocb_callback_list(struct rcu_data *rdp)
 
 #endif /* #else #ifdef CONFIG_RCU_NOCB_CPU */
 
-/*
- * An adaptive-ticks CPU can potentially execute in kernel mode for an
- * arbitrarily long period of time with the scheduling-clock tick turned
- * off.  RCU will be paying attention to this CPU because it is in the
- * kernel, but the CPU cannot be guaranteed to be executing the RCU state
- * machine because the scheduling-clock tick has been disabled.  Therefore,
- * if an adaptive-ticks CPU is failing to respond to the current grace
- * period and has not be idle from an RCU perspective, kick it.
- */
-static void __maybe_unused rcu_kick_nohz_cpu(int cpu)
-{
-#ifdef CONFIG_NO_HZ_FULL
-       if (tick_nohz_full_cpu(cpu))
-               smp_send_reschedule(cpu);
-#endif /* #ifdef CONFIG_NO_HZ_FULL */
-}
-
 /*
  * Is this CPU a NO_HZ_FULL CPU that should ignore RCU so that the
  * grace-period kthread will do force_quiescent_state() processing?
@@ -2610,8 +2670,6 @@ static bool rcu_nohz_full_cpu(struct rcu_state *rsp)
  */
 static void rcu_bind_gp_kthread(void)
 {
-       int __maybe_unused cpu;
-
        if (!tick_nohz_full_enabled())
                return;
        housekeeping_affine(current, HK_FLAG_RCU);
index 4c230a6..39cb23d 100644 (file)
@@ -507,14 +507,15 @@ early_initcall(check_cpu_stall_init);
 #ifdef CONFIG_TASKS_RCU
 
 /*
- * Simple variant of RCU whose quiescent states are voluntary context switch,
- * user-space execution, and idle.  As such, grace periods can take one good
- * long time.  There are no read-side primitives similar to rcu_read_lock()
- * and rcu_read_unlock() because this implementation is intended to get
- * the system into a safe state for some of the manipulations involved in
- * tracing and the like.  Finally, this implementation does not support
- * high call_rcu_tasks() rates from multiple CPUs.  If this is required,
- * per-CPU callback lists will be needed.
+ * Simple variant of RCU whose quiescent states are voluntary context
+ * switch, cond_resched_rcu_qs(), user-space execution, and idle.
+ * As such, grace periods can take one good long time.  There are no
+ * read-side primitives similar to rcu_read_lock() and rcu_read_unlock()
+ * because this implementation is intended to get the system into a safe
+ * state for some of the manipulations involved in tracing and the like.
+ * Finally, this implementation does not support high call_rcu_tasks()
+ * rates from multiple CPUs.  If this is required, per-CPU callback lists
+ * will be needed.
  */
 
 /* Global list of callbacks and associated lock. */
@@ -542,11 +543,11 @@ static struct task_struct *rcu_tasks_kthread_ptr;
  * period elapses, in other words after all currently executing RCU
  * read-side critical sections have completed. call_rcu_tasks() assumes
  * that the read-side critical sections end at a voluntary context
- * switch (not a preemption!), entry into idle, or transition to usermode
- * execution.  As such, there are no read-side primitives analogous to
- * rcu_read_lock() and rcu_read_unlock() because this primitive is intended
- * to determine that all tasks have passed through a safe state, not so
- * much for data-strcuture synchronization.
+ * switch (not a preemption!), cond_resched_rcu_qs(), entry into idle,
+ * or transition to usermode execution.  As such, there are no read-side
+ * primitives analogous to rcu_read_lock() and rcu_read_unlock() because
+ * this primitive is intended to determine that all tasks have passed
+ * through a safe state, not so much for data-structure synchronization.
  *
  * See the description of call_rcu() for more detailed information on
  * memory ordering guarantees.
@@ -667,6 +668,7 @@ static int __noreturn rcu_tasks_kthread(void *arg)
        struct rcu_head *list;
        struct rcu_head *next;
        LIST_HEAD(rcu_tasks_holdouts);
+       int fract;
 
        /* Run on housekeeping CPUs by default.  Sysadm can move if desired. */
        housekeeping_affine(current, HK_FLAG_RCU);
@@ -748,13 +750,25 @@ static int __noreturn rcu_tasks_kthread(void *arg)
                 * holdouts.  When the list is empty, we are done.
                 */
                lastreport = jiffies;
-               while (!list_empty(&rcu_tasks_holdouts)) {
+
+               /* Start off with HZ/10 wait and slowly back off to 1 HZ wait */
+               fract = 10;
+
+               for (;;) {
                        bool firstreport;
                        bool needreport;
                        int rtst;
                        struct task_struct *t1;
 
-                       schedule_timeout_interruptible(HZ);
+                       if (list_empty(&rcu_tasks_holdouts))
+                               break;
+
+                       /* Slowly back off waiting for holdouts */
+                       schedule_timeout_interruptible(HZ/fract);
+
+                       if (fract > 1)
+                               fract--;
+
                        rtst = READ_ONCE(rcu_task_stall_timeout);
                        needreport = rtst > 0 &&
                                     time_after(jiffies, lastreport + rtst);
@@ -800,6 +814,7 @@ static int __noreturn rcu_tasks_kthread(void *arg)
                        list = next;
                        cond_resched();
                }
+               /* Paranoid sleep to keep this from entering a tight loop */
                schedule_timeout_uninterruptible(HZ/10);
        }
 }
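
The holdout loop now starts polling at HZ/10 and lengthens the wait by decrementing fract each pass until it reaches a full HZ between checks. A userspace sketch of the same backoff shape, with usleep() standing in for schedule_timeout_interruptible():

#include <stdio.h>
#include <unistd.h>

#define HZ 100                         /* stand-in tick rate */

static int holdouts = 5;

static int list_empty(void)            /* fake holdout list draining */
{
        return holdouts-- <= 0;
}

int main(void)
{
        int fract = 10;                /* first wait is HZ/10 */

        for (;;) {
                if (list_empty())
                        break;
                usleep((1000000 / HZ) * (HZ / fract)); /* HZ/fract ticks */
                if (fract > 1)
                        fract--;       /* each subsequent wait is longer */
                printf("checked holdouts, next wait HZ/%d\n", fract);
        }
        puts("all holdouts drained");
        return 0;
}
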
index 75ffc1d..6f58486 100644 (file)
@@ -390,7 +390,7 @@ static inline void tick_irq_exit(void)
 
        /* Make sure that timer wheel updates are propagated */
        if ((idle_cpu(cpu) && !need_resched()) || tick_nohz_full_cpu(cpu)) {
-               if (!in_interrupt())
+               if (!in_irq())
                        tick_nohz_irq_exit();
        }
 #endif
index 34b6652..067cb83 100644 (file)
@@ -81,6 +81,7 @@ static bool cpu_stop_queue_work(unsigned int cpu, struct cpu_stop_work *work)
        unsigned long flags;
        bool enabled;
 
+       preempt_disable();
        raw_spin_lock_irqsave(&stopper->lock, flags);
        enabled = stopper->enabled;
        if (enabled)
@@ -90,6 +91,7 @@ static bool cpu_stop_queue_work(unsigned int cpu, struct cpu_stop_work *work)
        raw_spin_unlock_irqrestore(&stopper->lock, flags);
 
        wake_up_q(&wakeq);
+       preempt_enable();
 
        return enabled;
 }
index da9455a..5b33e2f 100644 (file)
@@ -642,7 +642,7 @@ static void tick_nohz_restart(struct tick_sched *ts, ktime_t now)
 
 static inline bool local_timer_softirq_pending(void)
 {
-       return local_softirq_pending() & TIMER_SOFTIRQ;
+       return local_softirq_pending() & BIT(TIMER_SOFTIRQ);
 }
 
 static ktime_t tick_nohz_next_event(struct tick_sched *ts, int cpu)
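
The one-character tick-sched fix above is the classic index-versus-mask bug: TIMER_SOFTIRQ is a softirq number, not a bit, so masking the pending word with it tested the wrong bit entirely. A minimal demonstration (enum values assumed to match the include/linux/interrupt.h ordering):

#include <stdio.h>

enum { HI_SOFTIRQ = 0, TIMER_SOFTIRQ = 1 };   /* numbers, not masks */

#define BIT(n) (1UL << (n))

int main(void)
{
        unsigned long pending = BIT(TIMER_SOFTIRQ);  /* timer raised */

        /* Buggy: masks with the number 1, i.e. tests HI_SOFTIRQ's bit,
         * so a pending timer softirq is reported as "not pending". */
        printf("buggy: %d\n", !!(pending & TIMER_SOFTIRQ));
        /* Fixed: convert the softirq number to its bit first. */
        printf("fixed: %d\n", !!(pending & BIT(TIMER_SOFTIRQ)));
        return 0;
}
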
index 3de1efb..1ac24a8 100644 (file)
@@ -20,6 +20,9 @@
  * Author: Paul E. McKenney <paulmck@us.ibm.com>
  *     Based on kernel/rcu/torture.c.
  */
+
+#define pr_fmt(fmt) fmt
+
 #include <linux/types.h>
 #include <linux/kernel.h>
 #include <linux/init.h>
@@ -53,7 +56,7 @@ MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Paul E. McKenney <paulmck@us.ibm.com>");
 
 static char *torture_type;
-static bool verbose;
+static int verbose;
 
 /* Mediate rmmod and system shutdown.  Concurrent rmmod & shutdown illegal! */
 #define FULLSTOP_DONTSTOP 0    /* Normal operation. */
@@ -98,7 +101,7 @@ bool torture_offline(int cpu, long *n_offl_attempts, long *n_offl_successes,
        if (!cpu_online(cpu) || !cpu_is_hotpluggable(cpu))
                return false;
 
-       if (verbose)
+       if (verbose > 1)
                pr_alert("%s" TORTURE_FLAG
                         "torture_onoff task: offlining %d\n",
                         torture_type, cpu);
@@ -111,7 +114,7 @@ bool torture_offline(int cpu, long *n_offl_attempts, long *n_offl_successes,
                                 "torture_onoff task: offline %d failed: errno %d\n",
                                 torture_type, cpu, ret);
        } else {
-               if (verbose)
+               if (verbose > 1)
                        pr_alert("%s" TORTURE_FLAG
                                 "torture_onoff task: offlined %d\n",
                                 torture_type, cpu);
@@ -147,7 +150,7 @@ bool torture_online(int cpu, long *n_onl_attempts, long *n_onl_successes,
        if (cpu_online(cpu) || !cpu_is_hotpluggable(cpu))
                return false;
 
-       if (verbose)
+       if (verbose > 1)
                pr_alert("%s" TORTURE_FLAG
                         "torture_onoff task: onlining %d\n",
                         torture_type, cpu);
@@ -160,7 +163,7 @@ bool torture_online(int cpu, long *n_onl_attempts, long *n_onl_successes,
                                 "torture_onoff task: online %d failed: errno %d\n",
                                 torture_type, cpu, ret);
        } else {
-               if (verbose)
+               if (verbose > 1)
                        pr_alert("%s" TORTURE_FLAG
                                 "torture_onoff task: onlined %d\n",
                                 torture_type, cpu);
@@ -647,7 +650,7 @@ static void torture_stutter_cleanup(void)
  * The runnable parameter points to a flag that controls whether or not
  * the test is currently runnable.  If there is no such flag, pass in NULL.
  */
-bool torture_init_begin(char *ttype, bool v)
+bool torture_init_begin(char *ttype, int v)
 {
        mutex_lock(&fullstop_mutex);
        if (torture_type != NULL) {
index 6a46af2..0b0b688 100644 (file)
@@ -3226,6 +3226,22 @@ int ring_buffer_record_is_on(struct ring_buffer *buffer)
        return !atomic_read(&buffer->record_disabled);
 }
 
+/**
+ * ring_buffer_record_is_set_on - return true if the ring buffer is set writable
+ * @buffer: The ring buffer to check
+ *
+ * Returns true if the ring buffer is set writable by ring_buffer_record_on().
+ * Note that this does NOT mean it is in a writable state.
+ *
+ * It may return true when the ring buffer has been disabled by
+ * ring_buffer_record_disable(), as that is a temporary disabling of
+ * the ring buffer.
+ */
+int ring_buffer_record_is_set_on(struct ring_buffer *buffer)
+{
+       return !(atomic_read(&buffer->record_disabled) & RB_BUFFER_OFF);
+}
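
The distinction this new predicate draws is between the sticky RB_BUFFER_OFF flag (set by ring_buffer_record_off()) and the transient nesting count bumped by ring_buffer_record_disable(). A hedged model of the two predicates over a single word; the kernel stores this in an atomic_t, and the RB_BUFFER_OFF value below is assumed from ring_buffer.c:

#include <stdio.h>

#define RB_BUFFER_OFF (1 << 20)   /* sticky "switched off" flag (assumed) */

static int record_disabled;       /* flag bits | temporary nesting count */

static int record_is_on(void)     { return !record_disabled; }
static int record_is_set_on(void) { return !(record_disabled & RB_BUFFER_OFF); }

int main(void)
{
        record_disabled++;        /* transient disable, e.g. during a swap */
        printf("on=%d set_on=%d\n", record_is_on(), record_is_set_on());
        /* on=0 set_on=1: not writable right now, but not switched off */

        record_disabled = RB_BUFFER_OFF;
        printf("on=%d set_on=%d\n", record_is_on(), record_is_set_on());
        /* on=0 set_on=0: explicitly switched off */
        return 0;
}

This is what lets update_max_tr() inherit the intended recordable setting rather than the momentary one.
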
+
 /**
  * ring_buffer_record_disable_cpu - stop all writes into the cpu_buffer
  * @buffer: The ring buffer to stop writes to.
index 87cf251..8236879 100644 (file)
@@ -1373,6 +1373,12 @@ update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
 
        arch_spin_lock(&tr->max_lock);
 
+       /* Inherit the recordable setting from trace_buffer */
+       if (ring_buffer_record_is_set_on(tr->trace_buffer.buffer))
+               ring_buffer_record_on(tr->max_buffer.buffer);
+       else
+               ring_buffer_record_off(tr->max_buffer.buffer);
+
        swap(tr->trace_buffer.buffer, tr->max_buffer.buffer);
 
        __update_max_tr(tr, tsk, cpu);
index d182496..5dea177 100644 (file)
@@ -679,6 +679,8 @@ event_trigger_callback(struct event_command *cmd_ops,
                goto out_free;
 
  out_reg:
+       /* Up the trigger_data count to make sure reg doesn't free it on failure */
+       event_trigger_init(trigger_ops, trigger_data);
        ret = cmd_ops->reg(glob, trigger_ops, trigger_data, file);
        /*
         * The above returns on success the # of functions enabled,
@@ -686,11 +688,13 @@ event_trigger_callback(struct event_command *cmd_ops,
         * Consider no functions a failure too.
         */
        if (!ret) {
+               cmd_ops->unreg(glob, trigger_ops, trigger_data, file);
                ret = -ENOENT;
-               goto out_free;
-       } else if (ret < 0)
-               goto out_free;
-       ret = 0;
+       } else if (ret > 0)
+               ret = 0;
+
+       /* Drop the trigger_data count; free it if no longer in use */
+       event_trigger_free(trigger_ops, trigger_data);
  out:
        return ret;
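
The fix pins trigger_data across cmd_ops->reg(): a failing registration may drop a reference, so the caller takes one of its own first and releases it afterwards, letting the final put free the object exactly once. A hedged sketch of this pin-across-risky-call idiom (all names here are hypothetical stand-ins, not the trace-event API):

#include <stdio.h>
#include <stdlib.h>

struct trigger_data { int ref; };

static void trigger_get(struct trigger_data *d) { d->ref++; }

static void trigger_put(struct trigger_data *d)
{
        if (--d->ref == 0) {
                puts("trigger_data freed");
                free(d);
        }
}

/* A registration that fails and drops the reference it was handed. */
static int register_trigger(struct trigger_data *d)
{
        trigger_put(d);
        return -1;
}

int main(void)
{
        struct trigger_data *d = calloc(1, sizeof(*d));

        if (!d)
                return 1;
        d->ref = 1;
        trigger_get(d);                  /* pin across the risky call */
        if (register_trigger(d) < 0)
                puts("reg failed; d still valid thanks to the pin");
        trigger_put(d);                  /* drop the pin; frees here */
        return 0;
}
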
 
@@ -1416,6 +1420,9 @@ int event_enable_trigger_func(struct event_command *cmd_ops,
                goto out;
        }
 
+       /* Up the trigger_data count to make sure nothing frees it on failure */
+       event_trigger_init(trigger_ops, trigger_data);
+
        if (trigger) {
                number = strsep(&trigger, ":");
 
@@ -1466,6 +1473,7 @@ int event_enable_trigger_func(struct event_command *cmd_ops,
                goto out_disable;
        /* Just return zero, not the number of enabled functions */
        ret = 0;
+       event_trigger_free(trigger_ops, trigger_data);
  out:
        return ret;
 
@@ -1476,7 +1484,7 @@ int event_enable_trigger_func(struct event_command *cmd_ops,
  out_free:
        if (cmd_ops->set_filter)
                cmd_ops->set_filter(NULL, trigger_data, NULL);
-       kfree(trigger_data);
+       event_trigger_free(trigger_ops, trigger_data);
        kfree(enable_data);
        goto out;
 }
index 21f7184..6b71860 100644 (file)
@@ -400,11 +400,10 @@ static struct trace_kprobe *find_trace_kprobe(const char *event,
 static int
 enable_trace_kprobe(struct trace_kprobe *tk, struct trace_event_file *file)
 {
+       struct event_file_link *link = NULL;
        int ret = 0;
 
        if (file) {
-               struct event_file_link *link;
-
                link = kmalloc(sizeof(*link), GFP_KERNEL);
                if (!link) {
                        ret = -ENOMEM;
@@ -424,6 +423,18 @@ enable_trace_kprobe(struct trace_kprobe *tk, struct trace_event_file *file)
                else
                        ret = enable_kprobe(&tk->rp.kp);
        }
+
+       if (ret) {
+               if (file) {
+                       /* Note: branch taken only if WARN_ON_ONCE() did not fire */
+                       if (!WARN_ON_ONCE(!link))
+                               list_del_rcu(&link->list);
+                       kfree(link);
+                       tk->tp.flags &= ~TP_FLAG_TRACE;
+               } else {
+                       tk->tp.flags &= ~TP_FLAG_PROFILE;
+               }
+       }
  out:
        return ret;
 }
index c253c1b..befb127 100644 (file)
@@ -5,7 +5,7 @@ if HAVE_ARCH_KASAN
 
 config KASAN
        bool "KASan: runtime memory debugger"
-       depends on SLUB || (SLAB && !DEBUG_SLAB)
+       depends on (SLUB && SYSFS) || (SLAB && !DEBUG_SLAB)
        select SLUB_DEBUG if SLUB
        select CONSTRUCTORS
        select STACKDEPOT
index 19d42ea..98fa559 100644 (file)
@@ -1,9 +1,6 @@
 config ARCH_HAS_UBSAN_SANITIZE_ALL
        bool
 
-config ARCH_WANTS_UBSAN_NO_NULL
-       def_bool n
-
 config UBSAN
        bool "Undefined behaviour sanity checker"
        help
@@ -39,14 +36,6 @@ config UBSAN_ALIGNMENT
          Enabling this option on architectures that support unaligned
          accesses may produce a lot of false positives.
 
-config UBSAN_NULL
-       bool "Enable checking of null pointers"
-       depends on UBSAN
-       default y if !ARCH_WANTS_UBSAN_NO_NULL
-       help
-         This option enables detection of memory accesses via a
-         null pointer.
-
 config TEST_UBSAN
        tristate "Module for testing for undefined behavior detection"
        depends on m && UBSAN
index 994be48..70935ed 100644 (file)
@@ -360,9 +360,12 @@ static void debug_object_is_on_stack(void *addr, int onstack)
 
        limit++;
        if (is_on_stack)
-               pr_warn("object is on stack, but not annotated\n");
+               pr_warn("object %p is on stack %p, but NOT annotated.\n", addr,
+                        task_stack_page(current));
        else
-               pr_warn("object is not on stack, but annotated\n");
+               pr_warn("object %p is NOT on stack %p, but annotated.\n", addr,
+                        task_stack_page(current));
+
        WARN_ON(1);
 }
 
@@ -1185,8 +1188,7 @@ void __init debug_objects_mem_init(void)
 
        if (!obj_cache || debug_objects_replace_static_objects()) {
                debug_objects_enabled = 0;
-               if (obj_cache)
-                       kmem_cache_destroy(obj_cache);
+               kmem_cache_destroy(obj_cache);
                pr_warn("out of memory.\n");
        } else
                debug_objects_selftest();
index 039ddbc..3103099 100644 (file)
@@ -3167,6 +3167,13 @@ static vm_fault_t hugetlb_vm_op_fault(struct vm_fault *vmf)
        return 0;
 }
 
+/*
+ * When a new function is introduced to vm_operations_struct and added
+ * to hugetlb_vm_ops, please consider adding the function to shm_vm_ops.
+ * This is because, under the System V memory model, mappings created
+ * via shmget/shmat with "huge page" specified are backed by hugetlbfs
+ * files, and their original vm_ops are overwritten with shm_vm_ops.
+ */
 const struct vm_operations_struct hugetlb_vm_ops = {
        .fault = hugetlb_vm_op_fault,
        .open = hugetlb_vm_op_open,
index 8c0280b..b2173f7 100644 (file)
@@ -4037,6 +4037,14 @@ static struct cftype mem_cgroup_legacy_files[] = {
 
 static DEFINE_IDR(mem_cgroup_idr);
 
+static void mem_cgroup_id_remove(struct mem_cgroup *memcg)
+{
+       if (memcg->id.id > 0) {
+               idr_remove(&mem_cgroup_idr, memcg->id.id);
+               memcg->id.id = 0;
+       }
+}
+
 static void mem_cgroup_id_get_many(struct mem_cgroup *memcg, unsigned int n)
 {
        VM_BUG_ON(atomic_read(&memcg->id.ref) <= 0);
@@ -4047,8 +4055,7 @@ static void mem_cgroup_id_put_many(struct mem_cgroup *memcg, unsigned int n)
 {
        VM_BUG_ON(atomic_read(&memcg->id.ref) < n);
        if (atomic_sub_and_test(n, &memcg->id.ref)) {
-               idr_remove(&mem_cgroup_idr, memcg->id.id);
-               memcg->id.id = 0;
+               mem_cgroup_id_remove(memcg);
 
                /* Memcg ID pins CSS */
                css_put(&memcg->css);
@@ -4185,8 +4192,7 @@ static struct mem_cgroup *mem_cgroup_alloc(void)
        idr_replace(&mem_cgroup_idr, memcg, memcg->id.id);
        return memcg;
 fail:
-       if (memcg->id.id > 0)
-               idr_remove(&mem_cgroup_idr, memcg->id.id);
+       mem_cgroup_id_remove(memcg);
        __mem_cgroup_free(memcg);
        return NULL;
 }
@@ -4245,6 +4251,7 @@ mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
 
        return &memcg->css;
 fail:
+       mem_cgroup_id_remove(memcg);
        mem_cgroup_free(memcg);
        return ERR_PTR(-ENOMEM);
 }
index 7206a63..c5e87a3 100644 (file)
@@ -1417,11 +1417,9 @@ static inline unsigned long zap_pmd_range(struct mmu_gather *tlb,
        do {
                next = pmd_addr_end(addr, end);
                if (is_swap_pmd(*pmd) || pmd_trans_huge(*pmd) || pmd_devmap(*pmd)) {
-                       if (next - addr != HPAGE_PMD_SIZE) {
-                               VM_BUG_ON_VMA(vma_is_anonymous(vma) &&
-                                   !rwsem_is_locked(&tlb->mm->mmap_sem), vma);
+                       if (next - addr != HPAGE_PMD_SIZE)
                                __split_huge_pmd(vma, pmd, addr, false, NULL);
-                       } else if (zap_huge_pmd(tlb, vma, pmd, addr))
+                       else if (zap_huge_pmd(tlb, vma, pmd, addr))
                                goto next;
                        /* fall through */
                }
@@ -4397,6 +4395,9 @@ int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
                return -EINVAL;
 
        maddr = ioremap_prot(phys_addr, PAGE_ALIGN(len + offset), prot);
+       if (!maddr)
+               return -ENOMEM;
+
        if (write)
                memcpy_toio(maddr + offset, buf, len);
        else
index 9ac49ef..01f1a14 100644 (file)
@@ -2505,6 +2505,7 @@ void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol)
 
                /* Create pseudo-vma that contains just the policy */
                memset(&pvma, 0, sizeof(struct vm_area_struct));
+               vma_init(&pvma, NULL);
                pvma.vm_end = TASK_SIZE;        /* policy covers entire file */
                mpol_set_shared_policy(sp, &pvma, new); /* adds ref */
 
index ff1944d..17bbf4d 100644 (file)
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1778,6 +1778,8 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
                error = shmem_zero_setup(vma);
                if (error)
                        goto free_vma;
+       } else {
+               vma_set_anonymous(vma);
        }
 
        vma_link(mm, vma, prev, rb_link, rb_parent);
@@ -2983,6 +2985,7 @@ static int do_brk_flags(unsigned long addr, unsigned long len, unsigned long fla
                return -ENOMEM;
        }
 
+       vma_set_anonymous(vma);
        vma->vm_start = addr;
        vma->vm_end = addr + len;
        vma->vm_pgoff = pgoff;
index 1d22fdb..9fc9e43 100644 (file)
@@ -1145,6 +1145,8 @@ static int do_mmap_private(struct vm_area_struct *vma,
                if (ret < len)
                        memset(base + ret, 0, len - ret);
 
+       } else {
+               vma_set_anonymous(vma);
        }
 
        return 0;
index 2cab844..41b9bbf 100644 (file)
@@ -1421,6 +1421,7 @@ static void shmem_pseudo_vma_init(struct vm_area_struct *vma,
 {
        /* Create a pseudo vma that just contains the policy */
        memset(vma, 0, sizeof(*vma));
+       vma_init(vma, NULL);
        /* Bias interleave by inode number to distribute better across nodes */
        vma->vm_pgoff = index + info->vfs_inode.i_ino;
        vma->vm_policy = mpol_shared_policy_lookup(&info->policy, index);
index 7d34e69..cd91fd9 100644
@@ -1026,6 +1026,15 @@ static int zswap_frontswap_store(unsigned type, pgoff_t offset,
                        ret = -ENOMEM;
                        goto reject;
                }
+
+               /* A second zswap_is_full() check after
+                * zswap_shrink(), to make sure the pool is
+                * now under max_pool_percent.
+                */
+               if (zswap_is_full()) {
+                       ret = -ENOMEM;
+                       goto reject;
+               }
        }
 
        /* allocate entry */
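
With the added block, the store path re-checks the pool limit after reclaim instead of assuming one zswap_shrink() pass suffices. The resulting control flow, sketched with only the identifiers visible in this hunk (the surrounding reclaim branch and its stats counters are reconstructed and elided, so details may differ):

        if (zswap_is_full()) {
                if (zswap_shrink()) {           /* reclaim failed outright */
                        ret = -ENOMEM;
                        goto reject;
                }
                if (zswap_is_full()) {          /* freed too little, still over */
                        ret = -ENOMEM;
                        goto reject;
                }
        }
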
index e0adcd1..711d715 100644
@@ -131,8 +131,10 @@ static void caif_flow_cb(struct sk_buff *skb)
        caifd = caif_get(skb->dev);
 
        WARN_ON(caifd == NULL);
-       if (caifd == NULL)
+       if (!caifd) {
+               rcu_read_unlock();
                return;
+       }
 
        caifd_hold(caifd);
        rcu_read_unlock();
index a5aa1c7..559a912 100644
@@ -7149,16 +7149,19 @@ int dev_change_tx_queue_len(struct net_device *dev, unsigned long new_len)
                dev->tx_queue_len = new_len;
                res = call_netdevice_notifiers(NETDEV_CHANGE_TX_QUEUE_LEN, dev);
                res = notifier_to_errno(res);
-               if (res) {
-                       netdev_err(dev,
-                                  "refused to change device tx_queue_len\n");
-                       dev->tx_queue_len = orig_len;
-                       return res;
-               }
-               return dev_qdisc_change_tx_queue_len(dev);
+               if (res)
+                       goto err_rollback;
+               res = dev_qdisc_change_tx_queue_len(dev);
+               if (res)
+                       goto err_rollback;
        }
 
        return 0;
+
+err_rollback:
+       netdev_err(dev, "refused to change device tx_queue_len\n");
+       dev->tx_queue_len = orig_len;
+       return res;
 }
 
 /**
index 06da770..9dfd145 100644
@@ -1712,24 +1712,26 @@ static const struct bpf_func_proto bpf_skb_load_bytes_proto = {
 BPF_CALL_5(bpf_skb_load_bytes_relative, const struct sk_buff *, skb,
           u32, offset, void *, to, u32, len, u32, start_header)
 {
+       u8 *end = skb_tail_pointer(skb);
+       u8 *net = skb_network_header(skb);
+       u8 *mac = skb_mac_header(skb);
        u8 *ptr;
 
-       if (unlikely(offset > 0xffff || len > skb_headlen(skb)))
+       if (unlikely(offset > 0xffff || len > (end - mac)))
                goto err_clear;
 
        switch (start_header) {
        case BPF_HDR_START_MAC:
-               ptr = skb_mac_header(skb) + offset;
+               ptr = mac + offset;
                break;
        case BPF_HDR_START_NET:
-               ptr = skb_network_header(skb) + offset;
+               ptr = net + offset;
                break;
        default:
                goto err_clear;
        }
 
-       if (likely(ptr >= skb_mac_header(skb) &&
-                  ptr + len <= skb_tail_pointer(skb))) {
+       if (likely(ptr >= mac && ptr + len <= end)) {
                memcpy(to, ptr, len);
                return 0;
        }
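
The rewrite hoists the mac/net/end pointers and validates len against the whole linear area measured from the MAC header, since skb_headlen() counts from skb->data and can miss bytes when the MAC header has already been pulled. The invariant being enforced, as a standalone predicate (names are illustrative):

        #include <stdbool.h>
        #include <stddef.h>
        #include <stdint.h>

        /* True iff [ptr, ptr + len) lies inside the linear area [mac, end). */
        static bool load_in_bounds(const uint8_t *mac, const uint8_t *end,
                                   const uint8_t *ptr, uint32_t len)
        {
                return len <= (size_t)(end - mac) &&
                       ptr >= mac && (size_t)(end - ptr) >= len;
        }
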
index e7e626f..e450985 100644
@@ -217,7 +217,7 @@ static int bpf_parse_prog(struct nlattr *attr, struct bpf_lwt_prog *prog,
        if (!tb[LWT_BPF_PROG_FD] || !tb[LWT_BPF_PROG_NAME])
                return -EINVAL;
 
-       prog->name = nla_memdup(tb[LWT_BPF_PROG_NAME], GFP_KERNEL);
+       prog->name = nla_memdup(tb[LWT_BPF_PROG_NAME], GFP_ATOMIC);
        if (!prog->name)
                return -ENOMEM;
 
index 68bf072..43a932c 100644
@@ -269,7 +269,7 @@ static void __page_pool_empty_ring(struct page_pool *pool)
        struct page *page;
 
        /* Empty recycle ring */
-       while ((page = ptr_ring_consume(&pool->ring))) {
+       while ((page = ptr_ring_consume_bh(&pool->ring))) {
                /* Verify the refcnt invariant of cached pages */
                if (!(page_ref_count(page) == 1))
                        pr_crit("%s() page_pool refcnt %d violation\n",
index 5ef6122..e3f743c 100644
@@ -2759,9 +2759,12 @@ int rtnl_configure_link(struct net_device *dev, const struct ifinfomsg *ifm)
                        return err;
        }
 
-       dev->rtnl_link_state = RTNL_LINK_INITIALIZED;
-
-       __dev_notify_flags(dev, old_flags, ~0U);
+       if (dev->rtnl_link_state == RTNL_LINK_INITIALIZED) {
+               __dev_notify_flags(dev, old_flags, 0U);
+       } else {
+               dev->rtnl_link_state = RTNL_LINK_INITIALIZED;
+               __dev_notify_flags(dev, old_flags, ~0U);
+       }
        return 0;
 }
 EXPORT_SYMBOL(rtnl_configure_link);
index 8e51f85..fb35b62 100644
@@ -3720,6 +3720,7 @@ normal:
                                net_warn_ratelimited(
                                        "skb_segment: too many frags: %u %u\n",
                                        pos, mss);
+                               err = -EINVAL;
                                goto err;
                        }
 
@@ -3753,11 +3754,10 @@ skip_fraglist:
 
 perform_csum_check:
                if (!csum) {
-                       if (skb_has_shared_frag(nskb)) {
-                               err = __skb_linearize(nskb);
-                               if (err)
-                                       goto err;
-                       }
+                       if (skb_has_shared_frag(nskb) &&
+                           __skb_linearize(nskb))
+                               goto err;
+
                        if (!nskb->remcsum_offload)
                                nskb->ip_summed = CHECKSUM_NONE;
                        SKB_GSO_CB(nskb)->csum =
index 9e8f655..bc2d7a3 100644
@@ -2277,9 +2277,9 @@ int sk_alloc_sg(struct sock *sk, int len, struct scatterlist *sg,
                pfrag->offset += use;
 
                sge = sg + sg_curr - 1;
-               if (sg_curr > first_coalesce && sg_page(sg) == pfrag->page &&
-                   sg->offset + sg->length == orig_offset) {
-                       sg->length += use;
+               if (sg_curr > first_coalesce && sg_page(sge) == pfrag->page &&
+                   sge->offset + sge->length == orig_offset) {
+                       sge->length += use;
                } else {
                        sge = sg + sg_curr;
                        sg_unmark_end(sge);
index 9d1f220..6771f18 100644
@@ -345,7 +345,8 @@ static void __xdp_return(void *data, struct xdp_mem_info *mem, bool napi_direct,
                rcu_read_lock();
                /* mem->id is valid, checked in xdp_rxq_info_reg_mem_model() */
                xa = rhashtable_lookup(mem_id_ht, &mem->id, mem_id_rht_params);
-               xa->zc_alloc->free(xa->zc_alloc, handle);
+               if (!WARN_ON_ONCE(!xa))
+                       xa->zc_alloc->free(xa->zc_alloc, handle);
                rcu_read_unlock();
        default:
                /* Not possible, checked in xdp_rxq_info_reg_mem_model() */
index 2b75df4..842a9c7 100644
@@ -229,14 +229,16 @@ static void ccid2_cwnd_restart(struct sock *sk, const u32 now)
        struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk);
        u32 cwnd = hc->tx_cwnd, restart_cwnd,
            iwnd = rfc3390_bytes_to_packets(dccp_sk(sk)->dccps_mss_cache);
+       s32 delta = now - hc->tx_lsndtime;
 
        hc->tx_ssthresh = max(hc->tx_ssthresh, (cwnd >> 1) + (cwnd >> 2));
 
        /* don't reduce cwnd below the initial window (IW) */
        restart_cwnd = min(cwnd, iwnd);
-       cwnd >>= (now - hc->tx_lsndtime) / hc->tx_rto;
-       hc->tx_cwnd = max(cwnd, restart_cwnd);
 
+       while ((delta -= hc->tx_rto) >= 0 && cwnd > restart_cwnd)
+               cwnd >>= 1;
+       hc->tx_cwnd = max(cwnd, restart_cwnd);
        hc->tx_cwnd_stamp = now;
        hc->tx_cwnd_used  = 0;
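
The loop replaces a single 'cwnd >>= elapsed / rto' whose shift count can reach or exceed the width of cwnd after a long idle period, which is undefined behavior in C. A standalone model of the well-defined replacement (assumes rto > 0):

        #include <stdint.h>

        /* Halve cwnd once per elapsed RTO, but never drop below 'floor'
         * (the restart window). Stays defined for arbitrarily long idles.
         */
        static uint32_t decay_cwnd(uint32_t cwnd, uint32_t floor,
                                   int32_t delta, int32_t rto)
        {
                while ((delta -= rto) >= 0 && cwnd > floor)
                        cwnd >>= 1;
                return cwnd > floor ? cwnd : floor;
        }
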
 
index 1e3b6a6..9864bcd 100644
@@ -639,7 +639,7 @@ static int dsa_slave_set_eee(struct net_device *dev, struct ethtool_eee *e)
        int ret;
 
        /* Port's PHY and MAC both need to be EEE capable */
-       if (!dev->phydev)
+       if (!dev->phydev && !dp->pl)
                return -ENODEV;
 
        if (!ds->ops->set_mac_eee)
@@ -659,7 +659,7 @@ static int dsa_slave_get_eee(struct net_device *dev, struct ethtool_eee *e)
        int ret;
 
        /* Port's PHY and MAC both need to be EEE capable */
-       if (!dev->phydev)
+       if (!dev->phydev && !dp->pl)
                return -ENODEV;
 
        if (!ds->ops->get_mac_eee)
@@ -1248,6 +1248,9 @@ int dsa_slave_suspend(struct net_device *slave_dev)
 {
        struct dsa_port *dp = dsa_slave_to_port(slave_dev);
 
+       if (!netif_running(slave_dev))
+               return 0;
+
        netif_device_detach(slave_dev);
 
        rtnl_lock();
@@ -1261,6 +1264,9 @@ int dsa_slave_resume(struct net_device *slave_dev)
 {
        struct dsa_port *dp = dsa_slave_to_port(slave_dev);
 
+       if (!netif_running(slave_dev))
+               return 0;
+
        netif_device_attach(slave_dev);
 
        rtnl_lock();
index e46cdd3..2998b0e 100644
@@ -292,19 +292,19 @@ __be32 fib_compute_spec_dst(struct sk_buff *skb)
                return ip_hdr(skb)->daddr;
 
        in_dev = __in_dev_get_rcu(dev);
-       BUG_ON(!in_dev);
 
        net = dev_net(dev);
 
        scope = RT_SCOPE_UNIVERSE;
        if (!ipv4_is_zeronet(ip_hdr(skb)->saddr)) {
+               bool vmark = in_dev && IN_DEV_SRC_VMARK(in_dev);
                struct flowi4 fl4 = {
                        .flowi4_iif = LOOPBACK_IFINDEX,
                        .flowi4_oif = l3mdev_master_ifindex_rcu(dev),
                        .daddr = ip_hdr(skb)->saddr,
                        .flowi4_tos = RT_TOS(ip_hdr(skb)->tos),
                        .flowi4_scope = scope,
-                       .flowi4_mark = IN_DEV_SRC_VMARK(in_dev) ? skb->mark : 0,
+                       .flowi4_mark = vmark ? skb->mark : 0,
                };
                if (!fib_lookup(net, &fl4, &res, 0))
                        return FIB_RES_PREFSRC(net, res);
index b3c899a..75151be 100644
@@ -1200,8 +1200,7 @@ static void igmpv3_del_delrec(struct in_device *in_dev, struct ip_mc_list *im)
        spin_lock_bh(&im->lock);
        if (pmc) {
                im->interface = pmc->interface;
-               im->sfmode = pmc->sfmode;
-               if (pmc->sfmode == MCAST_INCLUDE) {
+               if (im->sfmode == MCAST_INCLUDE) {
                        im->tomb = pmc->tomb;
                        im->sources = pmc->sources;
                        for (psf = im->sources; psf; psf = psf->sf_next)
@@ -1388,7 +1387,8 @@ static void ip_mc_hash_remove(struct in_device *in_dev,
 /*
  *     A socket has joined a multicast group on device dev.
  */
-void __ip_mc_inc_group(struct in_device *in_dev, __be32 addr, unsigned int mode)
+static void __ip_mc_inc_group(struct in_device *in_dev, __be32 addr,
+                             unsigned int mode)
 {
        struct ip_mc_list *im;
 #ifdef CONFIG_IP_MULTICAST
index 1e4cf3a..0d70608 100644
@@ -157,9 +157,6 @@ static struct inet_frag_queue *inet_frag_alloc(struct netns_frags *nf,
 {
        struct inet_frag_queue *q;
 
-       if (!nf->high_thresh || frag_mem_limit(nf) > nf->high_thresh)
-               return NULL;
-
        q = kmem_cache_zalloc(f->frags_cachep, GFP_ATOMIC);
        if (!q)
                return NULL;
@@ -204,6 +201,9 @@ struct inet_frag_queue *inet_frag_find(struct netns_frags *nf, void *key)
 {
        struct inet_frag_queue *fq;
 
+       if (!nf->high_thresh || frag_mem_limit(nf) > nf->high_thresh)
+               return NULL;
+
        rcu_read_lock();
 
        fq = rhashtable_lookup(&nf->rhashtable, key, nf->f->rhash_params);
index 8e9528e..d14d741 100644
@@ -383,11 +383,16 @@ found:
                int i = end - next->ip_defrag_offset; /* overlap is 'i' bytes */
 
                if (i < next->len) {
+                       int delta = -next->truesize;
+
                        /* Eat head of the next overlapped fragment
                         * and leave the loop. The next ones cannot overlap.
                         */
                        if (!pskb_pull(next, i))
                                goto err;
+                       delta += next->truesize;
+                       if (delta)
+                               add_frag_mem_limit(qp->q.net, delta);
                        next->ip_defrag_offset += i;
                        qp->q.meat -= i;
                        if (next->ip_summed != CHECKSUM_UNNECESSARY)
index b3308e9..0e3edd2 100644
@@ -523,6 +523,8 @@ static void ip_copy_metadata(struct sk_buff *to, struct sk_buff *from)
        to->dev = from->dev;
        to->mark = from->mark;
 
+       skb_copy_hash(to, from);
+
        /* Copy the flags to each fragment. */
        IPCB(to)->flags = IPCB(from)->flags;
 
index 64c76dc..c0fe5ad 100644
@@ -150,15 +150,18 @@ static void ip_cmsg_recv_dstaddr(struct msghdr *msg, struct sk_buff *skb)
 {
        struct sockaddr_in sin;
        const struct iphdr *iph = ip_hdr(skb);
-       __be16 *ports = (__be16 *)skb_transport_header(skb);
+       __be16 *ports;
+       int end;
 
-       if (skb_transport_offset(skb) + 4 > (int)skb->len)
+       end = skb_transport_offset(skb) + 4;
+       if (end > 0 && !pskb_may_pull(skb, end))
                return;
 
        /* All current transport protocols have the port numbers in the
         * first four bytes of the transport header and this function is
         * written with this assumption in mind.
         */
+       ports = (__be16 *)skb_transport_header(skb);
 
        sin.sin_family = AF_INET;
        sin.sin_addr.s_addr = iph->daddr;
index 58e2f47..4bfff3c 100644
@@ -354,6 +354,10 @@ static u32 bbr_target_cwnd(struct sock *sk, u32 bw, int gain)
        /* Reduce delayed ACKs by rounding up cwnd to the next even number. */
        cwnd = (cwnd + 1) & ~1U;
 
+       /* Ensure gain cycling gets inflight above BDP even for small BDPs. */
+       if (bbr->mode == BBR_PROBE_BW && gain > BBR_UNIT)
+               cwnd += 2;
+
        return cwnd;
 }
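
Two quick worked examples of the arithmetic in bbr_target_cwnd(): '(cwnd + 1) & ~1U' rounds up to the next even value, and the new PROBE_BW branch then adds two extra segments so gain cycling can actually push inflight above the BDP:

        #include <assert.h>
        #include <stdint.h>

        static uint32_t round_up_even(uint32_t cwnd)
        {
                return (cwnd + 1) & ~1u;
        }

        int main(void)
        {
                assert(round_up_even(7) == 8);  /* odd rounds up */
                assert(round_up_even(8) == 8);  /* even is kept */
                /* probing (gain > BBR_UNIT): 8 becomes 8 + 2 = 10 segments */
                return 0;
        }
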
 
index 5869f89..8b637f9 100644
@@ -129,24 +129,14 @@ static void dctcp_ce_state_0_to_1(struct sock *sk)
        struct dctcp *ca = inet_csk_ca(sk);
        struct tcp_sock *tp = tcp_sk(sk);
 
-       /* State has changed from CE=0 to CE=1 and delayed
-        * ACK has not sent yet.
-        */
-       if (!ca->ce_state &&
-           inet_csk(sk)->icsk_ack.pending & ICSK_ACK_TIMER) {
-               u32 tmp_rcv_nxt;
-
-               /* Save current rcv_nxt. */
-               tmp_rcv_nxt = tp->rcv_nxt;
-
-               /* Generate previous ack with CE=0. */
-               tp->ecn_flags &= ~TCP_ECN_DEMAND_CWR;
-               tp->rcv_nxt = ca->prior_rcv_nxt;
-
-               tcp_send_ack(sk);
-
-               /* Recover current rcv_nxt. */
-               tp->rcv_nxt = tmp_rcv_nxt;
+       if (!ca->ce_state) {
+               /* State has changed from CE=0 to CE=1, force an immediate
+                * ACK to reflect the new CE state. If an ACK was delayed,
+                * send that first to reflect the prior CE state.
+                */
+               if (inet_csk(sk)->icsk_ack.pending & ICSK_ACK_TIMER)
+                       __tcp_send_ack(sk, ca->prior_rcv_nxt);
+               tcp_enter_quickack_mode(sk, 1);
        }
 
        ca->prior_rcv_nxt = tp->rcv_nxt;
@@ -160,24 +150,14 @@ static void dctcp_ce_state_1_to_0(struct sock *sk)
        struct dctcp *ca = inet_csk_ca(sk);
        struct tcp_sock *tp = tcp_sk(sk);
 
-       /* State has changed from CE=1 to CE=0 and delayed
-        * ACK has not sent yet.
-        */
-       if (ca->ce_state &&
-           inet_csk(sk)->icsk_ack.pending & ICSK_ACK_TIMER) {
-               u32 tmp_rcv_nxt;
-
-               /* Save current rcv_nxt. */
-               tmp_rcv_nxt = tp->rcv_nxt;
-
-               /* Generate previous ack with CE=1. */
-               tp->ecn_flags |= TCP_ECN_DEMAND_CWR;
-               tp->rcv_nxt = ca->prior_rcv_nxt;
-
-               tcp_send_ack(sk);
-
-               /* Recover current rcv_nxt. */
-               tp->rcv_nxt = tmp_rcv_nxt;
+       if (ca->ce_state) {
+               /* State has changed from CE=1 to CE=0, force an immediate
+                * ACK to reflect the new CE state. If an ACK was delayed,
+                * send that first to reflect the prior CE state.
+                */
+               if (inet_csk(sk)->icsk_ack.pending & ICSK_ACK_TIMER)
+                       __tcp_send_ack(sk, ca->prior_rcv_nxt);
+               tcp_enter_quickack_mode(sk, 1);
        }
 
        ca->prior_rcv_nxt = tp->rcv_nxt;
index 8e5522c..f9dcb29 100644
@@ -215,7 +215,7 @@ static void tcp_incr_quickack(struct sock *sk, unsigned int max_quickacks)
                icsk->icsk_ack.quick = quickacks;
 }
 
-static void tcp_enter_quickack_mode(struct sock *sk, unsigned int max_quickacks)
+void tcp_enter_quickack_mode(struct sock *sk, unsigned int max_quickacks)
 {
        struct inet_connection_sock *icsk = inet_csk(sk);
 
@@ -223,6 +223,7 @@ static void tcp_enter_quickack_mode(struct sock *sk, unsigned int max_quickacks)
        icsk->icsk_ack.pingpong = 0;
        icsk->icsk_ack.ato = TCP_ATO_MIN;
 }
+EXPORT_SYMBOL(tcp_enter_quickack_mode);
 
 /* Send ACKs quickly, if "quick" count is not exhausted
  * and the session is not interactive.
@@ -245,8 +246,15 @@ static void tcp_ecn_queue_cwr(struct tcp_sock *tp)
 
 static void tcp_ecn_accept_cwr(struct tcp_sock *tp, const struct sk_buff *skb)
 {
-       if (tcp_hdr(skb)->cwr)
+       if (tcp_hdr(skb)->cwr) {
                tp->ecn_flags &= ~TCP_ECN_DEMAND_CWR;
+
+               /* If the sender is telling us it has entered CWR, then its
+                * cwnd may be very low (even just 1 packet), so we should ACK
+                * immediately.
+                */
+               tcp_enter_quickack_mode((struct sock *)tp, 2);
+       }
 }
 
 static void tcp_ecn_withdraw_cwr(struct tcp_sock *tp)
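
The '(struct sock *)tp' cast in the hunk above is valid because struct sock is the first member of the nested socket structures, so both pointers hold the same address. A minimal model of that layout idiom (type names hypothetical):

        struct sock_model     { int refcnt; };
        struct tcp_sock_model { struct sock_model sk; int snd_cwnd; };

        /* A pointer to a struct, suitably converted, points to its first
         * member, so this conversion is well-defined C.
         */
        static struct sock_model *to_sock(struct tcp_sock_model *tp)
        {
                return (struct sock_model *)tp;
        }
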
@@ -4357,6 +4365,23 @@ static bool tcp_try_coalesce(struct sock *sk,
        return true;
 }
 
+static bool tcp_ooo_try_coalesce(struct sock *sk,
+                            struct sk_buff *to,
+                            struct sk_buff *from,
+                            bool *fragstolen)
+{
+       bool res = tcp_try_coalesce(sk, to, from, fragstolen);
+
+       /* In case tcp_drop() is called later, update to->gso_segs */
+       if (res) {
+               u32 gso_segs = max_t(u16, 1, skb_shinfo(to)->gso_segs) +
+                              max_t(u16, 1, skb_shinfo(from)->gso_segs);
+
+               skb_shinfo(to)->gso_segs = min_t(u32, gso_segs, 0xFFFF);
+       }
+       return res;
+}
+
 static void tcp_drop(struct sock *sk, struct sk_buff *skb)
 {
        sk_drops_add(sk, skb);
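
The gso_segs bookkeeping in tcp_ooo_try_coalesce() saturates into the 16-bit gso_segs field so that later tcp_drop() accounting counts segments rather than skbs. A standalone check of that clamping (function name illustrative):

        #include <assert.h>
        #include <stdint.h>

        /* Treat 0 as 1 (a linear skb carries one segment) and clamp the
         * sum to fit u16, mirroring the max_t/min_t pair in the hunk.
         */
        static uint16_t merge_gso_segs(uint16_t to, uint16_t from)
        {
                uint32_t sum = (to ? to : 1u) + (from ? from : 1u);

                return sum > 0xFFFF ? 0xFFFF : (uint16_t)sum;
        }

        int main(void)
        {
                assert(merge_gso_segs(0, 0) == 2);
                assert(merge_gso_segs(0xFFFF, 3) == 0xFFFF);
                return 0;
        }
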
@@ -4480,8 +4505,8 @@ static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb)
        /* In the typical case, we are adding an skb to the end of the list.
         * Use of ooo_last_skb avoids the O(Log(N)) rbtree lookup.
         */
-       if (tcp_try_coalesce(sk, tp->ooo_last_skb,
-                            skb, &fragstolen)) {
+       if (tcp_ooo_try_coalesce(sk, tp->ooo_last_skb,
+                                skb, &fragstolen)) {
 coalesce_done:
                tcp_grow_window(sk, skb);
                kfree_skb_partial(skb, fragstolen);
@@ -4509,7 +4534,7 @@ coalesce_done:
                                /* All the bits are present. Drop. */
                                NET_INC_STATS(sock_net(sk),
                                              LINUX_MIB_TCPOFOMERGE);
-                               __kfree_skb(skb);
+                               tcp_drop(sk, skb);
                                skb = NULL;
                                tcp_dsack_set(sk, seq, end_seq);
                                goto add_sack;
@@ -4528,11 +4553,11 @@ coalesce_done:
                                                 TCP_SKB_CB(skb1)->end_seq);
                                NET_INC_STATS(sock_net(sk),
                                              LINUX_MIB_TCPOFOMERGE);
-                               __kfree_skb(skb1);
+                               tcp_drop(sk, skb1);
                                goto merge_right;
                        }
-               } else if (tcp_try_coalesce(sk, skb1,
-                                           skb, &fragstolen)) {
+               } else if (tcp_ooo_try_coalesce(sk, skb1,
+                                               skb, &fragstolen)) {
                        goto coalesce_done;
                }
                p = &parent->rb_right;
@@ -4901,6 +4926,7 @@ end:
 static void tcp_collapse_ofo_queue(struct sock *sk)
 {
        struct tcp_sock *tp = tcp_sk(sk);
+       u32 range_truesize, sum_tiny = 0;
        struct sk_buff *skb, *head;
        u32 start, end;
 
@@ -4912,6 +4938,7 @@ new_range:
        }
        start = TCP_SKB_CB(skb)->seq;
        end = TCP_SKB_CB(skb)->end_seq;
+       range_truesize = skb->truesize;
 
        for (head = skb;;) {
                skb = skb_rb_next(skb);
@@ -4922,11 +4949,20 @@ new_range:
                if (!skb ||
                    after(TCP_SKB_CB(skb)->seq, end) ||
                    before(TCP_SKB_CB(skb)->end_seq, start)) {
-                       tcp_collapse(sk, NULL, &tp->out_of_order_queue,
-                                    head, skb, start, end);
+                       /* Do not attempt collapsing tiny skbs */
+                       if (range_truesize != head->truesize ||
+                           end - start >= SKB_WITH_OVERHEAD(SK_MEM_QUANTUM)) {
+                               tcp_collapse(sk, NULL, &tp->out_of_order_queue,
+                                            head, skb, start, end);
+                       } else {
+                               sum_tiny += range_truesize;
+                               if (sum_tiny > sk->sk_rcvbuf >> 3)
+                                       return;
+                       }
                        goto new_range;
                }
 
+               range_truesize += skb->truesize;
                if (unlikely(before(TCP_SKB_CB(skb)->seq, start)))
                        start = TCP_SKB_CB(skb)->seq;
                if (after(TCP_SKB_CB(skb)->end_seq, end))
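
The new range_truesize/sum_tiny logic skips ranges where collapsing cannot reclaim memory, and bails out once too much of the receive buffer is tiny garbage. The "worth collapsing?" predicate in isolation (SKB_WITH_OVERHEAD(SK_MEM_QUANTUM) abstracted into a quantum parameter):

        #include <stdbool.h>
        #include <stdint.h>

        /* Collapse only if the range spans more than the head skb, or
         * covers at least one memory quantum of sequence space; copying
         * a lone tiny skb around would gain nothing.
         */
        static bool worth_collapsing(uint32_t range_truesize,
                                     uint32_t head_truesize,
                                     uint32_t start, uint32_t end,
                                     uint32_t quantum)
        {
                return range_truesize != head_truesize ||
                       end - start >= quantum;
        }
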
@@ -4941,6 +4977,7 @@ new_range:
  * 2) not add too big latencies if thousands of packets sit there.
  *    (But if application shrinks SO_RCVBUF, we could still end up
  *     freeing whole queue here)
+ * 3) Drop at least 12.5% of sk_rcvbuf to defend against malicious attacks.
  *
  * Return true if queue has shrunk.
  */
@@ -4948,20 +4985,26 @@ static bool tcp_prune_ofo_queue(struct sock *sk)
 {
        struct tcp_sock *tp = tcp_sk(sk);
        struct rb_node *node, *prev;
+       int goal;
 
        if (RB_EMPTY_ROOT(&tp->out_of_order_queue))
                return false;
 
        NET_INC_STATS(sock_net(sk), LINUX_MIB_OFOPRUNED);
+       goal = sk->sk_rcvbuf >> 3;
        node = &tp->ooo_last_skb->rbnode;
        do {
                prev = rb_prev(node);
                rb_erase(node, &tp->out_of_order_queue);
+               goal -= rb_to_skb(node)->truesize;
                tcp_drop(sk, rb_to_skb(node));
-               sk_mem_reclaim(sk);
-               if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf &&
-                   !tcp_under_memory_pressure(sk))
-                       break;
+               if (!prev || goal <= 0) {
+                       sk_mem_reclaim(sk);
+                       if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf &&
+                           !tcp_under_memory_pressure(sk))
+                               break;
+                       goal = sk->sk_rcvbuf >> 3;
+               }
                node = prev;
        } while (node);
        tp->ooo_last_skb = rb_to_skb(prev);
@@ -4996,6 +5039,9 @@ static int tcp_prune_queue(struct sock *sk)
        else if (tcp_under_memory_pressure(sk))
                tp->rcv_ssthresh = min(tp->rcv_ssthresh, 4U * tp->advmss);
 
+       if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf)
+               return 0;
+
        tcp_collapse_ofo_queue(sk);
        if (!skb_queue_empty(&sk->sk_receive_queue))
                tcp_collapse(sk, &sk->sk_receive_queue, NULL,
index 00e5a30..c4172c1 100644
@@ -160,7 +160,8 @@ static void tcp_event_data_sent(struct tcp_sock *tp,
 }
 
 /* Account for an ACK we sent. */
-static inline void tcp_event_ack_sent(struct sock *sk, unsigned int pkts)
+static inline void tcp_event_ack_sent(struct sock *sk, unsigned int pkts,
+                                     u32 rcv_nxt)
 {
        struct tcp_sock *tp = tcp_sk(sk);
 
@@ -171,6 +172,9 @@ static inline void tcp_event_ack_sent(struct sock *sk, unsigned int pkts)
                if (hrtimer_try_to_cancel(&tp->compressed_ack_timer) == 1)
                        __sock_put(sk);
        }
+
+       if (unlikely(rcv_nxt != tp->rcv_nxt))
+               return;  /* Special ACK sent by DCTCP to reflect ECN */
        tcp_dec_quickack_mode(sk, pkts);
        inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK);
 }
@@ -1023,8 +1027,8 @@ static void tcp_update_skb_after_send(struct tcp_sock *tp, struct sk_buff *skb)
  * We are working here with either a clone of the original
  * SKB, or a fresh unique copy made by the retransmit engine.
  */
-static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
-                           gfp_t gfp_mask)
+static int __tcp_transmit_skb(struct sock *sk, struct sk_buff *skb,
+                             int clone_it, gfp_t gfp_mask, u32 rcv_nxt)
 {
        const struct inet_connection_sock *icsk = inet_csk(sk);
        struct inet_sock *inet;
@@ -1100,7 +1104,7 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
        th->source              = inet->inet_sport;
        th->dest                = inet->inet_dport;
        th->seq                 = htonl(tcb->seq);
-       th->ack_seq             = htonl(tp->rcv_nxt);
+       th->ack_seq             = htonl(rcv_nxt);
        *(((__be16 *)th) + 6)   = htons(((tcp_header_size >> 2) << 12) |
                                        tcb->tcp_flags);
 
@@ -1141,7 +1145,7 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
        icsk->icsk_af_ops->send_check(sk, skb);
 
        if (likely(tcb->tcp_flags & TCPHDR_ACK))
-               tcp_event_ack_sent(sk, tcp_skb_pcount(skb));
+               tcp_event_ack_sent(sk, tcp_skb_pcount(skb), rcv_nxt);
 
        if (skb->len != tcp_header_size) {
                tcp_event_data_sent(tp, sk);
@@ -1178,6 +1182,13 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
        return err;
 }
 
+static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
+                           gfp_t gfp_mask)
+{
+       return __tcp_transmit_skb(sk, skb, clone_it, gfp_mask,
+                                 tcp_sk(sk)->rcv_nxt);
+}
+
 /* This routine just queues the buffer for sending.
  *
  * NOTE: probe0 timer is not checked, do not forget tcp_push_pending_frames,
@@ -3571,7 +3582,7 @@ void tcp_send_delayed_ack(struct sock *sk)
 }
 
 /* This routine sends an ack and also updates the window. */
-void tcp_send_ack(struct sock *sk)
+void __tcp_send_ack(struct sock *sk, u32 rcv_nxt)
 {
        struct sk_buff *buff;
 
@@ -3604,9 +3615,14 @@ void tcp_send_ack(struct sock *sk)
        skb_set_tcp_pure_ack(buff);
 
        /* Send it off, this clears delayed acks for us. */
-       tcp_transmit_skb(sk, buff, 0, (__force gfp_t)0);
+       __tcp_transmit_skb(sk, buff, 0, (__force gfp_t)0, rcv_nxt);
+}
+EXPORT_SYMBOL_GPL(__tcp_send_ack);
+
+void tcp_send_ack(struct sock *sk)
+{
+       __tcp_send_ack(sk, tcp_sk(sk)->rcv_nxt);
 }
-EXPORT_SYMBOL_GPL(tcp_send_ack);
 
 /* This routine sends a packet with an out of date sequence
  * number. It assumes the other end will try to ack it.
index 91580c6..f66a1ca 100644
@@ -2374,7 +2374,8 @@ static struct fib6_info *addrconf_get_prefix_route(const struct in6_addr *pfx,
                        continue;
                if ((rt->fib6_flags & noflags) != 0)
                        continue;
-               fib6_info_hold(rt);
+               if (!fib6_info_hold_safe(rt))
+                       continue;
                break;
        }
 out:
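
fib6_info_hold_safe(), used here and throughout the net/ipv6/route.c hunks below, only takes a reference when the count has not already dropped to zero, so an entry racing with release is skipped rather than resurrected. A plausible shape, by analogy with other *_hold_safe() helpers (the real definition is not in this excerpt, and the fib6_ref field name is an assumption):

        static inline bool fib6_info_hold_safe(struct fib6_info *f6i)
        {
                return atomic_inc_not_zero(&f6i->fib6_ref);
        }
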
index 2ee08b6..1a1f876 100644
@@ -700,13 +700,16 @@ void ip6_datagram_recv_specific_ctl(struct sock *sk, struct msghdr *msg,
        }
        if (np->rxopt.bits.rxorigdstaddr) {
                struct sockaddr_in6 sin6;
-               __be16 *ports = (__be16 *) skb_transport_header(skb);
+               __be16 *ports;
+               int end;
 
-               if (skb_transport_offset(skb) + 4 <= (int)skb->len) {
+               end = skb_transport_offset(skb) + 4;
+               if (end <= 0 || pskb_may_pull(skb, end)) {
                        /* All current transport protocols have the port numbers in the
                         * first four bytes of the transport header and this function is
                         * written with this assumption in mind.
                         */
+                       ports = (__be16 *)skb_transport_header(skb);
 
                        sin6.sin6_family = AF_INET6;
                        sin6.sin6_addr = ipv6_hdr(skb)->daddr;
index 97513f3..88a7579 100644
@@ -669,8 +669,10 @@ skip_cow:
 
        sg_init_table(sg, nfrags);
        ret = skb_to_sgvec(skb, sg, 0, skb->len);
-       if (unlikely(ret < 0))
+       if (unlikely(ret < 0)) {
+               kfree(tmp);
                goto out;
+       }
 
        skb->ip_summed = CHECKSUM_NONE;
 
index be491bf..ef2505a 100644
@@ -402,9 +402,10 @@ static int icmp6_iif(const struct sk_buff *skb)
 
        /* for local traffic to local address, skb dev is the loopback
         * device. Check if there is a dst attached to the skb and if so
-        * get the real device index.
+        * get the real device index. Same is needed for replies to a link
+        * local address on a device enslaved to an L3 master device
         */
-       if (unlikely(iif == LOOPBACK_IFINDEX)) {
+       if (unlikely(iif == LOOPBACK_IFINDEX || netif_is_l3_master(skb->dev))) {
                const struct rt6_info *rt6 = skb_rt6_info(skb);
 
                if (rt6)
index a14fb4f..3168847 100644
@@ -570,6 +570,8 @@ static void ip6_copy_metadata(struct sk_buff *to, struct sk_buff *from)
        to->dev = from->dev;
        to->mark = from->mark;
 
+       skb_copy_hash(to, from);
+
 #ifdef CONFIG_NET_SCHED
        to->tc_index = from->tc_index;
 #endif
index 00e138a..1cc9650 100644
@@ -1133,12 +1133,8 @@ route_lookup:
                max_headroom += 8;
                mtu -= 8;
        }
-       if (skb->protocol == htons(ETH_P_IPV6)) {
-               if (mtu < IPV6_MIN_MTU)
-                       mtu = IPV6_MIN_MTU;
-       } else if (mtu < 576) {
-               mtu = 576;
-       }
+       mtu = max(mtu, skb->protocol == htons(ETH_P_IPV6) ?
+                      IPV6_MIN_MTU : IPV4_MIN_MTU);
 
        skb_dst_update_pmtu(skb, mtu);
        if (skb->len - t->tun_hlen - eth_hlen > mtu && !skb_is_gso(skb)) {
index b7f28de..c72ae3a 100644
@@ -480,10 +480,6 @@ vti6_xmit(struct sk_buff *skb, struct net_device *dev, struct flowi *fl)
                goto tx_err_dst_release;
        }
 
-       skb_scrub_packet(skb, !net_eq(t->net, dev_net(dev)));
-       skb_dst_set(skb, dst);
-       skb->dev = skb_dst(skb)->dev;
-
        mtu = dst_mtu(dst);
        if (!skb->ignore_df && skb->len > mtu) {
                skb_dst_update_pmtu(skb, mtu);
@@ -498,9 +494,14 @@ vti6_xmit(struct sk_buff *skb, struct net_device *dev, struct flowi *fl)
                                  htonl(mtu));
                }
 
-               return -EMSGSIZE;
+               err = -EMSGSIZE;
+               goto tx_err_dst_release;
        }
 
+       skb_scrub_packet(skb, !net_eq(t->net, dev_net(dev)));
+       skb_dst_set(skb, dst);
+       skb->dev = skb_dst(skb)->dev;
+
        err = dst_output(t->net, skb->sk, skb);
        if (net_xmit_eval(err) == 0) {
                struct pcpu_sw_netstats *tstats = this_cpu_ptr(dev->tstats);
index 2699be7..f60f310 100644
@@ -790,8 +790,7 @@ static void mld_del_delrec(struct inet6_dev *idev, struct ifmcaddr6 *im)
        spin_lock_bh(&im->mca_lock);
        if (pmc) {
                im->idev = pmc->idev;
-               im->mca_sfmode = pmc->mca_sfmode;
-               if (pmc->mca_sfmode == MCAST_INCLUDE) {
+               if (im->mca_sfmode == MCAST_INCLUDE) {
                        im->mca_tomb = pmc->mca_tomb;
                        im->mca_sources = pmc->mca_sources;
                        for (psf = im->mca_sources; psf; psf = psf->sf_next)
index 2ce0bd1..7208c16 100644
@@ -972,18 +972,15 @@ static void ip6_rt_init_dst(struct rt6_info *rt, struct fib6_info *ort)
        rt->dst.lastuse = jiffies;
 }
 
+/* Caller must already hold a reference to @from */
 static void rt6_set_from(struct rt6_info *rt, struct fib6_info *from)
 {
        rt->rt6i_flags &= ~RTF_EXPIRES;
-       fib6_info_hold(from);
        rcu_assign_pointer(rt->from, from);
        dst_init_metrics(&rt->dst, from->fib6_metrics->metrics, true);
-       if (from->fib6_metrics != &dst_default_metrics) {
-               rt->dst._metrics |= DST_METRICS_REFCOUNTED;
-               refcount_inc(&from->fib6_metrics->refcnt);
-       }
 }
 
+/* Caller must already hold a reference to @ort */
 static void ip6_rt_copy_init(struct rt6_info *rt, struct fib6_info *ort)
 {
        struct net_device *dev = fib6_info_nh_dev(ort);
@@ -1044,9 +1041,14 @@ static struct rt6_info *ip6_create_rt_rcu(struct fib6_info *rt)
        struct net_device *dev = rt->fib6_nh.nh_dev;
        struct rt6_info *nrt;
 
+       if (!fib6_info_hold_safe(rt))
+               return NULL;
+
        nrt = ip6_dst_alloc(dev_net(dev), dev, flags);
        if (nrt)
                ip6_rt_copy_init(nrt, rt);
+       else
+               fib6_info_release(rt);
 
        return nrt;
 }
@@ -1178,10 +1180,15 @@ static struct rt6_info *ip6_rt_cache_alloc(struct fib6_info *ort,
         *      Clone the route.
         */
 
+       if (!fib6_info_hold_safe(ort))
+               return NULL;
+
        dev = ip6_rt_get_dev_rcu(ort);
        rt = ip6_dst_alloc(dev_net(dev), dev, 0);
-       if (!rt)
+       if (!rt) {
+               fib6_info_release(ort);
                return NULL;
+       }
 
        ip6_rt_copy_init(rt, ort);
        rt->rt6i_flags |= RTF_CACHE;
@@ -1210,12 +1217,17 @@ static struct rt6_info *ip6_rt_pcpu_alloc(struct fib6_info *rt)
        struct net_device *dev;
        struct rt6_info *pcpu_rt;
 
+       if (!fib6_info_hold_safe(rt))
+               return NULL;
+
        rcu_read_lock();
        dev = ip6_rt_get_dev_rcu(rt);
        pcpu_rt = ip6_dst_alloc(dev_net(dev), dev, flags);
        rcu_read_unlock();
-       if (!pcpu_rt)
+       if (!pcpu_rt) {
+               fib6_info_release(rt);
                return NULL;
+       }
        ip6_rt_copy_init(pcpu_rt, rt);
        pcpu_rt->rt6i_flags |= RTF_PCPU;
        return pcpu_rt;
@@ -2486,7 +2498,7 @@ restart:
 
 out:
        if (ret)
-               dst_hold(&ret->dst);
+               ip6_hold_safe(net, &ret, true);
        else
                ret = ip6_create_rt_rcu(rt);
 
@@ -3303,7 +3315,8 @@ static int ip6_route_del(struct fib6_config *cfg,
                                continue;
                        if (cfg->fc_protocol && cfg->fc_protocol != rt->fib6_protocol)
                                continue;
-                       fib6_info_hold(rt);
+                       if (!fib6_info_hold_safe(rt))
+                               continue;
                        rcu_read_unlock();
 
                        /* if gateway was specified only delete the one hop */
@@ -3409,6 +3422,9 @@ static void rt6_do_redirect(struct dst_entry *dst, struct sock *sk, struct sk_bu
 
        rcu_read_lock();
        from = rcu_dereference(rt->from);
+       /* This fib6_info_hold() is safe here because we hold a reference
+        * to rt, and rt already holds a reference to its fib6_info.
+        */
        fib6_info_hold(from);
        rcu_read_unlock();
 
@@ -3470,7 +3486,8 @@ static struct fib6_info *rt6_get_route_info(struct net *net,
                        continue;
                if (!ipv6_addr_equal(&rt->fib6_nh.nh_gw, gwaddr))
                        continue;
-               fib6_info_hold(rt);
+               if (!fib6_info_hold_safe(rt))
+                       continue;
                break;
        }
 out:
@@ -3530,8 +3547,8 @@ struct fib6_info *rt6_get_dflt_router(struct net *net,
                    ipv6_addr_equal(&rt->fib6_nh.nh_gw, addr))
                        break;
        }
-       if (rt)
-               fib6_info_hold(rt);
+       if (rt && !fib6_info_hold_safe(rt))
+               rt = NULL;
        rcu_read_unlock();
        return rt;
 }
@@ -3579,8 +3596,8 @@ restart:
                struct inet6_dev *idev = dev ? __in6_dev_get(dev) : NULL;
 
                if (rt->fib6_flags & (RTF_DEFAULT | RTF_ADDRCONF) &&
-                   (!idev || idev->cnf.accept_ra != 2)) {
-                       fib6_info_hold(rt);
+                   (!idev || idev->cnf.accept_ra != 2) &&
+                   fib6_info_hold_safe(rt)) {
                        rcu_read_unlock();
                        ip6_del_rt(net, rt);
                        goto restart;
index 7efa9fd..03e6b7a 100644
@@ -938,7 +938,8 @@ static void tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb)
                                           &tcp_hashinfo, NULL, 0,
                                           &ipv6h->saddr,
                                           th->source, &ipv6h->daddr,
-                                          ntohs(th->source), tcp_v6_iif(skb),
+                                          ntohs(th->source),
+                                          tcp_v6_iif_l3_slave(skb),
                                           tcp_v6_sdif(skb));
                if (!sk1)
                        goto out;
@@ -1609,7 +1610,8 @@ do_time_wait:
                                            skb, __tcp_hdrlen(th),
                                            &ipv6_hdr(skb)->saddr, th->source,
                                            &ipv6_hdr(skb)->daddr,
-                                           ntohs(th->dest), tcp_v6_iif(skb),
+                                           ntohs(th->dest),
+                                           tcp_v6_iif_l3_slave(skb),
                                            sdif);
                if (sk2) {
                        struct inet_timewait_sock *tw = inet_twsk(sk);
index e398797..cf6cca2 100644
@@ -1201,13 +1201,18 @@ static int pppol2tp_tunnel_ioctl(struct l2tp_tunnel *tunnel,
                                l2tp_session_get(sock_net(sk), tunnel,
                                                 stats.session_id);
 
-                       if (session && session->pwtype == L2TP_PWTYPE_PPP) {
-                               err = pppol2tp_session_ioctl(session, cmd,
-                                                            arg);
+                       if (!session) {
+                               err = -EBADR;
+                               break;
+                       }
+                       if (session->pwtype != L2TP_PWTYPE_PPP) {
                                l2tp_session_dec_refcount(session);
-                       } else {
                                err = -EBADR;
+                               break;
                        }
+
+                       err = pppol2tp_session_ioctl(session, cmd, arg);
+                       l2tp_session_dec_refcount(session);
                        break;
                }
 #ifdef CONFIG_XFRM
index 8904126..260b3dc 100644
@@ -73,8 +73,8 @@ struct llc_sap *llc_sap_find(unsigned char sap_value)
 
        rcu_read_lock_bh();
        sap = __llc_sap_find(sap_value);
-       if (sap)
-               llc_sap_hold(sap);
+       if (!sap || !llc_sap_hold_safe(sap))
+               sap = NULL;
        rcu_read_unlock_bh();
        return sap;
 }
index 0a38cc1..932985c 100644
@@ -2254,11 +2254,8 @@ static void ieee80211_deliver_skb_to_local_stack(struct sk_buff *skb,
                     sdata->control_port_over_nl80211)) {
                struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
                bool noencrypt = status->flag & RX_FLAG_DECRYPTED;
-               struct ethhdr *ehdr = eth_hdr(skb);
 
-               cfg80211_rx_control_port(dev, skb->data, skb->len,
-                                        ehdr->h_source,
-                                        be16_to_cpu(skb->protocol), noencrypt);
+               cfg80211_rx_control_port(dev, skb, noencrypt);
                dev_kfree_skb(skb);
        } else {
                /* deliver to local stack */
index 5e2e511..d02fbfe 100644
@@ -2111,7 +2111,8 @@ int ieee80211_reconfig(struct ieee80211_local *local)
                if (!sta->uploaded)
                        continue;
 
-               if (sta->sdata->vif.type != NL80211_IFTYPE_AP)
+               if (sta->sdata->vif.type != NL80211_IFTYPE_AP &&
+                   sta->sdata->vif.type != NL80211_IFTYPE_AP_VLAN)
                        continue;
 
                for (state = IEEE80211_STA_NOTEXIST;
index abe647d..9ce6336 100644
@@ -243,14 +243,14 @@ dccp_state_table[CT_DCCP_ROLE_MAX + 1][DCCP_PKT_SYNCACK + 1][CT_DCCP_MAX + 1] =
                 * We currently ignore Sync packets
                 *
                 *      sNO, sRQ, sRS, sPO, sOP, sCR, sCG, sTW */
-                       sIG, sIG, sIG, sIG, sIG, sIG, sIG, sIG,
+                       sIV, sIG, sIG, sIG, sIG, sIG, sIG, sIG,
                },
                [DCCP_PKT_SYNCACK] = {
                /*
                 * We currently ignore SyncAck packets
                 *
                 *      sNO, sRQ, sRS, sPO, sOP, sCR, sCG, sTW */
-                       sIG, sIG, sIG, sIG, sIG, sIG, sIG, sIG,
+                       sIV, sIG, sIG, sIG, sIG, sIG, sIG, sIG,
                },
        },
        [CT_DCCP_ROLE_SERVER] = {
@@ -371,14 +371,14 @@ dccp_state_table[CT_DCCP_ROLE_MAX + 1][DCCP_PKT_SYNCACK + 1][CT_DCCP_MAX + 1] =
                 * We currently ignore Sync packets
                 *
                 *      sNO, sRQ, sRS, sPO, sOP, sCR, sCG, sTW */
-                       sIG, sIG, sIG, sIG, sIG, sIG, sIG, sIG,
+                       sIV, sIG, sIG, sIG, sIG, sIG, sIG, sIG,
                },
                [DCCP_PKT_SYNCACK] = {
                /*
                 * We currently ignore SyncAck packets
                 *
                 *      sNO, sRQ, sRS, sPO, sOP, sCR, sCG, sTW */
-                       sIG, sIG, sIG, sIG, sIG, sIG, sIG, sIG,
+                       sIV, sIG, sIG, sIG, sIG, sIG, sIG, sIG,
                },
        },
 };
index 896d4a3..f5745e4 100644
@@ -75,6 +75,7 @@ static void nft_ctx_init(struct nft_ctx *ctx,
 {
        ctx->net        = net;
        ctx->family     = family;
+       ctx->level      = 0;
        ctx->table      = table;
        ctx->chain      = chain;
        ctx->nla        = nla;
@@ -1597,7 +1598,6 @@ static int nf_tables_updchain(struct nft_ctx *ctx, u8 genmask, u8 policy,
        struct nft_base_chain *basechain;
        struct nft_stats *stats = NULL;
        struct nft_chain_hook hook;
-       const struct nlattr *name;
        struct nf_hook_ops *ops;
        struct nft_trans *trans;
        int err;
@@ -1645,12 +1645,11 @@ static int nf_tables_updchain(struct nft_ctx *ctx, u8 genmask, u8 policy,
                        return PTR_ERR(stats);
        }
 
+       err = -ENOMEM;
        trans = nft_trans_alloc(ctx, NFT_MSG_NEWCHAIN,
                                sizeof(struct nft_trans_chain));
-       if (trans == NULL) {
-               free_percpu(stats);
-               return -ENOMEM;
-       }
+       if (trans == NULL)
+               goto err;
 
        nft_trans_chain_stats(trans) = stats;
        nft_trans_chain_update(trans) = true;
@@ -1660,19 +1659,37 @@ static int nf_tables_updchain(struct nft_ctx *ctx, u8 genmask, u8 policy,
        else
                nft_trans_chain_policy(trans) = -1;
 
-       name = nla[NFTA_CHAIN_NAME];
-       if (nla[NFTA_CHAIN_HANDLE] && name) {
-               nft_trans_chain_name(trans) =
-                       nla_strdup(name, GFP_KERNEL);
-               if (!nft_trans_chain_name(trans)) {
-                       kfree(trans);
-                       free_percpu(stats);
-                       return -ENOMEM;
+       if (nla[NFTA_CHAIN_HANDLE] &&
+           nla[NFTA_CHAIN_NAME]) {
+               struct nft_trans *tmp;
+               char *name;
+
+               err = -ENOMEM;
+               name = nla_strdup(nla[NFTA_CHAIN_NAME], GFP_KERNEL);
+               if (!name)
+                       goto err;
+
+               err = -EEXIST;
+               list_for_each_entry(tmp, &ctx->net->nft.commit_list, list) {
+                       if (tmp->msg_type == NFT_MSG_NEWCHAIN &&
+                           tmp->ctx.table == table &&
+                           nft_trans_chain_update(tmp) &&
+                           nft_trans_chain_name(tmp) &&
+                           strcmp(name, nft_trans_chain_name(tmp)) == 0) {
+                               kfree(name);
+                               goto err;
+                       }
                }
+
+               nft_trans_chain_name(trans) = name;
        }
        list_add_tail(&trans->list, &ctx->net->nft.commit_list);
 
        return 0;
+err:
+       free_percpu(stats);
+       kfree(trans);
+       return err;
 }
 
 static int nf_tables_newchain(struct net *net, struct sock *nlsk,
@@ -2254,6 +2271,39 @@ done:
        return skb->len;
 }
 
+static int nf_tables_dump_rules_start(struct netlink_callback *cb)
+{
+       const struct nlattr * const *nla = cb->data;
+       struct nft_rule_dump_ctx *ctx = NULL;
+
+       if (nla[NFTA_RULE_TABLE] || nla[NFTA_RULE_CHAIN]) {
+               ctx = kzalloc(sizeof(*ctx), GFP_ATOMIC);
+               if (!ctx)
+                       return -ENOMEM;
+
+               if (nla[NFTA_RULE_TABLE]) {
+                       ctx->table = nla_strdup(nla[NFTA_RULE_TABLE],
+                                                       GFP_ATOMIC);
+                       if (!ctx->table) {
+                               kfree(ctx);
+                               return -ENOMEM;
+                       }
+               }
+               if (nla[NFTA_RULE_CHAIN]) {
+                       ctx->chain = nla_strdup(nla[NFTA_RULE_CHAIN],
+                                               GFP_ATOMIC);
+                       if (!ctx->chain) {
+                               kfree(ctx->table);
+                               kfree(ctx);
+                               return -ENOMEM;
+                       }
+               }
+       }
+
+       cb->data = ctx;
+       return 0;
+}
+
 static int nf_tables_dump_rules_done(struct netlink_callback *cb)
 {
        struct nft_rule_dump_ctx *ctx = cb->data;
@@ -2283,38 +2333,13 @@ static int nf_tables_getrule(struct net *net, struct sock *nlsk,
 
        if (nlh->nlmsg_flags & NLM_F_DUMP) {
                struct netlink_dump_control c = {
+                       .start= nf_tables_dump_rules_start,
                        .dump = nf_tables_dump_rules,
                        .done = nf_tables_dump_rules_done,
                        .module = THIS_MODULE,
+                       .data = (void *)nla,
                };
 
-               if (nla[NFTA_RULE_TABLE] || nla[NFTA_RULE_CHAIN]) {
-                       struct nft_rule_dump_ctx *ctx;
-
-                       ctx = kzalloc(sizeof(*ctx), GFP_ATOMIC);
-                       if (!ctx)
-                               return -ENOMEM;
-
-                       if (nla[NFTA_RULE_TABLE]) {
-                               ctx->table = nla_strdup(nla[NFTA_RULE_TABLE],
-                                                       GFP_ATOMIC);
-                               if (!ctx->table) {
-                                       kfree(ctx);
-                                       return -ENOMEM;
-                               }
-                       }
-                       if (nla[NFTA_RULE_CHAIN]) {
-                               ctx->chain = nla_strdup(nla[NFTA_RULE_CHAIN],
-                                                       GFP_ATOMIC);
-                               if (!ctx->chain) {
-                                       kfree(ctx->table);
-                                       kfree(ctx);
-                                       return -ENOMEM;
-                               }
-                       }
-                       c.data = ctx;
-               }
-
                return nft_netlink_dump_start_rcu(nlsk, skb, nlh, &c);
        }
 
@@ -2384,6 +2409,9 @@ int nft_chain_validate(const struct nft_ctx *ctx, const struct nft_chain *chain)
        struct nft_rule *rule;
        int err;
 
+       if (ctx->level == NFT_JUMP_STACK_SIZE)
+               return -EMLINK;
+
        list_for_each_entry(rule, &chain->rules, list) {
                if (!nft_is_active_next(ctx->net, rule))
                        continue;
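
The ctx->level check bounds how deep chain-to-chain jumps may nest during validation. A minimal standalone model of the idea (limit and names illustrative; the kernel uses NFT_JUMP_STACK_SIZE and returns -EMLINK):

        #define JUMP_STACK_SIZE 16      /* illustrative limit */

        struct chain {
                const struct chain *jump_target;        /* no jump if unset */
        };

        static int validate_chain(const struct chain *c, int level)
        {
                if (level == JUMP_STACK_SIZE)
                        return -1;      /* too deep: reject the ruleset */
                if (c->jump_target)
                        return validate_chain(c->jump_target, level + 1);
                return 0;
        }
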
@@ -3161,6 +3189,18 @@ done:
        return skb->len;
 }
 
+static int nf_tables_dump_sets_start(struct netlink_callback *cb)
+{
+       struct nft_ctx *ctx_dump = NULL;
+
+       ctx_dump = kmemdup(cb->data, sizeof(*ctx_dump), GFP_ATOMIC);
+       if (ctx_dump == NULL)
+               return -ENOMEM;
+
+       cb->data = ctx_dump;
+       return 0;
+}
+
 static int nf_tables_dump_sets_done(struct netlink_callback *cb)
 {
        kfree(cb->data);
@@ -3188,18 +3228,12 @@ static int nf_tables_getset(struct net *net, struct sock *nlsk,
 
        if (nlh->nlmsg_flags & NLM_F_DUMP) {
                struct netlink_dump_control c = {
+                       .start = nf_tables_dump_sets_start,
                        .dump = nf_tables_dump_sets,
                        .done = nf_tables_dump_sets_done,
+                       .data = &ctx,
                        .module = THIS_MODULE,
                };
-               struct nft_ctx *ctx_dump;
-
-               ctx_dump = kmalloc(sizeof(*ctx_dump), GFP_ATOMIC);
-               if (ctx_dump == NULL)
-                       return -ENOMEM;
-
-               *ctx_dump = ctx;
-               c.data = ctx_dump;
 
                return nft_netlink_dump_start_rcu(nlsk, skb, nlh, &c);
        }
@@ -3849,6 +3883,15 @@ nla_put_failure:
        return -ENOSPC;
 }
 
+static int nf_tables_dump_set_start(struct netlink_callback *cb)
+{
+       struct nft_set_dump_ctx *dump_ctx = cb->data;
+
+       cb->data = kmemdup(dump_ctx, sizeof(*dump_ctx), GFP_ATOMIC);
+
+       return cb->data ? 0 : -ENOMEM;
+}
+
 static int nf_tables_dump_set_done(struct netlink_callback *cb)
 {
        kfree(cb->data);
@@ -4002,20 +4045,17 @@ static int nf_tables_getsetelem(struct net *net, struct sock *nlsk,
 
        if (nlh->nlmsg_flags & NLM_F_DUMP) {
                struct netlink_dump_control c = {
+                       .start = nf_tables_dump_set_start,
                        .dump = nf_tables_dump_set,
                        .done = nf_tables_dump_set_done,
                        .module = THIS_MODULE,
                };
-               struct nft_set_dump_ctx *dump_ctx;
-
-               dump_ctx = kmalloc(sizeof(*dump_ctx), GFP_ATOMIC);
-               if (!dump_ctx)
-                       return -ENOMEM;
-
-               dump_ctx->set = set;
-               dump_ctx->ctx = ctx;
+               struct nft_set_dump_ctx dump_ctx = {
+                       .set = set,
+                       .ctx = ctx,
+               };
 
-               c.data = dump_ctx;
+               c.data = &dump_ctx;
                return nft_netlink_dump_start_rcu(nlsk, skb, nlh, &c);
        }
 
@@ -4975,38 +5015,42 @@ done:
        return skb->len;
 }
 
-static int nf_tables_dump_obj_done(struct netlink_callback *cb)
+static int nf_tables_dump_obj_start(struct netlink_callback *cb)
 {
-       struct nft_obj_filter *filter = cb->data;
+       const struct nlattr * const *nla = cb->data;
+       struct nft_obj_filter *filter = NULL;
 
-       if (filter) {
-               kfree(filter->table);
-               kfree(filter);
+       if (nla[NFTA_OBJ_TABLE] || nla[NFTA_OBJ_TYPE]) {
+               filter = kzalloc(sizeof(*filter), GFP_ATOMIC);
+               if (!filter)
+                       return -ENOMEM;
+
+               if (nla[NFTA_OBJ_TABLE]) {
+                       filter->table = nla_strdup(nla[NFTA_OBJ_TABLE], GFP_ATOMIC);
+                       if (!filter->table) {
+                               kfree(filter);
+                               return -ENOMEM;
+                       }
+               }
+
+               if (nla[NFTA_OBJ_TYPE])
+                       filter->type = ntohl(nla_get_be32(nla[NFTA_OBJ_TYPE]));
        }
 
+       cb->data = filter;
        return 0;
 }
 
-static struct nft_obj_filter *
-nft_obj_filter_alloc(const struct nlattr * const nla[])
+static int nf_tables_dump_obj_done(struct netlink_callback *cb)
 {
-       struct nft_obj_filter *filter;
-
-       filter = kzalloc(sizeof(*filter), GFP_ATOMIC);
-       if (!filter)
-               return ERR_PTR(-ENOMEM);
+       struct nft_obj_filter *filter = cb->data;
 
-       if (nla[NFTA_OBJ_TABLE]) {
-               filter->table = nla_strdup(nla[NFTA_OBJ_TABLE], GFP_ATOMIC);
-               if (!filter->table) {
-                       kfree(filter);
-                       return ERR_PTR(-ENOMEM);
-               }
+       if (filter) {
+               kfree(filter->table);
+               kfree(filter);
        }
-       if (nla[NFTA_OBJ_TYPE])
-               filter->type = ntohl(nla_get_be32(nla[NFTA_OBJ_TYPE]));
 
-       return filter;
+       return 0;
 }
 
 /* called with rcu_read_lock held */
@@ -5027,21 +5071,13 @@ static int nf_tables_getobj(struct net *net, struct sock *nlsk,
 
        if (nlh->nlmsg_flags & NLM_F_DUMP) {
                struct netlink_dump_control c = {
+                       .start = nf_tables_dump_obj_start,
                        .dump = nf_tables_dump_obj,
                        .done = nf_tables_dump_obj_done,
                        .module = THIS_MODULE,
+                       .data = (void *)nla,
                };
 
-               if (nla[NFTA_OBJ_TABLE] ||
-                   nla[NFTA_OBJ_TYPE]) {
-                       struct nft_obj_filter *filter;
-
-                       filter = nft_obj_filter_alloc(nla);
-                       if (IS_ERR(filter))
-                               return -ENOMEM;
-
-                       c.data = filter;
-               }
                return nft_netlink_dump_start_rcu(nlsk, skb, nlh, &c);
        }
 
@@ -5320,8 +5356,6 @@ static int nf_tables_flowtable_parse_hook(const struct nft_ctx *ctx,
                flowtable->ops[i].priv          = &flowtable->data;
                flowtable->ops[i].hook          = flowtable->data.type->hook;
                flowtable->ops[i].dev           = dev_array[i];
-               flowtable->dev_name[i]          = kstrdup(dev_array[i]->name,
-                                                         GFP_KERNEL);
        }
 
        return err;
@@ -5479,10 +5513,8 @@ static int nf_tables_newflowtable(struct net *net, struct sock *nlsk,
 err6:
        i = flowtable->ops_len;
 err5:
-       for (k = i - 1; k >= 0; k--) {
-               kfree(flowtable->dev_name[k]);
+       for (k = i - 1; k >= 0; k--)
                nf_unregister_net_hook(net, &flowtable->ops[k]);
-       }
 
        kfree(flowtable->ops);
 err4:
@@ -5581,9 +5613,10 @@ static int nf_tables_fill_flowtable_info(struct sk_buff *skb, struct net *net,
                goto nla_put_failure;
 
        for (i = 0; i < flowtable->ops_len; i++) {
-               if (flowtable->dev_name[i][0] &&
-                   nla_put_string(skb, NFTA_DEVICE_NAME,
-                                  flowtable->dev_name[i]))
+               const struct net_device *dev = READ_ONCE(flowtable->ops[i].dev);
+
+               if (dev &&
+                   nla_put_string(skb, NFTA_DEVICE_NAME, dev->name))
                        goto nla_put_failure;
        }
        nla_nest_end(skb, nest_devs);
@@ -5650,37 +5683,39 @@ done:
        return skb->len;
 }
 
-static int nf_tables_dump_flowtable_done(struct netlink_callback *cb)
+static int nf_tables_dump_flowtable_start(struct netlink_callback *cb)
 {
-       struct nft_flowtable_filter *filter = cb->data;
+       const struct nlattr * const *nla = cb->data;
+       struct nft_flowtable_filter *filter = NULL;
 
-       if (!filter)
-               return 0;
+       if (nla[NFTA_FLOWTABLE_TABLE]) {
+               filter = kzalloc(sizeof(*filter), GFP_ATOMIC);
+               if (!filter)
+                       return -ENOMEM;
 
-       kfree(filter->table);
-       kfree(filter);
+               filter->table = nla_strdup(nla[NFTA_FLOWTABLE_TABLE],
+                                          GFP_ATOMIC);
+               if (!filter->table) {
+                       kfree(filter);
+                       return -ENOMEM;
+               }
+       }
 
+       cb->data = filter;
        return 0;
 }
 
-static struct nft_flowtable_filter *
-nft_flowtable_filter_alloc(const struct nlattr * const nla[])
+static int nf_tables_dump_flowtable_done(struct netlink_callback *cb)
 {
-       struct nft_flowtable_filter *filter;
+       struct nft_flowtable_filter *filter = cb->data;
 
-       filter = kzalloc(sizeof(*filter), GFP_ATOMIC);
        if (!filter)
-               return ERR_PTR(-ENOMEM);
+               return 0;
 
-       if (nla[NFTA_FLOWTABLE_TABLE]) {
-               filter->table = nla_strdup(nla[NFTA_FLOWTABLE_TABLE],
-                                          GFP_ATOMIC);
-               if (!filter->table) {
-                       kfree(filter);
-                       return ERR_PTR(-ENOMEM);
-               }
-       }
-       return filter;
+       kfree(filter->table);
+       kfree(filter);
+
+       return 0;
 }
 
 /* called with rcu_read_lock held */
@@ -5700,20 +5735,13 @@ static int nf_tables_getflowtable(struct net *net, struct sock *nlsk,
 
        if (nlh->nlmsg_flags & NLM_F_DUMP) {
                struct netlink_dump_control c = {
+                       .start = nf_tables_dump_flowtable_start,
                        .dump = nf_tables_dump_flowtable,
                        .done = nf_tables_dump_flowtable_done,
                        .module = THIS_MODULE,
+                       .data = (void *)nla,
                };
 
-               if (nla[NFTA_FLOWTABLE_TABLE]) {
-                       struct nft_flowtable_filter *filter;
-
-                       filter = nft_flowtable_filter_alloc(nla);
-                       if (IS_ERR(filter))
-                               return -ENOMEM;
-
-                       c.data = filter;
-               }
                return nft_netlink_dump_start_rcu(nlsk, skb, nlh, &c);
        }
 
@@ -5783,6 +5811,7 @@ static void nf_tables_flowtable_destroy(struct nft_flowtable *flowtable)
        kfree(flowtable->name);
        flowtable->data.type->free(&flowtable->data);
        module_put(flowtable->data.type->owner);
+       kfree(flowtable);
 }
 
 static int nf_tables_fill_gen_info(struct sk_buff *skb, struct net *net,
@@ -5825,7 +5854,6 @@ static void nft_flowtable_event(unsigned long event, struct net_device *dev,
                        continue;
 
                nf_unregister_net_hook(dev_net(dev), &flowtable->ops[i]);
-               flowtable->dev_name[i][0] = '\0';
                flowtable->ops[i].dev = NULL;
                break;
        }
@@ -6086,6 +6114,9 @@ static void nft_commit_release(struct nft_trans *trans)
        case NFT_MSG_DELTABLE:
                nf_tables_table_destroy(&trans->ctx);
                break;
+       case NFT_MSG_NEWCHAIN:
+               kfree(nft_trans_chain_name(trans));
+               break;
        case NFT_MSG_DELCHAIN:
                nf_tables_chain_destroy(&trans->ctx);
                break;
@@ -6315,13 +6346,15 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb)
                        nf_tables_table_notify(&trans->ctx, NFT_MSG_DELTABLE);
                        break;
                case NFT_MSG_NEWCHAIN:
-                       if (nft_trans_chain_update(trans))
+                       if (nft_trans_chain_update(trans)) {
                                nft_chain_commit_update(trans);
-                       else
+                               nf_tables_chain_notify(&trans->ctx, NFT_MSG_NEWCHAIN);
+                               /* trans destroyed after rcu grace period */
+                       } else {
                                nft_clear(net, trans->ctx.chain);
-
-                       nf_tables_chain_notify(&trans->ctx, NFT_MSG_NEWCHAIN);
-                       nft_trans_destroy(trans);
+                               nf_tables_chain_notify(&trans->ctx, NFT_MSG_NEWCHAIN);
+                               nft_trans_destroy(trans);
+                       }
                        break;
                case NFT_MSG_DELCHAIN:
                        nft_chain_del(trans->ctx.chain);
@@ -6471,7 +6504,7 @@ static int __nf_tables_abort(struct net *net)
                case NFT_MSG_NEWCHAIN:
                        if (nft_trans_chain_update(trans)) {
                                free_percpu(nft_trans_chain_stats(trans));
-
+                               kfree(nft_trans_chain_name(trans));
                                nft_trans_destroy(trans);
                        } else {
                                trans->ctx.table->use--;
@@ -6837,13 +6870,6 @@ int nft_validate_register_store(const struct nft_ctx *ctx,
                        err = nf_tables_check_loops(ctx, data->verdict.chain);
                        if (err < 0)
                                return err;
-
-                       if (ctx->chain->level + 1 >
-                           data->verdict.chain->level) {
-                               if (ctx->chain->level + 1 == NFT_JUMP_STACK_SIZE)
-                                       return -EMLINK;
-                               data->verdict.chain->level = ctx->chain->level + 1;
-                       }
                }
 
                return 0;
index 15adf8c..0777a93 100644 (file)
@@ -98,6 +98,7 @@ static int nft_immediate_validate(const struct nft_ctx *ctx,
                                  const struct nft_data **d)
 {
        const struct nft_immediate_expr *priv = nft_expr_priv(expr);
+       struct nft_ctx *pctx = (struct nft_ctx *)ctx;
        const struct nft_data *data;
        int err;
 
@@ -109,9 +110,11 @@ static int nft_immediate_validate(const struct nft_ctx *ctx,
        switch (data->verdict.code) {
        case NFT_JUMP:
        case NFT_GOTO:
+               pctx->level++;
                err = nft_chain_validate(ctx, data->verdict.chain);
                if (err < 0)
                        return err;
+               pctx->level--;
                break;
        default:
                break;
index 42e6fad..c2a1d84 100644 (file)
@@ -155,7 +155,9 @@ static int nft_lookup_validate_setelem(const struct nft_ctx *ctx,
                                       struct nft_set_elem *elem)
 {
        const struct nft_set_ext *ext = nft_set_elem_ext(set, elem->priv);
+       struct nft_ctx *pctx = (struct nft_ctx *)ctx;
        const struct nft_data *data;
+       int err;
 
        if (nft_set_ext_exists(ext, NFT_SET_EXT_FLAGS) &&
            *nft_set_ext_flags(ext) & NFT_SET_ELEM_INTERVAL_END)
@@ -165,10 +167,17 @@ static int nft_lookup_validate_setelem(const struct nft_ctx *ctx,
        switch (data->verdict.code) {
        case NFT_JUMP:
        case NFT_GOTO:
-               return nft_chain_validate(ctx, data->verdict.chain);
+               pctx->level++;
+               err = nft_chain_validate(ctx, data->verdict.chain);
+               if (err < 0)
+                       return err;
+               pctx->level--;
+               break;
        default:
-               return 0;
+               break;
        }
+
+       return 0;
 }
 
 static int nft_lookup_validate(const struct nft_ctx *ctx,
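
Together with the nft_validate_register_store hunk above, these two changes replace the stored per-chain ->level bookkeeping with a recursion counter in the walk context: each jump/goto bumps ctx->level around the recursive nft_chain_validate() call, so the depth check can live in one place. A sketch of the resulting shape, with illustrative my_* names and the same -EMLINK errno the removed code used:

#define MY_JUMP_STACK_SIZE 16	/* illustrative bound */

static int my_chain_validate(struct my_ctx *ctx, const struct my_chain *chain)
{
	int err;

	if (++ctx->level > MY_JUMP_STACK_SIZE)
		return -EMLINK;			/* jump stack would overflow */
	err = my_rules_validate(ctx, chain);	/* recurses on jumps/gotos */
	ctx->level--;
	return err;
}

A counter in the context is cheaper and safer than caching a level in each chain, which could go stale as rules are added or deleted.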
index 72ef35b..90c3e7e 100644 (file)
@@ -387,6 +387,7 @@ static void nft_rhash_destroy(const struct nft_set *set)
        struct nft_rhash *priv = nft_set_priv(set);
 
        cancel_delayed_work_sync(&priv->gc_work);
+       rcu_barrier();
        rhashtable_free_and_destroy(&priv->ht, nft_rhash_elem_destroy,
                                    (void *)set);
 }
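
The rcu_barrier() added here (and in nft_rbtree_destroy() below) closes a race with the asynchronous GC: cancel_delayed_work_sync() stops new call_rcu() callbacks from being queued, but callbacks already queued may still reference the set. The teardown idiom, in order:

/* 1. stop the producer of call_rcu() work */
cancel_delayed_work_sync(&priv->gc_work);
/* 2. wait for every already-queued RCU callback to finish */
rcu_barrier();
/* 3. only now free the backing structure synchronously */
rhashtable_free_and_destroy(&priv->ht, nft_rhash_elem_destroy, (void *)set);

Note that synchronize_rcu() would not be enough here; it waits for readers, while rcu_barrier() waits for the callbacks themselves.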
index 1f8f257..9873d73 100644 (file)
@@ -381,7 +381,7 @@ static void nft_rbtree_gc(struct work_struct *work)
 
                gcb = nft_set_gc_batch_check(set, gcb, GFP_ATOMIC);
                if (!gcb)
-                       goto out;
+                       break;
 
                atomic_dec(&set->nelems);
                nft_set_gc_batch_add(gcb, rbe);
@@ -390,10 +390,12 @@ static void nft_rbtree_gc(struct work_struct *work)
                        rbe = rb_entry(prev, struct nft_rbtree_elem, node);
                        atomic_dec(&set->nelems);
                        nft_set_gc_batch_add(gcb, rbe);
+                       prev = NULL;
                }
                node = rb_next(node);
+               if (!node)
+                       break;
        }
-out:
        if (gcb) {
                for (i = 0; i < gcb->head.cnt; i++) {
                        rbe = gcb->elems[i];
@@ -440,6 +442,7 @@ static void nft_rbtree_destroy(const struct nft_set *set)
        struct rb_node *node;
 
        cancel_delayed_work_sync(&priv->gc_work);
+       rcu_barrier();
        while ((node = priv->root.rb_node) != NULL) {
                rb_erase(node, &priv->root);
                rbe = rb_entry(node, struct nft_rbtree_elem, node);
index 393573a..56704d9 100644 (file)
@@ -63,6 +63,7 @@
 #include <linux/hash.h>
 #include <linux/genetlink.h>
 #include <linux/net_namespace.h>
+#include <linux/nospec.h>
 
 #include <net/net_namespace.h>
 #include <net/netns/generic.h>
@@ -679,6 +680,7 @@ static int netlink_create(struct net *net, struct socket *sock, int protocol,
 
        if (protocol < 0 || protocol >= MAX_LINKS)
                return -EPROTONOSUPPORT;
+       protocol = array_index_nospec(protocol, MAX_LINKS);
 
        netlink_lock_table();
 #ifdef CONFIG_MODULES
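
array_index_nospec() rebuilds the index with a branchless mask so that, even if the preceding bounds check is mispredicted, the CPU cannot use an out-of-range protocol value to index nl_table speculatively (Spectre variant 1). The general pattern, as a self-contained sketch:

#include <linux/nospec.h>

#define TABLE_SIZE 16
static int table[TABLE_SIZE];

/* Spectre-v1 pattern: bounds-check, then clamp, then index. */
static int lookup(int idx)
{
	if (idx < 0 || idx >= TABLE_SIZE)
		return -EINVAL;
	idx = array_index_nospec(idx, TABLE_SIZE); /* 0 under misspeculation */
	return table[idx];
}

The clamp is a data dependency rather than a branch, so it holds on speculative paths too.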
@@ -1009,6 +1011,11 @@ static int netlink_bind(struct socket *sock, struct sockaddr *addr,
                        return err;
        }
 
+       if (nlk->ngroups == 0)
+               groups = 0;
+       else if (nlk->ngroups < 8*sizeof(groups))
+               groups &= (1UL << nlk->ngroups) - 1;
+
        bound = nlk->bound;
        if (bound) {
                /* Ensure nlk->portid is up-to-date. */
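
The new clamp discards group bits beyond the nlk->ngroups the protocol actually registered, and the "8*sizeof(groups)" guard avoids shifting a long by its full width, which is undefined behaviour. As a worked case: for ngroups = 20 the mask is (1UL << 20) - 1 = 0xfffff, so a bind() asking for group bit 31 is quietly ignored instead of touching bits the per-socket group bitmap never allocated. The same clamp as a standalone helper:

/* Sketch: keep only the low n group bits of a request mask. */
static unsigned long clamp_groups(unsigned long groups, unsigned int n)
{
	if (n == 0)
		return 0;
	if (n < 8 * sizeof(groups))	/* full-width shift is UB */
		groups &= (1UL << n) - 1;
	return groups;
}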
index b891a91..c038e02 100644 (file)
@@ -211,6 +211,7 @@ static struct dp_meter *dp_meter_create(struct nlattr **a)
        if (!meter)
                return ERR_PTR(-ENOMEM);
 
+       meter->id = nla_get_u32(a[OVS_METER_ATTR_ID]);
        meter->used = div_u64(ktime_get_ns(), 1000 * 1000);
        meter->kbps = a[OVS_METER_ATTR_KBPS] ? 1 : 0;
        meter->keep_stats = !a[OVS_METER_ATTR_CLEAR];
@@ -280,6 +281,10 @@ static int ovs_meter_cmd_set(struct sk_buff *skb, struct genl_info *info)
        u32 meter_id;
        bool failed;
 
+       if (!a[OVS_METER_ATTR_ID]) {
+               return -ENODEV;
+       }
+
        meter = dp_meter_create(a);
        if (IS_ERR_OR_NULL(meter))
                return PTR_ERR(meter);
@@ -298,11 +303,6 @@ static int ovs_meter_cmd_set(struct sk_buff *skb, struct genl_info *info)
                goto exit_unlock;
        }
 
-       if (!a[OVS_METER_ATTR_ID]) {
-               err = -ENODEV;
-               goto exit_unlock;
-       }
-
        meter_id = nla_get_u32(a[OVS_METER_ATTR_ID]);
 
        /* Cannot fail after this. */
index 9b27d0c..e6445d8 100644 (file)
@@ -4226,6 +4226,8 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
        }
 
        if (req->tp_block_nr) {
+               unsigned int min_frame_size;
+
                /* Sanity tests and some calculations */
                err = -EBUSY;
                if (unlikely(rb->pg_vec))
@@ -4248,12 +4250,12 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
                        goto out;
                if (unlikely(!PAGE_ALIGNED(req->tp_block_size)))
                        goto out;
+               min_frame_size = po->tp_hdrlen + po->tp_reserve;
                if (po->tp_version >= TPACKET_V3 &&
-                   req->tp_block_size <=
-                   BLK_PLUS_PRIV((u64)req_u->req3.tp_sizeof_priv) + sizeof(struct tpacket3_hdr))
+                   req->tp_block_size <
+                   BLK_PLUS_PRIV((u64)req_u->req3.tp_sizeof_priv) + min_frame_size)
                        goto out;
-               if (unlikely(req->tp_frame_size < po->tp_hdrlen +
-                                       po->tp_reserve))
+               if (unlikely(req->tp_frame_size < min_frame_size))
                        goto out;
                if (unlikely(req->tp_frame_size & (TPACKET_ALIGNMENT - 1)))
                        goto out;
index 48332a6..d152e48 100644 (file)
@@ -344,6 +344,11 @@ struct rds_ib_mr *rds_ib_reg_frmr(struct rds_ib_device *rds_ibdev,
        struct rds_ib_frmr *frmr;
        int ret;
 
+       if (!ic) {
+               /* TODO: Add FRWR support for RDS_GET_MR using proxy qp */
+               return ERR_PTR(-EOPNOTSUPP);
+       }
+
        do {
                if (ibmr)
                        rds_ib_free_frmr(ibmr, true);
index 0ea4ab0..655f01d 100644 (file)
@@ -115,7 +115,8 @@ void rds_ib_get_mr_info(struct rds_ib_device *rds_ibdev,
                        struct rds_info_rdma_connection *iinfo);
 void rds_ib_destroy_mr_pool(struct rds_ib_mr_pool *);
 void *rds_ib_get_mr(struct scatterlist *sg, unsigned long nents,
-                   struct rds_sock *rs, u32 *key_ret);
+                   struct rds_sock *rs, u32 *key_ret,
+                   struct rds_connection *conn);
 void rds_ib_sync_mr(void *trans_private, int dir);
 void rds_ib_free_mr(void *trans_private, int invalidate);
 void rds_ib_flush_mrs(void);
index e678699..2e49a40 100644 (file)
@@ -537,11 +537,12 @@ void rds_ib_flush_mrs(void)
 }
 
 void *rds_ib_get_mr(struct scatterlist *sg, unsigned long nents,
-                   struct rds_sock *rs, u32 *key_ret)
+                   struct rds_sock *rs, u32 *key_ret,
+                   struct rds_connection *conn)
 {
        struct rds_ib_device *rds_ibdev;
        struct rds_ib_mr *ibmr = NULL;
-       struct rds_ib_connection *ic = rs->rs_conn->c_transport_data;
+       struct rds_ib_connection *ic = NULL;
        int ret;
 
        rds_ibdev = rds_ib_get_device(rs->rs_bound_addr);
@@ -550,6 +551,9 @@ void *rds_ib_get_mr(struct scatterlist *sg, unsigned long nents,
                goto out;
        }
 
+       if (conn)
+               ic = conn->c_transport_data;
+
        if (!rds_ibdev->mr_8k_pool || !rds_ibdev->mr_1m_pool) {
                ret = -ENODEV;
                goto out;
@@ -559,17 +563,18 @@ void *rds_ib_get_mr(struct scatterlist *sg, unsigned long nents,
                ibmr = rds_ib_reg_frmr(rds_ibdev, ic, sg, nents, key_ret);
        else
                ibmr = rds_ib_reg_fmr(rds_ibdev, sg, nents, key_ret);
-       if (ibmr)
-               rds_ibdev = NULL;
-
- out:
-       if (!ibmr)
+       if (IS_ERR(ibmr)) {
+               ret = PTR_ERR(ibmr);
                pr_warn("RDS/IB: rds_ib_get_mr failed (errno=%d)\n", ret);
+       } else {
+               return ibmr;
+       }
 
+ out:
        if (rds_ibdev)
                rds_ib_dev_put(rds_ibdev);
 
-       return ibmr;
+       return ERR_PTR(ret);
 }
 
 void rds_ib_destroy_mr_pool(struct rds_ib_mr_pool *pool)
index 634cfcb..80920e4 100644 (file)
@@ -170,7 +170,8 @@ static int rds_pin_pages(unsigned long user_addr, unsigned int nr_pages,
 }
 
 static int __rds_rdma_map(struct rds_sock *rs, struct rds_get_mr_args *args,
-                               u64 *cookie_ret, struct rds_mr **mr_ret)
+                         u64 *cookie_ret, struct rds_mr **mr_ret,
+                         struct rds_conn_path *cp)
 {
        struct rds_mr *mr = NULL, *found;
        unsigned int nr_pages;
@@ -269,7 +270,8 @@ static int __rds_rdma_map(struct rds_sock *rs, struct rds_get_mr_args *args,
         * Note that dma_map() implies that pending writes are
         * flushed to RAM, so no dma_sync is needed here. */
        trans_private = rs->rs_transport->get_mr(sg, nents, rs,
-                                                &mr->r_key);
+                                                &mr->r_key,
+                                                cp ? cp->cp_conn : NULL);
 
        if (IS_ERR(trans_private)) {
                for (i = 0 ; i < nents; i++)
@@ -330,7 +332,7 @@ int rds_get_mr(struct rds_sock *rs, char __user *optval, int optlen)
                           sizeof(struct rds_get_mr_args)))
                return -EFAULT;
 
-       return __rds_rdma_map(rs, &args, NULL, NULL);
+       return __rds_rdma_map(rs, &args, NULL, NULL, NULL);
 }
 
 int rds_get_mr_for_dest(struct rds_sock *rs, char __user *optval, int optlen)
@@ -354,7 +356,7 @@ int rds_get_mr_for_dest(struct rds_sock *rs, char __user *optval, int optlen)
        new_args.cookie_addr = args.cookie_addr;
        new_args.flags = args.flags;
 
-       return __rds_rdma_map(rs, &new_args, NULL, NULL);
+       return __rds_rdma_map(rs, &new_args, NULL, NULL, NULL);
 }
 
 /*
@@ -782,7 +784,8 @@ int rds_cmsg_rdma_map(struct rds_sock *rs, struct rds_message *rm,
            rm->m_rdma_cookie != 0)
                return -EINVAL;
 
-       return __rds_rdma_map(rs, CMSG_DATA(cmsg), &rm->m_rdma_cookie, &rm->rdma.op_rdma_mr);
+       return __rds_rdma_map(rs, CMSG_DATA(cmsg), &rm->m_rdma_cookie,
+                             &rm->rdma.op_rdma_mr, rm->m_conn_path);
 }
 
 /*
index f2272fb..60b3b78 100644 (file)
@@ -464,6 +464,8 @@ struct rds_message {
                        struct scatterlist      *op_sg;
                } data;
        };
+
+       struct rds_conn_path *m_conn_path;
 };
 
 /*
@@ -544,7 +546,8 @@ struct rds_transport {
                                        unsigned int avail);
        void (*exit)(void);
        void *(*get_mr)(struct scatterlist *sg, unsigned long nr_sg,
-                       struct rds_sock *rs, u32 *key_ret);
+                       struct rds_sock *rs, u32 *key_ret,
+                       struct rds_connection *conn);
        void (*sync_mr)(void *trans_private, int direction);
        void (*free_mr)(void *trans_private, int invalidate);
        void (*flush_mrs)(void);
index 94c7f74..59f17a2 100644 (file)
@@ -1169,6 +1169,13 @@ int rds_sendmsg(struct socket *sock, struct msghdr *msg, size_t payload_len)
                rs->rs_conn = conn;
        }
 
+       if (conn->c_trans->t_mp_capable)
+               cpath = &conn->c_path[rds_send_mprds_hash(rs, conn)];
+       else
+               cpath = &conn->c_path[0];
+
+       rm->m_conn_path = cpath;
+
        /* Parse any control messages the user may have included. */
        ret = rds_cmsg_send(rs, rm, msg, &allocated_mr);
        if (ret) {
@@ -1192,11 +1199,6 @@ int rds_sendmsg(struct socket *sock, struct msghdr *msg, size_t payload_len)
                goto out;
        }
 
-       if (conn->c_trans->t_mp_capable)
-               cpath = &conn->c_path[rds_send_mprds_hash(rs, conn)];
-       else
-               cpath = &conn->c_path[0];
-
        if (rds_destroy_pending(conn)) {
                ret = -EAGAIN;
                goto out;
index 5fb7d32..707630a 100644 (file)
@@ -104,9 +104,9 @@ struct rxrpc_net {
 
 #define RXRPC_KEEPALIVE_TIME 20 /* NAT keepalive time in seconds */
        u8                      peer_keepalive_cursor;
-       ktime_t                 peer_keepalive_base;
-       struct hlist_head       peer_keepalive[RXRPC_KEEPALIVE_TIME + 1];
-       struct hlist_head       peer_keepalive_new;
+       time64_t                peer_keepalive_base;
+       struct list_head        peer_keepalive[32];
+       struct list_head        peer_keepalive_new;
        struct timer_list       peer_keepalive_timer;
        struct work_struct      peer_keepalive_work;
 };
@@ -295,7 +295,7 @@ struct rxrpc_peer {
        struct hlist_head       error_targets;  /* targets for net error distribution */
        struct work_struct      error_distributor;
        struct rb_root          service_conns;  /* Service connections */
-       struct hlist_node       keepalive_link; /* Link in net->peer_keepalive[] */
+       struct list_head        keepalive_link; /* Link in net->peer_keepalive[] */
        time64_t                last_tx_at;     /* Last time packet sent here */
        seqlock_t               service_conn_lock;
        spinlock_t              lock;           /* access lock */
index a9a9be5..9d1e298 100644 (file)
@@ -116,9 +116,9 @@ static int rxrpc_service_prealloc_one(struct rxrpc_sock *rx,
                while (*pp) {
                        parent = *pp;
                        xcall = rb_entry(parent, struct rxrpc_call, sock_node);
-                       if (user_call_ID < call->user_call_ID)
+                       if (user_call_ID < xcall->user_call_ID)
                                pp = &(*pp)->rb_left;
-                       else if (user_call_ID > call->user_call_ID)
+                       else if (user_call_ID > xcall->user_call_ID)
                                pp = &(*pp)->rb_right;
                        else
                                goto id_in_use;
index 8229a52..3fde001 100644 (file)
@@ -136,7 +136,7 @@ static void rxrpc_conn_retransmit_call(struct rxrpc_connection *conn,
        }
 
        ret = kernel_sendmsg(conn->params.local->socket, &msg, iov, ioc, len);
-       conn->params.peer->last_tx_at = ktime_get_real();
+       conn->params.peer->last_tx_at = ktime_get_seconds();
        if (ret < 0)
                trace_rxrpc_tx_fail(conn->debug_id, serial, ret,
                                    rxrpc_tx_fail_call_final_resend);
@@ -245,7 +245,7 @@ static int rxrpc_abort_connection(struct rxrpc_connection *conn,
                return -EAGAIN;
        }
 
-       conn->params.peer->last_tx_at = ktime_get_real();
+       conn->params.peer->last_tx_at = ktime_get_seconds();
 
        _leave(" = 0");
        return 0;
index 5d6a773..417d808 100644 (file)
@@ -85,12 +85,12 @@ static __net_init int rxrpc_init_net(struct net *net)
        hash_init(rxnet->peer_hash);
        spin_lock_init(&rxnet->peer_hash_lock);
        for (i = 0; i < ARRAY_SIZE(rxnet->peer_keepalive); i++)
-               INIT_HLIST_HEAD(&rxnet->peer_keepalive[i]);
-       INIT_HLIST_HEAD(&rxnet->peer_keepalive_new);
+               INIT_LIST_HEAD(&rxnet->peer_keepalive[i]);
+       INIT_LIST_HEAD(&rxnet->peer_keepalive_new);
        timer_setup(&rxnet->peer_keepalive_timer,
                    rxrpc_peer_keepalive_timeout, 0);
        INIT_WORK(&rxnet->peer_keepalive_work, rxrpc_peer_keepalive_worker);
-       rxnet->peer_keepalive_base = ktime_add(ktime_get_real(), NSEC_PER_SEC);
+       rxnet->peer_keepalive_base = ktime_get_seconds();
 
        ret = -ENOMEM;
        rxnet->proc_net = proc_net_mkdir(net, "rxrpc", net->proc_net);
index f03de1c..4774c8f 100644 (file)
@@ -209,7 +209,7 @@ int rxrpc_send_ack_packet(struct rxrpc_call *call, bool ping,
        now = ktime_get_real();
        if (ping)
                call->ping_time = now;
-       conn->params.peer->last_tx_at = ktime_get_real();
+       conn->params.peer->last_tx_at = ktime_get_seconds();
        if (ret < 0)
                trace_rxrpc_tx_fail(call->debug_id, serial, ret,
                                    rxrpc_tx_fail_call_ack);
@@ -296,7 +296,7 @@ int rxrpc_send_abort_packet(struct rxrpc_call *call)
 
        ret = kernel_sendmsg(conn->params.local->socket,
                             &msg, iov, 1, sizeof(pkt));
-       conn->params.peer->last_tx_at = ktime_get_real();
+       conn->params.peer->last_tx_at = ktime_get_seconds();
        if (ret < 0)
                trace_rxrpc_tx_fail(call->debug_id, serial, ret,
                                    rxrpc_tx_fail_call_abort);
@@ -391,7 +391,7 @@ int rxrpc_send_data_packet(struct rxrpc_call *call, struct sk_buff *skb,
         *     message and update the peer record
         */
        ret = kernel_sendmsg(conn->params.local->socket, &msg, iov, 2, len);
-       conn->params.peer->last_tx_at = ktime_get_real();
+       conn->params.peer->last_tx_at = ktime_get_seconds();
 
        up_read(&conn->params.local->defrag_sem);
        if (ret < 0)
@@ -457,7 +457,7 @@ send_fragmentable:
                if (ret == 0) {
                        ret = kernel_sendmsg(conn->params.local->socket, &msg,
                                             iov, 2, len);
-                       conn->params.peer->last_tx_at = ktime_get_real();
+                       conn->params.peer->last_tx_at = ktime_get_seconds();
 
                        opt = IP_PMTUDISC_DO;
                        kernel_setsockopt(conn->params.local->socket, SOL_IP,
@@ -475,7 +475,7 @@ send_fragmentable:
                if (ret == 0) {
                        ret = kernel_sendmsg(conn->params.local->socket, &msg,
                                             iov, 2, len);
-                       conn->params.peer->last_tx_at = ktime_get_real();
+                       conn->params.peer->last_tx_at = ktime_get_seconds();
 
                        opt = IPV6_PMTUDISC_DO;
                        kernel_setsockopt(conn->params.local->socket,
@@ -599,6 +599,6 @@ void rxrpc_send_keepalive(struct rxrpc_peer *peer)
                trace_rxrpc_tx_fail(peer->debug_id, 0, ret,
                                    rxrpc_tx_fail_version_keepalive);
 
-       peer->last_tx_at = ktime_get_real();
+       peer->last_tx_at = ktime_get_seconds();
        _leave("");
 }
index 0ed8b65..4f9da2f 100644 (file)
@@ -350,97 +350,117 @@ void rxrpc_peer_add_rtt(struct rxrpc_call *call, enum rxrpc_rtt_rx_trace why,
 }
 
 /*
- * Perform keep-alive pings with VERSION packets to keep any NAT alive.
+ * Perform keep-alive pings.
  */
-void rxrpc_peer_keepalive_worker(struct work_struct *work)
+static void rxrpc_peer_keepalive_dispatch(struct rxrpc_net *rxnet,
+                                         struct list_head *collector,
+                                         time64_t base,
+                                         u8 cursor)
 {
-       struct rxrpc_net *rxnet =
-               container_of(work, struct rxrpc_net, peer_keepalive_work);
        struct rxrpc_peer *peer;
-       unsigned long delay;
-       ktime_t base, now = ktime_get_real();
-       s64 diff;
-       u8 cursor, slot;
+       const u8 mask = ARRAY_SIZE(rxnet->peer_keepalive) - 1;
+       time64_t keepalive_at;
+       int slot;
 
-       base = rxnet->peer_keepalive_base;
-       cursor = rxnet->peer_keepalive_cursor;
+       spin_lock_bh(&rxnet->peer_hash_lock);
 
-       _enter("%u,%lld", cursor, ktime_sub(now, base));
+       while (!list_empty(collector)) {
+               peer = list_entry(collector->next,
+                                 struct rxrpc_peer, keepalive_link);
 
-next_bucket:
-       diff = ktime_to_ns(ktime_sub(now, base));
-       if (diff < 0)
-               goto resched;
+               list_del_init(&peer->keepalive_link);
+               if (!rxrpc_get_peer_maybe(peer))
+                       continue;
 
-       _debug("at %u", cursor);
-       spin_lock_bh(&rxnet->peer_hash_lock);
-next_peer:
-       if (!rxnet->live) {
                spin_unlock_bh(&rxnet->peer_hash_lock);
-               goto out;
-       }
 
-       /* Everything in the bucket at the cursor is processed this second; the
-        * bucket at cursor + 1 goes now + 1s and so on...
-        */
-       if (hlist_empty(&rxnet->peer_keepalive[cursor])) {
-               if (hlist_empty(&rxnet->peer_keepalive_new)) {
-                       spin_unlock_bh(&rxnet->peer_hash_lock);
-                       goto emptied_bucket;
+               keepalive_at = peer->last_tx_at + RXRPC_KEEPALIVE_TIME;
+               slot = keepalive_at - base;
+               _debug("%02x peer %u t=%d {%pISp}",
+                      cursor, peer->debug_id, slot, &peer->srx.transport);
+
+               if (keepalive_at <= base ||
+                   keepalive_at > base + RXRPC_KEEPALIVE_TIME) {
+                       rxrpc_send_keepalive(peer);
+                       slot = RXRPC_KEEPALIVE_TIME;
                }
 
-               hlist_move_list(&rxnet->peer_keepalive_new,
-                               &rxnet->peer_keepalive[cursor]);
+               /* A transmission to this peer occurred since we last
+                * examined it, so put it into the appropriate future bucket.
+                */
+               slot += cursor;
+               slot &= mask;
+               spin_lock_bh(&rxnet->peer_hash_lock);
+               list_add_tail(&peer->keepalive_link,
+                             &rxnet->peer_keepalive[slot & mask]);
+               rxrpc_put_peer(peer);
        }
 
-       peer = hlist_entry(rxnet->peer_keepalive[cursor].first,
-                          struct rxrpc_peer, keepalive_link);
-       hlist_del_init(&peer->keepalive_link);
-       if (!rxrpc_get_peer_maybe(peer))
-               goto next_peer;
-
        spin_unlock_bh(&rxnet->peer_hash_lock);
+}
 
-       _debug("peer %u {%pISp}", peer->debug_id, &peer->srx.transport);
+/*
+ * Perform keep-alive pings with VERSION packets to keep any NAT alive.
+ */
+void rxrpc_peer_keepalive_worker(struct work_struct *work)
+{
+       struct rxrpc_net *rxnet =
+               container_of(work, struct rxrpc_net, peer_keepalive_work);
+       const u8 mask = ARRAY_SIZE(rxnet->peer_keepalive) - 1;
+       time64_t base, now, delay;
+       u8 cursor, stop;
+       LIST_HEAD(collector);
 
-recalc:
-       diff = ktime_divns(ktime_sub(peer->last_tx_at, base), NSEC_PER_SEC);
-       if (diff < -30 || diff > 30)
-               goto send; /* LSW of 64-bit time probably wrapped on 32-bit */
-       diff += RXRPC_KEEPALIVE_TIME - 1;
-       if (diff < 0)
-               goto send;
+       now = ktime_get_seconds();
+       base = rxnet->peer_keepalive_base;
+       cursor = rxnet->peer_keepalive_cursor;
+       _enter("%lld,%u", base - now, cursor);
 
-       slot = (diff > RXRPC_KEEPALIVE_TIME - 1) ? RXRPC_KEEPALIVE_TIME - 1 : diff;
-       if (slot == 0)
-               goto send;
+       if (!rxnet->live)
+               return;
 
-       /* A transmission to this peer occurred since last we examined it so
-        * put it into the appropriate future bucket.
+       /* Move all the peers currently lodged in expired buckets, plus
+        * all new peers, onto a temporary list.
+        *
+        * Everything in the bucket at the cursor is processed this
+        * second; the bucket at cursor + 1 goes at now + 1s and so
+        * on...
         */
-       slot = (slot + cursor) % ARRAY_SIZE(rxnet->peer_keepalive);
        spin_lock_bh(&rxnet->peer_hash_lock);
-       hlist_add_head(&peer->keepalive_link, &rxnet->peer_keepalive[slot]);
-       rxrpc_put_peer(peer);
-       goto next_peer;
-
-send:
-       rxrpc_send_keepalive(peer);
-       now = ktime_get_real();
-       goto recalc;
+       list_splice_init(&rxnet->peer_keepalive_new, &collector);
+
+       stop = cursor + ARRAY_SIZE(rxnet->peer_keepalive);
+       while (base <= now && (s8)(cursor - stop) < 0) {
+               list_splice_tail_init(&rxnet->peer_keepalive[cursor & mask],
+                                     &collector);
+               base++;
+               cursor++;
+       }
 
-emptied_bucket:
-       cursor++;
-       if (cursor >= ARRAY_SIZE(rxnet->peer_keepalive))
-               cursor = 0;
-       base = ktime_add_ns(base, NSEC_PER_SEC);
-       goto next_bucket;
+       base = now;
+       spin_unlock_bh(&rxnet->peer_hash_lock);
 
-resched:
        rxnet->peer_keepalive_base = base;
        rxnet->peer_keepalive_cursor = cursor;
-       delay = nsecs_to_jiffies(-diff) + 1;
-       timer_reduce(&rxnet->peer_keepalive_timer, jiffies + delay);
-out:
+       rxrpc_peer_keepalive_dispatch(rxnet, &collector, base, cursor);
+       ASSERT(list_empty(&collector));
+
+       /* Schedule the timer for the next occupied timeslot. */
+       cursor = rxnet->peer_keepalive_cursor;
+       stop = cursor + RXRPC_KEEPALIVE_TIME - 1;
+       for (; (s8)(cursor - stop) < 0; cursor++) {
+               if (!list_empty(&rxnet->peer_keepalive[cursor & mask]))
+                       break;
+               base++;
+       }
+
+       now = ktime_get_seconds();
+       delay = base - now;
+       if (delay < 1)
+               delay = 1;
+       delay *= HZ;
+       if (rxnet->live)
+               timer_reduce(&rxnet->peer_keepalive_timer, jiffies + delay);
+
        _leave("");
 }
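
The rewrite turns the keepalive machinery into a small timer wheel: 32 one-second buckets (a power of two, so slot arithmetic reduces to an AND with the mask), a cursor that advances one bucket per second, and a collector list so peers are unhooked under peer_hash_lock but pinged outside it. The bucket placement rule, distilled into a sketch with hypothetical names:

#define NR_BUCKETS 32	/* power of two, > RXRPC_KEEPALIVE_TIME (20) */

static unsigned int keepalive_slot(time64_t keepalive_at, time64_t base,
				   u8 cursor)
{
	time64_t slot = keepalive_at - base;	/* whole seconds until due */

	if (slot <= 0 || slot > RXRPC_KEEPALIVE_TIME)
		slot = RXRPC_KEEPALIVE_TIME;	/* due now: ping, rearm fully */
	return (cursor + slot) & (NR_BUCKETS - 1);
}

Working in time64_t seconds rather than ktime_t nanoseconds also removes the 32-bit wrap heuristic the old code needed (the "diff < -30 || diff > 30" test).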
index 1b7e810..24ec7cd 100644 (file)
@@ -322,7 +322,7 @@ struct rxrpc_peer *rxrpc_lookup_incoming_peer(struct rxrpc_local *local,
        if (!peer) {
                peer = prealloc;
                hash_add_rcu(rxnet->peer_hash, &peer->hash_link, hash_key);
-               hlist_add_head(&peer->keepalive_link, &rxnet->peer_keepalive_new);
+               list_add_tail(&peer->keepalive_link, &rxnet->peer_keepalive_new);
        }
 
        spin_unlock(&rxnet->peer_hash_lock);
@@ -367,8 +367,8 @@ struct rxrpc_peer *rxrpc_lookup_peer(struct rxrpc_local *local,
                if (!peer) {
                        hash_add_rcu(rxnet->peer_hash,
                                     &candidate->hash_link, hash_key);
-                       hlist_add_head(&candidate->keepalive_link,
-                                      &rxnet->peer_keepalive_new);
+                       list_add_tail(&candidate->keepalive_link,
+                                     &rxnet->peer_keepalive_new);
                }
 
                spin_unlock_bh(&rxnet->peer_hash_lock);
@@ -441,7 +441,7 @@ static void __rxrpc_put_peer(struct rxrpc_peer *peer)
 
        spin_lock_bh(&rxnet->peer_hash_lock);
        hash_del_rcu(&peer->hash_link);
-       hlist_del_init(&peer->keepalive_link);
+       list_del_init(&peer->keepalive_link);
        spin_unlock_bh(&rxnet->peer_hash_lock);
 
        kfree_rcu(peer, rcu);
index 278ac08..47cb019 100644 (file)
@@ -669,7 +669,7 @@ static int rxkad_issue_challenge(struct rxrpc_connection *conn)
                return -EAGAIN;
        }
 
-       conn->params.peer->last_tx_at = ktime_get_real();
+       conn->params.peer->last_tx_at = ktime_get_seconds();
        _leave(" = 0");
        return 0;
 }
@@ -725,7 +725,7 @@ static int rxkad_send_response(struct rxrpc_connection *conn,
                return -EAGAIN;
        }
 
-       conn->params.peer->last_tx_at = ktime_get_real();
+       conn->params.peer->last_tx_at = ktime_get_seconds();
        _leave(" = 0");
        return 0;
 }
index 05e4ffe..e7de5f2 100644 (file)
@@ -1122,6 +1122,8 @@ static void smc_tcp_listen_work(struct work_struct *work)
                sock_hold(lsk); /* sock_put in smc_listen_work */
                INIT_WORK(&new_smc->smc_listen_work, smc_listen_work);
                smc_copy_sock_settings_to_smc(new_smc);
+               new_smc->sk.sk_sndbuf = lsmc->sk.sk_sndbuf;
+               new_smc->sk.sk_rcvbuf = lsmc->sk.sk_rcvbuf;
                sock_hold(&new_smc->sk); /* sock_put in passive closing */
                if (!schedule_work(&new_smc->smc_listen_work))
                        sock_put(&new_smc->sk);
@@ -1397,8 +1399,7 @@ static int smc_shutdown(struct socket *sock, int how)
        lock_sock(sk);
 
        rc = -ENOTCONN;
-       if ((sk->sk_state != SMC_LISTEN) &&
-           (sk->sk_state != SMC_ACTIVE) &&
+       if ((sk->sk_state != SMC_ACTIVE) &&
            (sk->sk_state != SMC_PEERCLOSEWAIT1) &&
            (sk->sk_state != SMC_PEERCLOSEWAIT2) &&
            (sk->sk_state != SMC_APPCLOSEWAIT1) &&
@@ -1521,12 +1522,16 @@ static int smc_ioctl(struct socket *sock, unsigned int cmd,
 
        smc = smc_sk(sock->sk);
        conn = &smc->conn;
+       lock_sock(&smc->sk);
        if (smc->use_fallback) {
-               if (!smc->clcsock)
+               if (!smc->clcsock) {
+                       release_sock(&smc->sk);
                        return -EBADF;
-               return smc->clcsock->ops->ioctl(smc->clcsock, cmd, arg);
+               }
+               answ = smc->clcsock->ops->ioctl(smc->clcsock, cmd, arg);
+               release_sock(&smc->sk);
+               return answ;
        }
-       lock_sock(&smc->sk);
        switch (cmd) {
        case SIOCINQ: /* same as FIONREAD */
                if (smc->sk.sk_state == SMC_LISTEN) {
index a7e8d63..9bde1e4 100644 (file)
@@ -233,7 +233,8 @@ static void smc_cdc_msg_recv_action(struct smc_sock *smc,
                        /* force immediate tx of current consumer cursor, but
                         * under send_lock to guarantee arrival in seqno-order
                         */
-                       smc_tx_sndbuf_nonempty(conn);
+                       if (smc->sk.sk_state != SMC_INIT)
+                               smc_tx_sndbuf_nonempty(conn);
                }
        }
 
index 8563362..8c24d5d 100644 (file)
@@ -89,6 +89,7 @@
 #include <linux/magic.h>
 #include <linux/slab.h>
 #include <linux/xattr.h>
+#include <linux/nospec.h>
 
 #include <linux/uaccess.h>
 #include <asm/unistd.h>
@@ -2522,6 +2523,7 @@ SYSCALL_DEFINE2(socketcall, int, call, unsigned long __user *, args)
 
        if (call < 1 || call > SYS_SENDMMSG)
                return -EINVAL;
+       call = array_index_nospec(call, SYS_SENDMMSG + 1);
 
        len = nargs[call];
        if (len > sizeof(a))
@@ -2688,7 +2690,8 @@ EXPORT_SYMBOL(sock_unregister);
 
 bool sock_is_registered(int family)
 {
-       return family < NPROTO && rcu_access_pointer(net_families[family]);
+       return family < NPROTO &&
+               rcu_access_pointer(net_families[array_index_nospec(family, NPROTO)]);
 }
 
 static int __init sock_init(void)
index a7f6964..62199cf 100644 (file)
@@ -123,15 +123,13 @@ void tipc_net_finalize(struct net *net, u32 addr)
 {
        struct tipc_net *tn = tipc_net(net);
 
-       spin_lock_bh(&tn->node_list_lock);
-       if (!tipc_own_addr(net)) {
+       if (!cmpxchg(&tn->node_addr, 0, addr)) {
                tipc_set_node_addr(net, addr);
                tipc_named_reinit(net);
                tipc_sk_reinit(net);
                tipc_nametbl_publish(net, TIPC_CFG_SRV, addr, addr,
                                     TIPC_CLUSTER_SCOPE, 0, addr);
        }
-       spin_unlock_bh(&tn->node_list_lock);
 }
 
 void tipc_net_stop(struct net *net)
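
Replacing the node_list_lock section with cmpxchg() makes address finalization a lock-free "first caller wins": only the thread that observes tn->node_addr transition from 0 to addr performs the one-time setup, and every later caller sees a non-zero value and does nothing. The idiom in isolation, using atomic_t for a self-contained sketch:

/* One-shot init: exactly one winner of the 0 -> 1 transition runs
 * the setup. do_one_time_setup() is a hypothetical hook. */
static atomic_t initialized = ATOMIC_INIT(0);

static void maybe_init(void)
{
	if (atomic_cmpxchg(&initialized, 0, 1) == 0)
		do_one_time_setup();
}

This works because a TIPC node address, once set, is never reset to 0 on this path; cmpxchg() gives atomicity for the test-and-set, not mutual exclusion for the setup body.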
index 4618f1c..1f3d978 100644 (file)
@@ -646,6 +646,9 @@ static struct sk_buff *tls_wait_data(struct sock *sk, int flags,
                        return NULL;
                }
 
+               if (sk->sk_shutdown & RCV_SHUTDOWN)
+                       return NULL;
+
                if (sock_flag(sk, SOCK_DONE))
                        return NULL;
 
index c1076c1..ab27a28 100644 (file)
@@ -451,14 +451,14 @@ static int vsock_send_shutdown(struct sock *sk, int mode)
        return transport->shutdown(vsock_sk(sk), mode);
 }
 
-void vsock_pending_work(struct work_struct *work)
+static void vsock_pending_work(struct work_struct *work)
 {
        struct sock *sk;
        struct sock *listener;
        struct vsock_sock *vsk;
        bool cleanup;
 
-       vsk = container_of(work, struct vsock_sock, dwork.work);
+       vsk = container_of(work, struct vsock_sock, pending_work.work);
        sk = sk_vsock(vsk);
        listener = vsk->listener;
        cleanup = true;
@@ -498,7 +498,6 @@ out:
        sock_put(sk);
        sock_put(listener);
 }
-EXPORT_SYMBOL_GPL(vsock_pending_work);
 
 /**** SOCKET OPERATIONS ****/
 
@@ -597,6 +596,8 @@ static int __vsock_bind(struct sock *sk, struct sockaddr_vm *addr)
        return retval;
 }
 
+static void vsock_connect_timeout(struct work_struct *work);
+
 struct sock *__vsock_create(struct net *net,
                            struct socket *sock,
                            struct sock *parent,
@@ -638,6 +639,8 @@ struct sock *__vsock_create(struct net *net,
        vsk->sent_request = false;
        vsk->ignore_connecting_rst = false;
        vsk->peer_shutdown = 0;
+       INIT_DELAYED_WORK(&vsk->connect_work, vsock_connect_timeout);
+       INIT_DELAYED_WORK(&vsk->pending_work, vsock_pending_work);
 
        psk = parent ? vsock_sk(parent) : NULL;
        if (parent) {
@@ -1117,7 +1120,7 @@ static void vsock_connect_timeout(struct work_struct *work)
        struct vsock_sock *vsk;
        int cancel = 0;
 
-       vsk = container_of(work, struct vsock_sock, dwork.work);
+       vsk = container_of(work, struct vsock_sock, connect_work.work);
        sk = sk_vsock(vsk);
 
        lock_sock(sk);
@@ -1221,9 +1224,7 @@ static int vsock_stream_connect(struct socket *sock, struct sockaddr *addr,
                         * timeout fires.
                         */
                        sock_hold(sk);
-                       INIT_DELAYED_WORK(&vsk->dwork,
-                                         vsock_connect_timeout);
-                       schedule_delayed_work(&vsk->dwork, timeout);
+                       schedule_delayed_work(&vsk->connect_work, timeout);
 
                        /* Skip ahead to preserve error code set above. */
                        goto out_wait;
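
The underlying bug: vsock_sock had a single dwork field that was INIT_DELAYED_WORK'd with vsock_connect_timeout in one path and vsock_pending_work in another, so the two users could re-initialize a work item the other had already scheduled. The fix gives each timeout its own delayed_work, initialized exactly once in __vsock_create(); scheduling sites then only ever call schedule_delayed_work(). Shape of the corrected pattern, with hypothetical my_* names:

/* One delayed_work per independent timeout; INIT once at creation,
 * never re-INIT at scheduling time. */
struct my_sock {
	struct delayed_work connect_work;	/* connect timeout */
	struct delayed_work pending_work;	/* pending-accept timeout */
};

static void my_sock_init(struct my_sock *s)
{
	INIT_DELAYED_WORK(&s->connect_work, my_connect_timeout);
	INIT_DELAYED_WORK(&s->pending_work, my_pending_work);
}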
index a7a73ff..cb332ad 100644 (file)
@@ -1094,8 +1094,7 @@ static int vmci_transport_recv_listen(struct sock *sk,
        vpending->listener = sk;
        sock_hold(sk);
        sock_hold(pending);
-       INIT_DELAYED_WORK(&vpending->dwork, vsock_pending_work);
-       schedule_delayed_work(&vpending->dwork, HZ);
+       schedule_delayed_work(&vpending->pending_work, HZ);
 
 out:
        return err;
index 4eece06..80bc986 100644 (file)
@@ -4409,6 +4409,7 @@ static int parse_station_flags(struct genl_info *info,
                params->sta_flags_mask = BIT(NL80211_STA_FLAG_AUTHENTICATED) |
                                         BIT(NL80211_STA_FLAG_MFP) |
                                         BIT(NL80211_STA_FLAG_AUTHORIZED);
+               break;
        default:
                return -EINVAL;
        }
@@ -14923,20 +14924,24 @@ void cfg80211_mgmt_tx_status(struct wireless_dev *wdev, u64 cookie,
 EXPORT_SYMBOL(cfg80211_mgmt_tx_status);
 
 static int __nl80211_rx_control_port(struct net_device *dev,
-                                    const u8 *buf, size_t len,
-                                    const u8 *addr, u16 proto,
+                                    struct sk_buff *skb,
                                     bool unencrypted, gfp_t gfp)
 {
        struct wireless_dev *wdev = dev->ieee80211_ptr;
        struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
+       struct ethhdr *ehdr = eth_hdr(skb);
+       const u8 *addr = ehdr->h_source;
+       u16 proto = be16_to_cpu(skb->protocol);
        struct sk_buff *msg;
        void *hdr;
+       struct nlattr *frame;
+
        u32 nlportid = READ_ONCE(wdev->conn_owner_nlportid);
 
        if (!nlportid)
                return -ENOENT;
 
-       msg = nlmsg_new(100 + len, gfp);
+       msg = nlmsg_new(100 + skb->len, gfp);
        if (!msg)
                return -ENOMEM;
 
@@ -14950,13 +14955,17 @@ static int __nl80211_rx_control_port(struct net_device *dev,
            nla_put_u32(msg, NL80211_ATTR_IFINDEX, dev->ifindex) ||
            nla_put_u64_64bit(msg, NL80211_ATTR_WDEV, wdev_id(wdev),
                              NL80211_ATTR_PAD) ||
-           nla_put(msg, NL80211_ATTR_FRAME, len, buf) ||
            nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, addr) ||
            nla_put_u16(msg, NL80211_ATTR_CONTROL_PORT_ETHERTYPE, proto) ||
            (unencrypted && nla_put_flag(msg,
                                         NL80211_ATTR_CONTROL_PORT_NO_ENCRYPT)))
                goto nla_put_failure;
 
+       frame = nla_reserve(msg, NL80211_ATTR_FRAME, skb->len);
+       if (!frame)
+               goto nla_put_failure;
+
+       skb_copy_bits(skb, 0, nla_data(frame), skb->len);
        genlmsg_end(msg, hdr);
 
        return genlmsg_unicast(wiphy_net(&rdev->wiphy), msg, nlportid);
@@ -14967,14 +14976,12 @@ static int __nl80211_rx_control_port(struct net_device *dev,
 }
 
 bool cfg80211_rx_control_port(struct net_device *dev,
-                             const u8 *buf, size_t len,
-                             const u8 *addr, u16 proto, bool unencrypted)
+                             struct sk_buff *skb, bool unencrypted)
 {
        int ret;
 
-       trace_cfg80211_rx_control_port(dev, buf, len, addr, proto, unencrypted);
-       ret = __nl80211_rx_control_port(dev, buf, len, addr, proto,
-                                       unencrypted, GFP_ATOMIC);
+       trace_cfg80211_rx_control_port(dev, skb, unencrypted);
+       ret = __nl80211_rx_control_port(dev, skb, unencrypted, GFP_ATOMIC);
        trace_cfg80211_return_bool(ret == 0);
        return ret == 0;
 }
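
Handing the whole skb down to the netlink path means the frame no longer has to be linear: nla_reserve() carves out an attribute of skb->len bytes in the message, and skb_copy_bits() gathers the (possibly paged) payload directly into it. The reserve-then-fill pattern in isolation:

/* Copy a possibly non-linear skb payload into a netlink attribute
 * without linearizing the skb first. */
struct nlattr *frame = nla_reserve(msg, NL80211_ATTR_FRAME, skb->len);

if (!frame)
	goto nla_put_failure;
skb_copy_bits(skb, 0, nla_data(frame), skb->len);

Compared with nla_put(msg, ..., len, buf), this saves the caller from assembling a flat buffer just so netlink can copy it a second time.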
index bbe6298..4fc66a1 100644 (file)
@@ -2240,7 +2240,9 @@ static void wiphy_update_regulatory(struct wiphy *wiphy,
                 * as some drivers used this to restore its orig_* reg domain.
                 */
                if (initiator == NL80211_REGDOM_SET_BY_CORE &&
-                   wiphy->regulatory_flags & REGULATORY_CUSTOM_REG)
+                   wiphy->regulatory_flags & REGULATORY_CUSTOM_REG &&
+                   !(wiphy->regulatory_flags &
+                     REGULATORY_WIPHY_SELF_MANAGED))
                        reg_call_notifier(wiphy, lr);
                return;
        }
@@ -2787,26 +2789,6 @@ static void notify_self_managed_wiphys(struct regulatory_request *request)
        }
 }
 
-static bool reg_only_self_managed_wiphys(void)
-{
-       struct cfg80211_registered_device *rdev;
-       struct wiphy *wiphy;
-       bool self_managed_found = false;
-
-       ASSERT_RTNL();
-
-       list_for_each_entry(rdev, &cfg80211_rdev_list, list) {
-               wiphy = &rdev->wiphy;
-               if (wiphy->regulatory_flags & REGULATORY_WIPHY_SELF_MANAGED)
-                       self_managed_found = true;
-               else
-                       return false;
-       }
-
-       /* make sure at least one self-managed wiphy exists */
-       return self_managed_found;
-}
-
 /*
  * Processes regulatory hints, this is all the NL80211_REGDOM_SET_BY_*
  * Regulatory hints come on a first come first serve basis and we
@@ -2839,10 +2821,6 @@ static void reg_process_pending_hints(void)
        spin_unlock(&reg_requests_lock);
 
        notify_self_managed_wiphys(reg_request);
-       if (reg_only_self_managed_wiphys()) {
-               reg_free_request(reg_request);
-               return;
-       }
 
        reg_process_hint(reg_request);
 
index 2b417a2..7c73510 100644 (file)
@@ -2627,23 +2627,25 @@ TRACE_EVENT(cfg80211_mgmt_tx_status,
 );
 
 TRACE_EVENT(cfg80211_rx_control_port,
-       TP_PROTO(struct net_device *netdev, const u8 *buf, size_t len,
-                const u8 *addr, u16 proto, bool unencrypted),
-       TP_ARGS(netdev, buf, len, addr, proto, unencrypted),
+       TP_PROTO(struct net_device *netdev, struct sk_buff *skb,
+                bool unencrypted),
+       TP_ARGS(netdev, skb, unencrypted),
        TP_STRUCT__entry(
                NETDEV_ENTRY
-               MAC_ENTRY(addr)
+               __field(int, len)
+               MAC_ENTRY(from)
                __field(u16, proto)
                __field(bool, unencrypted)
        ),
        TP_fast_assign(
                NETDEV_ASSIGN;
-               MAC_ASSIGN(addr, addr);
-               __entry->proto = proto;
+               __entry->len = skb->len;
+               MAC_ASSIGN(from, eth_hdr(skb)->h_source);
+               __entry->proto = be16_to_cpu(skb->protocol);
                __entry->unencrypted = unencrypted;
        ),
-       TP_printk(NETDEV_PR_FMT ", " MAC_PR_FMT " proto: 0x%x, unencrypted: %s",
-                 NETDEV_PR_ARG, MAC_PR_ARG(addr),
+       TP_printk(NETDEV_PR_FMT ", len=%d, " MAC_PR_FMT ", proto: 0x%x, unencrypted: %s",
+                 NETDEV_PR_ARG, __entry->len, MAC_PR_ARG(from),
                  __entry->proto, BOOL_TO_STR(__entry->unencrypted))
 );
 
index 72335c2..4e937cd 100644 (file)
@@ -84,10 +84,8 @@ static int __xsk_rcv_zc(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len)
 {
        int err = xskq_produce_batch_desc(xs->rx, (u64)xdp->handle, len);
 
-       if (err) {
-               xdp_return_buff(xdp);
+       if (err)
                xs->rx_dropped++;
-       }
 
        return err;
 }
index 52ecaf7..8a64b15 100644 (file)
@@ -250,7 +250,7 @@ static inline bool xskq_full_desc(struct xsk_queue *q)
 
 static inline bool xskq_empty_desc(struct xsk_queue *q)
 {
-       return xskq_nb_free(q, q->prod_tail, 1) == q->nentries;
+       return xskq_nb_free(q, q->prod_tail, q->nentries) == q->nentries;
 }
 
 void xskq_set_umem(struct xsk_queue *q, struct xdp_umem_props *umem_props);
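
The old emptiness test asked xskq_nb_free() for a single free slot, but that function only refreshes its cached view of the consumer pointer when the cached count cannot satisfy the request, so an empty ring could be reported as occupied because of stale state. Requesting q->nentries forces a refresh whenever the answer might be "completely free". A sketch of the caching behaviour that makes this necessary (field names illustrative):

struct ring { u32 nentries, cached_cons, consumer; };

/* Returns free entries; may be stale unless it falls below 'want'. */
static u32 nb_free(struct ring *q, u32 producer, u32 want)
{
	u32 free_entries = q->nentries - (producer - q->cached_cons);

	if (free_entries >= want)
		return free_entries;		/* cached, possibly stale */

	q->cached_cons = READ_ONCE(q->consumer);	/* refresh from shared ring */
	return q->nentries - (producer - q->cached_cons);
}

With want == 1 the early return almost always triggers, so the "== nentries" comparison was made against a number that was never recomputed.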
index 5f48251..7c5e897 100644 (file)
@@ -2286,6 +2286,9 @@ struct dst_entry *xfrm_lookup_route(struct net *net, struct dst_entry *dst_orig,
        if (IS_ERR(dst) && PTR_ERR(dst) == -EREMOTE)
                return make_blackhole(net, dst_orig->ops->family, dst_orig);
 
+       if (IS_ERR(dst))
+               dst_release(dst_orig);
+
        return dst;
 }
 EXPORT_SYMBOL(xfrm_lookup_route);
index 080035f..33878e6 100644 (file)
@@ -1025,10 +1025,12 @@ static inline int xfrm_nlmsg_multicast(struct net *net, struct sk_buff *skb,
 {
        struct sock *nlsk = rcu_dereference(net->xfrm.nlsk);
 
-       if (nlsk)
-               return nlmsg_multicast(nlsk, skb, pid, group, GFP_ATOMIC);
-       else
-               return -1;
+       if (!nlsk) {
+               kfree_skb(skb);
+               return -EPIPE;
+       }
+
+       return nlmsg_multicast(nlsk, skb, pid, group, GFP_ATOMIC);
 }
 
 static inline unsigned int xfrm_spdinfo_msgsize(void)
@@ -1671,9 +1673,11 @@ static inline unsigned int userpolicy_type_attrsize(void)
 #ifdef CONFIG_XFRM_SUB_POLICY
 static int copy_to_user_policy_type(u8 type, struct sk_buff *skb)
 {
-       struct xfrm_userpolicy_type upt = {
-               .type = type,
-       };
+       struct xfrm_userpolicy_type upt;
+
+       /* Sadly there are two holes in struct xfrm_userpolicy_type */
+       memset(&upt, 0, sizeof(upt));
+       upt.type = type;
 
        return nla_put(skb, XFRMA_POLICY_TYPE, sizeof(upt), &upt);
 }
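
A designated initializer defines only the named members; the compiler-inserted padding stays undefined, so copying the whole struct into a netlink attribute leaked stack bytes to userspace. With the usual ABI layout of struct xfrm_userpolicy_type (__u8 type; __u16 reserved1; __u8 reserved2;) there is a hole after type and trailing padding after reserved2, matching the "two holes" in the comment. The safe pattern for any struct copied wholesale to userspace:

struct xfrm_userpolicy_type upt;

memset(&upt, 0, sizeof(upt));	/* zeroes the padding, not just the members */
upt.type = type;
nla_put(skb, XFRMA_POLICY_TYPE, sizeof(upt), &upt);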
index 303e9e7..4938dcb 100644 (file)
@@ -14,7 +14,7 @@
 #include <uapi/linux/bpf.h>
 #include "bpf_helpers.h"
 
-#define MAX_CPUS 12 /* WARNING - sync with _user.c */
+#define MAX_CPUS 64 /* WARNING - sync with _user.c */
 
 /* Special map type that can XDP_REDIRECT frames to another CPU */
 struct bpf_map_def SEC("maps") cpu_map = {
index f6efaef..4b4d78f 100644 (file)
@@ -19,7 +19,7 @@ static const char *__doc__ =
 #include <arpa/inet.h>
 #include <linux/if_link.h>
 
-#define MAX_CPUS 12 /* WARNING - sync with _kern.c */
+#define MAX_CPUS 64 /* WARNING - sync with _kern.c */
 
 /* How many xdp_progs are defined in _kern.c */
 #define MAX_PROG 5
@@ -527,7 +527,7 @@ static void stress_cpumap(void)
         * procedure.
         */
        create_cpu_entry(1,  1024, 0, false);
-       create_cpu_entry(1,   128, 0, false);
+       create_cpu_entry(1,     8, 0, false);
        create_cpu_entry(1, 16000, 0, false);
 }
 
index b593b36..38b2b48 100644 (file)
@@ -14,10 +14,6 @@ ifdef CONFIG_UBSAN_ALIGNMENT
       CFLAGS_UBSAN += $(call cc-option, -fsanitize=alignment)
 endif
 
-ifdef CONFIG_UBSAN_NULL
-      CFLAGS_UBSAN += $(call cc-option, -fsanitize=null)
-endif
-
       # -fsanitize=* options make GCC less smart than usual and
       # increase the number of 'maybe-uninitialized' false positives
       CFLAGS_UBSAN += $(call cc-option, -Wno-maybe-uninitialized)
index ac5ba55..985534d 100644 (file)
 #define __NR_pkey_free         385
 #define __NR_pkey_mprotect     386
 #define __NR_rseq              387
+#define __NR_io_pgetevents     388
 
 #endif /* _UAPI_ASM_POWERPC_UNISTD_H_ */
diff --git a/tools/arch/x86/include/asm/mcsafe_test.h b/tools/arch/x86/include/asm/mcsafe_test.h
new file mode 100644 (file)
index 0000000..2ccd588
--- /dev/null
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _MCSAFE_TEST_H_
+#define _MCSAFE_TEST_H_
+
+.macro MCSAFE_TEST_CTL
+.endm
+
+.macro MCSAFE_TEST_SRC reg count target
+.endm
+
+.macro MCSAFE_TEST_DST reg count target
+.endm
+#endif /* _MCSAFE_TEST_H_ */
index 9a53a06..298ef14 100644 (file)
@@ -3,6 +3,7 @@
 #include <linux/linkage.h>
 #include <asm/errno.h>
 #include <asm/cpufeatures.h>
+#include <asm/mcsafe_test.h>
 #include <asm/alternative-asm.h>
 #include <asm/export.h>
 
@@ -183,12 +184,15 @@ ENTRY(memcpy_orig)
 ENDPROC(memcpy_orig)
 
 #ifndef CONFIG_UML
+
+MCSAFE_TEST_CTL
+
 /*
- * memcpy_mcsafe_unrolled - memory copy with machine check exception handling
+ * __memcpy_mcsafe - memory copy with machine check exception handling
  * Note that we only catch machine checks when reading the source addresses.
  * Writes to target are posted and don't generate machine checks.
  */
-ENTRY(memcpy_mcsafe_unrolled)
+ENTRY(__memcpy_mcsafe)
        cmpl $8, %edx
        /* Less than 8 bytes? Go to byte copy loop */
        jb .L_no_whole_words
@@ -204,58 +208,33 @@ ENTRY(memcpy_mcsafe_unrolled)
        subl $8, %ecx
        negl %ecx
        subl %ecx, %edx
-.L_copy_leading_bytes:
+.L_read_leading_bytes:
        movb (%rsi), %al
+       MCSAFE_TEST_SRC %rsi 1 .E_leading_bytes
+       MCSAFE_TEST_DST %rdi 1 .E_leading_bytes
+.L_write_leading_bytes:
        movb %al, (%rdi)
        incq %rsi
        incq %rdi
        decl %ecx
-       jnz .L_copy_leading_bytes
+       jnz .L_read_leading_bytes
 
 .L_8byte_aligned:
-       /* Figure out how many whole cache lines (64-bytes) to copy */
-       movl %edx, %ecx
-       andl $63, %edx
-       shrl $6, %ecx
-       jz .L_no_whole_cache_lines
-
-       /* Loop copying whole cache lines */
-.L_cache_w0: movq (%rsi), %r8
-.L_cache_w1: movq 1*8(%rsi), %r9
-.L_cache_w2: movq 2*8(%rsi), %r10
-.L_cache_w3: movq 3*8(%rsi), %r11
-       movq %r8, (%rdi)
-       movq %r9, 1*8(%rdi)
-       movq %r10, 2*8(%rdi)
-       movq %r11, 3*8(%rdi)
-.L_cache_w4: movq 4*8(%rsi), %r8
-.L_cache_w5: movq 5*8(%rsi), %r9
-.L_cache_w6: movq 6*8(%rsi), %r10
-.L_cache_w7: movq 7*8(%rsi), %r11
-       movq %r8, 4*8(%rdi)
-       movq %r9, 5*8(%rdi)
-       movq %r10, 6*8(%rdi)
-       movq %r11, 7*8(%rdi)
-       leaq 64(%rsi), %rsi
-       leaq 64(%rdi), %rdi
-       decl %ecx
-       jnz .L_cache_w0
-
-       /* Are there any trailing 8-byte words? */
-.L_no_whole_cache_lines:
        movl %edx, %ecx
        andl $7, %edx
        shrl $3, %ecx
        jz .L_no_whole_words
 
-       /* Copy trailing words */
-.L_copy_trailing_words:
+.L_read_words:
        movq (%rsi), %r8
-       mov %r8, (%rdi)
-       leaq 8(%rsi), %rsi
-       leaq 8(%rdi), %rdi
+       MCSAFE_TEST_SRC %rsi 8 .E_read_words
+       MCSAFE_TEST_DST %rdi 8 .E_write_words
+.L_write_words:
+       movq %r8, (%rdi)
+       addq $8, %rsi
+       addq $8, %rdi
        decl %ecx
-       jnz .L_copy_trailing_words
+       jnz .L_read_words
 
        /* Any trailing bytes? */
 .L_no_whole_words:
@@ -264,38 +243,55 @@ ENTRY(memcpy_mcsafe_unrolled)
 
        /* Copy trailing bytes */
        movl %edx, %ecx
-.L_copy_trailing_bytes:
+.L_read_trailing_bytes:
        movb (%rsi), %al
+       MCSAFE_TEST_SRC %rsi 1 .E_trailing_bytes
+       MCSAFE_TEST_DST %rdi 1 .E_trailing_bytes
+.L_write_trailing_bytes:
        movb %al, (%rdi)
        incq %rsi
        incq %rdi
        decl %ecx
-       jnz .L_copy_trailing_bytes
+       jnz .L_read_trailing_bytes
 
        /* Copy successful. Return zero */
 .L_done_memcpy_trap:
        xorq %rax, %rax
        ret
-ENDPROC(memcpy_mcsafe_unrolled)
-EXPORT_SYMBOL_GPL(memcpy_mcsafe_unrolled)
+ENDPROC(__memcpy_mcsafe)
+EXPORT_SYMBOL_GPL(__memcpy_mcsafe)
 
        .section .fixup, "ax"
-       /* Return -EFAULT for any failure */
-.L_memcpy_mcsafe_fail:
-       mov     $-EFAULT, %rax
+       /*
+        * Return number of bytes not copied for any failure. Note that
+        * there is no "tail" handling since the source buffer is 8-byte
+        * aligned and poison is cacheline aligned.
+        */
+.E_read_words:
+       shll    $3, %ecx
+.E_leading_bytes:
+       addl    %edx, %ecx
+.E_trailing_bytes:
+       mov     %ecx, %eax
        ret
 
+       /*
+        * For write fault handling, given the destination is unaligned,
+        * we handle faults on multi-byte writes with a byte-by-byte
+        * copy up to the write-protected page.
+        */
+.E_write_words:
+       shll    $3, %ecx
+       addl    %edx, %ecx
+       movl    %ecx, %edx
+       jmp mcsafe_handle_tail
+
        .previous
 
-       _ASM_EXTABLE_FAULT(.L_copy_leading_bytes, .L_memcpy_mcsafe_fail)
-       _ASM_EXTABLE_FAULT(.L_cache_w0, .L_memcpy_mcsafe_fail)
-       _ASM_EXTABLE_FAULT(.L_cache_w1, .L_memcpy_mcsafe_fail)
-       _ASM_EXTABLE_FAULT(.L_cache_w2, .L_memcpy_mcsafe_fail)
-       _ASM_EXTABLE_FAULT(.L_cache_w3, .L_memcpy_mcsafe_fail)
-       _ASM_EXTABLE_FAULT(.L_cache_w4, .L_memcpy_mcsafe_fail)
-       _ASM_EXTABLE_FAULT(.L_cache_w5, .L_memcpy_mcsafe_fail)
-       _ASM_EXTABLE_FAULT(.L_cache_w6, .L_memcpy_mcsafe_fail)
-       _ASM_EXTABLE_FAULT(.L_cache_w7, .L_memcpy_mcsafe_fail)
-       _ASM_EXTABLE_FAULT(.L_copy_trailing_words, .L_memcpy_mcsafe_fail)
-       _ASM_EXTABLE_FAULT(.L_copy_trailing_bytes, .L_memcpy_mcsafe_fail)
+       _ASM_EXTABLE_FAULT(.L_read_leading_bytes, .E_leading_bytes)
+       _ASM_EXTABLE_FAULT(.L_read_words, .E_read_words)
+       _ASM_EXTABLE_FAULT(.L_read_trailing_bytes, .E_trailing_bytes)
+       _ASM_EXTABLE(.L_write_leading_bytes, .E_leading_bytes)
+       _ASM_EXTABLE(.L_write_words, .E_write_words)
+       _ASM_EXTABLE(.L_write_trailing_bytes, .E_trailing_bytes)
 #endif
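
For context on the semantics the hunks above introduce: __memcpy_mcsafe() now returns the number of bytes not copied (0 on full success) instead of -EFAULT, so a caller can tell exactly how much of the destination is valid. A minimal caller sketch, assuming only the return convention shown above (the function and buffer names are hypothetical):

    /* Hedged sketch: consume the "bytes not copied" return value. */
    static int copy_from_pmem(void *dst, const void *src, size_t len)
    {
            unsigned long rem = __memcpy_mcsafe(dst, src, len);

            if (!rem)
                    return 0;       /* everything copied */
            /* the first len - rem bytes of dst are valid */
            return -EFAULT;
    }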
index 32f9e39..3f140ef 100644
@@ -217,6 +217,14 @@ int do_pin_any(int argc, char **argv, int (*get_fd_by_id)(__u32))
        int err;
        int fd;
 
+       if (argc < 3) {
+               p_err("too few arguments, id ID and FILE path is required");
+               return -1;
+       } else if (argc > 3) {
+               p_err("too many arguments");
+               return -1;
+       }
+
        if (!is_prefix(*argv, "id")) {
                p_err("expected 'id' got %s", *argv);
                return -1;
@@ -230,9 +238,6 @@ int do_pin_any(int argc, char **argv, int (*get_fd_by_id)(__u32))
        }
        NEXT_ARG();
 
-       if (argc != 1)
-               usage();
-
        fd = get_fd_by_id(id);
        if (fd < 0) {
                p_err("can't get prog by id (%u): %s", id, strerror(errno));
index 097b1a5..f74a8bc 100644
@@ -36,6 +36,7 @@
 #include <assert.h>
 #include <errno.h>
 #include <fcntl.h>
+#include <linux/kernel.h>
 #include <stdbool.h>
 #include <stdio.h>
 #include <stdlib.h>
@@ -90,7 +91,8 @@ static bool map_is_map_of_progs(__u32 type)
 static void *alloc_value(struct bpf_map_info *info)
 {
        if (map_is_per_cpu(info->type))
-               return malloc(info->value_size * get_possible_cpus());
+               return malloc(round_up(info->value_size, 8) *
+                             get_possible_cpus());
        else
                return malloc(info->value_size);
 }
@@ -161,9 +163,10 @@ static void print_entry_json(struct bpf_map_info *info, unsigned char *key,
                jsonw_name(json_wtr, "value");
                print_hex_data_json(value, info->value_size);
        } else {
-               unsigned int i, n;
+               unsigned int i, n, step;
 
                n = get_possible_cpus();
+               step = round_up(info->value_size, 8);
 
                jsonw_name(json_wtr, "key");
                print_hex_data_json(key, info->key_size);
@@ -176,7 +179,7 @@ static void print_entry_json(struct bpf_map_info *info, unsigned char *key,
                        jsonw_int_field(json_wtr, "cpu", i);
 
                        jsonw_name(json_wtr, "value");
-                       print_hex_data_json(value + i * info->value_size,
+                       print_hex_data_json(value + i * step,
                                            info->value_size);
 
                        jsonw_end_object(json_wtr);
@@ -207,9 +210,10 @@ static void print_entry_plain(struct bpf_map_info *info, unsigned char *key,
 
                printf("\n");
        } else {
-               unsigned int i, n;
+               unsigned int i, n, step;
 
                n = get_possible_cpus();
+               step = round_up(info->value_size, 8);
 
                printf("key:\n");
                fprint_hex(stdout, key, info->key_size, " ");
@@ -217,7 +221,7 @@ static void print_entry_plain(struct bpf_map_info *info, unsigned char *key,
                for (i = 0; i < n; i++) {
                        printf("value (CPU %02d):%c",
                               i, info->value_size > 16 ? '\n' : ' ');
-                       fprint_hex(stdout, value + i * info->value_size,
+                       fprint_hex(stdout, value + i * step,
                                   info->value_size, " ");
                        printf("\n");
                }
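
The round_up() calls in the hunks above encode the kernel ABI for per-CPU map values: each CPU's copy of the value occupies a stride rounded up to 8 bytes, so values narrower than 8 bytes are padded rather than packed. A short addressing sketch, assuming round_up() from <linux/kernel.h> (the helper name is hypothetical):

    /* Hedged sketch: locate CPU i's slot in a per-cpu value buffer. */
    static void *percpu_value_slot(void *value, __u32 value_size, unsigned int i)
    {
            return (char *)value + i * round_up(value_size, 8);
    }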
index 59b19b6..b7db326 100644
@@ -1857,7 +1857,8 @@ union bpf_attr {
  *             is resolved), the nexthop address is returned in ipv4_dst
  *             or ipv6_dst based on family, smac is set to mac address of
  *             egress device, dmac is set to nexthop mac address, rt_metric
- *             is set to metric from route (IPv4/IPv6 only).
+ *             is set to metric from route (IPv4/IPv6 only), and ifindex
+ *             is set to the device index of the nexthop from the FIB lookup.
  *
  *             *plen* argument is the size of the passed in struct.
  *             *flags* argument can be a combination of one or more of the
@@ -1873,9 +1874,10 @@ union bpf_attr {
  *             *ctx* is either **struct xdp_md** for XDP programs or
 *             **struct sk_buff** for tc cls_act programs.
  *     Return
- *             Egress device index on success, 0 if packet needs to continue
- *             up the stack for further processing or a negative error in case
- *             of failure.
+ *             * < 0 if any input argument is invalid
+ *             *   0 on success (packet is forwarded, nexthop neighbor exists)
+ *             * > 0 one of **BPF_FIB_LKUP_RET_** codes explaining why the
+ *             *     packet is not forwarded or needs assist from full stack
  *
  * int bpf_sock_hash_update(struct bpf_sock_ops_kern *skops, struct bpf_map *map, void *key, u64 flags)
  *     Description
@@ -2612,6 +2614,18 @@ struct bpf_raw_tracepoint_args {
 #define BPF_FIB_LOOKUP_DIRECT  BIT(0)
 #define BPF_FIB_LOOKUP_OUTPUT  BIT(1)
 
+enum {
+       BPF_FIB_LKUP_RET_SUCCESS,      /* lookup successful */
+       BPF_FIB_LKUP_RET_BLACKHOLE,    /* dest is blackholed; can be dropped */
+       BPF_FIB_LKUP_RET_UNREACHABLE,  /* dest is unreachable; can be dropped */
+       BPF_FIB_LKUP_RET_PROHIBIT,     /* dest not allowed; can be dropped */
+       BPF_FIB_LKUP_RET_NOT_FWDED,    /* packet is not forwarded */
+       BPF_FIB_LKUP_RET_FWD_DISABLED, /* fwding is not enabled on ingress */
+       BPF_FIB_LKUP_RET_UNSUPP_LWT,   /* fwd requires encapsulation */
+       BPF_FIB_LKUP_RET_NO_NEIGH,     /* no neighbor entry for nh */
+       BPF_FIB_LKUP_RET_FRAG_NEEDED,  /* fragmentation required to fwd */
+};
+
 struct bpf_fib_lookup {
        /* input:  network family for lookup (AF_INET, AF_INET6)
         * output: network family of egress nexthop
@@ -2625,7 +2639,11 @@ struct bpf_fib_lookup {
 
        /* total length of packet from network header - used for MTU check */
        __u16   tot_len;
-       __u32   ifindex;  /* L3 device index for lookup */
+
+       /* input: L3 device index for lookup
+        * output: device index from FIB lookup
+        */
+       __u32   ifindex;
 
        union {
                /* inputs to lookup */
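
A sketch of how an XDP program might act on the new return codes, derived only from the helper documentation above; the bpf_fib_lookup()/bpf_redirect() declarations are assumed to come from the program's usual helper header, and the flow is illustrative (a real program would also rewrite the Ethernet header before redirecting), not the patch's own example:

    static int handle_packet(struct xdp_md *ctx)
    {
            struct bpf_fib_lookup fib_params = {};
            int rc = bpf_fib_lookup(ctx, &fib_params, sizeof(fib_params), 0);

            if (rc < 0)
                    return XDP_ABORTED;     /* invalid input argument */
            if (rc == BPF_FIB_LKUP_RET_SUCCESS)
                    /* fib_params.ifindex, smac and dmac were filled in */
                    return bpf_redirect(fib_params.ifindex, 0);
            return XDP_PASS;                /* let the full stack assist */
    }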
index 0b5ddbe..972265f 100644
@@ -76,7 +76,7 @@ struct btf_type {
  */
 #define BTF_INT_ENCODING(VAL)  (((VAL) & 0x0f000000) >> 24)
 #define BTF_INT_OFFSET(VAL)    (((VAL  & 0x00ff0000)) >> 16)
-#define BTF_INT_BITS(VAL)      ((VAL)  & 0x0000ffff)
+#define BTF_INT_BITS(VAL)      ((VAL)  & 0x000000ff)
 
 /* Attributes stored in the BTF_INT_ENCODING */
 #define BTF_INT_SIGNED (1 << 0)
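
A worked example of the encoding these macros unpack (the values follow directly from the shifts above): a signed 32-bit integer at bit offset 0 is encoded as (BTF_INT_SIGNED << 24) | 32 = 0x01000020, so:

    BTF_INT_ENCODING(0x01000020)    /* == BTF_INT_SIGNED */
    BTF_INT_OFFSET(0x01000020)      /* == 0 */
    BTF_INT_BITS(0x01000020)        /* == 32; the fix masks only bits 0-7 */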
index b8e288a..eeb787b 100644
@@ -143,6 +143,8 @@ enum perf_event_sample_format {
        PERF_SAMPLE_PHYS_ADDR                   = 1U << 19,
 
        PERF_SAMPLE_MAX = 1U << 20,             /* non-ABI */
+
+       __PERF_SAMPLE_CALLCHAIN_EARLY           = 1ULL << 63,
 };
 
 /*
index 8c54a4b..c36a3a7 100644
@@ -1,8 +1,7 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+// SPDX-License-Identifier: LGPL-2.1
 /* Copyright (c) 2018 Facebook */
 
 #include <stdlib.h>
-#include <stdint.h>
 #include <string.h>
 #include <unistd.h>
 #include <errno.h>
@@ -27,13 +26,13 @@ struct btf {
        struct btf_type **types;
        const char *strings;
        void *nohdr_data;
-       uint32_t nr_types;
-       uint32_t types_size;
-       uint32_t data_size;
+       __u32 nr_types;
+       __u32 types_size;
+       __u32 data_size;
        int fd;
 };
 
-static const char *btf_name_by_offset(const struct btf *btf, uint32_t offset)
+static const char *btf_name_by_offset(const struct btf *btf, __u32 offset)
 {
        if (offset < btf->hdr->str_len)
                return &btf->strings[offset];
@@ -45,7 +44,7 @@ static int btf_add_type(struct btf *btf, struct btf_type *t)
 {
        if (btf->types_size - btf->nr_types < 2) {
                struct btf_type **new_types;
-               u32 expand_by, new_size;
+               __u32 expand_by, new_size;
 
                if (btf->types_size == BTF_MAX_NR_TYPES)
                        return -E2BIG;
@@ -72,7 +71,7 @@ static int btf_add_type(struct btf *btf, struct btf_type *t)
 static int btf_parse_hdr(struct btf *btf, btf_print_fn_t err_log)
 {
        const struct btf_header *hdr = btf->hdr;
-       u32 meta_left;
+       __u32 meta_left;
 
        if (btf->data_size < sizeof(struct btf_header)) {
                elog("BTF header not found\n");
@@ -151,7 +150,7 @@ static int btf_parse_type_sec(struct btf *btf, btf_print_fn_t err_log)
 
        while (next_type < end_type) {
                struct btf_type *t = next_type;
-               uint16_t vlen = BTF_INFO_VLEN(t->info);
+               __u16 vlen = BTF_INFO_VLEN(t->info);
                int err;
 
                next_type += sizeof(*t);
@@ -190,8 +189,7 @@ static int btf_parse_type_sec(struct btf *btf, btf_print_fn_t err_log)
        return 0;
 }
 
-static const struct btf_type *btf_type_by_id(const struct btf *btf,
-                                            uint32_t type_id)
+const struct btf_type *btf__type_by_id(const struct btf *btf, __u32 type_id)
 {
        if (type_id > btf->nr_types)
                return NULL;
@@ -209,7 +207,7 @@ static bool btf_type_is_void_or_null(const struct btf_type *t)
        return !t || btf_type_is_void(t);
 }
 
-static int64_t btf_type_size(const struct btf_type *t)
+static __s64 btf_type_size(const struct btf_type *t)
 {
        switch (BTF_INFO_KIND(t->info)) {
        case BTF_KIND_INT:
@@ -226,15 +224,15 @@ static int64_t btf_type_size(const struct btf_type *t)
 
 #define MAX_RESOLVE_DEPTH 32
 
-int64_t btf__resolve_size(const struct btf *btf, uint32_t type_id)
+__s64 btf__resolve_size(const struct btf *btf, __u32 type_id)
 {
        const struct btf_array *array;
        const struct btf_type *t;
-       uint32_t nelems = 1;
-       int64_t size = -1;
+       __u32 nelems = 1;
+       __s64 size = -1;
        int i;
 
-       t = btf_type_by_id(btf, type_id);
+       t = btf__type_by_id(btf, type_id);
        for (i = 0; i < MAX_RESOLVE_DEPTH && !btf_type_is_void_or_null(t);
             i++) {
                size = btf_type_size(t);
@@ -259,7 +257,7 @@ int64_t btf__resolve_size(const struct btf *btf, uint32_t type_id)
                        return -EINVAL;
                }
 
-               t = btf_type_by_id(btf, type_id);
+               t = btf__type_by_id(btf, type_id);
        }
 
        if (size < 0)
@@ -271,9 +269,9 @@ int64_t btf__resolve_size(const struct btf *btf, uint32_t type_id)
        return nelems * size;
 }
 
-int32_t btf__find_by_name(const struct btf *btf, const char *type_name)
+__s32 btf__find_by_name(const struct btf *btf, const char *type_name)
 {
-       uint32_t i;
+       __u32 i;
 
        if (!strcmp(type_name, "void"))
                return 0;
@@ -302,10 +300,9 @@ void btf__free(struct btf *btf)
        free(btf);
 }
 
-struct btf *btf__new(uint8_t *data, uint32_t size,
-                    btf_print_fn_t err_log)
+struct btf *btf__new(__u8 *data, __u32 size, btf_print_fn_t err_log)
 {
-       uint32_t log_buf_size = 0;
+       __u32 log_buf_size = 0;
        char *log_buf = NULL;
        struct btf *btf;
        int err;
index 74bb344..caac3a4 100644
@@ -1,22 +1,24 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: LGPL-2.1 */
 /* Copyright (c) 2018 Facebook */
 
 #ifndef __BPF_BTF_H
 #define __BPF_BTF_H
 
-#include <stdint.h>
+#include <linux/types.h>
 
 #define BTF_ELF_SEC ".BTF"
 
 struct btf;
+struct btf_type;
 
 typedef int (*btf_print_fn_t)(const char *, ...)
        __attribute__((format(printf, 1, 2)));
 
 void btf__free(struct btf *btf);
-struct btf *btf__new(uint8_t *data, uint32_t size, btf_print_fn_t err_log);
-int32_t btf__find_by_name(const struct btf *btf, const char *type_name);
-int64_t btf__resolve_size(const struct btf *btf, uint32_t type_id);
+struct btf *btf__new(__u8 *data, __u32 size, btf_print_fn_t err_log);
+__s32 btf__find_by_name(const struct btf *btf, const char *type_name);
+const struct btf_type *btf__type_by_id(const struct btf *btf, __u32 id);
+__s64 btf__resolve_size(const struct btf *btf, __u32 type_id);
 int btf__fd(const struct btf *btf);
 
 #endif
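
A minimal usage sketch of the API as renamed above, assuming the caller has already read the raw .BTF section bytes into data/size (placeholders) and eliding error handling, including the possibility that btf__new() returns an ERR_PTR:

    struct btf *btf = btf__new(data, size, NULL);
    __s32 id = btf__find_by_name(btf, "my_type");   /* hypothetical name */

    if (id >= 0) {
            const struct btf_type *t = btf__type_by_id(btf, id);
            __s64 sz = btf__resolve_size(btf, id);
            /* ... use t and sz ... */
    }
    btf__free(btf);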
index a1e96b5..1aafdbe 100644
@@ -36,6 +36,7 @@
 #include <linux/err.h>
 #include <linux/kernel.h>
 #include <linux/bpf.h>
+#include <linux/btf.h>
 #include <linux/list.h>
 #include <linux/limits.h>
 #include <sys/stat.h>
@@ -216,8 +217,8 @@ struct bpf_map {
        size_t offset;
        int map_ifindex;
        struct bpf_map_def def;
-       uint32_t btf_key_type_id;
-       uint32_t btf_value_type_id;
+       __u32 btf_key_type_id;
+       __u32 btf_value_type_id;
        void *priv;
        bpf_map_clear_priv_t clear_priv;
 };
@@ -1014,68 +1015,72 @@ bpf_program__collect_reloc(struct bpf_program *prog, GElf_Shdr *shdr,
 
 static int bpf_map_find_btf_info(struct bpf_map *map, const struct btf *btf)
 {
+       const struct btf_type *container_type;
+       const struct btf_member *key, *value;
        struct bpf_map_def *def = &map->def;
        const size_t max_name = 256;
-       int64_t key_size, value_size;
-       int32_t key_id, value_id;
-       char name[max_name];
+       char container_name[max_name];
+       __s64 key_size, value_size;
+       __s32 container_id;
 
-       /* Find key type by name from BTF */
-       if (snprintf(name, max_name, "%s_key", map->name) == max_name) {
-               pr_warning("map:%s length of BTF key_type:%s_key is too long\n",
+       if (snprintf(container_name, max_name, "____btf_map_%s", map->name) ==
+           max_name) {
+               pr_warning("map:%s length of '____btf_map_%s' is too long\n",
                           map->name, map->name);
                return -EINVAL;
        }
 
-       key_id = btf__find_by_name(btf, name);
-       if (key_id < 0) {
-               pr_debug("map:%s key_type:%s cannot be found in BTF\n",
-                        map->name, name);
-               return key_id;
+       container_id = btf__find_by_name(btf, container_name);
+       if (container_id < 0) {
+               pr_debug("map:%s container_name:%s cannot be found in BTF. Missing BPF_ANNOTATE_KV_PAIR?\n",
+                        map->name, container_name);
+               return container_id;
        }
 
-       key_size = btf__resolve_size(btf, key_id);
-       if (key_size < 0) {
-               pr_warning("map:%s key_type:%s cannot get the BTF type_size\n",
-                          map->name, name);
-               return key_size;
+       container_type = btf__type_by_id(btf, container_id);
+       if (!container_type) {
+               pr_warning("map:%s cannot find BTF type for container_id:%u\n",
+                          map->name, container_id);
+               return -EINVAL;
        }
 
-       if (def->key_size != key_size) {
-               pr_warning("map:%s key_type:%s has BTF type_size:%u != key_size:%u\n",
-                          map->name, name, (unsigned int)key_size, def->key_size);
+       if (BTF_INFO_KIND(container_type->info) != BTF_KIND_STRUCT ||
+           BTF_INFO_VLEN(container_type->info) < 2) {
+               pr_warning("map:%s container_name:%s is an invalid container struct\n",
+                          map->name, container_name);
                return -EINVAL;
        }
 
-       /* Find value type from BTF */
-       if (snprintf(name, max_name, "%s_value", map->name) == max_name) {
-               pr_warning("map:%s length of BTF value_type:%s_value is too long\n",
-                         map->name, map->name);
-               return -EINVAL;
+       key = (struct btf_member *)(container_type + 1);
+       value = key + 1;
+
+       key_size = btf__resolve_size(btf, key->type);
+       if (key_size < 0) {
+               pr_warning("map:%s invalid BTF key_type_size\n",
+                          map->name);
+               return key_size;
        }
 
-       value_id = btf__find_by_name(btf, name);
-       if (value_id < 0) {
-               pr_debug("map:%s value_type:%s cannot be found in BTF\n",
-                        map->name, name);
-               return value_id;
+       if (def->key_size != key_size) {
+               pr_warning("map:%s btf_key_type_size:%u != map_def_key_size:%u\n",
+                          map->name, (__u32)key_size, def->key_size);
+               return -EINVAL;
        }
 
-       value_size = btf__resolve_size(btf, value_id);
+       value_size = btf__resolve_size(btf, value->type);
        if (value_size < 0) {
-               pr_warning("map:%s value_type:%s cannot get the BTF type_size\n",
-                          map->name, name);
+               pr_warning("map:%s invalid BTF value_type_size\n", map->name);
                return value_size;
        }
 
        if (def->value_size != value_size) {
-               pr_warning("map:%s value_type:%s has BTF type_size:%u != value_size:%u\n",
-                          map->name, name, (unsigned int)value_size, def->value_size);
+               pr_warning("map:%s btf_value_type_size:%u != map_def_value_size:%u\n",
+                          map->name, (__u32)value_size, def->value_size);
                return -EINVAL;
        }
 
-       map->btf_key_type_id = key_id;
-       map->btf_value_type_id = value_id;
+       map->btf_key_type_id = key->type;
+       map->btf_value_type_id = value->type;
 
        return 0;
 }
@@ -2089,12 +2094,12 @@ const char *bpf_map__name(struct bpf_map *map)
        return map ? map->name : NULL;
 }
 
-uint32_t bpf_map__btf_key_type_id(const struct bpf_map *map)
+__u32 bpf_map__btf_key_type_id(const struct bpf_map *map)
 {
        return map ? map->btf_key_type_id : 0;
 }
 
-uint32_t bpf_map__btf_value_type_id(const struct bpf_map *map)
+__u32 bpf_map__btf_value_type_id(const struct bpf_map *map)
 {
        return map ? map->btf_value_type_id : 0;
 }
@@ -2268,8 +2273,8 @@ bpf_perf_event_read_simple(void *mem, unsigned long size,
        volatile struct perf_event_mmap_page *header = mem;
        __u64 data_tail = header->data_tail;
        __u64 data_head = header->data_head;
+       int ret = LIBBPF_PERF_EVENT_ERROR;
        void *base, *begin, *end;
-       int ret;
 
        asm volatile("" ::: "memory"); /* in real code it should be smp_rmb() */
        if (data_head == data_tail)
index 0997653..b33ae02 100644
@@ -244,8 +244,8 @@ bpf_map__next(struct bpf_map *map, struct bpf_object *obj);
 int bpf_map__fd(struct bpf_map *map);
 const struct bpf_map_def *bpf_map__def(struct bpf_map *map);
 const char *bpf_map__name(struct bpf_map *map);
-uint32_t bpf_map__btf_key_type_id(const struct bpf_map *map);
-uint32_t bpf_map__btf_value_type_id(const struct bpf_map *map);
+__u32 bpf_map__btf_key_type_id(const struct bpf_map *map);
+__u32 bpf_map__btf_value_type_id(const struct bpf_map *map);
 
 typedef void (*bpf_map_clear_priv_t)(struct bpf_map *, void *);
 int bpf_map__set_priv(struct bpf_map *map, void *priv,
index 63a74c3..e33ef5b 100644
@@ -1,6 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0
 #include <string.h>
 
+#include <linux/stddef.h>
 #include <linux/perf_event.h>
 
 #include "../../util/intel-pt.h"
index 06bae70..950539f 100644
@@ -2,6 +2,7 @@
 #include <stdbool.h>
 #include <errno.h>
 
+#include <linux/stddef.h>
 #include <linux/perf_event.h>
 
 #include "../../perf.h"
index 60bf119..eafce1a 100644
@@ -7,6 +7,7 @@ perf-y += futex-wake-parallel.o
 perf-y += futex-requeue.o
 perf-y += futex-lock-pi.o
 
+perf-$(CONFIG_X86_64) += mem-memcpy-x86-64-lib.o
 perf-$(CONFIG_X86_64) += mem-memcpy-x86-64-asm.o
 perf-$(CONFIG_X86_64) += mem-memset-x86-64-asm.o
 
index b43f8d2..9ad015a 100644
@@ -6,6 +6,7 @@
 #define altinstr_replacement text
 #define globl p2align 4; .globl
 #define _ASM_EXTABLE_FAULT(x, y)
+#define _ASM_EXTABLE(x, y)
 
 #include "../../arch/x86/lib/memcpy_64.S"
 /*
diff --git a/tools/perf/bench/mem-memcpy-x86-64-lib.c b/tools/perf/bench/mem-memcpy-x86-64-lib.c
new file mode 100644
index 0000000..4130734
--- /dev/null
@@ -0,0 +1,24 @@
+/*
+ * From code in arch/x86/lib/usercopy_64.c, copied to keep the tools/
+ * copy of the kernel's arch/x86/lib/memcpy_64.S, as used in
+ * 'perf bench mem memcpy', happy.
+ */
+#include <linux/types.h>
+
+unsigned long __memcpy_mcsafe(void *dst, const void *src, size_t cnt);
+unsigned long mcsafe_handle_tail(char *to, char *from, unsigned len);
+
+unsigned long mcsafe_handle_tail(char *to, char *from, unsigned len)
+{
+       for (; len; --len, to++, from++) {
+               /*
+                * Call the assembly routine back directly since
+                * memcpy_mcsafe() may silently fall back to memcpy.
+                */
+               unsigned long rem = __memcpy_mcsafe(to, from, 1);
+
+               if (rem)
+                       break;
+       }
+       return len;
+}
index a1a9795..d215714 100644
@@ -5,6 +5,7 @@
 #include <time.h>
 #include <stdbool.h>
 #include <linux/types.h>
+#include <linux/stddef.h>
 #include <linux/perf_event.h>
 
 extern bool test_attr__enabled;
index 90d4577..6d7fe44 100644
@@ -2,6 +2,7 @@
 #ifndef __PERF_HEADER_H
 #define __PERF_HEADER_H
 
+#include <linux/stddef.h>
 #include <linux/perf_event.h>
 #include <sys/types.h>
 #include <stdbool.h>
index 760558d..cae1a9a 100644
@@ -10,6 +10,7 @@
 #define __PERF_NAMESPACES_H
 
 #include <sys/types.h>
+#include <linux/stddef.h>
 #include <linux/perf_event.h>
 #include <linux/refcount.h>
 #include <linux/types.h>
index d39e4ff..a6db83a 100644
@@ -106,7 +106,7 @@ The system configuration dump (if --quiet is not used) is followed by statistics
 \fBC1%, C2%, C3%\fP The residency percentage that Linux requested C1, C2, C3....  The system summary is the average of all CPUs in the system.  Note that these are software, reflecting what was requested.  The hardware counters reflect what was actually achieved.
 \fBCPU%c1, CPU%c3, CPU%c6, CPU%c7\fP show the percentage residency in hardware core idle states.  These numbers are from hardware residency counters.
 \fBCoreTmp\fP Degrees Celsius reported by the per-core Digital Thermal Sensor.
-\fBPkgTtmp\fP Degrees Celsius reported by the per-package Package Thermal Monitor.
+\fBPkgTmp\fP Degrees Celsius reported by the per-package Package Thermal Monitor.
 \fBGFX%rc6\fP The percentage of time the GPU is in the "render C6" state, rc6, during the measurement interval. From /sys/class/drm/card0/power/rc6_residency_ms.
 \fBGFXMHz\fP Instantaneous snapshot of what sysfs presents at the end of the measurement interval. From /sys/class/graphics/fb0/device/drm/card0/gt_cur_freq_mhz.
 \fBPkg%pc2, Pkg%pc3, Pkg%pc6, Pkg%pc7\fP percentage residency in hardware package idle states.  These numbers are from hardware residency counters.
@@ -114,7 +114,7 @@ The system configuration dump (if --quiet is not used) is followed by statistics
 \fBCorWatt\fP Watts consumed by the core part of the package.
 \fBGFXWatt\fP Watts consumed by the Graphics part of the package -- available only on client processors.
 \fBRAMWatt\fP Watts consumed by the DRAM DIMMS -- available only on server processors.
-\fBPKG_%\fP percent of the interval that RAPL throttling was active on the Package.
+\fBPKG_%\fP percent of the interval that RAPL throttling was active on the Package.  Note that the system summary is the sum of the package throttling time, and thus may be higher than 100% on a multi-package system.  Note that the meaning of this field is model specific.  For example, some hardware increments this counter when RAPL responds to thermal limits, but does not increment this counter when RAPL responds to power limits.  Comparing PkgWatt and PkgTmp to system limits is necessary.
 \fBRAM_%\fP percent of the interval that RAPL throttling was active on DRAM.
 .fi
 .SH TOO MUCH INFORMATION EXAMPLE
index 4d14bbb..980bd9d 100644
@@ -1163,9 +1163,7 @@ void format_all_counters(struct thread_data *t, struct core_data *c, struct pkg_
        if (!printed || !summary_only)
                print_header("\t");
 
-       if (topo.num_cpus > 1)
-               format_counters(&average.threads, &average.cores,
-                       &average.packages);
+       format_counters(&average.threads, &average.cores, &average.packages);
 
        printed = 1;
 
@@ -1692,7 +1690,7 @@ void get_apic_id(struct thread_data *t)
        t->x2apic_id = edx;
 
        if (debug && (t->apic_id != t->x2apic_id))
-               fprintf(stderr, "cpu%d: apic 0x%x x2apic 0x%x\n", t->cpu_id, t->apic_id, t->x2apic_id);
+               fprintf(outf, "cpu%d: apic 0x%x x2apic 0x%x\n", t->cpu_id, t->apic_id, t->x2apic_id);
 }
 
 /*
@@ -2473,55 +2471,43 @@ int get_core_id(int cpu)
 
 void set_node_data(void)
 {
-       char path[80];
-       FILE *filep;
-       int pkg, node, cpu;
-
-       struct pkg_node_info {
-               int count;
-               int min;
-       } *pni;
-
-       pni = calloc(topo.num_packages, sizeof(struct pkg_node_info));
-       if (!pni)
-               err(1, "calloc pkg_node_count");
-
-       for (pkg = 0; pkg < topo.num_packages; pkg++)
-               pni[pkg].min = topo.num_cpus;
-
-       for (node = 0; node <= topo.max_node_num; node++) {
-               /* find the "first" cpu in the node */
-               sprintf(path, "/sys/bus/node/devices/node%d/cpulist", node);
-               filep = fopen(path, "r");
-               if (!filep)
-                       continue;
-               fscanf(filep, "%d", &cpu);
-               fclose(filep);
-
-               pkg = cpus[cpu].physical_package_id;
-               pni[pkg].count++;
-
-               if (node < pni[pkg].min)
-                       pni[pkg].min = node;
-       }
-
-       for (pkg = 0; pkg < topo.num_packages; pkg++)
-               if (pni[pkg].count > topo.nodes_per_pkg)
-                       topo.nodes_per_pkg = pni[0].count;
-
-       /* Fake 1 node per pkg for machines that don't
-        * expose nodes and thus avoid -nan results
-        */
-       if (topo.nodes_per_pkg == 0)
-               topo.nodes_per_pkg = 1;
-
-       for (cpu = 0; cpu < topo.num_cpus; cpu++) {
-               pkg = cpus[cpu].physical_package_id;
-               node = cpus[cpu].physical_node_id;
-               cpus[cpu].logical_node_id = node - pni[pkg].min;
+       int pkg, node, lnode, cpu, cpux;
+       int cpu_count;
+
+       /* initialize logical_node_id */
+       for (cpu = 0; cpu <= topo.max_cpu_num; ++cpu)
+               cpus[cpu].logical_node_id = -1;
+
+       cpu_count = 0;
+       for (pkg = 0; pkg < topo.num_packages; pkg++) {
+               lnode = 0;
+               for (cpu = 0; cpu <= topo.max_cpu_num; ++cpu) {
+                       if (cpus[cpu].physical_package_id != pkg)
+                               continue;
+                       /* find a cpu with an unset logical_node_id */
+                       if (cpus[cpu].logical_node_id != -1)
+                               continue;
+                       cpus[cpu].logical_node_id = lnode;
+                       node = cpus[cpu].physical_node_id;
+                       cpu_count++;
+                       /*
+                        * find all matching cpus on this pkg and set
+                        * the logical_node_id
+                        */
+                       for (cpux = cpu; cpux <= topo.max_cpu_num; cpux++) {
+                               if ((cpus[cpux].physical_package_id == pkg) &&
+                                  (cpus[cpux].physical_node_id == node)) {
+                                       cpus[cpux].logical_node_id = lnode;
+                                       cpu_count++;
+                               }
+                       }
+                       lnode++;
+                       if (lnode > topo.nodes_per_pkg)
+                               topo.nodes_per_pkg = lnode;
+               }
+               if (cpu_count >= topo.max_cpu_num)
+                       break;
        }
-       free(pni);
-
 }
 
 int get_physical_node_id(struct cpu_topology *thiscpu)
@@ -4471,7 +4457,9 @@ void process_cpuid()
        family = (fms >> 8) & 0xf;
        model = (fms >> 4) & 0xf;
        stepping = fms & 0xf;
-       if (family == 6 || family == 0xf)
+       if (family == 0xf)
+               family += (fms >> 20) & 0xff;
+       if (family >= 6)
                model += ((fms >> 16) & 0xf) << 4;
 
        if (!quiet) {
@@ -4840,16 +4828,8 @@ void topology_probe()
                siblings = get_thread_siblings(&cpus[i]);
                if (siblings > max_siblings)
                        max_siblings = siblings;
-               if (cpus[i].thread_id != -1)
+               if (cpus[i].thread_id == 0)
                        topo.num_cores++;
-
-               if (debug > 1)
-                       fprintf(outf,
-                               "cpu %d pkg %d node %d core %d thread %d\n",
-                               i, cpus[i].physical_package_id,
-                               cpus[i].physical_node_id,
-                               cpus[i].physical_core_id,
-                               cpus[i].thread_id);
        }
 
        topo.cores_per_node = max_core_id + 1;
@@ -4875,6 +4855,20 @@ void topology_probe()
        topo.threads_per_core = max_siblings;
        if (debug > 1)
                fprintf(outf, "max_siblings %d\n", max_siblings);
+
+       if (debug < 1)
+               return;
+
+       for (i = 0; i <= topo.max_cpu_num; ++i) {
+               fprintf(outf,
+                       "cpu %d pkg %d node %d lnode %d core %d thread %d\n",
+                       i, cpus[i].physical_package_id,
+                       cpus[i].physical_node_id,
+                       cpus[i].logical_node_id,
+                       cpus[i].physical_core_id,
+                       cpus[i].thread_id);
+       }
+
 }
 
 void
@@ -5102,7 +5096,7 @@ int get_and_dump_counters(void)
 }
 
 void print_version() {
-       fprintf(outf, "turbostat version 18.06.20"
+       fprintf(outf, "turbostat version 18.07.27"
                " - Len Brown <lenb@kernel.org>\n");
 }
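
A worked example of the extended family/model decode added to process_cpuid() above, using an illustrative CPUID leaf-1 value for a Skylake part:

    unsigned int fms = 0x000506e3, family, model, stepping;

    family   = (fms >> 8) & 0xf;                    /* 6 */
    model    = (fms >> 4) & 0xf;                    /* 0xe */
    stepping = fms & 0xf;                           /* 3 */
    if (family == 0xf)
            family += (fms >> 20) & 0xff;           /* not taken here */
    if (family >= 6)
            model += ((fms >> 16) & 0xf) << 4;      /* model becomes 0x5e */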
 
index 7a6214e..a362e3d 100644
@@ -105,7 +105,7 @@ $(OUTPUT)/test_xdp_noinline.o: CLANG_FLAGS += -fno-inline
 
 BTF_LLC_PROBE := $(shell $(LLC) -march=bpf -mattr=help 2>&1 | grep dwarfris)
 BTF_PAHOLE_PROBE := $(shell $(BTF_PAHOLE) --help 2>&1 | grep BTF)
-BTF_OBJCOPY_PROBE := $(shell $(LLVM_OBJCOPY) --version 2>&1 | grep LLVM)
+BTF_OBJCOPY_PROBE := $(shell $(LLVM_OBJCOPY) --help 2>&1 | grep -i 'usage.*llvm')
 
 ifneq ($(BTF_LLC_PROBE),)
 ifneq ($(BTF_PAHOLE_PROBE),)
index f2f28b6..810de20 100644
@@ -158,6 +158,15 @@ struct bpf_map_def {
        unsigned int numa_node;
 };
 
+#define BPF_ANNOTATE_KV_PAIR(name, type_key, type_val)         \
+       struct ____btf_map_##name {                             \
+               type_key key;                                   \
+               type_val value;                                 \
+       };                                                      \
+       struct ____btf_map_##name                               \
+       __attribute__ ((section(".maps." #name), used))         \
+               ____btf_map_##name = { }
+
 static int (*bpf_skb_load_bytes)(void *ctx, int off, void *to, int len) =
        (void *) BPF_FUNC_skb_load_bytes;
 static int (*bpf_skb_store_bytes)(void *ctx, int off, void *from, int len, int flags) =
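
For illustration, a map named my_map with int keys and a struct value would be annotated as below (all names hypothetical; the test_btf_haskv.c hunk further down shows the in-tree usage):

    struct my_value { long counter; };

    BPF_ANNOTATE_KV_PAIR(my_map, int, struct my_value);
    /*
     * This emits struct ____btf_map_my_map { int key; struct my_value value; }
     * in section ".maps.my_map"; libbpf looks the container up by name in BTF
     * (see the ____btf_map_%s lookup in the libbpf hunk above).
     */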
index 3619f30..ffdd277 100644
@@ -247,6 +247,34 @@ static struct btf_raw_test raw_tests[] = {
        .max_entries = 4,
 },
 
+{
+       .descr = "struct test #3 Invalid member offset",
+       .raw_types = {
+               /* int */                                       /* [1] */
+               BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4),
+               /* int64 */                                     /* [2] */
+               BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 64, 8),
+
+               /* struct A { */                                /* [3] */
+               BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 2), 16),
+               BTF_MEMBER_ENC(NAME_TBD, 1, 64),        /* int m;               */
+               BTF_MEMBER_ENC(NAME_TBD, 2, 0),         /* int64 n; */
+               /* } */
+               BTF_END_RAW,
+       },
+       .str_sec = "\0A\0m\0n\0",
+       .str_sec_size = sizeof("\0A\0m\0n\0"),
+       .map_type = BPF_MAP_TYPE_ARRAY,
+       .map_name = "struct_test3_map",
+       .key_size = sizeof(int),
+       .value_size = 16,
+       .key_type_id = 1,
+       .value_type_id = 3,
+       .max_entries = 4,
+       .btf_load_err = true,
+       .err_str = "Invalid member bits_offset",
+},
+
 /* Test member exceeds the size of struct.
  *
  * struct A {
@@ -479,7 +507,7 @@ static struct btf_raw_test raw_tests[] = {
        .key_size = sizeof(int),
        .value_size = sizeof(void *) * 4,
        .key_type_id = 1,
-       .value_type_id = 4,
+       .value_type_id = 5,
        .max_entries = 4,
 },
 
@@ -1264,6 +1292,88 @@ static struct btf_raw_test raw_tests[] = {
        .err_str = "type != 0",
 },
 
+{
+       .descr = "arraymap invalid btf key (a bit field)",
+       .raw_types = {
+               /* int */                               /* [1] */
+               BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4),
+               /* 32 bit int with 32 bit offset */     /* [2] */
+               BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 32, 32, 8),
+               BTF_END_RAW,
+       },
+       .str_sec = "",
+       .str_sec_size = sizeof(""),
+       .map_type = BPF_MAP_TYPE_ARRAY,
+       .map_name = "array_map_check_btf",
+       .key_size = sizeof(int),
+       .value_size = sizeof(int),
+       .key_type_id = 2,
+       .value_type_id = 1,
+       .max_entries = 4,
+       .map_create_err = true,
+},
+
+{
+       .descr = "arraymap invalid btf key (!= 32 bits)",
+       .raw_types = {
+               /* int */                               /* [1] */
+               BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4),
+               /* 16 bit int with 0 bit offset */      /* [2] */
+               BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 16, 2),
+               BTF_END_RAW,
+       },
+       .str_sec = "",
+       .str_sec_size = sizeof(""),
+       .map_type = BPF_MAP_TYPE_ARRAY,
+       .map_name = "array_map_check_btf",
+       .key_size = sizeof(int),
+       .value_size = sizeof(int),
+       .key_type_id = 2,
+       .value_type_id = 1,
+       .max_entries = 4,
+       .map_create_err = true,
+},
+
+{
+       .descr = "arraymap invalid btf value (too small)",
+       .raw_types = {
+               /* int */                               /* [1] */
+               BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4),
+               BTF_END_RAW,
+       },
+       .str_sec = "",
+       .str_sec_size = sizeof(""),
+       .map_type = BPF_MAP_TYPE_ARRAY,
+       .map_name = "array_map_check_btf",
+       .key_size = sizeof(int),
+       /* btf_value_size < map->value_size */
+       .value_size = sizeof(__u64),
+       .key_type_id = 1,
+       .value_type_id = 1,
+       .max_entries = 4,
+       .map_create_err = true,
+},
+
+{
+       .descr = "arraymap invalid btf value (too big)",
+       .raw_types = {
+               /* int */                               /* [1] */
+               BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4),
+               BTF_END_RAW,
+       },
+       .str_sec = "",
+       .str_sec_size = sizeof(""),
+       .map_type = BPF_MAP_TYPE_ARRAY,
+       .map_name = "array_map_check_btf",
+       .key_size = sizeof(int),
+       /* btf_value_size > map->value_size */
+       .value_size = sizeof(__u16),
+       .key_type_id = 1,
+       .value_type_id = 1,
+       .max_entries = 4,
+       .map_create_err = true,
+},
+
 }; /* struct btf_raw_test raw_tests[] */
 
 static const char *get_next_str(const char *start, const char *end)
@@ -2023,7 +2133,7 @@ static struct btf_raw_test pprint_test = {
                BTF_ENUM_ENC(NAME_TBD, 2),
                BTF_ENUM_ENC(NAME_TBD, 3),
                /* struct pprint_mapv */                /* [16] */
-               BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 8), 28),
+               BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 8), 32),
                BTF_MEMBER_ENC(NAME_TBD, 11, 0),        /* uint32_t ui32 */
                BTF_MEMBER_ENC(NAME_TBD, 10, 32),       /* uint16_t ui16 */
                BTF_MEMBER_ENC(NAME_TBD, 12, 64),       /* int32_t si32 */
index 8c7ca09..b21b876 100644
@@ -10,11 +10,6 @@ struct ipv_counts {
        unsigned int v6;
 };
 
-typedef int btf_map_key;
-typedef struct ipv_counts btf_map_value;
-btf_map_key dumm_key;
-btf_map_value dummy_value;
-
 struct bpf_map_def SEC("maps") btf_map = {
        .type = BPF_MAP_TYPE_ARRAY,
        .key_size = sizeof(int),
@@ -22,6 +17,8 @@ struct bpf_map_def SEC("maps") btf_map = {
        .max_entries = 4,
 };
 
+BPF_ANNOTATE_KV_PAIR(btf_map, int, struct ipv_counts);
+
 struct dummy_tracepoint_args {
        unsigned long long pad;
        struct sock *sock;
index 270fa8f..785eabf 100755
@@ -115,14 +115,14 @@ ip netns exec ns2 ip -6 route add fb00::6 encap bpf in obj test_lwt_seg6local.o
 ip netns exec ns2 ip -6 route add fd00::1 dev veth3 via fb00::43 scope link
 
 ip netns exec ns3 ip -6 route add fc42::1 dev veth5 via fb00::65
-ip netns exec ns3 ip -6 route add fd00::1 encap seg6local action End.BPF obj test_lwt_seg6local.o sec add_egr_x dev veth4
+ip netns exec ns3 ip -6 route add fd00::1 encap seg6local action End.BPF endpoint obj test_lwt_seg6local.o sec add_egr_x dev veth4
 
-ip netns exec ns4 ip -6 route add fd00::2 encap seg6local action End.BPF obj test_lwt_seg6local.o sec pop_egr dev veth6
+ip netns exec ns4 ip -6 route add fd00::2 encap seg6local action End.BPF endpoint obj test_lwt_seg6local.o sec pop_egr dev veth6
 ip netns exec ns4 ip -6 addr add fc42::1 dev lo
 ip netns exec ns4 ip -6 route add fd00::3 dev veth7 via fb00::87
 
 ip netns exec ns5 ip -6 route add fd00::4 table 117 dev veth9 via fb00::109
-ip netns exec ns5 ip -6 route add fd00::3 encap seg6local action End.BPF obj test_lwt_seg6local.o sec inspect_t dev veth8
+ip netns exec ns5 ip -6 route add fd00::3 encap seg6local action End.BPF endpoint obj test_lwt_seg6local.o sec inspect_t dev veth8
 
 ip netns exec ns6 ip -6 addr add fb00::6/16 dev lo
 ip netns exec ns6 ip -6 addr add fd00::4/16 dev lo
index 9e78df2..0c7d9e5 100644
@@ -354,7 +354,7 @@ static int msg_loop(int fd, int iov_count, int iov_length, int cnt,
                while (s->bytes_recvd < total_bytes) {
                        if (txmsg_cork) {
                                timeout.tv_sec = 0;
-                               timeout.tv_usec = 1000;
+                               timeout.tv_usec = 300000;
                        } else {
                                timeout.tv_sec = 1;
                                timeout.tv_usec = 0;
index f5f7bcc..41106d9 100644
@@ -12004,6 +12004,46 @@ static struct bpf_test tests[] = {
                .errstr = "BPF_XADD stores into R2 packet",
                .prog_type = BPF_PROG_TYPE_XDP,
        },
+       {
+               "xadd/w check whether src/dst got mangled, 1",
+               .insns = {
+                       BPF_MOV64_IMM(BPF_REG_0, 1),
+                       BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
+                       BPF_MOV64_REG(BPF_REG_7, BPF_REG_10),
+                       BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
+                       BPF_STX_XADD(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
+                       BPF_STX_XADD(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
+                       BPF_JMP_REG(BPF_JNE, BPF_REG_6, BPF_REG_0, 3),
+                       BPF_JMP_REG(BPF_JNE, BPF_REG_7, BPF_REG_10, 2),
+                       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
+                       BPF_EXIT_INSN(),
+                       BPF_MOV64_IMM(BPF_REG_0, 42),
+                       BPF_EXIT_INSN(),
+               },
+               .result = ACCEPT,
+               .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+               .retval = 3,
+       },
+       {
+               "xadd/w check whether src/dst got mangled, 2",
+               .insns = {
+                       BPF_MOV64_IMM(BPF_REG_0, 1),
+                       BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
+                       BPF_MOV64_REG(BPF_REG_7, BPF_REG_10),
+                       BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -8),
+                       BPF_STX_XADD(BPF_W, BPF_REG_10, BPF_REG_0, -8),
+                       BPF_STX_XADD(BPF_W, BPF_REG_10, BPF_REG_0, -8),
+                       BPF_JMP_REG(BPF_JNE, BPF_REG_6, BPF_REG_0, 3),
+                       BPF_JMP_REG(BPF_JNE, BPF_REG_7, BPF_REG_10, 2),
+                       BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_10, -8),
+                       BPF_EXIT_INSN(),
+                       BPF_MOV64_IMM(BPF_REG_0, 42),
+                       BPF_EXIT_INSN(),
+               },
+               .result = ACCEPT,
+               .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+               .retval = 3,
+       },
        {
                "bpf_get_stack return R0 within range",
                .insns = {
diff --git a/tools/testing/selftests/ftrace/test.d/00basic/snapshot.tc b/tools/testing/selftests/ftrace/test.d/00basic/snapshot.tc
new file mode 100644
index 0000000..3b1f45e
--- /dev/null
@@ -0,0 +1,28 @@
+#!/bin/sh
+# description: Snapshot and tracing setting
+# flags: instance
+
+[ ! -f snapshot ] && exit_unsupported
+
+echo "Set tracing off"
+echo 0 > tracing_on
+
+echo "Allocate and take a snapshot"
+echo 1 > snapshot
+
+# Since trace buffer is empty, snapshot is also empty, but allocated
+grep -q "Snapshot is allocated" snapshot
+
+echo "Ensure keep tracing off"
+test `cat tracing_on` -eq 0
+
+echo "Set tracing on"
+echo 1 > tracing_on
+
+echo "Take a snapshot again"
+echo 1 > snapshot
+
+echo "Ensure keep tracing on"
+test `cat tracing_on` -eq 1
+
+exit 0
index 77f7627..e8c5dff 100644
@@ -402,7 +402,7 @@ int main(int argc, char *argv[])
                exit(1);
        }
 
-       fd = socket(AF_INET6, SOCK_STREAM, 0);
+       fd = socket(cfg_family, SOCK_STREAM, 0);
        if (fd == -1) {
                perror("socket");
                exit(1);
index c15f270..65541c2 100755
@@ -1,6 +1,6 @@
 #!/bin/bash
 #
-# Usage: configinit.sh config-spec-file [ build output dir ]
+# Usage: configinit.sh config-spec-file build-output-dir results-dir
 #
 # Create a .config file from the spec file.  Run from the kernel source tree.
 # Exits with 0 if all went well, with 1 if all went well but the config
@@ -40,20 +40,18 @@ mkdir $T
 
 c=$1
 buildloc=$2
+resdir=$3
 builddir=
-if test -n $buildloc
+if echo $buildloc | grep -q '^O='
 then
-       if echo $buildloc | grep -q '^O='
+       builddir=`echo $buildloc | sed -e 's/^O=//'`
+       if test ! -d $builddir
        then
-               builddir=`echo $buildloc | sed -e 's/^O=//'`
-               if test ! -d $builddir
-               then
-                       mkdir $builddir
-               fi
-       else
-               echo Bad build directory: \"$buildloc\"
-               exit 2
+               mkdir $builddir
        fi
+else
+       echo Bad build directory: \"$buildloc\"
+       exit 2
 fi
 
 sed -e 's/^\(CONFIG[0-9A-Z_]*\)=.*$/grep -v "^# \1" |/' < $c > $T/u.sh
@@ -61,12 +59,12 @@ sed -e 's/^\(CONFIG[0-9A-Z_]*=\).*$/grep -v \1 |/' < $c >> $T/u.sh
 grep '^grep' < $T/u.sh > $T/upd.sh
 echo "cat - $c" >> $T/upd.sh
 make mrproper
-make $buildloc distclean > $builddir/Make.distclean 2>&1
-make $buildloc $TORTURE_DEFCONFIG > $builddir/Make.defconfig.out 2>&1
+make $buildloc distclean > $resdir/Make.distclean 2>&1
+make $buildloc $TORTURE_DEFCONFIG > $resdir/Make.defconfig.out 2>&1
 mv $builddir/.config $builddir/.config.sav
 sh $T/upd.sh < $builddir/.config.sav > $builddir/.config
 cp $builddir/.config $builddir/.config.new
-yes '' | make $buildloc oldconfig > $builddir/Make.oldconfig.out 2> $builddir/Make.oldconfig.err
+yes '' | make $buildloc oldconfig > $resdir/Make.oldconfig.out 2> $resdir/Make.oldconfig.err
 
 # verify new config matches specification.
 configcheck.sh $builddir/.config $c
index 34d1267..9115fcd 100755
@@ -2,7 +2,7 @@
 #
 # Build a kvm-ready Linux kernel from the tree in the current directory.
 #
-# Usage: kvm-build.sh config-template build-dir
+# Usage: kvm-build.sh config-template build-dir resdir
 #
 # This program is free software; you can redistribute it and/or modify
 # it under the terms of the GNU General Public License as published by
@@ -29,6 +29,7 @@ then
        exit 1
 fi
 builddir=${2}
+resdir=${3}
 
 T=${TMPDIR-/tmp}/test-linux.sh.$$
 trap 'rm -rf $T' 0
@@ -41,19 +42,19 @@ CONFIG_VIRTIO_PCI=y
 CONFIG_VIRTIO_CONSOLE=y
 ___EOF___
 
-configinit.sh $T/config O=$builddir
+configinit.sh $T/config O=$builddir $resdir
 retval=$?
 if test $retval -gt 1
 then
        exit 2
 fi
 ncpus=`cpus2use.sh`
-make O=$builddir -j$ncpus $TORTURE_KMAKE_ARG > $builddir/Make.out 2>&1
+make O=$builddir -j$ncpus $TORTURE_KMAKE_ARG > $resdir/Make.out 2>&1
 retval=$?
-if test $retval -ne 0 || grep "rcu[^/]*": < $builddir/Make.out | egrep -q "Stop|Error|error:|warning:" || egrep -q "Stop|Error|error:" < $builddir/Make.out
+if test $retval -ne 0 || grep "rcu[^/]*": < $resdir/Make.out | egrep -q "Stop|Error|error:|warning:" || egrep -q "Stop|Error|error:" < $resdir/Make.out
 then
        echo Kernel build error
-       egrep "Stop|Error|error:|warning:" < $builddir/Make.out
+       egrep "Stop|Error|error:|warning:" < $resdir/Make.out
        echo Run aborted.
        exit 3
 fi
index 477ecb1..0fa8a61 100755
@@ -70,4 +70,5 @@ else
        else
                print_warning $nclosecalls "Reader Batch close calls in" $(($dur/60)) minute run: $i
        fi
+       echo $nclosecalls "Reader Batch close calls in" $(($dur/60)) minute run: $i > $i/console.log.rcu.diags
 fi
index c27e978..c9bab57 100755
@@ -39,6 +39,7 @@ do
                        head -1 $resdir/log
                fi
                TORTURE_SUITE="`cat $i/../TORTURE_SUITE`"
+               rm -f $i/console.log.*.diags
                kvm-recheck-${TORTURE_SUITE}.sh $i
                if test -f "$i/console.log"
                then
index c5b0f94..f7247ee 100755
@@ -98,14 +98,15 @@ then
        ln -s $base_resdir/.config $resdir  # for kvm-recheck.sh
        # Arch-independent indicator
        touch $resdir/builtkernel
-elif kvm-build.sh $T/Kc2 $builddir
+elif kvm-build.sh $T/Kc2 $builddir $resdir
 then
        # Had to build a kernel for this test.
        QEMU="`identify_qemu $builddir/vmlinux`"
        BOOT_IMAGE="`identify_boot_image $QEMU`"
-       cp $builddir/Make*.out $resdir
        cp $builddir/vmlinux $resdir
        cp $builddir/.config $resdir
+       cp $builddir/Module.symvers $resdir > /dev/null || :
+       cp $builddir/System.map $resdir > /dev/null || :
        if test -n "$BOOT_IMAGE"
        then
                cp $builddir/$BOOT_IMAGE $resdir
index 56610db..5a7a62d 100755
@@ -347,7 +347,7 @@ function dump(first, pastlast, batchnum)
        print "needqemurun="
        jn=1
        for (j = first; j < pastlast; j++) {
-               builddir=KVM "/b" jn
+               builddir=KVM "/b1"
                cpusr[jn] = cpus[j];
                if (cfrep[cf[j]] == "") {
                        cfr[jn] = cf[j];
index 1729343..84933f6 100755
@@ -163,6 +163,13 @@ then
        print_warning Summary: $summary
        cat $T.diags >> $file.diags
 fi
+for i in $file.*.diags
+do
+       if test -f "$i"
+       then
+               cat $i >> $file.diags
+       fi
+done
 if ! test -s $file.diags
 then
        rm -f $file.diags
index 5d2cc0b..5c3213c 100644
@@ -1,5 +1,5 @@
-rcutorture.onoff_interval=1 rcutorture.onoff_holdoff=30
-rcutree.gp_preinit_delay=3
+rcutorture.onoff_interval=200 rcutorture.onoff_holdoff=30
+rcutree.gp_preinit_delay=12
 rcutree.gp_init_delay=3
 rcutree.gp_cleanup_delay=3
 rcutree.kthread_prio=2
diff --git a/tools/testing/selftests/rcutorture/configs/rcu/TREE08-T.boot b/tools/testing/selftests/rcutorture/configs/rcu/TREE08-T.boot
deleted file mode 100644
index 883149b..0000000
+++ /dev/null
@@ -1 +0,0 @@
-rcutree.rcu_fanout_exact=1
index 24ec910..7bab824 100644
@@ -39,7 +39,7 @@ rcutorture_param_onoff () {
        if ! bootparam_hotplug_cpu "$1" && configfrag_hotplug_cpu "$2"
        then
                echo CPU-hotplug kernel, adding rcutorture onoff. 1>&2
-               echo rcutorture.onoff_interval=3 rcutorture.onoff_holdoff=30
+               echo rcutorture.onoff_interval=1000 rcutorture.onoff_holdoff=30
        fi
 }
 
index 95dd146..0f395df 100644
 
 /******************** Little Endian Handling ********************************/
 
-#define cpu_to_le16(x)  htole16(x)
-#define cpu_to_le32(x)  htole32(x)
+/*
+ * cpu_to_le16/32 are used when initializing structures, a context where a
+ * function call is not allowed, so they are implemented here as macros that
+ * expand to constant expressions.
+ */
+
+#if __BYTE_ORDER == __LITTLE_ENDIAN
+#define cpu_to_le16(x)  (x)
+#define cpu_to_le32(x)  (x)
+#else
+#define cpu_to_le16(x)  ((((x) >> 8) & 0xffu) | (((x) & 0xffu) << 8))
+#define cpu_to_le32(x)  \
+       ((((x) & 0xff000000u) >> 24) | (((x) & 0x00ff0000u) >>  8) | \
+       (((x) & 0x0000ff00u) <<  8) | (((x) & 0x000000ffu) << 24))
+#endif
+
 #define le32_to_cpu(x)  le32toh(x)
 #define le16_to_cpu(x)  le16toh(x)
 
-
 /******************** Messages and Errors ***********************************/
 
 static const char argv0[] = "ffs-test";
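
A sketch of the static-initializer use case the new comment describes; with the macro form, the conversion is a constant expression and is legal in an initializer (the descriptor and field chosen here are illustrative):

    static const struct usb_endpoint_descriptor ep_desc = {
            .wMaxPacketSize = cpu_to_le16(512),     /* constant expression */
    };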
index 0ac3caf..d0351f8 100644
@@ -13,8 +13,8 @@
 } while (0);
 /* Weak barriers should be used. If not - it's a bug */
 # define mb() abort()
-# define rmb() abort()
-# define wmb() abort()
+# define dma_rmb() abort()
+# define dma_wmb() abort()
 #else
 #error Please fill in barrier macros
 #endif
index fca8381..fb22bcc 100644
@@ -52,6 +52,11 @@ static inline void *kmalloc(size_t s, gfp_t gfp)
                return __kmalloc_fake;
        return malloc(s);
 }
+static inline void *kmalloc_array(unsigned n, size_t s, gfp_t gfp)
+{
+       return kmalloc(n * s, gfp);
+}
+
 static inline void *kzalloc(size_t s, gfp_t gfp)
 {
        void *p = kmalloc(s, gfp);