Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
author    David S. Miller <davem@davemloft.net>    Tue, 6 Mar 2018 05:53:44 +0000 (00:53 -0500)
committer David S. Miller <davem@davemloft.net>    Tue, 6 Mar 2018 06:20:46 +0000 (01:20 -0500)
All of the conflicts were cases of overlapping changes.

In net/core/devlink.c, we have to take into account that the
resource size_params has become a struct member rather than
a pointer to such an object.
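
The practical effect of that change is switching from pointer
dereference to direct member access when the conflicting hunks are
merged.  A minimal sketch of the pattern, using hypothetical struct
and field names rather than the exact devlink definitions:

    struct size_params {
    	unsigned int size_min;
    	unsigned int size_max;
    };

    struct resource_old {
    	struct size_params *size_params;	/* old layout: pointer, accessed with -> */
    };

    struct resource_new {
    	struct size_params size_params;		/* new layout: embedded member, accessed with . */
    };

    static unsigned int size_max_old(const struct resource_old *res)
    {
    	return res->size_params->size_max;
    }

    static unsigned int size_max_new(const struct resource_new *res)
    {
    	return res->size_params.size_max;
    }

Embedding the parameters in the resource avoids a separate allocation
and lifetime to track, which is the usual motivation for this kind of
conversion.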

Signed-off-by: David S. Miller <davem@davemloft.net>
486 files changed:
Documentation/PCI/pci.txt
Documentation/devicetree/bindings/auxdisplay/arm-charlcd.txt [new file with mode: 0644]
Documentation/devicetree/bindings/eeprom/at24.txt
Documentation/devicetree/bindings/interrupt-controller/renesas,irqc.txt
Documentation/devicetree/bindings/misc/arm-charlcd.txt [deleted file]
Documentation/devicetree/bindings/net/renesas,ravb.txt
Documentation/devicetree/bindings/power/wakeup-source.txt
Documentation/devicetree/bindings/thermal/imx-thermal.txt
Documentation/ia64/serial.txt
Documentation/media/dmx.h.rst.exceptions
Documentation/media/uapi/dvb/dmx-qbuf.rst
Documentation/virtual/kvm/api.txt
Documentation/virtual/kvm/cpuid.txt
Documentation/virtual/kvm/msr.txt
Documentation/x86/intel_rdt_ui.txt
Documentation/x86/topology.txt
MAINTAINERS
Makefile
arch/alpha/include/asm/cmpxchg.h
arch/alpha/include/asm/xchg.h
arch/arc/Kconfig
arch/arc/boot/dts/axs101.dts
arch/arc/boot/dts/axs10x_mb.dtsi
arch/arc/boot/dts/haps_hs_idu.dts
arch/arc/boot/dts/nsim_700.dts
arch/arc/boot/dts/nsim_hs.dts
arch/arc/boot/dts/nsim_hs_idu.dts
arch/arc/boot/dts/nsimosci.dts
arch/arc/boot/dts/nsimosci_hs.dts
arch/arc/boot/dts/nsimosci_hs_idu.dts
arch/arc/include/asm/entry-arcv2.h
arch/arc/kernel/mcip.c
arch/arc/kernel/setup.c
arch/arc/kernel/smp.c
arch/arc/kernel/unwind.c
arch/arc/mm/cache.c
arch/arm/boot/dts/bcm11351.dtsi
arch/arm/boot/dts/bcm21664.dtsi
arch/arm/boot/dts/bcm2835.dtsi
arch/arm/boot/dts/bcm2836.dtsi
arch/arm/boot/dts/bcm2837.dtsi
arch/arm/boot/dts/bcm283x.dtsi
arch/arm/boot/dts/bcm958625hr.dts
arch/arm/boot/dts/gemini-dlink-dns-313.dts
arch/arm/boot/dts/imx6dl-icore-rqs.dts
arch/arm/boot/dts/logicpd-som-lv.dtsi
arch/arm/boot/dts/logicpd-torpedo-som.dtsi
arch/arm/boot/dts/omap5-uevm.dts
arch/arm/boot/dts/rk3036.dtsi
arch/arm/boot/dts/rk322x.dtsi
arch/arm/boot/dts/rk3288-phycore-som.dtsi
arch/arm/boot/dts/zx296702.dtsi
arch/arm/configs/omap2plus_defconfig
arch/arm/kernel/time.c
arch/arm/kvm/hyp/Makefile
arch/arm/kvm/hyp/banked-sr.c
arch/arm/mach-clps711x/board-dt.c
arch/arm/mach-davinci/board-dm355-evm.c
arch/arm/mach-davinci/board-dm355-leopard.c
arch/arm/mach-davinci/board-dm365-evm.c
arch/arm/mach-mvebu/Kconfig
arch/arm/mach-omap1/clock.c
arch/arm/mach-omap2/omap-wakeupgen.c
arch/arm/mach-omap2/omap_hwmod.c
arch/arm/mach-omap2/pm.c
arch/arm/mach-omap2/timer.c
arch/arm/mach-orion5x/Kconfig
arch/arm/mach-orion5x/dns323-setup.c
arch/arm/mach-orion5x/tsx09-common.c
arch/arm/plat-orion/common.c
arch/arm64/boot/dts/amlogic/meson-axg.dtsi
arch/arm64/boot/dts/amlogic/meson-gx.dtsi
arch/arm64/boot/dts/amlogic/meson-gxl.dtsi
arch/arm64/boot/dts/cavium/thunder2-99xx.dtsi
arch/arm64/boot/dts/hisilicon/hi6220-hikey.dts
arch/arm64/boot/dts/mediatek/mt8173.dtsi
arch/arm64/boot/dts/qcom/apq8096-db820c.dtsi
arch/arm64/boot/dts/qcom/msm8996.dtsi
arch/arm64/boot/dts/rockchip/rk3328-rock64.dts
arch/arm64/boot/dts/rockchip/rk3328.dtsi
arch/arm64/boot/dts/rockchip/rk3368.dtsi
arch/arm64/boot/dts/rockchip/rk3399-sapphire.dtsi
arch/arm64/boot/dts/rockchip/rk3399.dtsi
arch/arm64/kernel/ptrace.c
arch/ia64/include/asm/atomic.h
arch/ia64/kernel/err_inject.c
arch/ia64/scripts/unwcheck.py
arch/parisc/include/asm/cacheflush.h
arch/parisc/include/asm/processor.h
arch/parisc/kernel/cache.c
arch/parisc/kernel/head.S
arch/parisc/kernel/pacache.S
arch/parisc/kernel/smp.c
arch/parisc/kernel/time.c
arch/parisc/mm/init.c
arch/powerpc/include/asm/firmware.h
arch/powerpc/kernel/eeh_driver.c
arch/powerpc/kernel/prom_init.c
arch/powerpc/kvm/book3s_xive.c
arch/powerpc/mm/drmem.c
arch/powerpc/net/bpf_jit_comp.c
arch/powerpc/net/bpf_jit_comp64.c
arch/powerpc/platforms/powernv/pci-ioda.c
arch/powerpc/platforms/powernv/setup.c
arch/powerpc/platforms/pseries/setup.c
arch/riscv/include/asm/barrier.h
arch/s390/kvm/intercept.c
arch/s390/kvm/interrupt.c
arch/s390/kvm/kvm-s390.c
arch/s390/kvm/kvm-s390.h
arch/s390/kvm/priv.c
arch/s390/kvm/vsie.c
arch/sh/boot/dts/Makefile
arch/x86/Kconfig
arch/x86/Makefile
arch/x86/boot/compressed/eboot.c
arch/x86/entry/calling.h
arch/x86/entry/entry_32.S
arch/x86/entry/entry_64.S
arch/x86/entry/entry_64_compat.S
arch/x86/include/asm/apm.h
arch/x86/include/asm/asm-prototypes.h
arch/x86/include/asm/bitops.h
arch/x86/include/asm/cpufeatures.h
arch/x86/include/asm/efi.h
arch/x86/include/asm/kvm_host.h
arch/x86/include/asm/microcode.h
arch/x86/include/asm/mmu_context.h
arch/x86/include/asm/nospec-branch.h
arch/x86/include/asm/paravirt.h
arch/x86/include/asm/paravirt_types.h
arch/x86/include/asm/percpu.h
arch/x86/include/asm/pgtable.h
arch/x86/include/asm/pgtable_32.h
arch/x86/include/asm/pgtable_64.h
arch/x86/include/asm/pgtable_types.h
arch/x86/include/asm/processor.h
arch/x86/include/asm/refcount.h
arch/x86/include/asm/rmwcc.h
arch/x86/include/uapi/asm/hyperv.h
arch/x86/include/uapi/asm/kvm_para.h
arch/x86/kernel/apic/io_apic.c
arch/x86/kernel/apic/vector.c
arch/x86/kernel/cpu/bugs.c
arch/x86/kernel/cpu/common.c
arch/x86/kernel/cpu/intel_rdt_rdtgroup.c
arch/x86/kernel/cpu/microcode/amd.c
arch/x86/kernel/cpu/microcode/core.c
arch/x86/kernel/cpu/microcode/intel.c
arch/x86/kernel/head_64.S
arch/x86/kernel/kvm.c
arch/x86/kernel/setup.c
arch/x86/kernel/setup_percpu.c
arch/x86/kernel/smpboot.c
arch/x86/kernel/unwind_orc.c
arch/x86/kvm/cpuid.c
arch/x86/kvm/lapic.c
arch/x86/kvm/mmu.c
arch/x86/kvm/svm.c
arch/x86/kvm/vmx.c
arch/x86/kvm/x86.c
arch/x86/lib/Makefile
arch/x86/lib/retpoline.S
arch/x86/mm/cpu_entry_area.c
arch/x86/mm/fault.c
arch/x86/mm/init_32.c
arch/x86/mm/mem_encrypt_boot.S
arch/x86/oprofile/nmi_int.c
arch/x86/platform/intel-mid/intel-mid.c
arch/x86/realmode/rm/trampoline_64.S
arch/x86/xen/enlighten_pv.c
arch/x86/xen/suspend.c
arch/xtensa/kernel/pci-dma.c
arch/xtensa/mm/init.c
block/blk-cgroup.c
block/blk-core.c
block/blk-mq.c
block/genhd.c
block/ioctl.c
block/kyber-iosched.c
block/mq-deadline.c
block/partition-generic.c
block/sed-opal.c
drivers/block/amiflop.c
drivers/block/ataflop.c
drivers/block/brd.c
drivers/block/floppy.c
drivers/block/loop.c
drivers/block/nbd.c
drivers/block/pktcdvd.c
drivers/block/swim.c
drivers/block/z2ram.c
drivers/bluetooth/btusb.c
drivers/bluetooth/hci_bcm.c
drivers/bus/ti-sysc.c
drivers/char/tpm/st33zp24/st33zp24.c
drivers/char/tpm/tpm-interface.c
drivers/char/tpm/tpm2-cmd.c
drivers/char/tpm/tpm_i2c_infineon.c
drivers/char/tpm/tpm_i2c_nuvoton.c
drivers/char/tpm/tpm_tis_core.c
drivers/clocksource/arc_timer.c
drivers/clocksource/fsl_ftm_timer.c
drivers/clocksource/mips-gic-timer.c
drivers/clocksource/timer-sun5i.c
drivers/cpufreq/Kconfig.arm
drivers/cpufreq/s3c24xx-cpufreq.c
drivers/cpufreq/scpi-cpufreq.c
drivers/crypto/ccp/psp-dev.c
drivers/dax/super.c
drivers/edac/sb_edac.c
drivers/gpio/gpiolib-of.c
drivers/gpu/drm/amd/amdgpu/amdgpu.h
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
drivers/gpu/drm/amd/display/dc/core/dc_stream.c
drivers/gpu/drm/amd/powerplay/amd_powerplay.c
drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
drivers/gpu/drm/drm_framebuffer.c
drivers/gpu/drm/i915/i915_gem_execbuffer.c
drivers/gpu/drm/i915/i915_gem_request.c
drivers/gpu/drm/i915/i915_reg.h
drivers/gpu/drm/i915/intel_audio.c
drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
drivers/gpu/drm/nouveau/nv50_display.c
drivers/gpu/drm/radeon/radeon_device.c
drivers/gpu/drm/radeon/radeon_pm.c
drivers/gpu/drm/scheduler/gpu_scheduler.c
drivers/gpu/drm/sun4i/sun4i_tcon.c
drivers/gpu/drm/virtio/virtgpu_ioctl.c
drivers/i2c/busses/i2c-octeon-core.c
drivers/i2c/busses/i2c-octeon-core.h
drivers/ide/ide-probe.c
drivers/iommu/intel-svm.c
drivers/md/bcache/request.c
drivers/md/bcache/super.c
drivers/md/md-multipath.c
drivers/md/md.c
drivers/md/md.h
drivers/md/raid1.c
drivers/md/raid1.h
drivers/md/raid10.c
drivers/md/raid10.h
drivers/md/raid5-log.h
drivers/md/raid5-ppl.c
drivers/md/raid5.c
drivers/md/raid5.h
drivers/media/Kconfig
drivers/media/common/videobuf2/Kconfig
drivers/media/common/videobuf2/Makefile
drivers/media/common/videobuf2/vb2-trace.c [new file with mode: 0644]
drivers/media/dvb-core/Makefile
drivers/media/dvb-core/dmxdev.c
drivers/media/dvb-core/dvb_demux.c
drivers/media/dvb-core/dvb_net.c
drivers/media/dvb-core/dvb_vb2.c
drivers/media/dvb-frontends/m88ds3103.c
drivers/media/i2c/tvp5150.c
drivers/media/pci/ttpci/av7110.c
drivers/media/pci/ttpci/av7110_av.c
drivers/media/usb/au0828/Kconfig
drivers/media/usb/ttusb-dec/ttusb_dec.c
drivers/media/v4l2-core/Kconfig
drivers/media/v4l2-core/Makefile
drivers/media/v4l2-core/vb2-trace.c [deleted file]
drivers/memory/brcmstb_dpfe.c
drivers/misc/ocxl/file.c
drivers/mmc/core/mmc_ops.c
drivers/mmc/host/dw_mmc-exynos.c
drivers/mmc/host/dw_mmc-k3.c
drivers/mmc/host/dw_mmc-rockchip.c
drivers/mmc/host/dw_mmc-zx.c
drivers/mmc/host/dw_mmc.c
drivers/mmc/host/dw_mmc.h
drivers/mmc/host/sdhci-pci-core.c
drivers/net/ethernet/freescale/gianfar.c
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h
drivers/net/ethernet/mellanox/mlxsw/spectrum.c
drivers/net/ethernet/mellanox/mlxsw/spectrum.h
drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c
drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
drivers/net/ethernet/renesas/sh_eth.c
drivers/net/ethernet/renesas/sh_eth.h
drivers/net/hyperv/netvsc.c
drivers/net/hyperv/netvsc_drv.c
drivers/net/hyperv/rndis_filter.c
drivers/net/phy/phy.c
drivers/net/phy/phy_device.c
drivers/net/ppp/ppp_generic.c
drivers/net/tun.c
drivers/net/usb/cdc_ether.c
drivers/net/usb/r8152.c
drivers/net/virtio_net.c
drivers/net/wan/hdlc_ppp.c
drivers/net/xen-netfront.c
drivers/nvdimm/pmem.c
drivers/nvme/host/core.c
drivers/nvme/host/fabrics.c
drivers/nvme/host/multipath.c
drivers/nvme/host/pci.c
drivers/nvme/host/rdma.c
drivers/nvme/target/core.c
drivers/nvme/target/loop.c
drivers/pci/setup-res.c
drivers/pinctrl/meson/pinctrl-meson-axg.c
drivers/platform/x86/intel-hid.c
drivers/platform/x86/intel-vbtn.c
drivers/platform/x86/wmi.c
drivers/s390/net/qeth_core_main.c
drivers/s390/net/qeth_l3.h
drivers/s390/net/qeth_l3_main.c
drivers/soc/imx/gpc.c
drivers/vfio/vfio_iommu_type1.c
drivers/watchdog/Kconfig
drivers/xen/events/events_base.c
drivers/xen/pvcalls-back.c
drivers/xen/pvcalls-front.c
fs/block_dev.c
fs/btrfs/ctree.h
fs/btrfs/inode-item.c
fs/btrfs/inode.c
fs/btrfs/relocation.c
fs/btrfs/send.c
fs/btrfs/super.c
fs/btrfs/sysfs.c
fs/btrfs/transaction.c
fs/btrfs/tree-log.c
fs/btrfs/volumes.c
fs/ceph/caps.c
fs/ceph/dir.c
fs/ceph/super.c
fs/ceph/super.h
fs/direct-io.c
fs/nfs/callback_proc.c
fs/nfs/nfs3proc.c
fs/nfs/nfs4client.c
fs/xfs/scrub/agheader.c
fs/xfs/xfs_refcount_item.c
fs/xfs/xfs_rmap_item.c
fs/xfs/xfs_super.c
include/drm/drm_drv.h
include/linux/bio.h
include/linux/compiler-clang.h
include/linux/compiler-gcc.h
include/linux/fs.h
include/linux/genhd.h
include/linux/init.h
include/linux/jump_label.h
include/linux/kernel.h
include/linux/kvm_host.h
include/linux/mutex.h
include/linux/nospec.h
include/linux/phy.h
include/linux/skbuff.h
include/media/demux.h
include/media/dmxdev.h
include/media/dvb_demux.h
include/media/dvb_vb2.h
include/net/devlink.h
include/soc/arc/mcip.h
include/uapi/drm/virtgpu_drm.h
include/uapi/linux/blktrace_api.h
include/uapi/linux/dvb/dmx.h
include/uapi/linux/kvm.h
include/uapi/linux/psp-sev.h
init/main.c
kernel/bpf/verifier.c
kernel/extable.c
kernel/irq/matrix.c
kernel/jump_label.c
kernel/memremap.c
kernel/printk/printk.c
kernel/time/timer.c
lib/dma-debug.c
lib/idr.c
lib/test_bpf.c
net/batman-adv/bat_iv_ogm.c
net/batman-adv/bat_v.c
net/batman-adv/bridge_loop_avoidance.c
net/batman-adv/fragmentation.c
net/batman-adv/hard-interface.c
net/batman-adv/originator.c
net/batman-adv/originator.h
net/batman-adv/soft-interface.c
net/batman-adv/types.h
net/bridge/br_netfilter_hooks.c
net/bridge/br_vlan.c
net/bridge/netfilter/ebt_among.c
net/bridge/netfilter/ebtables.c
net/ceph/ceph_common.c
net/core/dev.c
net/core/devlink.c
net/core/ethtool.c
net/core/skbuff.c
net/ipv4/ip_forward.c
net/ipv4/ip_gre.c
net/ipv4/ip_output.c
net/ipv4/ip_tunnel.c
net/ipv4/netfilter/ipt_CLUSTERIP.c
net/ipv4/netfilter/nf_flow_table_ipv4.c
net/ipv4/route.c
net/ipv4/tcp_illinois.c
net/ipv4/tcp_input.c
net/ipv4/xfrm4_output.c
net/ipv6/ip6_output.c
net/ipv6/ip6_tunnel.c
net/ipv6/netfilter.c
net/ipv6/netfilter/ip6t_rpfilter.c
net/ipv6/netfilter/nf_flow_table_ipv6.c
net/ipv6/netfilter/nf_nat_l3proto_ipv6.c
net/ipv6/netfilter/nft_fib_ipv6.c
net/ipv6/sit.c
net/ipv6/xfrm6_output.c
net/l2tp/l2tp_core.c
net/l2tp/l2tp_core.h
net/l2tp/l2tp_ip.c
net/l2tp/l2tp_ip6.c
net/l2tp/l2tp_ppp.c
net/mac80211/rx.c
net/mac80211/tx.c
net/mpls/af_mpls.c
net/netfilter/ipvs/ip_vs_ftp.c
net/netfilter/nf_tables_api.c
net/qrtr/smd.c
net/rds/tcp_listen.c
net/sched/sch_tbf.c
net/smc/af_smc.c
net/smc/smc_cdc.c
net/smc/smc_core.c
net/smc/smc_llc.c
net/tipc/group.c
net/tipc/socket.c
net/tls/tls_main.c
net/wireless/Kconfig
net/xfrm/xfrm_device.c
samples/seccomp/Makefile
scripts/Makefile.build
scripts/coccinelle/api/memdup.cocci
scripts/kallsyms.c
scripts/kconfig/confdata.c
scripts/kconfig/kxgettext.c
scripts/kconfig/lkc.h
scripts/kconfig/lxdialog/check-lxdialog.sh
scripts/kconfig/menu.c
scripts/kconfig/symbol.c
scripts/kconfig/util.c
scripts/kconfig/zconf.l
scripts/kconfig/zconf.y
scripts/link-vmlinux.sh
sound/core/control.c
sound/pci/hda/hda_intel.c
sound/pci/hda/patch_realtek.c
sound/usb/quirks-table.h
sound/x86/intel_hdmi_audio.c
tools/kvm/kvm_stat/kvm_stat
tools/kvm/kvm_stat/kvm_stat.txt
tools/objtool/builtin-check.c
tools/objtool/builtin-orc.c
tools/objtool/builtin.h
tools/objtool/check.c
tools/objtool/check.h
tools/testing/radix-tree/idr-test.c
tools/testing/radix-tree/linux.c
tools/testing/radix-tree/linux/compiler_types.h [new file with mode: 0644]
tools/testing/radix-tree/linux/gfp.h
tools/testing/radix-tree/linux/slab.h
tools/testing/selftests/android/Makefile
tools/testing/selftests/bpf/test_verifier.c
tools/testing/selftests/futex/Makefile
tools/testing/selftests/memfd/config [new file with mode: 0644]
tools/testing/selftests/memory-hotplug/Makefile
tools/testing/selftests/pstore/config
tools/testing/selftests/sync/Makefile
tools/testing/selftests/tc-testing/tc-tests/actions/skbmod.json
tools/testing/selftests/vDSO/Makefile
tools/testing/selftests/vm/.gitignore
virt/kvm/arm/arch_timer.c
virt/kvm/kvm_main.c

index 611a75e..badb26a 100644 (file)
@@ -570,7 +570,9 @@ your driver if they're helpful, or just use plain hex constants.
 The device IDs are arbitrary hex numbers (vendor controlled) and normally used
 only in a single location, the pci_device_id table.
 
-Please DO submit new vendor/device IDs to http://pciids.sourceforge.net/.
+Please DO submit new vendor/device IDs to http://pci-ids.ucw.cz/.
+There are mirrors of the pci.ids file at http://pciids.sourceforge.net/
+and https://github.com/pciutils/pciids.
 
 
 
diff --git a/Documentation/devicetree/bindings/auxdisplay/arm-charlcd.txt b/Documentation/devicetree/bindings/auxdisplay/arm-charlcd.txt
new file mode 100644 (file)
index 0000000..e28e2aa
--- /dev/null
@@ -0,0 +1,18 @@
+ARM Versatile Character LCD
+-----------------------------------------------------
+This binding defines the character LCD interface found on ARM Versatile AB
+and PB reference platforms.
+
+Required properties:
+- compatible : "arm,versatile-clcd"
+- reg : Location and size of character LCD registers
+
+Optional properties:
+- interrupts - single interrupt for character LCD. The character LCD can
+  operate in polled mode without an interrupt.
+
+Example:
+       lcd@10008000 {
+               compatible = "arm,versatile-lcd";
+               reg = <0x10008000 0x1000>;
+       };
index 1812c84..abfae1b 100644 (file)
@@ -38,9 +38,9 @@ Required properties:
 
                 "catalyst",
                 "microchip",
+                "nxp",
                 "ramtron",
                 "renesas",
-                "nxp",
                 "st",
 
                 Some vendors use different model names for chips which are just
index 33c9a10..20f121d 100644 (file)
@@ -14,6 +14,7 @@ Required properties:
     - "renesas,irqc-r8a7794" (R-Car E2)
     - "renesas,intc-ex-r8a7795" (R-Car H3)
     - "renesas,intc-ex-r8a7796" (R-Car M3-W)
+    - "renesas,intc-ex-r8a77965" (R-Car M3-N)
     - "renesas,intc-ex-r8a77970" (R-Car V3M)
     - "renesas,intc-ex-r8a77995" (R-Car D3)
 - #interrupt-cells: has to be <2>: an interrupt index and flags, as defined in
diff --git a/Documentation/devicetree/bindings/misc/arm-charlcd.txt b/Documentation/devicetree/bindings/misc/arm-charlcd.txt
deleted file mode 100644 (file)
index e28e2aa..0000000
+++ /dev/null
@@ -1,18 +0,0 @@
-ARM Versatile Character LCD
------------------------------------------------------
-This binding defines the character LCD interface found on ARM Versatile AB
-and PB reference platforms.
-
-Required properties:
-- compatible : "arm,versatile-clcd"
-- reg : Location and size of character LCD registers
-
-Optional properties:
-- interrupts - single interrupt for character LCD. The character LCD can
-  operate in polled mode without an interrupt.
-
-Example:
-       lcd@10008000 {
-               compatible = "arm,versatile-lcd";
-               reg = <0x10008000 0x1000>;
-       };
index c902261..92fd4b2 100644 (file)
@@ -18,6 +18,7 @@ Required properties:
       - "renesas,etheravb-r8a7795" for the R8A7795 SoC.
       - "renesas,etheravb-r8a7796" for the R8A7796 SoC.
       - "renesas,etheravb-r8a77970" for the R8A77970 SoC.
+      - "renesas,etheravb-r8a77980" for the R8A77980 SoC.
       - "renesas,etheravb-r8a77995" for the R8A77995 SoC.
       - "renesas,etheravb-rcar-gen3" as a fallback for the above
                R-Car Gen3 devices.
index 3c81f78..5d254ab 100644 (file)
@@ -60,7 +60,7 @@ Examples
                #size-cells = <0>;
 
                button@1 {
-                       debounce_interval = <50>;
+                       debounce-interval = <50>;
                        wakeup-source;
                        linux,code = <116>;
                        label = "POWER";
index 28be51a..379eb76 100644 (file)
@@ -22,7 +22,32 @@ Optional properties:
 - clocks : thermal sensor's clock source.
 
 Example:
+ocotp: ocotp@21bc000 {
+       #address-cells = <1>;
+       #size-cells = <1>;
+       compatible = "fsl,imx6sx-ocotp", "syscon";
+       reg = <0x021bc000 0x4000>;
+       clocks = <&clks IMX6SX_CLK_OCOTP>;
 
+       tempmon_calib: calib@38 {
+               reg = <0x38 4>;
+       };
+
+       tempmon_temp_grade: temp-grade@20 {
+               reg = <0x20 4>;
+       };
+};
+
+tempmon: tempmon {
+       compatible = "fsl,imx6sx-tempmon", "fsl,imx6q-tempmon";
+       interrupts = <GIC_SPI 49 IRQ_TYPE_LEVEL_HIGH>;
+       fsl,tempmon = <&anatop>;
+       nvmem-cells = <&tempmon_calib>, <&tempmon_temp_grade>;
+       nvmem-cell-names = "calib", "temp_grade";
+       clocks = <&clks IMX6SX_CLK_PLL3_USB_OTG>;
+};
+
+Legacy method (Deprecated):
 tempmon {
        compatible = "fsl,imx6q-tempmon";
        fsl,tempmon = <&anatop>;
index 6869c73..a63d2c5 100644 (file)
@@ -111,7 +111,7 @@ TROUBLESHOOTING SERIAL CONSOLE PROBLEMS
 
        - If you don't have an HCDP, the kernel doesn't know where
          your console lives until the driver discovers serial
-         devices.  Use "console=uart, io,0x3f8" (or appropriate
+         devices.  Use "console=uart,io,0x3f8" (or appropriate
          address for your machine).
 
     Kernel and init script output works fine, but no "login:" prompt:
index 63f55a9..a8c4239 100644 (file)
@@ -50,9 +50,15 @@ replace typedef dmx_filter_t :c:type:`dmx_filter`
 replace typedef dmx_pes_type_t :c:type:`dmx_pes_type`
 replace typedef dmx_input_t :c:type:`dmx_input`
 
-ignore symbol DMX_OUT_DECODER
-ignore symbol DMX_OUT_TAP
-ignore symbol DMX_OUT_TS_TAP
-ignore symbol DMX_OUT_TSDEMUX_TAP
+replace symbol DMX_BUFFER_FLAG_HAD_CRC32_DISCARD :c:type:`dmx_buffer_flags`
+replace        symbol DMX_BUFFER_FLAG_TEI :c:type:`dmx_buffer_flags`
+replace        symbol DMX_BUFFER_PKT_COUNTER_MISMATCH :c:type:`dmx_buffer_flags`
+replace        symbol DMX_BUFFER_FLAG_DISCONTINUITY_DETECTED :c:type:`dmx_buffer_flags`
+replace        symbol DMX_BUFFER_FLAG_DISCONTINUITY_INDICATOR :c:type:`dmx_buffer_flags`
+
+replace symbol DMX_OUT_DECODER :c:type:`dmx_output`
+replace symbol DMX_OUT_TAP :c:type:`dmx_output`
+replace symbol DMX_OUT_TS_TAP :c:type:`dmx_output`
+replace symbol DMX_OUT_TSDEMUX_TAP :c:type:`dmx_output`
 
 replace ioctl DMX_DQBUF dmx_qbuf
index b48c493..be5a4c6 100644 (file)
@@ -51,9 +51,10 @@ out to disk. Buffers remain locked until dequeued, until the
 the device is closed.
 
 Applications call the ``DMX_DQBUF`` ioctl to dequeue a filled
-(capturing) buffer from the driver's outgoing queue. They just set the ``reserved`` field array to zero. When ``DMX_DQBUF`` is called with a
-pointer to this structure, the driver fills the remaining fields or
-returns an error code.
+(capturing) buffer from the driver's outgoing queue.
+They just set the ``index`` field withe the buffer ID to be queued.
+When ``DMX_DQBUF`` is called with a pointer to struct :c:type:`dmx_buffer`,
+the driver fills the remaining fields or returns an error code.
 
 By default ``DMX_DQBUF`` blocks when no buffer is in the outgoing
 queue. When the ``O_NONBLOCK`` flag was given to the
index 792fa87..d6b3ff5 100644 (file)
@@ -123,14 +123,15 @@ memory layout to fit in user mode), check KVM_CAP_MIPS_VZ and use the
 flag KVM_VM_MIPS_VZ.
 
 
-4.3 KVM_GET_MSR_INDEX_LIST
+4.3 KVM_GET_MSR_INDEX_LIST, KVM_GET_MSR_FEATURE_INDEX_LIST
 
-Capability: basic
+Capability: basic, KVM_CAP_GET_MSR_FEATURES for KVM_GET_MSR_FEATURE_INDEX_LIST
 Architectures: x86
-Type: system
+Type: system ioctl
 Parameters: struct kvm_msr_list (in/out)
 Returns: 0 on success; -1 on error
 Errors:
+  EFAULT:    the msr index list cannot be read from or written to
   E2BIG:     the msr index list is to be to fit in the array specified by
              the user.
 
@@ -139,16 +140,23 @@ struct kvm_msr_list {
        __u32 indices[0];
 };
 
-This ioctl returns the guest msrs that are supported.  The list varies
-by kvm version and host processor, but does not change otherwise.  The
-user fills in the size of the indices array in nmsrs, and in return
-kvm adjusts nmsrs to reflect the actual number of msrs and fills in
-the indices array with their numbers.
+The user fills in the size of the indices array in nmsrs, and in return
+kvm adjusts nmsrs to reflect the actual number of msrs and fills in the
+indices array with their numbers.
+
+KVM_GET_MSR_INDEX_LIST returns the guest msrs that are supported.  The list
+varies by kvm version and host processor, but does not change otherwise.
 
 Note: if kvm indicates supports MCE (KVM_CAP_MCE), then the MCE bank MSRs are
 not returned in the MSR list, as different vcpus can have a different number
 of banks, as set via the KVM_X86_SETUP_MCE ioctl.
 
+KVM_GET_MSR_FEATURE_INDEX_LIST returns the list of MSRs that can be passed
+to the KVM_GET_MSRS system ioctl.  This lets userspace probe host capabilities
+and processor features that are exposed via MSRs (e.g., VMX capabilities).
+This list also varies by kvm version and host processor, but does not change
+otherwise.
+
 
 4.4 KVM_CHECK_EXTENSION
 
@@ -475,14 +483,22 @@ Support for this has been removed.  Use KVM_SET_GUEST_DEBUG instead.
 
 4.18 KVM_GET_MSRS
 
-Capability: basic
+Capability: basic (vcpu), KVM_CAP_GET_MSR_FEATURES (system)
 Architectures: x86
-Type: vcpu ioctl
+Type: system ioctl, vcpu ioctl
 Parameters: struct kvm_msrs (in/out)
-Returns: 0 on success, -1 on error
+Returns: number of msrs successfully returned;
+        -1 on error
+
+When used as a system ioctl:
+Reads the values of MSR-based features that are available for the VM.  This
+is similar to KVM_GET_SUPPORTED_CPUID, but it returns MSR indices and values.
+The list of msr-based features can be obtained using KVM_GET_MSR_FEATURE_INDEX_LIST
+in a system ioctl.
 
+When used as a vcpu ioctl:
 Reads model-specific registers from the vcpu.  Supported msr indices can
-be obtained using KVM_GET_MSR_INDEX_LIST.
+be obtained using KVM_GET_MSR_INDEX_LIST in a system ioctl.
 
 struct kvm_msrs {
        __u32 nmsrs; /* number of msrs in entries */
index dcab6dc..87a7506 100644 (file)
@@ -58,6 +58,10 @@ KVM_FEATURE_PV_TLB_FLUSH           ||     9 || guest checks this feature bit
                                    ||       || before enabling paravirtualized
                                    ||       || tlb flush.
 ------------------------------------------------------------------------------
+KVM_FEATURE_ASYNC_PF_VMEXIT        ||    10 || paravirtualized async PF VM exit
+                                   ||       || can be enabled by setting bit 2
+                                   ||       || when writing to msr 0x4b564d02
+------------------------------------------------------------------------------
 KVM_FEATURE_CLOCKSOURCE_STABLE_BIT ||    24 || host will warn if no guest-side
                                    ||       || per-cpu warps are expected in
                                    ||       || kvmclock.
index 1ebecc1..f3f0d57 100644 (file)
@@ -170,7 +170,8 @@ MSR_KVM_ASYNC_PF_EN: 0x4b564d02
        when asynchronous page faults are enabled on the vcpu 0 when
        disabled. Bit 1 is 1 if asynchronous page faults can be injected
        when vcpu is in cpl == 0. Bit 2 is 1 if asynchronous page faults
-       are delivered to L1 as #PF vmexits.
+       are delivered to L1 as #PF vmexits.  Bit 2 can be set only if
+       KVM_FEATURE_ASYNC_PF_VMEXIT is present in CPUID.
 
        First 4 byte of 64 byte memory location will be written to by
        the hypervisor at the time of asynchronous page fault (APF)
index 756fd76..71c3098 100644 (file)
@@ -671,7 +671,7 @@ occupancy of the real time threads on these cores.
 # mkdir p1
 
 Move the cpus 4-7 over to p1
-# echo f0 > p0/cpus
+# echo f0 > p1/cpus
 
 View the llc occupancy snapshot
 
index f3e9d7e..2953e3e 100644 (file)
@@ -108,7 +108,7 @@ The topology of a system is described in the units of:
 
     The number of online threads is also printed in /proc/cpuinfo "siblings."
 
-  - topology_sibling_mask():
+  - topology_sibling_cpumask():
 
     The cpumask contains all online threads in the core to which a thread
     belongs.
index e0b3900..9d632b8 100644 (file)
@@ -1238,7 +1238,7 @@ F:        drivers/clk/at91
 
 ARM/ATMEL AT91RM9200, AT91SAM9 AND SAMA5 SOC SUPPORT
 M:     Nicolas Ferre <nicolas.ferre@microchip.com>
-M:     Alexandre Belloni <alexandre.belloni@free-electrons.com>
+M:     Alexandre Belloni <alexandre.belloni@bootlin.com>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 W:     http://www.linux4sam.org
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/nferre/linux-at91.git
@@ -1590,7 +1590,7 @@ ARM/Marvell Dove/MV78xx0/Orion SOC support
 M:     Jason Cooper <jason@lakedaemon.net>
 M:     Andrew Lunn <andrew@lunn.ch>
 M:     Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com>
-M:     Gregory Clement <gregory.clement@free-electrons.com>
+M:     Gregory Clement <gregory.clement@bootlin.com>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:     Maintained
 F:     Documentation/devicetree/bindings/soc/dove/
@@ -1604,7 +1604,7 @@ F:        arch/arm/boot/dts/orion5x*
 ARM/Marvell Kirkwood and Armada 370, 375, 38x, 39x, XP, 3700, 7K/8K SOC support
 M:     Jason Cooper <jason@lakedaemon.net>
 M:     Andrew Lunn <andrew@lunn.ch>
-M:     Gregory Clement <gregory.clement@free-electrons.com>
+M:     Gregory Clement <gregory.clement@bootlin.com>
 M:     Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:     Maintained
@@ -1999,8 +1999,10 @@ M:       Maxime Coquelin <mcoquelin.stm32@gmail.com>
 M:     Alexandre Torgue <alexandre.torgue@st.com>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:     Maintained
-T:     git git://git.kernel.org/pub/scm/linux/kernel/git/mcoquelin/stm32.git
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/atorgue/stm32.git stm32-next
 N:     stm32
+F:     arch/arm/boot/dts/stm32*
+F:     arch/arm/mach-stm32/
 F:     drivers/clocksource/armv7m_systick.c
 
 ARM/TANGO ARCHITECTURE
@@ -7600,8 +7602,10 @@ F:       mm/kasan/
 F:     scripts/Makefile.kasan
 
 KCONFIG
+M:     Masahiro Yamada <yamada.masahiro@socionext.com>
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/masahiroy/linux-kbuild.git kconfig
 L:     linux-kbuild@vger.kernel.org
-S:     Orphan
+S:     Maintained
 F:     Documentation/kbuild/kconfig-language.txt
 F:     scripts/kconfig/
 
@@ -10935,6 +10939,17 @@ L:     linux-gpio@vger.kernel.org
 S:     Supported
 F:     drivers/pinctrl/pinctrl-at91-pio4.*
 
+PIN CONTROLLER - FREESCALE
+M:     Dong Aisheng <aisheng.dong@nxp.com>
+M:     Fabio Estevam <festevam@gmail.com>
+M:     Shawn Guo <shawnguo@kernel.org>
+M:     Stefan Agner <stefan@agner.ch>
+R:     Pengutronix Kernel Team <kernel@pengutronix.de>
+L:     linux-gpio@vger.kernel.org
+S:     Maintained
+F:     drivers/pinctrl/freescale/
+F:     Documentation/devicetree/bindings/pinctrl/fsl,*
+
 PIN CONTROLLER - INTEL
 M:     Mika Westerberg <mika.westerberg@linux.intel.com>
 M:     Heikki Krogerus <heikki.krogerus@linux.intel.com>
index d9cf3a4..c4322de 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
 VERSION = 4
 PATCHLEVEL = 16
 SUBLEVEL = 0
-EXTRAVERSION = -rc2
+EXTRAVERSION = -rc4
 NAME = Fearless Coyote
 
 # *DOCUMENTATION*
@@ -388,7 +388,7 @@ PYTHON              = python
 CHECK          = sparse
 
 CHECKFLAGS     := -D__linux__ -Dlinux -D__STDC__ -Dunix -D__unix__ \
-                 -Wbitwise -Wno-return-void $(CF)
+                 -Wbitwise -Wno-return-void -Wno-unknown-attribute $(CF)
 NOSTDINC_FLAGS  =
 CFLAGS_MODULE   =
 AFLAGS_MODULE   =
@@ -489,6 +489,11 @@ KBUILD_CFLAGS += $(CLANG_TARGET) $(CLANG_GCC_TC)
 KBUILD_AFLAGS += $(CLANG_TARGET) $(CLANG_GCC_TC)
 endif
 
+RETPOLINE_CFLAGS_GCC := -mindirect-branch=thunk-extern -mindirect-branch-register
+RETPOLINE_CFLAGS_CLANG := -mretpoline-external-thunk
+RETPOLINE_CFLAGS := $(call cc-option,$(RETPOLINE_CFLAGS_GCC),$(call cc-option,$(RETPOLINE_CFLAGS_CLANG)))
+export RETPOLINE_CFLAGS
+
 ifeq ($(config-targets),1)
 # ===========================================================================
 # *config targets only - make sure prerequisites are updated, and descend
@@ -579,10 +584,9 @@ ifeq ($(KBUILD_EXTMOD),)
 # To avoid any implicit rule to kick in, define an empty command
 $(KCONFIG_CONFIG) include/config/auto.conf.cmd: ;
 
-# If .config is newer than include/config/auto.conf, someone tinkered
-# with it and forgot to run make oldconfig.
-# if auto.conf.cmd is missing then we are probably in a cleaned tree so
-# we execute the config step to be sure to catch updated Kconfig files
+# The actual configuration files used during the build are stored in
+# include/generated/ and include/config/. Update them if .config is newer than
+# include/config/auto.conf (which mirrors .config).
 include/config/%.conf: $(KCONFIG_CONFIG) include/config/auto.conf.cmd
        $(Q)$(MAKE) -f $(srctree)/Makefile silentoldconfig
 else
@@ -857,8 +861,7 @@ KBUILD_AFLAGS   += $(ARCH_AFLAGS)   $(KAFLAGS)
 KBUILD_CFLAGS   += $(ARCH_CFLAGS)   $(KCFLAGS)
 
 # Use --build-id when available.
-LDFLAGS_BUILD_ID := $(patsubst -Wl$(comma)%,%,\
-                             $(call cc-ldoption, -Wl$(comma)--build-id,))
+LDFLAGS_BUILD_ID := $(call ld-option, --build-id)
 KBUILD_LDFLAGS_MODULE += $(LDFLAGS_BUILD_ID)
 LDFLAGS_vmlinux += $(LDFLAGS_BUILD_ID)
 
index 46ebf14..8a2b331 100644 (file)
@@ -6,7 +6,6 @@
  * Atomic exchange routines.
  */
 
-#define __ASM__MB
 #define ____xchg(type, args...)                __xchg ## type ## _local(args)
 #define ____cmpxchg(type, args...)     __cmpxchg ## type ## _local(args)
 #include <asm/xchg.h>
        cmpxchg_local((ptr), (o), (n));                                 \
 })
 
-#ifdef CONFIG_SMP
-#undef __ASM__MB
-#define __ASM__MB      "\tmb\n"
-#endif
 #undef ____xchg
 #undef ____cmpxchg
 #define ____xchg(type, args...)                __xchg ##type(args)
@@ -64,7 +59,6 @@
        cmpxchg((ptr), (o), (n));                                       \
 })
 
-#undef __ASM__MB
 #undef ____cmpxchg
 
 #endif /* _ALPHA_CMPXCHG_H */
index 68dfb3c..e2b59fa 100644 (file)
  * Atomic exchange.
  * Since it can be used to implement critical sections
  * it must clobber "memory" (also for interrupts in UP).
+ *
+ * The leading and the trailing memory barriers guarantee that these
+ * operations are fully ordered.
+ *
  */
 
 static inline unsigned long
@@ -19,6 +23,7 @@ ____xchg(_u8, volatile char *m, unsigned long val)
 {
        unsigned long ret, tmp, addr64;
 
+       smp_mb();
        __asm__ __volatile__(
        "       andnot  %4,7,%3\n"
        "       insbl   %1,%4,%1\n"
@@ -28,12 +33,12 @@ ____xchg(_u8, volatile char *m, unsigned long val)
        "       or      %1,%2,%2\n"
        "       stq_c   %2,0(%3)\n"
        "       beq     %2,2f\n"
-               __ASM__MB
        ".subsection 2\n"
        "2:     br      1b\n"
        ".previous"
        : "=&r" (ret), "=&r" (val), "=&r" (tmp), "=&r" (addr64)
        : "r" ((long)m), "1" (val) : "memory");
+       smp_mb();
 
        return ret;
 }
@@ -43,6 +48,7 @@ ____xchg(_u16, volatile short *m, unsigned long val)
 {
        unsigned long ret, tmp, addr64;
 
+       smp_mb();
        __asm__ __volatile__(
        "       andnot  %4,7,%3\n"
        "       inswl   %1,%4,%1\n"
@@ -52,12 +58,12 @@ ____xchg(_u16, volatile short *m, unsigned long val)
        "       or      %1,%2,%2\n"
        "       stq_c   %2,0(%3)\n"
        "       beq     %2,2f\n"
-               __ASM__MB
        ".subsection 2\n"
        "2:     br      1b\n"
        ".previous"
        : "=&r" (ret), "=&r" (val), "=&r" (tmp), "=&r" (addr64)
        : "r" ((long)m), "1" (val) : "memory");
+       smp_mb();
 
        return ret;
 }
@@ -67,17 +73,18 @@ ____xchg(_u32, volatile int *m, unsigned long val)
 {
        unsigned long dummy;
 
+       smp_mb();
        __asm__ __volatile__(
        "1:     ldl_l %0,%4\n"
        "       bis $31,%3,%1\n"
        "       stl_c %1,%2\n"
        "       beq %1,2f\n"
-               __ASM__MB
        ".subsection 2\n"
        "2:     br 1b\n"
        ".previous"
        : "=&r" (val), "=&r" (dummy), "=m" (*m)
        : "rI" (val), "m" (*m) : "memory");
+       smp_mb();
 
        return val;
 }
@@ -87,17 +94,18 @@ ____xchg(_u64, volatile long *m, unsigned long val)
 {
        unsigned long dummy;
 
+       smp_mb();
        __asm__ __volatile__(
        "1:     ldq_l %0,%4\n"
        "       bis $31,%3,%1\n"
        "       stq_c %1,%2\n"
        "       beq %1,2f\n"
-               __ASM__MB
        ".subsection 2\n"
        "2:     br 1b\n"
        ".previous"
        : "=&r" (val), "=&r" (dummy), "=m" (*m)
        : "rI" (val), "m" (*m) : "memory");
+       smp_mb();
 
        return val;
 }
@@ -128,10 +136,12 @@ ____xchg(, volatile void *ptr, unsigned long x, int size)
  * store NEW in MEM.  Return the initial value in MEM.  Success is
  * indicated by comparing RETURN with OLD.
  *
- * The memory barrier should be placed in SMP only when we actually
- * make the change. If we don't change anything (so if the returned
- * prev is equal to old) then we aren't acquiring anything new and
- * we don't need any memory barrier as far I can tell.
+ * The leading and the trailing memory barriers guarantee that these
+ * operations are fully ordered.
+ *
+ * The trailing memory barrier is placed in SMP unconditionally, in
+ * order to guarantee that dependency ordering is preserved when a
+ * dependency is headed by an unsuccessful operation.
  */
 
 static inline unsigned long
@@ -139,6 +149,7 @@ ____cmpxchg(_u8, volatile char *m, unsigned char old, unsigned char new)
 {
        unsigned long prev, tmp, cmp, addr64;
 
+       smp_mb();
        __asm__ __volatile__(
        "       andnot  %5,7,%4\n"
        "       insbl   %1,%5,%1\n"
@@ -150,13 +161,13 @@ ____cmpxchg(_u8, volatile char *m, unsigned char old, unsigned char new)
        "       or      %1,%2,%2\n"
        "       stq_c   %2,0(%4)\n"
        "       beq     %2,3f\n"
-               __ASM__MB
        "2:\n"
        ".subsection 2\n"
        "3:     br      1b\n"
        ".previous"
        : "=&r" (prev), "=&r" (new), "=&r" (tmp), "=&r" (cmp), "=&r" (addr64)
        : "r" ((long)m), "Ir" (old), "1" (new) : "memory");
+       smp_mb();
 
        return prev;
 }
@@ -166,6 +177,7 @@ ____cmpxchg(_u16, volatile short *m, unsigned short old, unsigned short new)
 {
        unsigned long prev, tmp, cmp, addr64;
 
+       smp_mb();
        __asm__ __volatile__(
        "       andnot  %5,7,%4\n"
        "       inswl   %1,%5,%1\n"
@@ -177,13 +189,13 @@ ____cmpxchg(_u16, volatile short *m, unsigned short old, unsigned short new)
        "       or      %1,%2,%2\n"
        "       stq_c   %2,0(%4)\n"
        "       beq     %2,3f\n"
-               __ASM__MB
        "2:\n"
        ".subsection 2\n"
        "3:     br      1b\n"
        ".previous"
        : "=&r" (prev), "=&r" (new), "=&r" (tmp), "=&r" (cmp), "=&r" (addr64)
        : "r" ((long)m), "Ir" (old), "1" (new) : "memory");
+       smp_mb();
 
        return prev;
 }
@@ -193,6 +205,7 @@ ____cmpxchg(_u32, volatile int *m, int old, int new)
 {
        unsigned long prev, cmp;
 
+       smp_mb();
        __asm__ __volatile__(
        "1:     ldl_l %0,%5\n"
        "       cmpeq %0,%3,%1\n"
@@ -200,13 +213,13 @@ ____cmpxchg(_u32, volatile int *m, int old, int new)
        "       mov %4,%1\n"
        "       stl_c %1,%2\n"
        "       beq %1,3f\n"
-               __ASM__MB
        "2:\n"
        ".subsection 2\n"
        "3:     br 1b\n"
        ".previous"
        : "=&r"(prev), "=&r"(cmp), "=m"(*m)
        : "r"((long) old), "r"(new), "m"(*m) : "memory");
+       smp_mb();
 
        return prev;
 }
@@ -216,6 +229,7 @@ ____cmpxchg(_u64, volatile long *m, unsigned long old, unsigned long new)
 {
        unsigned long prev, cmp;
 
+       smp_mb();
        __asm__ __volatile__(
        "1:     ldq_l %0,%5\n"
        "       cmpeq %0,%3,%1\n"
@@ -223,13 +237,13 @@ ____cmpxchg(_u64, volatile long *m, unsigned long old, unsigned long new)
        "       mov %4,%1\n"
        "       stq_c %1,%2\n"
        "       beq %1,3f\n"
-               __ASM__MB
        "2:\n"
        ".subsection 2\n"
        "3:     br 1b\n"
        ".previous"
        : "=&r"(prev), "=&r"(cmp), "=m"(*m)
        : "r"((long) old), "r"(new), "m"(*m) : "memory");
+       smp_mb();
 
        return prev;
 }
index f3a80cf..d76bf4a 100644 (file)
@@ -484,7 +484,6 @@ config ARC_CURR_IN_REG
 
 config ARC_EMUL_UNALIGNED
        bool "Emulate unaligned memory access (userspace only)"
-       default N
        select SYSCTL_ARCH_UNALIGN_NO_WARN
        select SYSCTL_ARCH_UNALIGN_ALLOW
        depends on ISA_ARCOMPACT
index 70aec7d..626b694 100644 (file)
@@ -17,6 +17,6 @@
        compatible = "snps,axs101", "snps,arc-sdp";
 
        chosen {
-               bootargs = "earlycon=uart8250,mmio32,0xe0022000,115200n8 console=tty0 console=ttyS3,115200n8 consoleblank=0 video=1280x720@60";
+               bootargs = "earlycon=uart8250,mmio32,0xe0022000,115200n8 console=tty0 console=ttyS3,115200n8 consoleblank=0 video=1280x720@60 print-fatal-signals=1";
        };
 };
index 74d070c..47b74fb 100644 (file)
                        };
 
                        eeprom@0x54{
-                               compatible = "24c01";
+                               compatible = "atmel,24c01";
                                reg = <0x54>;
                                pagesize = <0x8>;
                        };
 
                        eeprom@0x57{
-                               compatible = "24c04";
+                               compatible = "atmel,24c04";
                                reg = <0x57>;
                                pagesize = <0x8>;
                        };
index 215cddd..0c60330 100644 (file)
@@ -22,7 +22,7 @@
        };
 
        chosen {
-               bootargs = "earlycon=uart8250,mmio32,0xf0000000,115200n8 console=ttyS0,115200n8 debug";
+               bootargs = "earlycon=uart8250,mmio32,0xf0000000,115200n8 console=ttyS0,115200n8 debug print-fatal-signals=1";
        };
 
        aliases {
index 5ee96b0..ff2f2c7 100644 (file)
@@ -17,7 +17,7 @@
        interrupt-parent = <&core_intc>;
 
        chosen {
-               bootargs = "earlycon=arc_uart,mmio32,0xc0fc1000,115200n8 console=ttyARC0,115200n8";
+               bootargs = "earlycon=arc_uart,mmio32,0xc0fc1000,115200n8 console=ttyARC0,115200n8 print-fatal-signals=1";
        };
 
        aliases {
index 8d787b2..8e2489b 100644 (file)
@@ -24,7 +24,7 @@
        };
 
        chosen {
-               bootargs = "earlycon=arc_uart,mmio32,0xc0fc1000,115200n8 console=ttyARC0,115200n8";
+               bootargs = "earlycon=arc_uart,mmio32,0xc0fc1000,115200n8 console=ttyARC0,115200n8 print-fatal-signals=1";
        };
 
        aliases {
index 4f98ebf..ed12f49 100644 (file)
@@ -15,7 +15,7 @@
        interrupt-parent = <&core_intc>;
 
        chosen {
-               bootargs = "earlycon=arc_uart,mmio32,0xc0fc1000,115200n8 console=ttyARC0,115200n8";
+               bootargs = "earlycon=arc_uart,mmio32,0xc0fc1000,115200n8 console=ttyARC0,115200n8 print-fatal-signals=1";
        };
 
        aliases {
index 3c391ba..7842e5e 100644 (file)
@@ -20,7 +20,7 @@
                /* this is for console on PGU */
                /* bootargs = "console=tty0 consoleblank=0"; */
                /* this is for console on serial */
-               bootargs = "earlycon=uart8250,mmio32,0xf0000000,115200n8 console=tty0 console=ttyS0,115200n8 consoleblank=0 debug video=640x480-24";
+               bootargs = "earlycon=uart8250,mmio32,0xf0000000,115200n8 console=tty0 console=ttyS0,115200n8 consoleblank=0 debug video=640x480-24 print-fatal-signals=1";
        };
 
        aliases {
index 14a727c..b8838cf 100644 (file)
@@ -20,7 +20,7 @@
                /* this is for console on PGU */
                /* bootargs = "console=tty0 consoleblank=0"; */
                /* this is for console on serial */
-               bootargs = "earlycon=uart8250,mmio32,0xf0000000,115200n8 console=tty0 console=ttyS0,115200n8 consoleblank=0 debug video=640x480-24";
+               bootargs = "earlycon=uart8250,mmio32,0xf0000000,115200n8 console=tty0 console=ttyS0,115200n8 consoleblank=0 debug video=640x480-24 print-fatal-signals=1";
        };
 
        aliases {
index 5052917..72a2c72 100644 (file)
@@ -18,7 +18,7 @@
 
        chosen {
                /* this is for console on serial */
-               bootargs = "earlycon=uart8250,mmio32,0xf0000000,115200n8 console=tty0 console=ttyS0,115200n8 consoleblan=0 debug video=640x480-24";
+               bootargs = "earlycon=uart8250,mmio32,0xf0000000,115200n8 console=tty0 console=ttyS0,115200n8 consoleblan=0 debug video=640x480-24 print-fatal-signals=1";
        };
 
        aliases {
index 257a68f..309f4e6 100644 (file)
 .macro FAKE_RET_FROM_EXCPN
        lr      r9, [status32]
        bic     r9, r9, (STATUS_U_MASK|STATUS_DE_MASK|STATUS_AE_MASK)
-       or      r9, r9, (STATUS_L_MASK|STATUS_IE_MASK)
+       or      r9, r9, STATUS_IE_MASK
        kflag   r9
 .endm
 
index f61a52b..5fe84e4 100644 (file)
@@ -22,10 +22,79 @@ static DEFINE_RAW_SPINLOCK(mcip_lock);
 
 static char smp_cpuinfo_buf[128];
 
+/*
+ * Set mask to halt GFRC if any online core in SMP cluster is halted.
+ * Only works for ARC HS v3.0+, on earlier versions has no effect.
+ */
+static void mcip_update_gfrc_halt_mask(int cpu)
+{
+       struct bcr_generic gfrc;
+       unsigned long flags;
+       u32 gfrc_halt_mask;
+
+       READ_BCR(ARC_REG_GFRC_BUILD, gfrc);
+
+       /*
+        * CMD_GFRC_SET_CORE and CMD_GFRC_READ_CORE commands were added in
+        * GFRC 0x3 version.
+        */
+       if (gfrc.ver < 0x3)
+               return;
+
+       raw_spin_lock_irqsave(&mcip_lock, flags);
+
+       __mcip_cmd(CMD_GFRC_READ_CORE, 0);
+       gfrc_halt_mask = read_aux_reg(ARC_REG_MCIP_READBACK);
+       gfrc_halt_mask |= BIT(cpu);
+       __mcip_cmd_data(CMD_GFRC_SET_CORE, 0, gfrc_halt_mask);
+
+       raw_spin_unlock_irqrestore(&mcip_lock, flags);
+}
+
+static void mcip_update_debug_halt_mask(int cpu)
+{
+       u32 mcip_mask = 0;
+       unsigned long flags;
+
+       raw_spin_lock_irqsave(&mcip_lock, flags);
+
+       /*
+        * mcip_mask is same for CMD_DEBUG_SET_SELECT and CMD_DEBUG_SET_MASK
+        * commands. So read it once instead of reading both CMD_DEBUG_READ_MASK
+        * and CMD_DEBUG_READ_SELECT.
+        */
+       __mcip_cmd(CMD_DEBUG_READ_SELECT, 0);
+       mcip_mask = read_aux_reg(ARC_REG_MCIP_READBACK);
+
+       mcip_mask |= BIT(cpu);
+
+       __mcip_cmd_data(CMD_DEBUG_SET_SELECT, 0, mcip_mask);
+       /*
+        * Parameter specified halt cause:
+        * STATUS32[H]/actionpoint/breakpoint/self-halt
+        * We choose all of them (0xF).
+        */
+       __mcip_cmd_data(CMD_DEBUG_SET_MASK, 0xF, mcip_mask);
+
+       raw_spin_unlock_irqrestore(&mcip_lock, flags);
+}
+
 static void mcip_setup_per_cpu(int cpu)
 {
+       struct mcip_bcr mp;
+
+       READ_BCR(ARC_REG_MCIP_BCR, mp);
+
        smp_ipi_irq_setup(cpu, IPI_IRQ);
        smp_ipi_irq_setup(cpu, SOFTIRQ_IRQ);
+
+       /* Update GFRC halt mask as new CPU came online */
+       if (mp.gfrc)
+               mcip_update_gfrc_halt_mask(cpu);
+
+       /* Update MCIP debug mask as new CPU came online */
+       if (mp.dbg)
+               mcip_update_debug_halt_mask(cpu);
 }
 
 static void mcip_ipi_send(int cpu)
@@ -101,11 +170,6 @@ static void mcip_probe_n_setup(void)
                IS_AVAIL1(mp.gfrc, "GFRC"));
 
        cpuinfo_arc700[0].extn.gfrc = mp.gfrc;
-
-       if (mp.dbg) {
-               __mcip_cmd_data(CMD_DEBUG_SET_SELECT, 0, 0xf);
-               __mcip_cmd_data(CMD_DEBUG_SET_MASK, 0xf, 0xf);
-       }
 }
 
 struct plat_smp_ops plat_smp_ops = {
index 9d27331..b2cae79 100644 (file)
@@ -51,7 +51,7 @@ static const struct id_to_str arc_cpu_rel[] = {
        { 0x51, "R2.0" },
        { 0x52, "R2.1" },
        { 0x53, "R3.0" },
-       { 0x54, "R4.0" },
+       { 0x54, "R3.10a" },
 #endif
        { 0x00, NULL   }
 };
@@ -373,7 +373,7 @@ static void arc_chk_core_config(void)
 {
        struct cpuinfo_arc *cpu = &cpuinfo_arc700[smp_processor_id()];
        int saved = 0, present = 0;
-       char *opt_nm = NULL;;
+       char *opt_nm = NULL;
 
        if (!cpu->extn.timer0)
                panic("Timer0 is not present!\n");
index efe8b42..21d86c3 100644 (file)
@@ -24,6 +24,7 @@
 #include <linux/reboot.h>
 #include <linux/irqdomain.h>
 #include <linux/export.h>
+#include <linux/of_fdt.h>
 
 #include <asm/processor.h>
 #include <asm/setup.h>
@@ -47,6 +48,42 @@ void __init smp_prepare_boot_cpu(void)
 {
 }
 
+static int __init arc_get_cpu_map(const char *name, struct cpumask *cpumask)
+{
+       unsigned long dt_root = of_get_flat_dt_root();
+       const char *buf;
+
+       buf = of_get_flat_dt_prop(dt_root, name, NULL);
+       if (!buf)
+               return -EINVAL;
+
+       if (cpulist_parse(buf, cpumask))
+               return -EINVAL;
+
+       return 0;
+}
+
+/*
+ * Read from DeviceTree and setup cpu possible mask. If there is no
+ * "possible-cpus" property in DeviceTree pretend all [0..NR_CPUS-1] exist.
+ */
+static void __init arc_init_cpu_possible(void)
+{
+       struct cpumask cpumask;
+
+       if (arc_get_cpu_map("possible-cpus", &cpumask)) {
+               pr_warn("Failed to get possible-cpus from dtb, pretending all %u cpus exist\n",
+                       NR_CPUS);
+
+               cpumask_setall(&cpumask);
+       }
+
+       if (!cpumask_test_cpu(0, &cpumask))
+               panic("Master cpu (cpu[0]) is missed in cpu possible mask!");
+
+       init_cpu_possible(&cpumask);
+}
+
 /*
  * Called from setup_arch() before calling setup_processor()
  *
@@ -58,10 +95,7 @@ void __init smp_prepare_boot_cpu(void)
  */
 void __init smp_init_cpus(void)
 {
-       unsigned int i;
-
-       for (i = 0; i < NR_CPUS; i++)
-               set_cpu_possible(i, true);
+       arc_init_cpu_possible();
 
        if (plat_smp_ops.init_early_smp)
                plat_smp_ops.init_early_smp();
@@ -70,16 +104,12 @@ void __init smp_init_cpus(void)
 /* called from init ( ) =>  process 1 */
 void __init smp_prepare_cpus(unsigned int max_cpus)
 {
-       int i;
-
        /*
         * if platform didn't set the present map already, do it now
         * boot cpu is set to present already by init/main.c
         */
-       if (num_present_cpus() <= 1) {
-               for (i = 0; i < max_cpus; i++)
-                       set_cpu_present(i, true);
-       }
+       if (num_present_cpus() <= 1)
+               init_cpu_present(cpu_possible_mask);
 }
 
 void __init smp_cpus_done(unsigned int max_cpus)
index 333daab..183391d 100644 (file)
@@ -366,7 +366,7 @@ static void init_unwind_hdr(struct unwind_table *table,
        return;
 
 ret_err:
-       panic("Attention !!! Dwarf FDE parsing errors\n");;
+       panic("Attention !!! Dwarf FDE parsing errors\n");
 }
 
 #ifdef CONFIG_MODULES
index eee924d..2072f34 100644 (file)
@@ -780,7 +780,10 @@ noinline static void slc_entire_op(const int op)
 
        write_aux_reg(r, ctrl);
 
-       write_aux_reg(ARC_REG_SLC_INVALIDATE, 1);
+       if (op & OP_INV)        /* Inv or flush-n-inv use same cmd reg */
+               write_aux_reg(ARC_REG_SLC_INVALIDATE, 0x1);
+       else
+               write_aux_reg(ARC_REG_SLC_FLUSH, 0x1);
 
        /* Make sure "busy" bit reports correct stataus, see STAR 9001165532 */
        read_aux_reg(r);
index 18045c3..db7cded 100644 (file)
@@ -55,7 +55,7 @@
                      <0x3ff00100 0x100>;
        };
 
-       smc@0x3404c000 {
+       smc@3404c000 {
                compatible = "brcm,bcm11351-smc", "brcm,kona-smc";
                reg = <0x3404c000 0x400>; /* 1 KiB in SRAM */
        };
index 6dde95f..266f261 100644 (file)
@@ -55,7 +55,7 @@
                      <0x3ff00100 0x100>;
        };
 
-       smc@0x3404e000 {
+       smc@3404e000 {
                compatible = "brcm,bcm21664-smc", "brcm,kona-smc";
                reg = <0x3404e000 0x400>; /* 1 KiB in SRAM */
        };
index 0e3d2a5..a5c3824 100644 (file)
        soc {
                ranges = <0x7e000000 0x20000000 0x02000000>;
                dma-ranges = <0x40000000 0x00000000 0x20000000>;
+       };
 
-               arm-pmu {
-                       compatible = "arm,arm1176-pmu";
-               };
+       arm-pmu {
+               compatible = "arm,arm1176-pmu";
        };
 };
 
index 1dfd764..c933e84 100644 (file)
@@ -9,19 +9,19 @@
                         <0x40000000 0x40000000 0x00001000>;
                dma-ranges = <0xc0000000 0x00000000 0x3f000000>;
 
-               local_intc: local_intc {
+               local_intc: local_intc@40000000 {
                        compatible = "brcm,bcm2836-l1-intc";
                        reg = <0x40000000 0x100>;
                        interrupt-controller;
                        #interrupt-cells = <2>;
                        interrupt-parent = <&local_intc>;
                };
+       };
 
-               arm-pmu {
-                       compatible = "arm,cortex-a7-pmu";
-                       interrupt-parent = <&local_intc>;
-                       interrupts = <9 IRQ_TYPE_LEVEL_HIGH>;
-               };
+       arm-pmu {
+               compatible = "arm,cortex-a7-pmu";
+               interrupt-parent = <&local_intc>;
+               interrupts = <9 IRQ_TYPE_LEVEL_HIGH>;
        };
 
        timer {
index efa7d33..7704bb0 100644 (file)
@@ -8,7 +8,7 @@
                         <0x40000000 0x40000000 0x00001000>;
                dma-ranges = <0xc0000000 0x00000000 0x3f000000>;
 
-               local_intc: local_intc {
+               local_intc: local_intc@40000000 {
                        compatible = "brcm,bcm2836-l1-intc";
                        reg = <0x40000000 0x100>;
                        interrupt-controller;
index 18db25a..9d293de 100644 (file)
                        status = "disabled";
                };
 
-               aux: aux@0x7e215000 {
+               aux: aux@7e215000 {
                        compatible = "brcm,bcm2835-aux";
                        #clock-cells = <1>;
                        reg = <0x7e215000 0x8>;
index 6a44b80..f0e2008 100644 (file)
@@ -49,7 +49,7 @@
 
        memory {
                device_type = "memory";
-               reg = <0x60000000 0x80000000>;
+               reg = <0x60000000 0x20000000>;
        };
 
        gpio-restart {
index 08568ce..da8bb9d 100644 (file)
 
                sata: sata@46000000 {
                        /* The ROM uses this muxmode */
-                       cortina,gemini-ata-muxmode = <3>;
+                       cortina,gemini-ata-muxmode = <0>;
                        cortina,gemini-enable-sata-bridge;
                        status = "okay";
                };
index cf42c2f..1281bc3 100644 (file)
@@ -42,7 +42,7 @@
 
 /dts-v1/;
 
-#include "imx6q.dtsi"
+#include "imx6dl.dtsi"
 #include "imx6qdl-icore-rqs.dtsi"
 
 / {
index c1aa7a4..a30ee9f 100644 (file)
@@ -71,6 +71,8 @@
 };
 
 &i2c1 {
+       pinctrl-names = "default";
+       pinctrl-0 = <&i2c1_pins>;
        clock-frequency = <2600000>;
 
        twl: twl@48 {
                >;
        };
 
-
+       i2c1_pins: pinmux_i2c1_pins {
+               pinctrl-single,pins = <
+                       OMAP3_CORE1_IOPAD(0x21ba, PIN_INPUT | MUX_MODE0)        /* i2c1_scl.i2c1_scl */
+                       OMAP3_CORE1_IOPAD(0x21bc, PIN_INPUT | MUX_MODE0)        /* i2c1_sda.i2c1_sda */
+               >;
+       };
 };
 
 &omap3_pmx_wkup {
index b50b796..4791544 100644 (file)
@@ -66,6 +66,8 @@
 };
 
 &i2c1 {
+       pinctrl-names = "default";
+       pinctrl-0 = <&i2c1_pins>;
        clock-frequency = <2600000>;
 
        twl: twl@48 {
                        OMAP3_CORE1_IOPAD(0x21b8, PIN_INPUT | MUX_MODE0)        /* hsusb0_data7.hsusb0_data7 */
                >;
        };
+       i2c1_pins: pinmux_i2c1_pins {
+               pinctrl-single,pins = <
+                       OMAP3_CORE1_IOPAD(0x21ba, PIN_INPUT | MUX_MODE0)        /* i2c1_scl.i2c1_scl */
+                       OMAP3_CORE1_IOPAD(0x21bc, PIN_INPUT | MUX_MODE0)        /* i2c1_sda.i2c1_sda */
+               >;
+       };
 };
 
 &uart2 {
index ec2c8ba..592e17f 100644 (file)
@@ -47,7 +47,7 @@
                        gpios = <&gpio3 19 GPIO_ACTIVE_LOW>;    /* gpio3_83 */
                        wakeup-source;
                        autorepeat;
-                       debounce_interval = <50>;
+                       debounce-interval = <50>;
                };
        };
 
index 3b704cf..a974581 100644 (file)
                max-frequency = <37500000>;
                clocks = <&cru HCLK_SDIO>, <&cru SCLK_SDIO>,
                         <&cru SCLK_SDIO_DRV>, <&cru SCLK_SDIO_SAMPLE>;
-               clock-names = "biu", "ciu", "ciu_drv", "ciu_sample";
+               clock-names = "biu", "ciu", "ciu-drive", "ciu-sample";
                fifo-depth = <0x100>;
                interrupts = <GIC_SPI 15 IRQ_TYPE_LEVEL_HIGH>;
                resets = <&cru SRST_SDIO>;
                max-frequency = <37500000>;
                clocks = <&cru HCLK_EMMC>, <&cru SCLK_EMMC>,
                         <&cru SCLK_EMMC_DRV>, <&cru SCLK_EMMC_SAMPLE>;
-               clock-names = "biu", "ciu", "ciu_drv", "ciu_sample";
+               clock-names = "biu", "ciu", "ciu-drive", "ciu-sample";
                default-sample-phase = <158>;
                disable-wp;
                dmas = <&pdma 12>;
index 780ec3a..341deaf 100644 (file)
                interrupts = <GIC_SPI 12 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&cru HCLK_SDMMC>, <&cru SCLK_SDMMC>,
                         <&cru SCLK_SDMMC_DRV>, <&cru SCLK_SDMMC_SAMPLE>;
-               clock-names = "biu", "ciu", "ciu_drv", "ciu_sample";
+               clock-names = "biu", "ciu", "ciu-drive", "ciu-sample";
                fifo-depth = <0x100>;
                pinctrl-names = "default";
                pinctrl-0 = <&sdmmc_clk &sdmmc_cmd &sdmmc_bus4>;
                interrupts = <GIC_SPI 13 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&cru HCLK_SDIO>, <&cru SCLK_SDIO>,
                         <&cru SCLK_SDIO_DRV>, <&cru SCLK_SDIO_SAMPLE>;
-               clock-names = "biu", "ciu", "ciu_drv", "ciu_sample";
+               clock-names = "biu", "ciu", "ciu-drive", "ciu-sample";
                fifo-depth = <0x100>;
                pinctrl-names = "default";
                pinctrl-0 = <&sdio_clk &sdio_cmd &sdio_bus4>;
                max-frequency = <37500000>;
                clocks = <&cru HCLK_EMMC>, <&cru SCLK_EMMC>,
                         <&cru SCLK_EMMC_DRV>, <&cru SCLK_EMMC_SAMPLE>;
-               clock-names = "biu", "ciu", "ciu_drv", "ciu_sample";
+               clock-names = "biu", "ciu", "ciu-drive", "ciu-sample";
                bus-width = <8>;
                default-sample-phase = <158>;
                fifo-depth = <0x100>;
index 99cfae8..5eae477 100644 (file)
        };
 };
 
-&cpu0 {
-       cpu0-supply = <&vdd_cpu>;
-       operating-points = <
-               /* KHz    uV */
-               1800000 1400000
-               1608000 1350000
-               1512000 1300000
-               1416000 1200000
-               1200000 1100000
-               1008000 1050000
-                816000 1000000
-                696000  950000
-                600000  900000
-                408000  900000
-                312000  900000
-                216000  900000
-                126000  900000
-       >;
-};
-
 &emmc {
        status = "okay";
        bus-width = <8>;
index 8a74efd..240e7a2 100644 (file)
@@ -56,7 +56,7 @@
                        clocks = <&topclk ZX296702_A9_PERIPHCLK>;
                };
 
-               l2cc: l2-cache-controller@0x00c00000 {
+               l2cc: l2-cache-controller@c00000 {
                        compatible = "arm,pl310-cache";
                        reg = <0x00c00000 0x1000>;
                        cache-unified;
                        arm,double-linefill-incr = <0>;
                };
 
-               pcu: pcu@0xa0008000 {
+               pcu: pcu@a0008000 {
                        compatible = "zte,zx296702-pcu";
                        reg = <0xa0008000 0x1000>;
                };
 
-               topclk: topclk@0x09800000 {
+               topclk: topclk@9800000 {
                        compatible = "zte,zx296702-topcrm-clk";
                        reg = <0x09800000 0x1000>;
                        #clock-cells = <1>;
                };
 
-               lsp1clk: lsp1clk@0x09400000 {
+               lsp1clk: lsp1clk@9400000 {
                        compatible = "zte,zx296702-lsp1crpm-clk";
                        reg = <0x09400000 0x1000>;
                        #clock-cells = <1>;
                };
 
-               lsp0clk: lsp0clk@0x0b000000 {
+               lsp0clk: lsp0clk@b000000 {
                        compatible = "zte,zx296702-lsp0crpm-clk";
                        reg = <0x0b000000 0x1000>;
                        #clock-cells = <1>;
                };
 
-               uart0: serial@0x09405000 {
+               uart0: serial@9405000 {
                        compatible = "zte,zx296702-uart";
                        reg = <0x09405000 0x1000>;
                        interrupts = <GIC_SPI 37 IRQ_TYPE_LEVEL_HIGH>;
@@ -98,7 +98,7 @@
                        status = "disabled";
                };
 
-               uart1: serial@0x09406000 {
+               uart1: serial@9406000 {
                        compatible = "zte,zx296702-uart";
                        reg = <0x09406000 0x1000>;
                        interrupts = <GIC_SPI 38 IRQ_TYPE_LEVEL_HIGH>;
                        status = "disabled";
                };
 
-               mmc0: mmc@0x09408000 {
+               mmc0: mmc@9408000 {
                        compatible = "snps,dw-mshc";
                        #address-cells = <1>;
                        #size-cells = <0>;
                        status = "disabled";
                };
 
-               mmc1: mmc@0x0b003000 {
+               mmc1: mmc@b003000 {
                        compatible = "snps,dw-mshc";
                        #address-cells = <1>;
                        #size-cells = <0>;
                        status = "disabled";
                };
 
-               sysctrl: sysctrl@0xa0007000 {
+               sysctrl: sysctrl@a0007000 {
                        compatible = "zte,sysctrl", "syscon";
                        reg = <0xa0007000 0x1000>;
                };
index 2f145c4..92674f2 100644 (file)
@@ -319,7 +319,7 @@ CONFIG_MEDIA_CAMERA_SUPPORT=y
 CONFIG_RC_CORE=m
 CONFIG_MEDIA_CONTROLLER=y
 CONFIG_VIDEO_V4L2_SUBDEV_API=y
-CONFIG_LIRC=m
+CONFIG_LIRC=y
 CONFIG_RC_DEVICES=y
 CONFIG_IR_RX51=m
 CONFIG_V4L_PLATFORM_DRIVERS=y
index 629f8e9..cf2701c 100644 (file)
@@ -83,7 +83,7 @@ static void dummy_clock_access(struct timespec64 *ts)
 }
 
 static clock_access_fn __read_persistent_clock = dummy_clock_access;
-static clock_access_fn __read_boot_clock = dummy_clock_access;;
+static clock_access_fn __read_boot_clock = dummy_clock_access;
 
 void read_persistent_clock64(struct timespec64 *ts)
 {
index 5638ce0..63d6b40 100644 (file)
@@ -7,6 +7,8 @@ ccflags-y += -fno-stack-protector -DDISABLE_BRANCH_PROFILING
 
 KVM=../../../../virt/kvm
 
+CFLAGS_ARMV7VE            :=$(call cc-option, -march=armv7ve)
+
 obj-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/hyp/vgic-v2-sr.o
 obj-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/hyp/vgic-v3-sr.o
 obj-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/hyp/timer-sr.o
@@ -15,7 +17,10 @@ obj-$(CONFIG_KVM_ARM_HOST) += tlb.o
 obj-$(CONFIG_KVM_ARM_HOST) += cp15-sr.o
 obj-$(CONFIG_KVM_ARM_HOST) += vfp.o
 obj-$(CONFIG_KVM_ARM_HOST) += banked-sr.o
+CFLAGS_banked-sr.o        += $(CFLAGS_ARMV7VE)
+
 obj-$(CONFIG_KVM_ARM_HOST) += entry.o
 obj-$(CONFIG_KVM_ARM_HOST) += hyp-entry.o
 obj-$(CONFIG_KVM_ARM_HOST) += switch.o
+CFLAGS_switch.o                   += $(CFLAGS_ARMV7VE)
 obj-$(CONFIG_KVM_ARM_HOST) += s2-setup.o
index 111bda8..be4b8b0 100644 (file)
 
 #include <asm/kvm_hyp.h>
 
+/*
+ * gcc before 4.9 doesn't understand -march=armv7ve, so we have to
+ * trick the assembler.
+ */
 __asm__(".arch_extension     virt");
 
 void __hyp_text __banked_save_state(struct kvm_cpu_context *ctxt)
index ee1f83b..4c89a8e 100644 (file)
@@ -69,7 +69,7 @@ static void clps711x_restart(enum reboot_mode mode, const char *cmd)
        soft_restart(0);
 }
 
-static const char *clps711x_compat[] __initconst = {
+static const char *const clps711x_compat[] __initconst = {
        "cirrus,ep7209",
        NULL
 };
index e457f29..d6b1190 100644 (file)
@@ -368,7 +368,7 @@ static struct spi_eeprom at25640a = {
        .flags          = EE_ADDR2,
 };
 
-static struct spi_board_info dm355_evm_spi_info[] __initconst = {
+static const struct spi_board_info dm355_evm_spi_info[] __initconst = {
        {
                .modalias       = "at25",
                .platform_data  = &at25640a,
index be99724..fad9a56 100644 (file)
@@ -217,7 +217,7 @@ static struct spi_eeprom at25640a = {
        .flags          = EE_ADDR2,
 };
 
-static struct spi_board_info dm355_leopard_spi_info[] __initconst = {
+static const struct spi_board_info dm355_leopard_spi_info[] __initconst = {
        {
                .modalias       = "at25",
                .platform_data  = &at25640a,
index e75741f..e378098 100644 (file)
@@ -726,7 +726,7 @@ static struct spi_eeprom at25640 = {
        .flags          = EE_ADDR2,
 };
 
-static struct spi_board_info dm365_evm_spi_info[] __initconst = {
+static const struct spi_board_info dm365_evm_spi_info[] __initconst = {
        {
                .modalias       = "at25",
                .platform_data  = &at25640,
index 6b32dc5..2c20599 100644 (file)
@@ -41,7 +41,7 @@ config MACH_ARMADA_375
        depends on ARCH_MULTI_V7
        select ARMADA_370_XP_IRQ
        select ARM_ERRATA_720789
-       select ARM_ERRATA_753970
+       select PL310_ERRATA_753970
        select ARM_GIC
        select ARMADA_375_CLK
        select HAVE_ARM_SCU
@@ -57,7 +57,7 @@ config MACH_ARMADA_38X
        bool "Marvell Armada 380/385 boards"
        depends on ARCH_MULTI_V7
        select ARM_ERRATA_720789
-       select ARM_ERRATA_753970
+       select PL310_ERRATA_753970
        select ARM_GIC
        select ARM_GLOBAL_TIMER
        select CLKSRC_ARM_GLOBAL_TIMER_SCHED_CLOCK
index 43e3e18..fa51241 100644 (file)
@@ -1011,17 +1011,17 @@ static int clk_debugfs_register_one(struct clk *c)
                return -ENOMEM;
        c->dent = d;
 
-       d = debugfs_create_u8("usecount", S_IRUGO, c->dent, (u8 *)&c->usecount);
+       d = debugfs_create_u8("usecount", S_IRUGO, c->dent, &c->usecount);
        if (!d) {
                err = -ENOMEM;
                goto err_out;
        }
-       d = debugfs_create_u32("rate", S_IRUGO, c->dent, (u32 *)&c->rate);
+       d = debugfs_create_ulong("rate", S_IRUGO, c->dent, &c->rate);
        if (!d) {
                err = -ENOMEM;
                goto err_out;
        }
-       d = debugfs_create_x32("flags", S_IRUGO, c->dent, (u32 *)&c->flags);
+       d = debugfs_create_x8("flags", S_IRUGO, c->dent, &c->flags);
        if (!d) {
                err = -ENOMEM;
                goto err_out;
index 4bb6751..fc5fb77 100644 (file)
@@ -299,8 +299,6 @@ static void irq_save_context(void)
        if (soc_is_dra7xx())
                return;
 
-       if (!sar_base)
-               sar_base = omap4_get_sar_ram_base();
        if (wakeupgen_ops && wakeupgen_ops->save_context)
                wakeupgen_ops->save_context();
 }
@@ -598,6 +596,8 @@ static int __init wakeupgen_init(struct device_node *node,
        irq_hotplug_init();
        irq_pm_init();
 
+       sar_base = omap4_get_sar_ram_base();
+
        return 0;
 }
 IRQCHIP_DECLARE(ti_wakeupgen, "ti,omap4-wugen-mpu", wakeupgen_init);
index 124f9af..34156ec 100644 (file)
@@ -977,6 +977,9 @@ static int _enable_clocks(struct omap_hwmod *oh)
 
        pr_debug("omap_hwmod: %s: enabling clocks\n", oh->name);
 
+       if (oh->flags & HWMOD_OPT_CLKS_NEEDED)
+               _enable_optional_clocks(oh);
+
        if (oh->_clk)
                clk_enable(oh->_clk);
 
@@ -985,9 +988,6 @@ static int _enable_clocks(struct omap_hwmod *oh)
                        clk_enable(os->_clk);
        }
 
-       if (oh->flags & HWMOD_OPT_CLKS_NEEDED)
-               _enable_optional_clocks(oh);
-
        /* The opt clocks are controlled by the device driver. */
 
        return 0;
index 366158a..6f68576 100644 (file)
@@ -186,7 +186,7 @@ static void omap_pm_end(void)
        cpu_idle_poll_ctrl(false);
 }
 
-static void omap_pm_finish(void)
+static void omap_pm_wake(void)
 {
        if (soc_is_omap34xx())
                omap_prcm_irq_complete();
@@ -196,7 +196,7 @@ static const struct platform_suspend_ops omap_pm_ops = {
        .begin          = omap_pm_begin,
        .end            = omap_pm_end,
        .enter          = omap_pm_enter,
-       .finish         = omap_pm_finish,
+       .wake           = omap_pm_wake,
        .valid          = suspend_valid_only_mem,
 };
 
index ece09c9..d61fbd7 100644 (file)
@@ -156,12 +156,6 @@ static struct clock_event_device clockevent_gpt = {
        .tick_resume            = omap2_gp_timer_shutdown,
 };
 
-static struct property device_disabled = {
-       .name = "status",
-       .length = sizeof("disabled"),
-       .value = "disabled",
-};
-
 static const struct of_device_id omap_timer_match[] __initconst = {
        { .compatible = "ti,omap2420-timer", },
        { .compatible = "ti,omap3430-timer", },
@@ -203,8 +197,17 @@ static struct device_node * __init omap_get_timer_dt(const struct of_device_id *
                                  of_get_property(np, "ti,timer-secure", NULL)))
                        continue;
 
-               if (!of_device_is_compatible(np, "ti,omap-counter32k"))
-                       of_add_property(np, &device_disabled);
+               if (!of_device_is_compatible(np, "ti,omap-counter32k")) {
+                       struct property *prop;
+
+                       prop = kzalloc(sizeof(*prop), GFP_KERNEL);
+                       if (!prop)
+                               return NULL;
+                       prop->name = "status";
+                       prop->value = "disabled";
+                       prop->length = strlen(prop->value);
+                       of_add_property(np, prop);
+               }
                return np;
        }
 
index 2a7bb6c..a810f4d 100644 (file)
@@ -58,7 +58,6 @@ config MACH_KUROBOX_PRO
 
 config MACH_DNS323
        bool "D-Link DNS-323"
-       select GENERIC_NET_UTILS
        select I2C_BOARDINFO if I2C
        help
          Say 'Y' here if you want your kernel to support the
@@ -66,7 +65,6 @@ config MACH_DNS323
 
 config MACH_TS209
        bool "QNAP TS-109/TS-209"
-       select GENERIC_NET_UTILS
        help
          Say 'Y' here if you want your kernel to support the
          QNAP TS-109/TS-209 platform.
@@ -101,7 +99,6 @@ config MACH_LINKSTATION_LS_HGL
 
 config MACH_TS409
        bool "QNAP TS-409"
-       select GENERIC_NET_UTILS
        help
          Say 'Y' here if you want your kernel to support the
          QNAP TS-409 platform.
index cd483bf..d13344b 100644 (file)
@@ -173,10 +173,42 @@ static struct mv643xx_eth_platform_data dns323_eth_data = {
        .phy_addr = MV643XX_ETH_PHY_ADDR(8),
 };
 
+/* dns323_parse_hex_*() taken from tsx09-common.c; should a common copy of these
+ * functions be kept somewhere?
+ */
+static int __init dns323_parse_hex_nibble(char n)
+{
+       if (n >= '0' && n <= '9')
+               return n - '0';
+
+       if (n >= 'A' && n <= 'F')
+               return n - 'A' + 10;
+
+       if (n >= 'a' && n <= 'f')
+               return n - 'a' + 10;
+
+       return -1;
+}
+
+static int __init dns323_parse_hex_byte(const char *b)
+{
+       int hi;
+       int lo;
+
+       hi = dns323_parse_hex_nibble(b[0]);
+       lo = dns323_parse_hex_nibble(b[1]);
+
+       if (hi < 0 || lo < 0)
+               return -1;
+
+       return (hi << 4) | lo;
+}
+
 static int __init dns323_read_mac_addr(void)
 {
        u_int8_t addr[6];
-       void __iomem *mac_page;
+       int i;
+       char *mac_page;
 
        /* MAC address is stored as a regular ol' string in /dev/mtdblock4
         * (0x007d0000-0x00800000) starting at offset 196480 (0x2ff80).
@@ -185,8 +217,23 @@ static int __init dns323_read_mac_addr(void)
        if (!mac_page)
                return -ENOMEM;
 
-       if (!mac_pton((__force const char *) mac_page, addr))
-               goto error_fail;
+       /* Sanity check the string we're looking at */
+       for (i = 0; i < 5; i++) {
+               if (*(mac_page + (i * 3) + 2) != ':') {
+                       goto error_fail;
+               }
+       }
+
+       for (i = 0; i < 6; i++) {
+               int byte;
+
+               byte = dns323_parse_hex_byte(mac_page + (i * 3));
+               if (byte < 0) {
+                       goto error_fail;
+               }
+
+               addr[i] = byte;
+       }
 
        iounmap(mac_page);
        printk("DNS-323: Found ethernet MAC address: %pM\n", addr);
index 8977498..905d4f2 100644 (file)
@@ -53,12 +53,53 @@ struct mv643xx_eth_platform_data qnap_tsx09_eth_data = {
        .phy_addr       = MV643XX_ETH_PHY_ADDR(8),
 };
 
+static int __init qnap_tsx09_parse_hex_nibble(char n)
+{
+       if (n >= '0' && n <= '9')
+               return n - '0';
+
+       if (n >= 'A' && n <= 'F')
+               return n - 'A' + 10;
+
+       if (n >= 'a' && n <= 'f')
+               return n - 'a' + 10;
+
+       return -1;
+}
+
+static int __init qnap_tsx09_parse_hex_byte(const char *b)
+{
+       int hi;
+       int lo;
+
+       hi = qnap_tsx09_parse_hex_nibble(b[0]);
+       lo = qnap_tsx09_parse_hex_nibble(b[1]);
+
+       if (hi < 0 || lo < 0)
+               return -1;
+
+       return (hi << 4) | lo;
+}
+
 static int __init qnap_tsx09_check_mac_addr(const char *addr_str)
 {
        u_int8_t addr[6];
+       int i;
 
-       if (!mac_pton(addr_str, addr))
-               return -1;
+       for (i = 0; i < 6; i++) {
+               int byte;
+
+               /*
+                * Enforce "xx:xx:xx:xx:xx:xx\n" format.
+                */
+               if (addr_str[(i * 3) + 2] != ((i < 5) ? ':' : '\n'))
+                       return -1;
+
+               byte = qnap_tsx09_parse_hex_byte(addr_str + (i * 3));
+               if (byte < 0)
+                       return -1;
+               addr[i] = byte;
+       }
 
        printk(KERN_INFO "tsx09: found ethernet mac address %pM\n", addr);
 
@@ -77,12 +118,12 @@ void __init qnap_tsx09_find_mac_addr(u32 mem_base, u32 size)
        unsigned long addr;
 
        for (addr = mem_base; addr < (mem_base + size); addr += 1024) {
-               void __iomem *nor_page;
+               char *nor_page;
                int ret = 0;
 
                nor_page = ioremap(addr, 1024);
                if (nor_page != NULL) {
-                       ret = qnap_tsx09_check_mac_addr((__force const char *)nor_page);
+                       ret = qnap_tsx09_check_mac_addr(nor_page);
                        iounmap(nor_page);
                }
 
index aff6994..a2399fd 100644 (file)
@@ -472,28 +472,27 @@ void __init orion_ge11_init(struct mv643xx_eth_platform_data *eth_data,
 /*****************************************************************************
  * Ethernet switch
  ****************************************************************************/
-static __initconst const char *orion_ge00_mvmdio_bus_name = "orion-mii";
-static __initdata struct mdio_board_info
-                 orion_ge00_switch_board_info;
+static __initdata struct mdio_board_info orion_ge00_switch_board_info = {
+       .bus_id   = "orion-mii",
+       .modalias = "mv88e6085",
+};
 
 void __init orion_ge00_switch_init(struct dsa_chip_data *d)
 {
-       struct mdio_board_info *bd;
        unsigned int i;
 
        if (!IS_BUILTIN(CONFIG_PHYLIB))
                return;
 
-       for (i = 0; i < ARRAY_SIZE(d->port_names); i++)
-               if (!strcmp(d->port_names[i], "cpu"))
+       for (i = 0; i < ARRAY_SIZE(d->port_names); i++) {
+               if (!strcmp(d->port_names[i], "cpu")) {
+                       d->netdev[i] = &orion_ge00.dev;
                        break;
+               }
+       }
 
-       bd = &orion_ge00_switch_board_info;
-       bd->bus_id = orion_ge00_mvmdio_bus_name;
-       bd->mdio_addr = d->sw_addr;
-       d->netdev[i] = &orion_ge00.dev;
-       strcpy(bd->modalias, "mv88e6085");
-       bd->platform_data = d;
+       orion_ge00_switch_board_info.mdio_addr = d->sw_addr;
+       orion_ge00_switch_board_info.platform_data = d;
 
        mdiobus_register_board_info(&orion_ge00_switch_board_info, 1);
 }
index a806326..70c776e 100644 (file)
 
                        uart_A: serial@24000 {
                                compatible = "amlogic,meson-gx-uart", "amlogic,meson-uart";
-                               reg = <0x0 0x24000 0x0 0x14>;
+                               reg = <0x0 0x24000 0x0 0x18>;
                                interrupts = <GIC_SPI 26 IRQ_TYPE_EDGE_RISING>;
                                status = "disabled";
                        };
 
                        uart_B: serial@23000 {
                                compatible = "amlogic,meson-gx-uart", "amlogic,meson-uart";
-                               reg = <0x0 0x23000 0x0 0x14>;
+                               reg = <0x0 0x23000 0x0 0x18>;
                                interrupts = <GIC_SPI 75 IRQ_TYPE_EDGE_RISING>;
                                status = "disabled";
                        };
index 6cb3c2a..4ee2e79 100644 (file)
 
                        uart_A: serial@84c0 {
                                compatible = "amlogic,meson-gx-uart";
-                               reg = <0x0 0x84c0 0x0 0x14>;
+                               reg = <0x0 0x84c0 0x0 0x18>;
                                interrupts = <GIC_SPI 26 IRQ_TYPE_EDGE_RISING>;
                                status = "disabled";
                        };
 
                        uart_B: serial@84dc {
                                compatible = "amlogic,meson-gx-uart";
-                               reg = <0x0 0x84dc 0x0 0x14>;
+                               reg = <0x0 0x84dc 0x0 0x18>;
                                interrupts = <GIC_SPI 75 IRQ_TYPE_EDGE_RISING>;
                                status = "disabled";
                        };
 
                        uart_C: serial@8700 {
                                compatible = "amlogic,meson-gx-uart";
-                               reg = <0x0 0x8700 0x0 0x14>;
+                               reg = <0x0 0x8700 0x0 0x18>;
                                interrupts = <GIC_SPI 93 IRQ_TYPE_EDGE_RISING>;
                                status = "disabled";
                        };
 
                        uart_AO: serial@4c0 {
                                compatible = "amlogic,meson-gx-uart", "amlogic,meson-ao-uart";
-                               reg = <0x0 0x004c0 0x0 0x14>;
+                               reg = <0x0 0x004c0 0x0 0x18>;
                                interrupts = <GIC_SPI 193 IRQ_TYPE_EDGE_RISING>;
                                status = "disabled";
                        };
 
                        uart_AO_B: serial@4e0 {
                                compatible = "amlogic,meson-gx-uart", "amlogic,meson-ao-uart";
-                               reg = <0x0 0x004e0 0x0 0x14>;
+                               reg = <0x0 0x004e0 0x0 0x18>;
                                interrupts = <GIC_SPI 197 IRQ_TYPE_EDGE_RISING>;
                                status = "disabled";
                        };
index 4f355f1..c851411 100644 (file)
 
                        internal_phy: ethernet-phy@8 {
                                compatible = "ethernet-phy-id0181.4400", "ethernet-phy-ieee802.3-c22";
+                               interrupts = <GIC_SPI 9 IRQ_TYPE_LEVEL_HIGH>;
                                reg = <8>;
                                max-speed = <100>;
                        };
index 4220fbd..ff5c4c4 100644 (file)
@@ -98,7 +98,7 @@
                clock-output-names = "clk125mhz";
        };
 
-       pci {
+       pcie@30000000 {
                compatible = "pci-host-ecam-generic";
                device_type = "pci";
                #interrupt-cells = <1>;
                ranges =
                  <0x02000000    0 0x40000000    0 0x40000000    0 0x20000000
                   0x43000000 0x40 0x00000000 0x40 0x00000000 0x20 0x00000000>;
+               bus-range = <0 0xff>;
                interrupt-map-mask = <0 0 0 7>;
                interrupt-map =
                      /* addr  pin  ic   icaddr  icintr */
index e94fa1a..047641f 100644 (file)
@@ -51,7 +51,7 @@
                #size-cells = <2>;
                ranges;
 
-               ramoops@0x21f00000 {
+               ramoops@21f00000 {
                        compatible = "ramoops";
                        reg = <0x0 0x21f00000 0x0 0x00100000>;
                        record-size     = <0x00020000>;
index 9fbe470..94597e3 100644 (file)
                        reg = <0 0x10005000 0 0x1000>;
                };
 
-               pio: pinctrl@0x10005000 {
+               pio: pinctrl@10005000 {
                        compatible = "mediatek,mt8173-pinctrl";
                        reg = <0 0x1000b000 0 0x1000>;
                        mediatek,pctl-regmap = <&syscfg_pctl_a>;
index 492a011..1c8f1b8 100644 (file)
                };
 
                agnoc@0 {
-                       qcom,pcie@00600000 {
+                       qcom,pcie@600000 {
                                perst-gpio = <&msmgpio 35 GPIO_ACTIVE_LOW>;
                        };
 
-                       qcom,pcie@00608000 {
+                       qcom,pcie@608000 {
                                status = "okay";
                                perst-gpio = <&msmgpio 130 GPIO_ACTIVE_LOW>;
                        };
 
-                       qcom,pcie@00610000 {
+                       qcom,pcie@610000 {
                                status = "okay";
                                perst-gpio = <&msmgpio 114 GPIO_ACTIVE_LOW>;
                        };
index 4b2afcc..0a6f795 100644 (file)
                        #size-cells = <1>;
                        ranges;
 
-                       pcie0: qcom,pcie@00600000 {
+                       pcie0: qcom,pcie@600000 {
                                compatible = "qcom,pcie-msm8996", "snps,dw-pcie";
                                status = "disabled";
                                power-domains = <&gcc PCIE0_GDSC>;
 
                        };
 
-                       pcie1: qcom,pcie@00608000 {
+                       pcie1: qcom,pcie@608000 {
                                compatible = "qcom,pcie-msm8996", "snps,dw-pcie";
                                power-domains = <&gcc PCIE1_GDSC>;
                                bus-range = <0x00 0xff>;
                                                "bus_slave";
                        };
 
-                       pcie2: qcom,pcie@00610000 {
+                       pcie2: qcom,pcie@610000 {
                                compatible = "qcom,pcie-msm8996", "snps,dw-pcie";
                                power-domains = <&gcc PCIE2_GDSC>;
                                bus-range = <0x00 0xff>;
index 3890468..2825772 100644 (file)
        assigned-clocks = <&cru SCLK_MAC2IO>, <&cru SCLK_MAC2IO_EXT>;
        assigned-clock-parents = <&gmac_clkin>, <&gmac_clkin>;
        clock_in_out = "input";
-       /* shows instability at 1GBit right now */
-       max-speed = <100>;
        phy-supply = <&vcc_io>;
        phy-mode = "rgmii";
        pinctrl-names = "default";
        pinctrl-0 = <&rgmiim1_pins>;
+       snps,force_thresh_dma_mode;
        snps,reset-gpio = <&gpio1 RK_PC2 GPIO_ACTIVE_LOW>;
        snps,reset-active-low;
        snps,reset-delays-us = <0 10000 50000>;
-       tx_delay = <0x26>;
-       rx_delay = <0x11>;
+       tx_delay = <0x24>;
+       rx_delay = <0x18>;
        status = "okay";
 };
 
index a037ee5..cae3415 100644 (file)
                interrupts = <GIC_SPI 12 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&cru HCLK_SDMMC>, <&cru SCLK_SDMMC>,
                         <&cru SCLK_SDMMC_DRV>, <&cru SCLK_SDMMC_SAMPLE>;
-               clock-names = "biu", "ciu", "ciu_drv", "ciu_sample";
+               clock-names = "biu", "ciu", "ciu-drive", "ciu-sample";
                fifo-depth = <0x100>;
                status = "disabled";
        };
                interrupts = <GIC_SPI 13 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&cru HCLK_SDIO>, <&cru SCLK_SDIO>,
                         <&cru SCLK_SDIO_DRV>, <&cru SCLK_SDIO_SAMPLE>;
-               clock-names = "biu", "ciu", "ciu_drv", "ciu_sample";
+               clock-names = "biu", "ciu", "ciu-drive", "ciu-sample";
                fifo-depth = <0x100>;
                status = "disabled";
        };
                interrupts = <GIC_SPI 14 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&cru HCLK_EMMC>, <&cru SCLK_EMMC>,
                         <&cru SCLK_EMMC_DRV>, <&cru SCLK_EMMC_SAMPLE>;
-               clock-names = "biu", "ciu", "ciu_drv", "ciu_sample";
+               clock-names = "biu", "ciu", "ciu-drive", "ciu-sample";
                fifo-depth = <0x100>;
                status = "disabled";
        };
index aa4d070..03458ac 100644 (file)
                max-frequency = <150000000>;
                clocks = <&cru HCLK_SDIO0>, <&cru SCLK_SDIO0>,
                         <&cru SCLK_SDIO0_DRV>, <&cru SCLK_SDIO0_SAMPLE>;
-               clock-names = "biu", "ciu", "ciu_drv", "ciu_sample";
+               clock-names = "biu", "ciu", "ciu-drive", "ciu-sample";
                fifo-depth = <0x100>;
                interrupts = <GIC_SPI 33 IRQ_TYPE_LEVEL_HIGH>;
                resets = <&cru SRST_SDIO0>;
index 0f873c8..ce592a4 100644 (file)
        assigned-clocks = <&cru SCLK_PCIEPHY_REF>;
        assigned-clock-parents = <&cru SCLK_PCIEPHY_REF100M>;
        assigned-clock-rates = <100000000>;
-       ep-gpios = <&gpio3 RK_PB5 GPIO_ACTIVE_HIGH>;
+       ep-gpios = <&gpio2 RK_PA4 GPIO_ACTIVE_HIGH>;
        num-lanes = <4>;
        pinctrl-names = "default";
        pinctrl-0 = <&pcie_clkreqn_cpm>;
index 7aa2144..2605118 100644 (file)
                compatible = "rockchip,rk3399-edp";
                reg = <0x0 0xff970000 0x0 0x8000>;
                interrupts = <GIC_SPI 10 IRQ_TYPE_LEVEL_HIGH 0>;
-               clocks = <&cru PCLK_EDP>, <&cru PCLK_EDP_CTRL>;
-               clock-names = "dp", "pclk";
+               clocks = <&cru PCLK_EDP>, <&cru PCLK_EDP_CTRL>, <&cru PCLK_VIO_GRF>;
+               clock-names = "dp", "pclk", "grf";
                pinctrl-names = "default";
                pinctrl-0 = <&edp_hpd>;
                power-domains = <&power RK3399_PD_EDP>;
index 6618036..9ae31f7 100644 (file)
@@ -1419,7 +1419,7 @@ static int compat_ptrace_hbp_get(unsigned int note_type,
        u64 addr = 0;
        u32 ctrl = 0;
 
-       int err, idx = compat_ptrace_hbp_num_to_idx(num);;
+       int err, idx = compat_ptrace_hbp_num_to_idx(num);
 
        if (num & 1) {
                err = ptrace_hbp_get_addr(note_type, tsk, idx, &addr);
index 762eeb0..2524fb6 100644 (file)
@@ -66,38 +66,35 @@ ATOMIC_OPS(add, +)
 ATOMIC_OPS(sub, -)
 
 #ifdef __OPTIMIZE__
-#define __ia64_atomic_const(i) __builtin_constant_p(i) ?               \
+#define __ia64_atomic_const(i)                                         \
+       static const int __ia64_atomic_p = __builtin_constant_p(i) ?    \
                ((i) == 1 || (i) == 4 || (i) == 8 || (i) == 16 ||       \
-                (i) == -1 || (i) == -4 || (i) == -8 || (i) == -16) : 0
+                (i) == -1 || (i) == -4 || (i) == -8 || (i) == -16) : 0;\
+       __ia64_atomic_p
+#else
+#define __ia64_atomic_const(i) 0
+#endif
 
-#define atomic_add_return(i, v)                                                \
+#define atomic_add_return(i,v)                                         \
 ({                                                                     \
-       int __i = (i);                                                  \
-       static const int __ia64_atomic_p = __ia64_atomic_const(i);      \
-       __ia64_atomic_p ? ia64_fetch_and_add(__i, &(v)->counter) :      \
-                               ia64_atomic_add(__i, v);                \
+       int __ia64_aar_i = (i);                                         \
+       __ia64_atomic_const(i)                                          \
+               ? ia64_fetch_and_add(__ia64_aar_i, &(v)->counter)       \
+               : ia64_atomic_add(__ia64_aar_i, v);                     \
 })
 
-#define atomic_sub_return(i, v)                                                \
+#define atomic_sub_return(i,v)                                         \
 ({                                                                     \
-       int __i = (i);                                                  \
-       static const int __ia64_atomic_p = __ia64_atomic_const(i);      \
-       __ia64_atomic_p ? ia64_fetch_and_add(-__i, &(v)->counter) :     \
-                               ia64_atomic_sub(__i, v);                \
+       int __ia64_asr_i = (i);                                         \
+       __ia64_atomic_const(i)                                          \
+               ? ia64_fetch_and_add(-__ia64_asr_i, &(v)->counter)      \
+               : ia64_atomic_sub(__ia64_asr_i, v);                     \
 })
-#else
-#define atomic_add_return(i, v)        ia64_atomic_add(i, v)
-#define atomic_sub_return(i, v)        ia64_atomic_sub(i, v)
-#endif
 
 #define atomic_fetch_add(i,v)                                          \
 ({                                                                     \
        int __ia64_aar_i = (i);                                         \
-       (__builtin_constant_p(i)                                        \
-        && (   (__ia64_aar_i ==  1) || (__ia64_aar_i ==   4)           \
-            || (__ia64_aar_i ==  8) || (__ia64_aar_i ==  16)           \
-            || (__ia64_aar_i == -1) || (__ia64_aar_i ==  -4)           \
-            || (__ia64_aar_i == -8) || (__ia64_aar_i == -16)))         \
+       __ia64_atomic_const(i)                                          \
                ? ia64_fetchadd(__ia64_aar_i, &(v)->counter, acq)       \
                : ia64_atomic_fetch_add(__ia64_aar_i, v);               \
 })
@@ -105,11 +102,7 @@ ATOMIC_OPS(sub, -)
 #define atomic_fetch_sub(i,v)                                          \
 ({                                                                     \
        int __ia64_asr_i = (i);                                         \
-       (__builtin_constant_p(i)                                        \
-        && (   (__ia64_asr_i ==   1) || (__ia64_asr_i ==   4)          \
-            || (__ia64_asr_i ==   8) || (__ia64_asr_i ==  16)          \
-            || (__ia64_asr_i ==  -1) || (__ia64_asr_i ==  -4)          \
-            || (__ia64_asr_i ==  -8) || (__ia64_asr_i == -16)))        \
+       __ia64_atomic_const(i)                                          \
                ? ia64_fetchadd(-__ia64_asr_i, &(v)->counter, acq)      \
                : ia64_atomic_fetch_sub(__ia64_asr_i, v);               \
 })
@@ -170,11 +163,7 @@ ATOMIC64_OPS(sub, -)
 #define atomic64_add_return(i,v)                                       \
 ({                                                                     \
        long __ia64_aar_i = (i);                                        \
-       (__builtin_constant_p(i)                                        \
-        && (   (__ia64_aar_i ==  1) || (__ia64_aar_i ==   4)           \
-            || (__ia64_aar_i ==  8) || (__ia64_aar_i ==  16)           \
-            || (__ia64_aar_i == -1) || (__ia64_aar_i ==  -4)           \
-            || (__ia64_aar_i == -8) || (__ia64_aar_i == -16)))         \
+       __ia64_atomic_const(i)                                          \
                ? ia64_fetch_and_add(__ia64_aar_i, &(v)->counter)       \
                : ia64_atomic64_add(__ia64_aar_i, v);                   \
 })
@@ -182,11 +171,7 @@ ATOMIC64_OPS(sub, -)
 #define atomic64_sub_return(i,v)                                       \
 ({                                                                     \
        long __ia64_asr_i = (i);                                        \
-       (__builtin_constant_p(i)                                        \
-        && (   (__ia64_asr_i ==   1) || (__ia64_asr_i ==   4)          \
-            || (__ia64_asr_i ==   8) || (__ia64_asr_i ==  16)          \
-            || (__ia64_asr_i ==  -1) || (__ia64_asr_i ==  -4)          \
-            || (__ia64_asr_i ==  -8) || (__ia64_asr_i == -16)))        \
+       __ia64_atomic_const(i)                                          \
                ? ia64_fetch_and_add(-__ia64_asr_i, &(v)->counter)      \
                : ia64_atomic64_sub(__ia64_asr_i, v);                   \
 })
@@ -194,11 +179,7 @@ ATOMIC64_OPS(sub, -)
 #define atomic64_fetch_add(i,v)                                                \
 ({                                                                     \
        long __ia64_aar_i = (i);                                        \
-       (__builtin_constant_p(i)                                        \
-        && (   (__ia64_aar_i ==  1) || (__ia64_aar_i ==   4)           \
-            || (__ia64_aar_i ==  8) || (__ia64_aar_i ==  16)           \
-            || (__ia64_aar_i == -1) || (__ia64_aar_i ==  -4)           \
-            || (__ia64_aar_i == -8) || (__ia64_aar_i == -16)))         \
+       __ia64_atomic_const(i)                                          \
                ? ia64_fetchadd(__ia64_aar_i, &(v)->counter, acq)       \
                : ia64_atomic64_fetch_add(__ia64_aar_i, v);             \
 })
@@ -206,11 +187,7 @@ ATOMIC64_OPS(sub, -)
 #define atomic64_fetch_sub(i,v)                                                \
 ({                                                                     \
        long __ia64_asr_i = (i);                                        \
-       (__builtin_constant_p(i)                                        \
-        && (   (__ia64_asr_i ==   1) || (__ia64_asr_i ==   4)          \
-            || (__ia64_asr_i ==   8) || (__ia64_asr_i ==  16)          \
-            || (__ia64_asr_i ==  -1) || (__ia64_asr_i ==  -4)          \
-            || (__ia64_asr_i ==  -8) || (__ia64_asr_i == -16)))        \
+       __ia64_atomic_const(i)                                          \
                ? ia64_fetchadd(-__ia64_asr_i, &(v)->counter, acq)      \
                : ia64_atomic64_fetch_sub(__ia64_asr_i, v);             \
 })
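
The reworked __ia64_atomic_const() above relies on __builtin_constant_p() folding to a compile-time constant, so the ternary in each wrapper picks the inline fetchadd form only for immediates it can encode and otherwise calls the out-of-line helper. A user-space sketch of that selection pattern, using GCC-style statement expressions and builtins with made-up helper names:

#include <stdio.h>

static int counter_slow_add(int *p, int i)      /* stands in for the out-of-line path */
{
        return *p += i;
}

/* Mirrors __ia64_atomic_const(): non-zero only for a compile-time constant
 * that the "fast" form could encode. */
#define nice_const(i)                                           \
        (__builtin_constant_p(i) &&                             \
         ((i) == 1 || (i) == 4 || (i) == 8 || (i) == 16 ||      \
          (i) == -1 || (i) == -4 || (i) == -8 || (i) == -16))

#define add_return(i, v)                                        \
({                                                              \
        int __i = (i);                                          \
        nice_const(i) ? (*(v) += __i)   /* "fetchadd" path */   \
                      : counter_slow_add((v), __i);             \
})

int main(void)
{
        int counter = 0;

        printf("%d\n", add_return(4, &counter));   /* encodable constant: fast path */
        printf("%d\n", add_return(3, &counter));   /* constant, but not encodable: slow path */
        return 0;
}
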
index 85bba43..8b5b8e6 100644 (file)
@@ -117,7 +117,7 @@ store_call_start(struct device *dev, struct device_attribute *attr,
 
 #ifdef ERR_INJ_DEBUG
        printk(KERN_DEBUG "Returns: status=%d,\n", (int)status[cpu]);
-       printk(KERN_DEBUG "capapbilities=%lx,\n", capabilities[cpu]);
+       printk(KERN_DEBUG "capabilities=%lx,\n", capabilities[cpu]);
        printk(KERN_DEBUG "resources=%lx\n", resources[cpu]);
 #endif
        return size;
@@ -142,7 +142,7 @@ store_virtual_to_phys(struct device *dev, struct device_attribute *attr,
        u64 virt_addr=simple_strtoull(buf, NULL, 16);
        int ret;
 
-       ret = get_user_pages(virt_addr, 1, FOLL_WRITE, NULL, NULL);
+       ret = get_user_pages_fast(virt_addr, 1, FOLL_WRITE, NULL);
        if (ret<=0) {
 #ifdef ERR_INJ_DEBUG
                printk("Virtual address %lx is not existing.\n",virt_addr);
index 89f3a14..c55276e 100644 (file)
@@ -16,7 +16,7 @@ import re
 import sys
 
 if len(sys.argv) != 2:
-    print "Usage: %s FILE" % sys.argv[0]
+    print("Usage: %s FILE" % sys.argv[0])
     sys.exit(2)
 
 readelf = os.getenv("READELF", "readelf")
@@ -29,7 +29,7 @@ def check_func (func, slots, rlen_sum):
         global num_errors
         num_errors += 1
         if not func: func = "[%#x-%#x]" % (start, end)
-        print "ERROR: %s: %lu slots, total region length = %lu" % (func, slots, rlen_sum)
+        print("ERROR: %s: %lu slots, total region length = %lu" % (func, slots, rlen_sum))
     return
 
 num_funcs = 0
@@ -43,23 +43,23 @@ for line in os.popen("%s -u %s" % (readelf, sys.argv[1])):
         check_func(func, slots, rlen_sum)
 
         func  = m.group(1)
-        start = long(m.group(2), 16)
-        end   = long(m.group(3), 16)
+        start = int(m.group(2), 16)
+        end   = int(m.group(3), 16)
         slots = 3 * (end - start) / 16
-        rlen_sum = 0L
+        rlen_sum = 0
         num_funcs += 1
     else:
         m = rlen_pattern.match(line)
         if m:
-            rlen_sum += long(m.group(1))
+            rlen_sum += int(m.group(1))
 check_func(func, slots, rlen_sum)
 
 if num_errors == 0:
-    print "No errors detected in %u functions." % num_funcs
+    print("No errors detected in %u functions." % num_funcs)
 else:
     if num_errors > 1:
         err="errors"
     else:
         err="error"
-    print "%u %s detected in %u functions." % (num_errors, err, num_funcs)
+    print("%u %s detected in %u functions." % (num_errors, err, num_funcs))
     sys.exit(1)
index 3742508..bd5ce31 100644 (file)
@@ -26,6 +26,7 @@ void flush_user_icache_range_asm(unsigned long, unsigned long);
 void flush_kernel_icache_range_asm(unsigned long, unsigned long);
 void flush_user_dcache_range_asm(unsigned long, unsigned long);
 void flush_kernel_dcache_range_asm(unsigned long, unsigned long);
+void purge_kernel_dcache_range_asm(unsigned long, unsigned long);
 void flush_kernel_dcache_page_asm(void *);
 void flush_kernel_icache_page(void *);
 
index 0e6ab6e..2dbe558 100644 (file)
@@ -316,6 +316,8 @@ extern int _parisc_requires_coherency;
 #define parisc_requires_coherency()    (0)
 #endif
 
+extern int running_on_qemu;
+
 #endif /* __ASSEMBLY__ */
 
 #endif /* __ASM_PARISC_PROCESSOR_H */
index 19c0c14..7908977 100644 (file)
@@ -465,10 +465,10 @@ EXPORT_SYMBOL(copy_user_page);
 int __flush_tlb_range(unsigned long sid, unsigned long start,
                      unsigned long end)
 {
-       unsigned long flags, size;
+       unsigned long flags;
 
-       size = (end - start);
-       if (size >= parisc_tlb_flush_threshold) {
+       if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
+           end - start >= parisc_tlb_flush_threshold) {
                flush_tlb_all();
                return 1;
        }
@@ -539,13 +539,11 @@ void flush_cache_mm(struct mm_struct *mm)
        struct vm_area_struct *vma;
        pgd_t *pgd;
 
-       /* Flush the TLB to avoid speculation if coherency is required. */
-       if (parisc_requires_coherency())
-               flush_tlb_all();
-
        /* Flushing the whole cache on each cpu takes forever on
           rp3440, etc.  So, avoid it if the mm isn't too big.  */
-       if (mm_total_size(mm) >= parisc_cache_flush_threshold) {
+       if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
+           mm_total_size(mm) >= parisc_cache_flush_threshold) {
+               flush_tlb_all();
                flush_cache_all();
                return;
        }
@@ -553,9 +551,9 @@ void flush_cache_mm(struct mm_struct *mm)
        if (mm->context == mfsp(3)) {
                for (vma = mm->mmap; vma; vma = vma->vm_next) {
                        flush_user_dcache_range_asm(vma->vm_start, vma->vm_end);
-                       if ((vma->vm_flags & VM_EXEC) == 0)
-                               continue;
-                       flush_user_icache_range_asm(vma->vm_start, vma->vm_end);
+                       if (vma->vm_flags & VM_EXEC)
+                               flush_user_icache_range_asm(vma->vm_start, vma->vm_end);
+                       flush_tlb_range(vma, vma->vm_start, vma->vm_end);
                }
                return;
        }
@@ -581,14 +579,9 @@ void flush_cache_mm(struct mm_struct *mm)
 void flush_cache_range(struct vm_area_struct *vma,
                unsigned long start, unsigned long end)
 {
-       BUG_ON(!vma->vm_mm->context);
-
-       /* Flush the TLB to avoid speculation if coherency is required. */
-       if (parisc_requires_coherency())
+       if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
+           end - start >= parisc_cache_flush_threshold) {
                flush_tlb_range(vma, start, end);
-
-       if ((end - start) >= parisc_cache_flush_threshold
-           || vma->vm_mm->context != mfsp(3)) {
                flush_cache_all();
                return;
        }
@@ -596,6 +589,7 @@ void flush_cache_range(struct vm_area_struct *vma,
        flush_user_dcache_range_asm(start, end);
        if (vma->vm_flags & VM_EXEC)
                flush_user_icache_range_asm(start, end);
+       flush_tlb_range(vma, start, end);
 }
 
 void
@@ -604,8 +598,7 @@ flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long
        BUG_ON(!vma->vm_mm->context);
 
        if (pfn_valid(pfn)) {
-               if (parisc_requires_coherency())
-                       flush_tlb_page(vma, vmaddr);
+               flush_tlb_page(vma, vmaddr);
                __flush_cache_page(vma, vmaddr, PFN_PHYS(pfn));
        }
 }
@@ -613,21 +606,33 @@ flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long
 void flush_kernel_vmap_range(void *vaddr, int size)
 {
        unsigned long start = (unsigned long)vaddr;
+       unsigned long end = start + size;
 
-       if ((unsigned long)size > parisc_cache_flush_threshold)
+       if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
+           (unsigned long)size >= parisc_cache_flush_threshold) {
+               flush_tlb_kernel_range(start, end);
                flush_data_cache();
-       else
-               flush_kernel_dcache_range_asm(start, start + size);
+               return;
+       }
+
+       flush_kernel_dcache_range_asm(start, end);
+       flush_tlb_kernel_range(start, end);
 }
 EXPORT_SYMBOL(flush_kernel_vmap_range);
 
 void invalidate_kernel_vmap_range(void *vaddr, int size)
 {
        unsigned long start = (unsigned long)vaddr;
+       unsigned long end = start + size;
 
-       if ((unsigned long)size > parisc_cache_flush_threshold)
+       if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
+           (unsigned long)size >= parisc_cache_flush_threshold) {
+               flush_tlb_kernel_range(start, end);
                flush_data_cache();
-       else
-               flush_kernel_dcache_range_asm(start, start + size);
+               return;
+       }
+
+       purge_kernel_dcache_range_asm(start, end);
+       flush_tlb_kernel_range(start, end);
 }
 EXPORT_SYMBOL(invalidate_kernel_vmap_range);
index bbbe360..fbb4e43 100644 (file)
@@ -138,6 +138,16 @@ $pgt_fill_loop:
        std             %dp,0x18(%r10)
 #endif
 
+#ifdef CONFIG_64BIT
+       /* Get PDCE_PROC for monarch CPU. */
+#define MEM_PDC_LO 0x388
+#define MEM_PDC_HI 0x35C
+       ldw             MEM_PDC_LO(%r0),%r3
+       ldw             MEM_PDC_HI(%r0),%r10
+       depd            %r10, 31, 32, %r3        /* move to upper word */
+#endif
+
+
 #ifdef CONFIG_SMP
        /* Set the smp rendezvous address into page zero.
        ** It would be safer to do this in init_smp_config() but
@@ -196,12 +206,6 @@ common_stext:
         ** Someday, palo might not do this for the Monarch either.
         */
 2:
-#define MEM_PDC_LO 0x388
-#define MEM_PDC_HI 0x35C
-       ldw             MEM_PDC_LO(%r0),%r3
-       ldw             MEM_PDC_HI(%r0),%r6
-       depd            %r6, 31, 32, %r3        /* move to upper word */
-
        mfctl           %cr30,%r6               /* PCX-W2 firmware bug */
 
        ldo             PDC_PSW(%r0),%arg0              /* 21 */
@@ -268,6 +272,8 @@ $install_iva:
 aligned_rfi:
        pcxt_ssm_bug
 
+       copy            %r3, %arg0      /* PDCE_PROC for smp_callin() */
+
        rsm             PSW_SM_QUIET,%r0        /* off troublesome PSW bits */
        /* Don't need NOPs, have 8 compliant insn before rfi */
 
index 2d40c4f..67b0f75 100644 (file)
@@ -1110,6 +1110,28 @@ ENTRY_CFI(flush_kernel_dcache_range_asm)
        .procend
 ENDPROC_CFI(flush_kernel_dcache_range_asm)
 
+ENTRY_CFI(purge_kernel_dcache_range_asm)
+       .proc
+       .callinfo NO_CALLS
+       .entry
+
+       ldil            L%dcache_stride, %r1
+       ldw             R%dcache_stride(%r1), %r23
+       ldo             -1(%r23), %r21
+       ANDCM           %r26, %r21, %r26
+
+1:      cmpb,COND(<<),n        %r26, %r25,1b
+       pdc,m           %r23(%r26)
+
+       sync
+       syncdma
+       bv              %r0(%r2)
+       nop
+       .exit
+
+       .procend
+ENDPROC_CFI(purge_kernel_dcache_range_asm)
+
 ENTRY_CFI(flush_user_icache_range_asm)
        .proc
        .callinfo NO_CALLS
index 30c28ab..4065b5e 100644 (file)
@@ -292,10 +292,15 @@ smp_cpu_init(int cpunum)
  * Slaves start using C here. Indirectly called from smp_slave_stext.
  * Do what start_kernel() and main() do for boot strap processor (aka monarch)
  */
-void __init smp_callin(void)
+void __init smp_callin(unsigned long pdce_proc)
 {
        int slave_id = cpu_now_booting;
 
+#ifdef CONFIG_64BIT
+       WARN_ON(((unsigned long)(PAGE0->mem_pdc_hi) << 32
+                       | PAGE0->mem_pdc) != pdce_proc);
+#endif
+
        smp_cpu_init(slave_id);
        preempt_disable();
 
index 4b8fd6d..f7e6845 100644 (file)
@@ -76,10 +76,10 @@ irqreturn_t __irq_entry timer_interrupt(int irq, void *dev_id)
        next_tick = cpuinfo->it_value;
 
        /* Calculate how many ticks have elapsed. */
+       now = mfctl(16);
        do {
                ++ticks_elapsed;
                next_tick += cpt;
-               now = mfctl(16);
        } while (next_tick - now > cpt);
 
        /* Store (in CR16 cycles) up to when we are accounting right now. */
@@ -103,16 +103,17 @@ irqreturn_t __irq_entry timer_interrupt(int irq, void *dev_id)
         * if one or the other wrapped. If "now" is "bigger" we'll end up
         * with a very large unsigned number.
         */
-       while (next_tick - mfctl(16) > cpt)
+       now = mfctl(16);
+       while (next_tick - now > cpt)
                next_tick += cpt;
 
        /* Program the IT when to deliver the next interrupt.
         * Only bottom 32-bits of next_tick are writable in CR16!
         * Timer interrupt will be delivered at least a few hundred cycles
-        * after the IT fires, so if we are too close (<= 500 cycles) to the
+        * after the IT fires, so if we are too close (<= 8000 cycles) to the
         * next cycle, simply skip it.
         */
-       if (next_tick - mfctl(16) <= 500)
+       if (next_tick - now <= 8000)
                next_tick += cpt;
        mtctl(next_tick, 16);
 
@@ -248,7 +249,7 @@ static int __init init_cr16_clocksource(void)
         * different sockets, so mark them unstable and lower rating on
         * multi-socket SMP systems.
         */
-       if (num_online_cpus() > 1) {
+       if (num_online_cpus() > 1 && !running_on_qemu) {
                int cpu;
                unsigned long cpu0_loc;
                cpu0_loc = per_cpu(cpu_data, 0).cpu_loc;
index 48f4139..cab32ee 100644 (file)
@@ -629,7 +629,12 @@ void __init mem_init(void)
 #endif
 
        mem_init_print_info(NULL);
-#ifdef CONFIG_DEBUG_KERNEL /* double-sanity-check paranoia */
+
+#if 0
+       /*
+        * Do not expose the virtual kernel memory layout to userspace.
+        * But keep code for debugging purposes.
+        */
        printk("virtual kernel memory layout:\n"
               "    vmalloc : 0x%px - 0x%px   (%4ld MB)\n"
               "    memory  : 0x%px - 0x%px   (%4ld MB)\n"
index 511acfd..535add3 100644 (file)
@@ -52,7 +52,7 @@
 #define FW_FEATURE_TYPE1_AFFINITY ASM_CONST(0x0000000100000000)
 #define FW_FEATURE_PRRN                ASM_CONST(0x0000000200000000)
 #define FW_FEATURE_DRMEM_V2    ASM_CONST(0x0000000400000000)
-#define FW_FEATURE_DRC_INFO    ASM_CONST(0x0000000400000000)
+#define FW_FEATURE_DRC_INFO    ASM_CONST(0x0000000800000000)
 
 #ifndef __ASSEMBLY__
 
index beea218..0c0b66f 100644 (file)
@@ -384,7 +384,8 @@ static void *eeh_report_resume(void *data, void *userdata)
        eeh_pcid_put(dev);
        pci_uevent_ers(dev, PCI_ERS_RESULT_RECOVERED);
 #ifdef CONFIG_PCI_IOV
-       eeh_ops->notify_resume(eeh_dev_to_pdn(edev));
+       if (eeh_ops->notify_resume && eeh_dev_to_pdn(edev))
+               eeh_ops->notify_resume(eeh_dev_to_pdn(edev));
 #endif
        return NULL;
 }
index adf044d..d22c41c 100644 (file)
@@ -874,7 +874,7 @@ struct ibm_arch_vec __cacheline_aligned ibm_architecture_vec = {
                .mmu = 0,
                .hash_ext = 0,
                .radix_ext = 0,
-               .byte22 = OV5_FEAT(OV5_DRC_INFO),
+               .byte22 = 0,
        },
 
        /* option vector 6: IBM PAPR hints */
index f0f5cd4..f9818d7 100644 (file)
@@ -188,7 +188,7 @@ static int xive_provision_queue(struct kvm_vcpu *vcpu, u8 prio)
        if (!qpage) {
                pr_err("Failed to allocate queue %d for VCPU %d\n",
                       prio, xc->server_num);
-               return -ENOMEM;;
+               return -ENOMEM;
        }
        memset(qpage, 0, 1 << xive->q_order);
 
index 916844f..3f18036 100644 (file)
@@ -98,7 +98,7 @@ static void init_drconf_v2_cell(struct of_drconf_cell_v2 *dr_cell,
        dr_cell->base_addr = cpu_to_be64(lmb->base_addr);
        dr_cell->drc_index = cpu_to_be32(lmb->drc_index);
        dr_cell->aa_index = cpu_to_be32(lmb->aa_index);
-       dr_cell->flags = cpu_to_be32(lmb->flags);
+       dr_cell->flags = cpu_to_be32(drmem_lmb_flags(lmb));
 }
 
 static int drmem_update_dt_v2(struct device_node *memory,
@@ -121,7 +121,7 @@ static int drmem_update_dt_v2(struct device_node *memory,
                }
 
                if (prev_lmb->aa_index != lmb->aa_index ||
-                   prev_lmb->flags != lmb->flags)
+                   drmem_lmb_flags(prev_lmb) != drmem_lmb_flags(lmb))
                        lmb_sets++;
 
                prev_lmb = lmb;
@@ -150,7 +150,7 @@ static int drmem_update_dt_v2(struct device_node *memory,
                }
 
                if (prev_lmb->aa_index != lmb->aa_index ||
-                   prev_lmb->flags != lmb->flags) {
+                   drmem_lmb_flags(prev_lmb) != drmem_lmb_flags(lmb)) {
                        /* end of one set, start of another */
                        dr_cell->seq_lmbs = cpu_to_be32(seq_lmbs);
                        dr_cell++;
index 872d1f6..a9636d8 100644 (file)
@@ -327,6 +327,9 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image,
                        BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, len) != 4);
                        PPC_LWZ_OFFS(r_A, r_skb, offsetof(struct sk_buff, len));
                        break;
+               case BPF_LDX | BPF_W | BPF_ABS: /* A = *((u32 *)(seccomp_data + K)); */
+                       PPC_LWZ_OFFS(r_A, r_skb, K);
+                       break;
                case BPF_LDX | BPF_W | BPF_LEN: /* X = skb->len; */
                        PPC_LWZ_OFFS(r_X, r_skb, offsetof(struct sk_buff, len));
                        break;
index 0a34b0c..0ef3d95 100644 (file)
@@ -240,6 +240,7 @@ static void bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32
         *   goto out;
         */
        PPC_LWZ(b2p[TMP_REG_1], b2p_bpf_array, offsetof(struct bpf_array, map.max_entries));
+       PPC_RLWINM(b2p_index, b2p_index, 0, 0, 31);
        PPC_CMPLW(b2p_index, b2p[TMP_REG_1]);
        PPC_BCC(COND_GE, out);
 
index 496e476..a6c92c7 100644 (file)
@@ -1854,7 +1854,7 @@ static int pnv_pci_ioda_dma_set_mask(struct pci_dev *pdev, u64 dma_mask)
        s64 rc;
 
        if (WARN_ON(!pdn || pdn->pe_number == IODA_INVALID_PE))
-               return -ENODEV;;
+               return -ENODEV;
 
        pe = &phb->ioda.pe_array[pdn->pe_number];
        if (pe->tce_bypass_enabled) {
index 4fb21e1..092715b 100644 (file)
@@ -80,6 +80,10 @@ static void pnv_setup_rfi_flush(void)
                if (np && of_property_read_bool(np, "disabled"))
                        enable--;
 
+               np = of_get_child_by_name(fw_features, "speculation-policy-favor-security");
+               if (np && of_property_read_bool(np, "disabled"))
+                       enable = 0;
+
                of_node_put(np);
                of_node_put(fw_features);
        }
index 372d7ad..1a52762 100644 (file)
@@ -482,7 +482,8 @@ static void pseries_setup_rfi_flush(void)
                if (types == L1D_FLUSH_NONE)
                        types = L1D_FLUSH_FALLBACK;
 
-               if (!(result.behaviour & H_CPU_BEHAV_L1D_FLUSH_PR))
+               if ((!(result.behaviour & H_CPU_BEHAV_L1D_FLUSH_PR)) ||
+                   (!(result.behaviour & H_CPU_BEHAV_FAVOUR_SECURITY)))
                        enable = false;
        } else {
                /* Default to fallback if case hcall is not available */
index c0319cb..5510366 100644 (file)
@@ -34,9 +34,9 @@
 #define wmb()          RISCV_FENCE(ow,ow)
 
 /* These barriers do not need to enforce ordering on devices, just memory. */
-#define smp_mb()       RISCV_FENCE(rw,rw)
-#define smp_rmb()      RISCV_FENCE(r,r)
-#define smp_wmb()      RISCV_FENCE(w,w)
+#define __smp_mb()     RISCV_FENCE(rw,rw)
+#define __smp_rmb()    RISCV_FENCE(r,r)
+#define __smp_wmb()    RISCV_FENCE(w,w)
 
 /*
  * This is a very specific barrier: it's currently only used in two places in
index 9c7d707..07c6e81 100644 (file)
 #include "trace.h"
 #include "trace-s390.h"
 
-
-static const intercept_handler_t instruction_handlers[256] = {
-       [0x01] = kvm_s390_handle_01,
-       [0x82] = kvm_s390_handle_lpsw,
-       [0x83] = kvm_s390_handle_diag,
-       [0xaa] = kvm_s390_handle_aa,
-       [0xae] = kvm_s390_handle_sigp,
-       [0xb2] = kvm_s390_handle_b2,
-       [0xb6] = kvm_s390_handle_stctl,
-       [0xb7] = kvm_s390_handle_lctl,
-       [0xb9] = kvm_s390_handle_b9,
-       [0xe3] = kvm_s390_handle_e3,
-       [0xe5] = kvm_s390_handle_e5,
-       [0xeb] = kvm_s390_handle_eb,
-};
-
 u8 kvm_s390_get_ilen(struct kvm_vcpu *vcpu)
 {
        struct kvm_s390_sie_block *sie_block = vcpu->arch.sie_block;
@@ -129,16 +113,39 @@ static int handle_validity(struct kvm_vcpu *vcpu)
 
 static int handle_instruction(struct kvm_vcpu *vcpu)
 {
-       intercept_handler_t handler;
-
        vcpu->stat.exit_instruction++;
        trace_kvm_s390_intercept_instruction(vcpu,
                                             vcpu->arch.sie_block->ipa,
                                             vcpu->arch.sie_block->ipb);
-       handler = instruction_handlers[vcpu->arch.sie_block->ipa >> 8];
-       if (handler)
-               return handler(vcpu);
-       return -EOPNOTSUPP;
+
+       switch (vcpu->arch.sie_block->ipa >> 8) {
+       case 0x01:
+               return kvm_s390_handle_01(vcpu);
+       case 0x82:
+               return kvm_s390_handle_lpsw(vcpu);
+       case 0x83:
+               return kvm_s390_handle_diag(vcpu);
+       case 0xaa:
+               return kvm_s390_handle_aa(vcpu);
+       case 0xae:
+               return kvm_s390_handle_sigp(vcpu);
+       case 0xb2:
+               return kvm_s390_handle_b2(vcpu);
+       case 0xb6:
+               return kvm_s390_handle_stctl(vcpu);
+       case 0xb7:
+               return kvm_s390_handle_lctl(vcpu);
+       case 0xb9:
+               return kvm_s390_handle_b9(vcpu);
+       case 0xe3:
+               return kvm_s390_handle_e3(vcpu);
+       case 0xe5:
+               return kvm_s390_handle_e5(vcpu);
+       case 0xeb:
+               return kvm_s390_handle_eb(vcpu);
+       default:
+               return -EOPNOTSUPP;
+       }
 }
 
 static int inject_prog_on_prog_intercept(struct kvm_vcpu *vcpu)
index aabf46f..b04616b 100644
@@ -169,8 +169,15 @@ static int ckc_interrupts_enabled(struct kvm_vcpu *vcpu)
 
 static int ckc_irq_pending(struct kvm_vcpu *vcpu)
 {
-       if (vcpu->arch.sie_block->ckc >= kvm_s390_get_tod_clock_fast(vcpu->kvm))
+       const u64 now = kvm_s390_get_tod_clock_fast(vcpu->kvm);
+       const u64 ckc = vcpu->arch.sie_block->ckc;
+
+       if (vcpu->arch.sie_block->gcr[0] & 0x0020000000000000ul) {
+               if ((s64)ckc >= (s64)now)
+                       return 0;
+       } else if (ckc >= now) {
                return 0;
+       }
        return ckc_interrupts_enabled(vcpu);
 }
 
@@ -187,12 +194,6 @@ static int cpu_timer_irq_pending(struct kvm_vcpu *vcpu)
        return kvm_s390_get_cpu_timer(vcpu) >> 63;
 }
 
-static inline int is_ioirq(unsigned long irq_type)
-{
-       return ((irq_type >= IRQ_PEND_IO_ISC_7) &&
-               (irq_type <= IRQ_PEND_IO_ISC_0));
-}
-
 static uint64_t isc_to_isc_bits(int isc)
 {
        return (0x80 >> isc) << 24;
@@ -236,10 +237,15 @@ static inline int kvm_s390_gisa_tac_ipm_gisc(struct kvm_s390_gisa *gisa, u32 gis
        return test_and_clear_bit_inv(IPM_BIT_OFFSET + gisc, (unsigned long *) gisa);
 }
 
-static inline unsigned long pending_irqs(struct kvm_vcpu *vcpu)
+static inline unsigned long pending_irqs_no_gisa(struct kvm_vcpu *vcpu)
 {
        return vcpu->kvm->arch.float_int.pending_irqs |
-               vcpu->arch.local_int.pending_irqs |
+               vcpu->arch.local_int.pending_irqs;
+}
+
+static inline unsigned long pending_irqs(struct kvm_vcpu *vcpu)
+{
+       return pending_irqs_no_gisa(vcpu) |
                kvm_s390_gisa_get_ipm(vcpu->kvm->arch.gisa) << IRQ_PEND_IO_ISC_7;
 }
 
@@ -337,7 +343,7 @@ static void __reset_intercept_indicators(struct kvm_vcpu *vcpu)
 
 static void set_intercept_indicators_io(struct kvm_vcpu *vcpu)
 {
-       if (!(pending_irqs(vcpu) & IRQ_PEND_IO_MASK))
+       if (!(pending_irqs_no_gisa(vcpu) & IRQ_PEND_IO_MASK))
                return;
        else if (psw_ioint_disabled(vcpu))
                kvm_s390_set_cpuflags(vcpu, CPUSTAT_IO_INT);
@@ -1011,24 +1017,6 @@ out:
        return rc;
 }
 
-typedef int (*deliver_irq_t)(struct kvm_vcpu *vcpu);
-
-static const deliver_irq_t deliver_irq_funcs[] = {
-       [IRQ_PEND_MCHK_EX]        = __deliver_machine_check,
-       [IRQ_PEND_MCHK_REP]       = __deliver_machine_check,
-       [IRQ_PEND_PROG]           = __deliver_prog,
-       [IRQ_PEND_EXT_EMERGENCY]  = __deliver_emergency_signal,
-       [IRQ_PEND_EXT_EXTERNAL]   = __deliver_external_call,
-       [IRQ_PEND_EXT_CLOCK_COMP] = __deliver_ckc,
-       [IRQ_PEND_EXT_CPU_TIMER]  = __deliver_cpu_timer,
-       [IRQ_PEND_RESTART]        = __deliver_restart,
-       [IRQ_PEND_SET_PREFIX]     = __deliver_set_prefix,
-       [IRQ_PEND_PFAULT_INIT]    = __deliver_pfault_init,
-       [IRQ_PEND_EXT_SERVICE]    = __deliver_service,
-       [IRQ_PEND_PFAULT_DONE]    = __deliver_pfault_done,
-       [IRQ_PEND_VIRTIO]         = __deliver_virtio,
-};
-
 /* Check whether an external call is pending (deliverable or not) */
 int kvm_s390_ext_call_pending(struct kvm_vcpu *vcpu)
 {
@@ -1066,13 +1054,19 @@ int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
 
 static u64 __calculate_sltime(struct kvm_vcpu *vcpu)
 {
-       u64 now, cputm, sltime = 0;
+       const u64 now = kvm_s390_get_tod_clock_fast(vcpu->kvm);
+       const u64 ckc = vcpu->arch.sie_block->ckc;
+       u64 cputm, sltime = 0;
 
        if (ckc_interrupts_enabled(vcpu)) {
-               now = kvm_s390_get_tod_clock_fast(vcpu->kvm);
-               sltime = tod_to_ns(vcpu->arch.sie_block->ckc - now);
-               /* already expired or overflow? */
-               if (!sltime || vcpu->arch.sie_block->ckc <= now)
+               if (vcpu->arch.sie_block->gcr[0] & 0x0020000000000000ul) {
+                       if ((s64)now < (s64)ckc)
+                               sltime = tod_to_ns((s64)ckc - (s64)now);
+               } else if (now < ckc) {
+                       sltime = tod_to_ns(ckc - now);
+               }
+               /* already expired */
+               if (!sltime)
                        return 0;
                if (cpu_timer_interrupts_enabled(vcpu)) {
                        cputm = kvm_s390_get_cpu_timer(vcpu);
@@ -1192,7 +1186,6 @@ void kvm_s390_clear_local_irqs(struct kvm_vcpu *vcpu)
 int __must_check kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu)
 {
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
-       deliver_irq_t func;
        int rc = 0;
        unsigned long irq_type;
        unsigned long irqs;
@@ -1212,16 +1205,57 @@ int __must_check kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu)
        while ((irqs = deliverable_irqs(vcpu)) && !rc) {
                /* bits are in the reverse order of interrupt priority */
                irq_type = find_last_bit(&irqs, IRQ_PEND_COUNT);
-               if (is_ioirq(irq_type)) {
+               switch (irq_type) {
+               case IRQ_PEND_IO_ISC_0:
+               case IRQ_PEND_IO_ISC_1:
+               case IRQ_PEND_IO_ISC_2:
+               case IRQ_PEND_IO_ISC_3:
+               case IRQ_PEND_IO_ISC_4:
+               case IRQ_PEND_IO_ISC_5:
+               case IRQ_PEND_IO_ISC_6:
+               case IRQ_PEND_IO_ISC_7:
                        rc = __deliver_io(vcpu, irq_type);
-               } else {
-                       func = deliver_irq_funcs[irq_type];
-                       if (!func) {
-                               WARN_ON_ONCE(func == NULL);
-                               clear_bit(irq_type, &li->pending_irqs);
-                               continue;
-                       }
-                       rc = func(vcpu);
+                       break;
+               case IRQ_PEND_MCHK_EX:
+               case IRQ_PEND_MCHK_REP:
+                       rc = __deliver_machine_check(vcpu);
+                       break;
+               case IRQ_PEND_PROG:
+                       rc = __deliver_prog(vcpu);
+                       break;
+               case IRQ_PEND_EXT_EMERGENCY:
+                       rc = __deliver_emergency_signal(vcpu);
+                       break;
+               case IRQ_PEND_EXT_EXTERNAL:
+                       rc = __deliver_external_call(vcpu);
+                       break;
+               case IRQ_PEND_EXT_CLOCK_COMP:
+                       rc = __deliver_ckc(vcpu);
+                       break;
+               case IRQ_PEND_EXT_CPU_TIMER:
+                       rc = __deliver_cpu_timer(vcpu);
+                       break;
+               case IRQ_PEND_RESTART:
+                       rc = __deliver_restart(vcpu);
+                       break;
+               case IRQ_PEND_SET_PREFIX:
+                       rc = __deliver_set_prefix(vcpu);
+                       break;
+               case IRQ_PEND_PFAULT_INIT:
+                       rc = __deliver_pfault_init(vcpu);
+                       break;
+               case IRQ_PEND_EXT_SERVICE:
+                       rc = __deliver_service(vcpu);
+                       break;
+               case IRQ_PEND_PFAULT_DONE:
+                       rc = __deliver_pfault_done(vcpu);
+                       break;
+               case IRQ_PEND_VIRTIO:
+                       rc = __deliver_virtio(vcpu);
+                       break;
+               default:
+                       WARN_ONCE(1, "Unknown pending irq type %ld", irq_type);
+                       clear_bit(irq_type, &li->pending_irqs);
                }
        }
 
@@ -1701,7 +1735,8 @@ static void __floating_irq_kick(struct kvm *kvm, u64 type)
                kvm_s390_set_cpuflags(dst_vcpu, CPUSTAT_STOP_INT);
                break;
        case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
-               kvm_s390_set_cpuflags(dst_vcpu, CPUSTAT_IO_INT);
+               if (!(type & KVM_S390_INT_IO_AI_MASK && kvm->arch.gisa))
+                       kvm_s390_set_cpuflags(dst_vcpu, CPUSTAT_IO_INT);
                break;
        default:
                kvm_s390_set_cpuflags(dst_vcpu, CPUSTAT_EXT_INT);
index ba4c709..77d7818 100644
@@ -179,6 +179,28 @@ int kvm_arch_hardware_enable(void)
 static void kvm_gmap_notifier(struct gmap *gmap, unsigned long start,
                              unsigned long end);
 
+static void kvm_clock_sync_scb(struct kvm_s390_sie_block *scb, u64 delta)
+{
+       u8 delta_idx = 0;
+
+       /*
+        * The TOD jumps by delta, we have to compensate this by adding
+        * -delta to the epoch.
+        */
+       delta = -delta;
+
+       /* sign-extension - we're adding to signed values below */
+       if ((s64)delta < 0)
+               delta_idx = -1;
+
+       scb->epoch += delta;
+       if (scb->ecd & ECD_MEF) {
+               scb->epdx += delta_idx;
+               if (scb->epoch < delta)
+                       scb->epdx += 1;
+       }
+}
+
 /*
  * This callback is executed during stop_machine(). All CPUs are therefore
  * temporarily stopped. In order not to change guest behavior, we have to
@@ -194,13 +216,17 @@ static int kvm_clock_sync(struct notifier_block *notifier, unsigned long val,
        unsigned long long *delta = v;
 
        list_for_each_entry(kvm, &vm_list, vm_list) {
-               kvm->arch.epoch -= *delta;
                kvm_for_each_vcpu(i, vcpu, kvm) {
-                       vcpu->arch.sie_block->epoch -= *delta;
+                       kvm_clock_sync_scb(vcpu->arch.sie_block, *delta);
+                       if (i == 0) {
+                               kvm->arch.epoch = vcpu->arch.sie_block->epoch;
+                               kvm->arch.epdx = vcpu->arch.sie_block->epdx;
+                       }
                        if (vcpu->arch.cputm_enabled)
                                vcpu->arch.cputm_start += *delta;
                        if (vcpu->arch.vsie_block)
-                               vcpu->arch.vsie_block->epoch -= *delta;
+                               kvm_clock_sync_scb(vcpu->arch.vsie_block,
+                                                  *delta);
                }
        }
        return NOTIFY_OK;
@@ -902,12 +928,9 @@ static int kvm_s390_set_tod_ext(struct kvm *kvm, struct kvm_device_attr *attr)
        if (copy_from_user(&gtod, (void __user *)attr->addr, sizeof(gtod)))
                return -EFAULT;
 
-       if (test_kvm_facility(kvm, 139))
-               kvm_s390_set_tod_clock_ext(kvm, &gtod);
-       else if (gtod.epoch_idx == 0)
-               kvm_s390_set_tod_clock(kvm, gtod.tod);
-       else
+       if (!test_kvm_facility(kvm, 139) && gtod.epoch_idx)
                return -EINVAL;
+       kvm_s390_set_tod_clock(kvm, &gtod);
 
        VM_EVENT(kvm, 3, "SET: TOD extension: 0x%x, TOD base: 0x%llx",
                gtod.epoch_idx, gtod.tod);
@@ -932,13 +955,14 @@ static int kvm_s390_set_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
 
 static int kvm_s390_set_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
 {
-       u64 gtod;
+       struct kvm_s390_vm_tod_clock gtod = { 0 };
 
-       if (copy_from_user(&gtod, (void __user *)attr->addr, sizeof(gtod)))
+       if (copy_from_user(&gtod.tod, (void __user *)attr->addr,
+                          sizeof(gtod.tod)))
                return -EFAULT;
 
-       kvm_s390_set_tod_clock(kvm, gtod);
-       VM_EVENT(kvm, 3, "SET: TOD base: 0x%llx", gtod);
+       kvm_s390_set_tod_clock(kvm, &gtod);
+       VM_EVENT(kvm, 3, "SET: TOD base: 0x%llx", gtod.tod);
        return 0;
 }
 
@@ -2389,6 +2413,7 @@ void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
        mutex_lock(&vcpu->kvm->lock);
        preempt_disable();
        vcpu->arch.sie_block->epoch = vcpu->kvm->arch.epoch;
+       vcpu->arch.sie_block->epdx = vcpu->kvm->arch.epdx;
        preempt_enable();
        mutex_unlock(&vcpu->kvm->lock);
        if (!kvm_is_ucontrol(vcpu->kvm)) {
@@ -3021,8 +3046,8 @@ retry:
        return 0;
 }
 
-void kvm_s390_set_tod_clock_ext(struct kvm *kvm,
-                                const struct kvm_s390_vm_tod_clock *gtod)
+void kvm_s390_set_tod_clock(struct kvm *kvm,
+                           const struct kvm_s390_vm_tod_clock *gtod)
 {
        struct kvm_vcpu *vcpu;
        struct kvm_s390_tod_clock_ext htod;
@@ -3034,10 +3059,12 @@ void kvm_s390_set_tod_clock_ext(struct kvm *kvm,
        get_tod_clock_ext((char *)&htod);
 
        kvm->arch.epoch = gtod->tod - htod.tod;
-       kvm->arch.epdx = gtod->epoch_idx - htod.epoch_idx;
-
-       if (kvm->arch.epoch > gtod->tod)
-               kvm->arch.epdx -= 1;
+       kvm->arch.epdx = 0;
+       if (test_kvm_facility(kvm, 139)) {
+               kvm->arch.epdx = gtod->epoch_idx - htod.epoch_idx;
+               if (kvm->arch.epoch > gtod->tod)
+                       kvm->arch.epdx -= 1;
+       }
 
        kvm_s390_vcpu_block_all(kvm);
        kvm_for_each_vcpu(i, vcpu, kvm) {
@@ -3050,22 +3077,6 @@ void kvm_s390_set_tod_clock_ext(struct kvm *kvm,
        mutex_unlock(&kvm->lock);
 }
 
-void kvm_s390_set_tod_clock(struct kvm *kvm, u64 tod)
-{
-       struct kvm_vcpu *vcpu;
-       int i;
-
-       mutex_lock(&kvm->lock);
-       preempt_disable();
-       kvm->arch.epoch = tod - get_tod_clock();
-       kvm_s390_vcpu_block_all(kvm);
-       kvm_for_each_vcpu(i, vcpu, kvm)
-               vcpu->arch.sie_block->epoch = kvm->arch.epoch;
-       kvm_s390_vcpu_unblock_all(kvm);
-       preempt_enable();
-       mutex_unlock(&kvm->lock);
-}
-
 /**
  * kvm_arch_fault_in_page - fault-in guest page if necessary
  * @vcpu: The corresponding virtual cpu
index bd31b37..f55ac0e 100644
@@ -19,8 +19,6 @@
 #include <asm/processor.h>
 #include <asm/sclp.h>
 
-typedef int (*intercept_handler_t)(struct kvm_vcpu *vcpu);
-
 /* Transactional Memory Execution related macros */
 #define IS_TE_ENABLED(vcpu)    ((vcpu->arch.sie_block->ecb & ECB_TE))
 #define TDB_FORMAT1            1
@@ -283,9 +281,8 @@ int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu);
 int kvm_s390_handle_sigp_pei(struct kvm_vcpu *vcpu);
 
 /* implemented in kvm-s390.c */
-void kvm_s390_set_tod_clock_ext(struct kvm *kvm,
-                                const struct kvm_s390_vm_tod_clock *gtod);
-void kvm_s390_set_tod_clock(struct kvm *kvm, u64 tod);
+void kvm_s390_set_tod_clock(struct kvm *kvm,
+                           const struct kvm_s390_vm_tod_clock *gtod);
 long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable);
 int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long addr);
 int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr);
index c4c4e15..f0b4185 100644
@@ -85,9 +85,10 @@ int kvm_s390_handle_e3(struct kvm_vcpu *vcpu)
 /* Handle SCK (SET CLOCK) interception */
 static int handle_set_clock(struct kvm_vcpu *vcpu)
 {
+       struct kvm_s390_vm_tod_clock gtod = { 0 };
        int rc;
        u8 ar;
-       u64 op2, val;
+       u64 op2;
 
        vcpu->stat.instruction_sck++;
 
@@ -97,12 +98,12 @@ static int handle_set_clock(struct kvm_vcpu *vcpu)
        op2 = kvm_s390_get_base_disp_s(vcpu, &ar);
        if (op2 & 7)    /* Operand must be on a doubleword boundary */
                return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
-       rc = read_guest(vcpu, op2, ar, &val, sizeof(val));
+       rc = read_guest(vcpu, op2, ar, &gtod.tod, sizeof(gtod.tod));
        if (rc)
                return kvm_s390_inject_prog_cond(vcpu, rc);
 
-       VCPU_EVENT(vcpu, 3, "SCK: setting guest TOD to 0x%llx", val);
-       kvm_s390_set_tod_clock(vcpu->kvm, val);
+       VCPU_EVENT(vcpu, 3, "SCK: setting guest TOD to 0x%llx", gtod.tod);
+       kvm_s390_set_tod_clock(vcpu->kvm, &gtod);
 
        kvm_s390_set_psw_cc(vcpu, 0);
        return 0;
@@ -795,55 +796,60 @@ out:
        return rc;
 }
 
-static const intercept_handler_t b2_handlers[256] = {
-       [0x02] = handle_stidp,
-       [0x04] = handle_set_clock,
-       [0x10] = handle_set_prefix,
-       [0x11] = handle_store_prefix,
-       [0x12] = handle_store_cpu_address,
-       [0x14] = kvm_s390_handle_vsie,
-       [0x21] = handle_ipte_interlock,
-       [0x29] = handle_iske,
-       [0x2a] = handle_rrbe,
-       [0x2b] = handle_sske,
-       [0x2c] = handle_test_block,
-       [0x30] = handle_io_inst,
-       [0x31] = handle_io_inst,
-       [0x32] = handle_io_inst,
-       [0x33] = handle_io_inst,
-       [0x34] = handle_io_inst,
-       [0x35] = handle_io_inst,
-       [0x36] = handle_io_inst,
-       [0x37] = handle_io_inst,
-       [0x38] = handle_io_inst,
-       [0x39] = handle_io_inst,
-       [0x3a] = handle_io_inst,
-       [0x3b] = handle_io_inst,
-       [0x3c] = handle_io_inst,
-       [0x50] = handle_ipte_interlock,
-       [0x56] = handle_sthyi,
-       [0x5f] = handle_io_inst,
-       [0x74] = handle_io_inst,
-       [0x76] = handle_io_inst,
-       [0x7d] = handle_stsi,
-       [0xb1] = handle_stfl,
-       [0xb2] = handle_lpswe,
-};
-
 int kvm_s390_handle_b2(struct kvm_vcpu *vcpu)
 {
-       intercept_handler_t handler;
-
-       /*
-        * A lot of B2 instructions are priviledged. Here we check for
-        * the privileged ones, that we can handle in the kernel.
-        * Anything else goes to userspace.
-        */
-       handler = b2_handlers[vcpu->arch.sie_block->ipa & 0x00ff];
-       if (handler)
-               return handler(vcpu);
-
-       return -EOPNOTSUPP;
+       switch (vcpu->arch.sie_block->ipa & 0x00ff) {
+       case 0x02:
+               return handle_stidp(vcpu);
+       case 0x04:
+               return handle_set_clock(vcpu);
+       case 0x10:
+               return handle_set_prefix(vcpu);
+       case 0x11:
+               return handle_store_prefix(vcpu);
+       case 0x12:
+               return handle_store_cpu_address(vcpu);
+       case 0x14:
+               return kvm_s390_handle_vsie(vcpu);
+       case 0x21:
+       case 0x50:
+               return handle_ipte_interlock(vcpu);
+       case 0x29:
+               return handle_iske(vcpu);
+       case 0x2a:
+               return handle_rrbe(vcpu);
+       case 0x2b:
+               return handle_sske(vcpu);
+       case 0x2c:
+               return handle_test_block(vcpu);
+       case 0x30:
+       case 0x31:
+       case 0x32:
+       case 0x33:
+       case 0x34:
+       case 0x35:
+       case 0x36:
+       case 0x37:
+       case 0x38:
+       case 0x39:
+       case 0x3a:
+       case 0x3b:
+       case 0x3c:
+       case 0x5f:
+       case 0x74:
+       case 0x76:
+               return handle_io_inst(vcpu);
+       case 0x56:
+               return handle_sthyi(vcpu);
+       case 0x7d:
+               return handle_stsi(vcpu);
+       case 0xb1:
+               return handle_stfl(vcpu);
+       case 0xb2:
+               return handle_lpswe(vcpu);
+       default:
+               return -EOPNOTSUPP;
+       }
 }
 
 static int handle_epsw(struct kvm_vcpu *vcpu)
@@ -1105,25 +1111,22 @@ static int handle_essa(struct kvm_vcpu *vcpu)
        return 0;
 }
 
-static const intercept_handler_t b9_handlers[256] = {
-       [0x8a] = handle_ipte_interlock,
-       [0x8d] = handle_epsw,
-       [0x8e] = handle_ipte_interlock,
-       [0x8f] = handle_ipte_interlock,
-       [0xab] = handle_essa,
-       [0xaf] = handle_pfmf,
-};
-
 int kvm_s390_handle_b9(struct kvm_vcpu *vcpu)
 {
-       intercept_handler_t handler;
-
-       /* This is handled just as for the B2 instructions. */
-       handler = b9_handlers[vcpu->arch.sie_block->ipa & 0x00ff];
-       if (handler)
-               return handler(vcpu);
-
-       return -EOPNOTSUPP;
+       switch (vcpu->arch.sie_block->ipa & 0x00ff) {
+       case 0x8a:
+       case 0x8e:
+       case 0x8f:
+               return handle_ipte_interlock(vcpu);
+       case 0x8d:
+               return handle_epsw(vcpu);
+       case 0xab:
+               return handle_essa(vcpu);
+       case 0xaf:
+               return handle_pfmf(vcpu);
+       default:
+               return -EOPNOTSUPP;
+       }
 }
 
 int kvm_s390_handle_lctl(struct kvm_vcpu *vcpu)
@@ -1271,22 +1274,20 @@ static int handle_stctg(struct kvm_vcpu *vcpu)
        return rc ? kvm_s390_inject_prog_cond(vcpu, rc) : 0;
 }
 
-static const intercept_handler_t eb_handlers[256] = {
-       [0x2f] = handle_lctlg,
-       [0x25] = handle_stctg,
-       [0x60] = handle_ri,
-       [0x61] = handle_ri,
-       [0x62] = handle_ri,
-};
-
 int kvm_s390_handle_eb(struct kvm_vcpu *vcpu)
 {
-       intercept_handler_t handler;
-
-       handler = eb_handlers[vcpu->arch.sie_block->ipb & 0xff];
-       if (handler)
-               return handler(vcpu);
-       return -EOPNOTSUPP;
+       switch (vcpu->arch.sie_block->ipb & 0x000000ff) {
+       case 0x25:
+               return handle_stctg(vcpu);
+       case 0x2f:
+               return handle_lctlg(vcpu);
+       case 0x60:
+       case 0x61:
+       case 0x62:
+               return handle_ri(vcpu);
+       default:
+               return -EOPNOTSUPP;
+       }
 }
 
 static int handle_tprot(struct kvm_vcpu *vcpu)
@@ -1346,10 +1347,12 @@ out_unlock:
 
 int kvm_s390_handle_e5(struct kvm_vcpu *vcpu)
 {
-       /* For e5xx... instructions we only handle TPROT */
-       if ((vcpu->arch.sie_block->ipa & 0x00ff) == 0x01)
+       switch (vcpu->arch.sie_block->ipa & 0x00ff) {
+       case 0x01:
                return handle_tprot(vcpu);
-       return -EOPNOTSUPP;
+       default:
+               return -EOPNOTSUPP;
+       }
 }
 
 static int handle_sckpf(struct kvm_vcpu *vcpu)
@@ -1380,17 +1383,14 @@ static int handle_ptff(struct kvm_vcpu *vcpu)
        return 0;
 }
 
-static const intercept_handler_t x01_handlers[256] = {
-       [0x04] = handle_ptff,
-       [0x07] = handle_sckpf,
-};
-
 int kvm_s390_handle_01(struct kvm_vcpu *vcpu)
 {
-       intercept_handler_t handler;
-
-       handler = x01_handlers[vcpu->arch.sie_block->ipa & 0x00ff];
-       if (handler)
-               return handler(vcpu);
-       return -EOPNOTSUPP;
+       switch (vcpu->arch.sie_block->ipa & 0x00ff) {
+       case 0x04:
+               return handle_ptff(vcpu);
+       case 0x07:
+               return handle_sckpf(vcpu);
+       default:
+               return -EOPNOTSUPP;
+       }
 }
index ec77270..8961e39 100644
@@ -821,6 +821,7 @@ static int do_vsie_run(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
 {
        struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
        struct kvm_s390_sie_block *scb_o = vsie_page->scb_o;
+       int guest_bp_isolation;
        int rc;
 
        handle_last_fault(vcpu, vsie_page);
@@ -831,6 +832,20 @@ static int do_vsie_run(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
                s390_handle_mcck();
 
        srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
+
+       /* save current guest state of bp isolation override */
+       guest_bp_isolation = test_thread_flag(TIF_ISOLATE_BP_GUEST);
+
+       /*
+        * The guest is running with BPBC, so we have to force it on for our
+        * nested guest. This is done by enabling BPBC globally, so the BPBC
+        * control in the SCB (which the nested guest can modify) is simply
+        * ignored.
+        */
+       if (test_kvm_facility(vcpu->kvm, 82) &&
+           vcpu->arch.sie_block->fpf & FPF_BPBC)
+               set_thread_flag(TIF_ISOLATE_BP_GUEST);
+
        local_irq_disable();
        guest_enter_irqoff();
        local_irq_enable();
@@ -840,6 +855,11 @@ static int do_vsie_run(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
        local_irq_disable();
        guest_exit_irqoff();
        local_irq_enable();
+
+       /* restore guest state for bp isolation override */
+       if (!guest_bp_isolation)
+               clear_thread_flag(TIF_ISOLATE_BP_GUEST);
+
        vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
 
        if (rc == -EINTR) {
index 715def0..01d0f7f 100644
@@ -1 +1,3 @@
-obj-$(CONFIG_USE_BUILTIN_DTB) += $(patsubst "%",%,$(CONFIG_BUILTIN_DTB_SOURCE)).dtb.o
+ifneq ($(CONFIG_BUILTIN_DTB_SOURCE),"")
+obj-y += $(patsubst "%",%,$(CONFIG_BUILTIN_DTB_SOURCE)).dtb.o
+endif
index c1236b1..eb7f43f 100644
@@ -430,6 +430,7 @@ config GOLDFISH
 config RETPOLINE
        bool "Avoid speculative indirect branches in kernel"
        default y
+       select STACK_VALIDATION if HAVE_STACK_VALIDATION
        help
          Compile kernel with the retpoline compiler options to guard against
          kernel-to-user data leaks by avoiding speculative indirect
index fad5516..498c1b8 100644
@@ -232,10 +232,9 @@ KBUILD_CFLAGS += -fno-asynchronous-unwind-tables
 
 # Avoid indirect branches in kernel to deal with Spectre
 ifdef CONFIG_RETPOLINE
-    RETPOLINE_CFLAGS += $(call cc-option,-mindirect-branch=thunk-extern -mindirect-branch-register)
-    ifneq ($(RETPOLINE_CFLAGS),)
-        KBUILD_CFLAGS += $(RETPOLINE_CFLAGS) -DRETPOLINE
-    endif
+ifneq ($(RETPOLINE_CFLAGS),)
+  KBUILD_CFLAGS += $(RETPOLINE_CFLAGS) -DRETPOLINE
+endif
 endif
 
 archscripts: scripts_basic
index 353e20c..886a911 100644
@@ -439,7 +439,7 @@ setup_uga32(void **uga_handle, unsigned long size, u32 *width, u32 *height)
        struct efi_uga_draw_protocol *uga = NULL, *first_uga;
        efi_guid_t uga_proto = EFI_UGA_PROTOCOL_GUID;
        unsigned long nr_ugas;
-       u32 *handles = (u32 *)uga_handle;;
+       u32 *handles = (u32 *)uga_handle;
        efi_status_t status = EFI_INVALID_PARAMETER;
        int i;
 
@@ -484,7 +484,7 @@ setup_uga64(void **uga_handle, unsigned long size, u32 *width, u32 *height)
        struct efi_uga_draw_protocol *uga = NULL, *first_uga;
        efi_guid_t uga_proto = EFI_UGA_PROTOCOL_GUID;
        unsigned long nr_ugas;
-       u64 *handles = (u64 *)uga_handle;;
+       u64 *handles = (u64 *)uga_handle;
        efi_status_t status = EFI_INVALID_PARAMETER;
        int i;
 
index dce7092..be63330 100644
@@ -97,7 +97,7 @@ For 32-bit we have the following conventions - kernel is built with
 
 #define SIZEOF_PTREGS  21*8
 
-.macro PUSH_AND_CLEAR_REGS rdx=%rdx rax=%rax
+.macro PUSH_AND_CLEAR_REGS rdx=%rdx rax=%rax save_ret=0
        /*
         * Push registers and sanitize registers of values that a
         * speculation attack might otherwise want to exploit. The
@@ -105,32 +105,41 @@ For 32-bit we have the following conventions - kernel is built with
         * could be put to use in a speculative execution gadget.
         * Interleave XOR with PUSH for better uop scheduling:
         */
+       .if \save_ret
+       pushq   %rsi            /* pt_regs->si */
+       movq    8(%rsp), %rsi   /* temporarily store the return address in %rsi */
+       movq    %rdi, 8(%rsp)   /* pt_regs->di (overwriting original return address) */
+       .else
        pushq   %rdi            /* pt_regs->di */
        pushq   %rsi            /* pt_regs->si */
+       .endif
        pushq   \rdx            /* pt_regs->dx */
        pushq   %rcx            /* pt_regs->cx */
        pushq   \rax            /* pt_regs->ax */
        pushq   %r8             /* pt_regs->r8 */
-       xorq    %r8, %r8        /* nospec   r8 */
+       xorl    %r8d, %r8d      /* nospec   r8 */
        pushq   %r9             /* pt_regs->r9 */
-       xorq    %r9, %r9        /* nospec   r9 */
+       xorl    %r9d, %r9d      /* nospec   r9 */
        pushq   %r10            /* pt_regs->r10 */
-       xorq    %r10, %r10      /* nospec   r10 */
+       xorl    %r10d, %r10d    /* nospec   r10 */
        pushq   %r11            /* pt_regs->r11 */
-       xorq    %r11, %r11      /* nospec   r11*/
+       xorl    %r11d, %r11d    /* nospec   r11*/
        pushq   %rbx            /* pt_regs->rbx */
        xorl    %ebx, %ebx      /* nospec   rbx*/
        pushq   %rbp            /* pt_regs->rbp */
        xorl    %ebp, %ebp      /* nospec   rbp*/
        pushq   %r12            /* pt_regs->r12 */
-       xorq    %r12, %r12      /* nospec   r12*/
+       xorl    %r12d, %r12d    /* nospec   r12*/
        pushq   %r13            /* pt_regs->r13 */
-       xorq    %r13, %r13      /* nospec   r13*/
+       xorl    %r13d, %r13d    /* nospec   r13*/
        pushq   %r14            /* pt_regs->r14 */
-       xorq    %r14, %r14      /* nospec   r14*/
+       xorl    %r14d, %r14d    /* nospec   r14*/
        pushq   %r15            /* pt_regs->r15 */
-       xorq    %r15, %r15      /* nospec   r15*/
+       xorl    %r15d, %r15d    /* nospec   r15*/
        UNWIND_HINT_REGS
+       .if \save_ret
+       pushq   %rsi            /* return address on top of stack */
+       .endif
 .endm
 
 .macro POP_REGS pop_rdi=1 skip_r11rcx=0
@@ -172,12 +181,7 @@ For 32-bit we have the following conventions - kernel is built with
  */
 .macro ENCODE_FRAME_POINTER ptregs_offset=0
 #ifdef CONFIG_FRAME_POINTER
-       .if \ptregs_offset
-               leaq \ptregs_offset(%rsp), %rbp
-       .else
-               mov %rsp, %rbp
-       .endif
-       orq     $0x1, %rbp
+       leaq 1+\ptregs_offset(%rsp), %rbp
 #endif
 .endm
 
index 16c2c02..6ad064c 100644
@@ -252,8 +252,7 @@ ENTRY(__switch_to_asm)
         * exist, overwrite the RSB with entries which capture
         * speculative execution to prevent attack.
         */
-       /* Clobbers %ebx */
-       FILL_RETURN_BUFFER RSB_CLEAR_LOOPS, X86_FEATURE_RSB_CTXSW
+       FILL_RETURN_BUFFER %ebx, RSB_CLEAR_LOOPS, X86_FEATURE_RSB_CTXSW
 #endif
 
        /* restore callee-saved registers */
index 8971bd6..805f527 100644
@@ -55,7 +55,7 @@ END(native_usergs_sysret64)
 
 .macro TRACE_IRQS_FLAGS flags:req
 #ifdef CONFIG_TRACE_IRQFLAGS
-       bt      $9, \flags              /* interrupts off? */
+       btl     $9, \flags              /* interrupts off? */
        jnc     1f
        TRACE_IRQS_ON
 1:
@@ -364,8 +364,7 @@ ENTRY(__switch_to_asm)
         * exist, overwrite the RSB with entries which capture
         * speculative execution to prevent attack.
         */
-       /* Clobbers %rbx */
-       FILL_RETURN_BUFFER RSB_CLEAR_LOOPS, X86_FEATURE_RSB_CTXSW
+       FILL_RETURN_BUFFER %r12, RSB_CLEAR_LOOPS, X86_FEATURE_RSB_CTXSW
 #endif
 
        /* restore callee-saved registers */
@@ -449,9 +448,19 @@ END(irq_entries_start)
  *
  * The invariant is that, if irq_count != -1, then the IRQ stack is in use.
  */
-.macro ENTER_IRQ_STACK regs=1 old_rsp
+.macro ENTER_IRQ_STACK regs=1 old_rsp save_ret=0
        DEBUG_ENTRY_ASSERT_IRQS_OFF
+
+       .if \save_ret
+       /*
+        * If save_ret is set, the original stack contains one additional
+        * entry -- the return address. Therefore, move the address one
+        * entry below %rsp to \old_rsp.
+        */
+       leaq    8(%rsp), \old_rsp
+       .else
        movq    %rsp, \old_rsp
+       .endif
 
        .if \regs
        UNWIND_HINT_REGS base=\old_rsp
@@ -497,6 +506,15 @@ END(irq_entries_start)
        .if \regs
        UNWIND_HINT_REGS indirect=1
        .endif
+
+       .if \save_ret
+       /*
+        * Push the return address to the stack. This return address can
+        * be found at the "real" original RSP, which was offset by 8 at
+        * the beginning of this macro.
+        */
+       pushq   -8(\old_rsp)
+       .endif
 .endm
 
 /*
@@ -520,27 +538,65 @@ END(irq_entries_start)
 .endm
 
 /*
- * Interrupt entry/exit.
- *
- * Interrupt entry points save only callee clobbered registers in fast path.
+ * Interrupt entry helper function.
  *
- * Entry runs with interrupts off.
+ * Entry runs with interrupts off. Stack layout at entry:
+ * +----------------------------------------------------+
+ * | regs->ss                                          |
+ * | regs->rsp                                         |
+ * | regs->eflags                                      |
+ * | regs->cs                                          |
+ * | regs->ip                                          |
+ * +----------------------------------------------------+
+ * | regs->orig_ax = ~(interrupt number)               |
+ * +----------------------------------------------------+
+ * | return address                                    |
+ * +----------------------------------------------------+
  */
-
-/* 0(%rsp): ~(interrupt number) */
-       .macro interrupt func
+ENTRY(interrupt_entry)
+       UNWIND_HINT_FUNC
+       ASM_CLAC
        cld
 
-       testb   $3, CS-ORIG_RAX(%rsp)
+       testb   $3, CS-ORIG_RAX+8(%rsp)
        jz      1f
        SWAPGS
-       call    switch_to_thread_stack
+
+       /*
+        * Switch to the thread stack. The IRET frame and orig_ax are
+        * on the stack, as well as the return address. RDI..R12 are
+        * not (yet) on the stack and space has not (yet) been
+        * allocated for them.
+        */
+       pushq   %rdi
+
+       /* Need to switch before accessing the thread stack. */
+       SWITCH_TO_KERNEL_CR3 scratch_reg=%rdi
+       movq    %rsp, %rdi
+       movq    PER_CPU_VAR(cpu_current_top_of_stack), %rsp
+
+        /*
+         * We have RDI, return address, and orig_ax on the stack on
+         * top of the IRET frame. That means offset=24
+         */
+       UNWIND_HINT_IRET_REGS base=%rdi offset=24
+
+       pushq   7*8(%rdi)               /* regs->ss */
+       pushq   6*8(%rdi)               /* regs->rsp */
+       pushq   5*8(%rdi)               /* regs->eflags */
+       pushq   4*8(%rdi)               /* regs->cs */
+       pushq   3*8(%rdi)               /* regs->ip */
+       pushq   2*8(%rdi)               /* regs->orig_ax */
+       pushq   8(%rdi)                 /* return address */
+       UNWIND_HINT_FUNC
+
+       movq    (%rdi), %rdi
 1:
 
-       PUSH_AND_CLEAR_REGS
-       ENCODE_FRAME_POINTER
+       PUSH_AND_CLEAR_REGS save_ret=1
+       ENCODE_FRAME_POINTER 8
 
-       testb   $3, CS(%rsp)
+       testb   $3, CS+8(%rsp)
        jz      1f
 
        /*
@@ -548,7 +604,7 @@ END(irq_entries_start)
         *
         * We need to tell lockdep that IRQs are off.  We can't do this until
         * we fix gsbase, and we should do it before enter_from_user_mode
-        * (which can take locks).  Since TRACE_IRQS_OFF idempotent,
+        * (which can take locks).  Since TRACE_IRQS_OFF is idempotent,
         * the simplest way to handle it is to just call it twice if
         * we enter from user mode.  There's no reason to optimize this since
         * TRACE_IRQS_OFF is a no-op if lockdep is off.
@@ -558,12 +614,15 @@ END(irq_entries_start)
        CALL_enter_from_user_mode
 
 1:
-       ENTER_IRQ_STACK old_rsp=%rdi
+       ENTER_IRQ_STACK old_rsp=%rdi save_ret=1
        /* We entered an interrupt context - irqs are off: */
        TRACE_IRQS_OFF
 
-       call    \func   /* rdi points to pt_regs */
-       .endm
+       ret
+END(interrupt_entry)
+
+
+/* Interrupt entry/exit. */
 
        /*
         * The interrupt stubs push (~vector+0x80) onto the stack and
@@ -571,9 +630,10 @@ END(irq_entries_start)
         */
        .p2align CONFIG_X86_L1_CACHE_SHIFT
 common_interrupt:
-       ASM_CLAC
        addq    $-0x80, (%rsp)                  /* Adjust vector to [-256, -1] range */
-       interrupt do_IRQ
+       call    interrupt_entry
+       UNWIND_HINT_REGS indirect=1
+       call    do_IRQ  /* rdi points to pt_regs */
        /* 0(%rsp): old RSP */
 ret_from_intr:
        DISABLE_INTERRUPTS(CLBR_ANY)
@@ -766,10 +826,11 @@ END(common_interrupt)
 .macro apicinterrupt3 num sym do_sym
 ENTRY(\sym)
        UNWIND_HINT_IRET_REGS
-       ASM_CLAC
        pushq   $~(\num)
 .Lcommon_\sym:
-       interrupt \do_sym
+       call    interrupt_entry
+       UNWIND_HINT_REGS indirect=1
+       call    \do_sym /* rdi points to pt_regs */
        jmp     ret_from_intr
 END(\sym)
 .endm
@@ -832,34 +893,6 @@ apicinterrupt IRQ_WORK_VECTOR                      irq_work_interrupt              smp_irq_work_interrupt
  */
 #define CPU_TSS_IST(x) PER_CPU_VAR(cpu_tss_rw) + (TSS_ist + ((x) - 1) * 8)
 
-/*
- * Switch to the thread stack.  This is called with the IRET frame and
- * orig_ax on the stack.  (That is, RDI..R12 are not on the stack and
- * space has not been allocated for them.)
- */
-ENTRY(switch_to_thread_stack)
-       UNWIND_HINT_FUNC
-
-       pushq   %rdi
-       /* Need to switch before accessing the thread stack. */
-       SWITCH_TO_KERNEL_CR3 scratch_reg=%rdi
-       movq    %rsp, %rdi
-       movq    PER_CPU_VAR(cpu_current_top_of_stack), %rsp
-       UNWIND_HINT sp_offset=16 sp_reg=ORC_REG_DI
-
-       pushq   7*8(%rdi)               /* regs->ss */
-       pushq   6*8(%rdi)               /* regs->rsp */
-       pushq   5*8(%rdi)               /* regs->eflags */
-       pushq   4*8(%rdi)               /* regs->cs */
-       pushq   3*8(%rdi)               /* regs->ip */
-       pushq   2*8(%rdi)               /* regs->orig_ax */
-       pushq   8(%rdi)                 /* return address */
-       UNWIND_HINT_FUNC
-
-       movq    (%rdi), %rdi
-       ret
-END(switch_to_thread_stack)
-
 .macro idtentry sym do_sym has_error_code:req paranoid=0 shift_ist=-1
 ENTRY(\sym)
        UNWIND_HINT_IRET_REGS offset=\has_error_code*8
@@ -875,12 +908,8 @@ ENTRY(\sym)
        pushq   $-1                             /* ORIG_RAX: no syscall to restart */
        .endif
 
-       /* Save all registers in pt_regs */
-       PUSH_AND_CLEAR_REGS
-       ENCODE_FRAME_POINTER
-
        .if \paranoid < 2
-       testb   $3, CS(%rsp)                    /* If coming from userspace, switch stacks */
+       testb   $3, CS-ORIG_RAX(%rsp)           /* If coming from userspace, switch stacks */
        jnz     .Lfrom_usermode_switch_stack_\@
        .endif
 
@@ -1130,13 +1159,15 @@ idtentry machine_check          do_mce                  has_error_code=0        paranoid=1
 #endif
 
 /*
- * Switch gs if needed.
+ * Save all registers in pt_regs, and switch gs if needed.
  * Use slow, but surefire "are we in kernel?" check.
  * Return: ebx=0: need swapgs on exit, ebx=1: otherwise
  */
 ENTRY(paranoid_entry)
        UNWIND_HINT_FUNC
        cld
+       PUSH_AND_CLEAR_REGS save_ret=1
+       ENCODE_FRAME_POINTER 8
        movl    $1, %ebx
        movl    $MSR_GS_BASE, %ecx
        rdmsr
@@ -1181,12 +1212,14 @@ ENTRY(paranoid_exit)
 END(paranoid_exit)
 
 /*
- * Switch gs if needed.
+ * Save all registers in pt_regs, and switch GS if needed.
  * Return: EBX=0: came from user mode; EBX=1: otherwise
  */
 ENTRY(error_entry)
-       UNWIND_HINT_REGS offset=8
+       UNWIND_HINT_FUNC
        cld
+       PUSH_AND_CLEAR_REGS save_ret=1
+       ENCODE_FRAME_POINTER 8
        testb   $3, CS+8(%rsp)
        jz      .Lerror_kernelspace
 
@@ -1577,8 +1610,6 @@ end_repeat_nmi:
         * frame to point back to repeat_nmi.
         */
        pushq   $-1                             /* ORIG_RAX: no syscall to restart */
-       PUSH_AND_CLEAR_REGS
-       ENCODE_FRAME_POINTER
 
        /*
         * Use paranoid_entry to handle SWAPGS, but no need to use paranoid_exit
index fd65e01..e811dd9 100644
@@ -85,25 +85,25 @@ ENTRY(entry_SYSENTER_compat)
        pushq   %rcx                    /* pt_regs->cx */
        pushq   $-ENOSYS                /* pt_regs->ax */
        pushq   $0                      /* pt_regs->r8  = 0 */
-       xorq    %r8, %r8                /* nospec   r8 */
+       xorl    %r8d, %r8d              /* nospec   r8 */
        pushq   $0                      /* pt_regs->r9  = 0 */
-       xorq    %r9, %r9                /* nospec   r9 */
+       xorl    %r9d, %r9d              /* nospec   r9 */
        pushq   $0                      /* pt_regs->r10 = 0 */
-       xorq    %r10, %r10              /* nospec   r10 */
+       xorl    %r10d, %r10d            /* nospec   r10 */
        pushq   $0                      /* pt_regs->r11 = 0 */
-       xorq    %r11, %r11              /* nospec   r11 */
+       xorl    %r11d, %r11d            /* nospec   r11 */
        pushq   %rbx                    /* pt_regs->rbx */
        xorl    %ebx, %ebx              /* nospec   rbx */
        pushq   %rbp                    /* pt_regs->rbp (will be overwritten) */
        xorl    %ebp, %ebp              /* nospec   rbp */
        pushq   $0                      /* pt_regs->r12 = 0 */
-       xorq    %r12, %r12              /* nospec   r12 */
+       xorl    %r12d, %r12d            /* nospec   r12 */
        pushq   $0                      /* pt_regs->r13 = 0 */
-       xorq    %r13, %r13              /* nospec   r13 */
+       xorl    %r13d, %r13d            /* nospec   r13 */
        pushq   $0                      /* pt_regs->r14 = 0 */
-       xorq    %r14, %r14              /* nospec   r14 */
+       xorl    %r14d, %r14d            /* nospec   r14 */
        pushq   $0                      /* pt_regs->r15 = 0 */
-       xorq    %r15, %r15              /* nospec   r15 */
+       xorl    %r15d, %r15d            /* nospec   r15 */
        cld
 
        /*
@@ -224,25 +224,25 @@ GLOBAL(entry_SYSCALL_compat_after_hwframe)
        pushq   %rbp                    /* pt_regs->cx (stashed in bp) */
        pushq   $-ENOSYS                /* pt_regs->ax */
        pushq   $0                      /* pt_regs->r8  = 0 */
-       xorq    %r8, %r8                /* nospec   r8 */
+       xorl    %r8d, %r8d              /* nospec   r8 */
        pushq   $0                      /* pt_regs->r9  = 0 */
-       xorq    %r9, %r9                /* nospec   r9 */
+       xorl    %r9d, %r9d              /* nospec   r9 */
        pushq   $0                      /* pt_regs->r10 = 0 */
-       xorq    %r10, %r10              /* nospec   r10 */
+       xorl    %r10d, %r10d            /* nospec   r10 */
        pushq   $0                      /* pt_regs->r11 = 0 */
-       xorq    %r11, %r11              /* nospec   r11 */
+       xorl    %r11d, %r11d            /* nospec   r11 */
        pushq   %rbx                    /* pt_regs->rbx */
        xorl    %ebx, %ebx              /* nospec   rbx */
        pushq   %rbp                    /* pt_regs->rbp (will be overwritten) */
        xorl    %ebp, %ebp              /* nospec   rbp */
        pushq   $0                      /* pt_regs->r12 = 0 */
-       xorq    %r12, %r12              /* nospec   r12 */
+       xorl    %r12d, %r12d            /* nospec   r12 */
        pushq   $0                      /* pt_regs->r13 = 0 */
-       xorq    %r13, %r13              /* nospec   r13 */
+       xorl    %r13d, %r13d            /* nospec   r13 */
        pushq   $0                      /* pt_regs->r14 = 0 */
-       xorq    %r14, %r14              /* nospec   r14 */
+       xorl    %r14d, %r14d            /* nospec   r14 */
        pushq   $0                      /* pt_regs->r15 = 0 */
-       xorq    %r15, %r15              /* nospec   r15 */
+       xorl    %r15d, %r15d            /* nospec   r15 */
 
        /*
         * User mode is traced as though IRQs are on, and SYSENTER
@@ -298,9 +298,9 @@ sysret32_from_system_call:
         */
        SWITCH_TO_USER_CR3_NOSTACK scratch_reg=%r8 scratch_reg2=%r9
 
-       xorq    %r8, %r8
-       xorq    %r9, %r9
-       xorq    %r10, %r10
+       xorl    %r8d, %r8d
+       xorl    %r9d, %r9d
+       xorl    %r10d, %r10d
        swapgs
        sysretl
 END(entry_SYSCALL_compat)
@@ -347,10 +347,23 @@ ENTRY(entry_INT80_compat)
         */
        movl    %eax, %eax
 
+       /* switch to thread stack expects orig_ax and rdi to be pushed */
        pushq   %rax                    /* pt_regs->orig_ax */
+       pushq   %rdi                    /* pt_regs->di */
+
+       /* Need to switch before accessing the thread stack. */
+       SWITCH_TO_KERNEL_CR3 scratch_reg=%rdi
+       movq    %rsp, %rdi
+       movq    PER_CPU_VAR(cpu_current_top_of_stack), %rsp
+
+       pushq   6*8(%rdi)               /* regs->ss */
+       pushq   5*8(%rdi)               /* regs->rsp */
+       pushq   4*8(%rdi)               /* regs->eflags */
+       pushq   3*8(%rdi)               /* regs->cs */
+       pushq   2*8(%rdi)               /* regs->ip */
+       pushq   1*8(%rdi)               /* regs->orig_ax */
 
-       /* switch to thread stack expects orig_ax to be pushed */
-       call    switch_to_thread_stack
+       movq    (%rdi), %rdi            /* restore %rdi */
 
        pushq   %rdi                    /* pt_regs->di */
        pushq   %rsi                    /* pt_regs->si */
@@ -358,25 +371,25 @@ ENTRY(entry_INT80_compat)
        pushq   %rcx                    /* pt_regs->cx */
        pushq   $-ENOSYS                /* pt_regs->ax */
        pushq   $0                      /* pt_regs->r8  = 0 */
-       xorq    %r8, %r8                /* nospec   r8 */
+       xorl    %r8d, %r8d              /* nospec   r8 */
        pushq   $0                      /* pt_regs->r9  = 0 */
-       xorq    %r9, %r9                /* nospec   r9 */
+       xorl    %r9d, %r9d              /* nospec   r9 */
        pushq   $0                      /* pt_regs->r10 = 0 */
-       xorq    %r10, %r10              /* nospec   r10 */
+       xorl    %r10d, %r10d            /* nospec   r10 */
        pushq   $0                      /* pt_regs->r11 = 0 */
-       xorq    %r11, %r11              /* nospec   r11 */
+       xorl    %r11d, %r11d            /* nospec   r11 */
        pushq   %rbx                    /* pt_regs->rbx */
        xorl    %ebx, %ebx              /* nospec   rbx */
        pushq   %rbp                    /* pt_regs->rbp */
        xorl    %ebp, %ebp              /* nospec   rbp */
        pushq   %r12                    /* pt_regs->r12 */
-       xorq    %r12, %r12              /* nospec   r12 */
+       xorl    %r12d, %r12d            /* nospec   r12 */
        pushq   %r13                    /* pt_regs->r13 */
-       xorq    %r13, %r13              /* nospec   r13 */
+       xorl    %r13d, %r13d            /* nospec   r13 */
        pushq   %r14                    /* pt_regs->r14 */
-       xorq    %r14, %r14              /* nospec   r14 */
+       xorl    %r14d, %r14d            /* nospec   r14 */
        pushq   %r15                    /* pt_regs->r15 */
-       xorq    %r15, %r15              /* nospec   r15 */
+       xorl    %r15d, %r15d            /* nospec   r15 */
        cld
 
        /*
index 4d4015d..c356098 100644
@@ -7,6 +7,8 @@
 #ifndef _ASM_X86_MACH_DEFAULT_APM_H
 #define _ASM_X86_MACH_DEFAULT_APM_H
 
+#include <asm/nospec-branch.h>
+
 #ifdef APM_ZERO_SEGS
 #      define APM_DO_ZERO_SEGS \
                "pushl %%ds\n\t" \
@@ -32,6 +34,7 @@ static inline void apm_bios_call_asm(u32 func, u32 ebx_in, u32 ecx_in,
         * N.B. We do NOT need a cld after the BIOS call
         * because we always save and restore the flags.
         */
+       firmware_restrict_branch_speculation_start();
        __asm__ __volatile__(APM_DO_ZERO_SEGS
                "pushl %%edi\n\t"
                "pushl %%ebp\n\t"
@@ -44,6 +47,7 @@ static inline void apm_bios_call_asm(u32 func, u32 ebx_in, u32 ecx_in,
                  "=S" (*esi)
                : "a" (func), "b" (ebx_in), "c" (ecx_in)
                : "memory", "cc");
+       firmware_restrict_branch_speculation_end();
 }
 
 static inline bool apm_bios_call_simple_asm(u32 func, u32 ebx_in,
@@ -56,6 +60,7 @@ static inline bool apm_bios_call_simple_asm(u32 func, u32 ebx_in,
         * N.B. We do NOT need a cld after the BIOS call
         * because we always save and restore the flags.
         */
+       firmware_restrict_branch_speculation_start();
        __asm__ __volatile__(APM_DO_ZERO_SEGS
                "pushl %%edi\n\t"
                "pushl %%ebp\n\t"
@@ -68,6 +73,7 @@ static inline bool apm_bios_call_simple_asm(u32 func, u32 ebx_in,
                  "=S" (si)
                : "a" (func), "b" (ebx_in), "c" (ecx_in)
                : "memory", "cc");
+       firmware_restrict_branch_speculation_end();
        return error;
 }
 
index 4d11161..1908214 100644
@@ -38,7 +38,4 @@ INDIRECT_THUNK(dx)
 INDIRECT_THUNK(si)
 INDIRECT_THUNK(di)
 INDIRECT_THUNK(bp)
-asmlinkage void __fill_rsb(void);
-asmlinkage void __clear_rsb(void);
-
 #endif /* CONFIG_RETPOLINE */
index 3fa0398..9f645ba 100644
@@ -78,7 +78,7 @@ set_bit(long nr, volatile unsigned long *addr)
                        : "iq" ((u8)CONST_MASK(nr))
                        : "memory");
        } else {
-               asm volatile(LOCK_PREFIX "bts %1,%0"
+               asm volatile(LOCK_PREFIX __ASM_SIZE(bts) " %1,%0"
                        : BITOP_ADDR(addr) : "Ir" (nr) : "memory");
        }
 }
@@ -94,7 +94,7 @@ set_bit(long nr, volatile unsigned long *addr)
  */
 static __always_inline void __set_bit(long nr, volatile unsigned long *addr)
 {
-       asm volatile("bts %1,%0" : ADDR : "Ir" (nr) : "memory");
+       asm volatile(__ASM_SIZE(bts) " %1,%0" : ADDR : "Ir" (nr) : "memory");
 }
 
 /**
@@ -115,7 +115,7 @@ clear_bit(long nr, volatile unsigned long *addr)
                        : CONST_MASK_ADDR(nr, addr)
                        : "iq" ((u8)~CONST_MASK(nr)));
        } else {
-               asm volatile(LOCK_PREFIX "btr %1,%0"
+               asm volatile(LOCK_PREFIX __ASM_SIZE(btr) " %1,%0"
                        : BITOP_ADDR(addr)
                        : "Ir" (nr));
        }
@@ -137,7 +137,7 @@ static __always_inline void clear_bit_unlock(long nr, volatile unsigned long *ad
 
 static __always_inline void __clear_bit(long nr, volatile unsigned long *addr)
 {
-       asm volatile("btr %1,%0" : ADDR : "Ir" (nr));
+       asm volatile(__ASM_SIZE(btr) " %1,%0" : ADDR : "Ir" (nr));
 }
 
 static __always_inline bool clear_bit_unlock_is_negative_byte(long nr, volatile unsigned long *addr)
@@ -182,7 +182,7 @@ static __always_inline void __clear_bit_unlock(long nr, volatile unsigned long *
  */
 static __always_inline void __change_bit(long nr, volatile unsigned long *addr)
 {
-       asm volatile("btc %1,%0" : ADDR : "Ir" (nr));
+       asm volatile(__ASM_SIZE(btc) " %1,%0" : ADDR : "Ir" (nr));
 }
 
 /**
@@ -201,7 +201,7 @@ static __always_inline void change_bit(long nr, volatile unsigned long *addr)
                        : CONST_MASK_ADDR(nr, addr)
                        : "iq" ((u8)CONST_MASK(nr)));
        } else {
-               asm volatile(LOCK_PREFIX "btc %1,%0"
+               asm volatile(LOCK_PREFIX __ASM_SIZE(btc) " %1,%0"
                        : BITOP_ADDR(addr)
                        : "Ir" (nr));
        }
@@ -217,7 +217,8 @@ static __always_inline void change_bit(long nr, volatile unsigned long *addr)
  */
 static __always_inline bool test_and_set_bit(long nr, volatile unsigned long *addr)
 {
-       GEN_BINARY_RMWcc(LOCK_PREFIX "bts", *addr, "Ir", nr, "%0", c);
+       GEN_BINARY_RMWcc(LOCK_PREFIX __ASM_SIZE(bts),
+                        *addr, "Ir", nr, "%0", c);
 }
 
 /**
@@ -246,7 +247,7 @@ static __always_inline bool __test_and_set_bit(long nr, volatile unsigned long *
 {
        bool oldbit;
 
-       asm("bts %2,%1"
+       asm(__ASM_SIZE(bts) " %2,%1"
            CC_SET(c)
            : CC_OUT(c) (oldbit), ADDR
            : "Ir" (nr));
@@ -263,7 +264,8 @@ static __always_inline bool __test_and_set_bit(long nr, volatile unsigned long *
  */
 static __always_inline bool test_and_clear_bit(long nr, volatile unsigned long *addr)
 {
-       GEN_BINARY_RMWcc(LOCK_PREFIX "btr", *addr, "Ir", nr, "%0", c);
+       GEN_BINARY_RMWcc(LOCK_PREFIX __ASM_SIZE(btr),
+                        *addr, "Ir", nr, "%0", c);
 }
 
 /**
@@ -286,7 +288,7 @@ static __always_inline bool __test_and_clear_bit(long nr, volatile unsigned long
 {
        bool oldbit;
 
-       asm volatile("btr %2,%1"
+       asm volatile(__ASM_SIZE(btr) " %2,%1"
                     CC_SET(c)
                     : CC_OUT(c) (oldbit), ADDR
                     : "Ir" (nr));
@@ -298,7 +300,7 @@ static __always_inline bool __test_and_change_bit(long nr, volatile unsigned lon
 {
        bool oldbit;
 
-       asm volatile("btc %2,%1"
+       asm volatile(__ASM_SIZE(btc) " %2,%1"
                     CC_SET(c)
                     : CC_OUT(c) (oldbit), ADDR
                     : "Ir" (nr) : "memory");
@@ -316,7 +318,8 @@ static __always_inline bool __test_and_change_bit(long nr, volatile unsigned lon
  */
 static __always_inline bool test_and_change_bit(long nr, volatile unsigned long *addr)
 {
-       GEN_BINARY_RMWcc(LOCK_PREFIX "btc", *addr, "Ir", nr, "%0", c);
+       GEN_BINARY_RMWcc(LOCK_PREFIX __ASM_SIZE(btc),
+                        *addr, "Ir", nr, "%0", c);
 }
 
 static __always_inline bool constant_test_bit(long nr, const volatile unsigned long *addr)
@@ -329,7 +332,7 @@ static __always_inline bool variable_test_bit(long nr, volatile const unsigned l
 {
        bool oldbit;
 
-       asm volatile("bt %2,%1"
+       asm volatile(__ASM_SIZE(bt) " %2,%1"
                     CC_SET(c)
                     : CC_OUT(c) (oldbit)
                     : "m" (*(unsigned long *)addr), "Ir" (nr));
index 0dfe4d3..f41079d 100644
 #define X86_FEATURE_SEV                        ( 7*32+20) /* AMD Secure Encrypted Virtualization */
 
 #define X86_FEATURE_USE_IBPB           ( 7*32+21) /* "" Indirect Branch Prediction Barrier enabled */
+#define X86_FEATURE_USE_IBRS_FW                ( 7*32+22) /* "" Use IBRS during runtime firmware calls */
 
 /* Virtualization flags: Linux defined, word 8 */
 #define X86_FEATURE_TPR_SHADOW         ( 8*32+ 0) /* Intel TPR Shadow */
index 85f6ccb..a399c1e 100644
@@ -6,6 +6,7 @@
 #include <asm/pgtable.h>
 #include <asm/processor-flags.h>
 #include <asm/tlb.h>
+#include <asm/nospec-branch.h>
 
 /*
  * We map the EFI regions needed for runtime services non-contiguously,
 
 extern asmlinkage unsigned long efi_call_phys(void *, ...);
 
-#define arch_efi_call_virt_setup()     kernel_fpu_begin()
-#define arch_efi_call_virt_teardown()  kernel_fpu_end()
+#define arch_efi_call_virt_setup()                                     \
+({                                                                     \
+       kernel_fpu_begin();                                             \
+       firmware_restrict_branch_speculation_start();                   \
+})
+
+#define arch_efi_call_virt_teardown()                                  \
+({                                                                     \
+       firmware_restrict_branch_speculation_end();                     \
+       kernel_fpu_end();                                               \
+})
+
 
 /*
  * Wrap all the virtual calls in a way that forces the parameters on the stack.
@@ -73,6 +84,7 @@ struct efi_scratch {
        efi_sync_low_kernel_mappings();                                 \
        preempt_disable();                                              \
        __kernel_fpu_begin();                                           \
+       firmware_restrict_branch_speculation_start();                   \
                                                                        \
        if (efi_scratch.use_pgd) {                                      \
                efi_scratch.prev_cr3 = __read_cr3();                    \
@@ -91,6 +103,7 @@ struct efi_scratch {
                __flush_tlb_all();                                      \
        }                                                               \
                                                                        \
+       firmware_restrict_branch_speculation_end();                     \
        __kernel_fpu_end();                                             \
        preempt_enable();                                               \
 })
index dd6f57a..b605a5b 100644
@@ -507,6 +507,7 @@ struct kvm_vcpu_arch {
        u64 smi_count;
        bool tpr_access_reporting;
        u64 ia32_xss;
+       u64 microcode_version;
 
        /*
         * Paging state of the vcpu
@@ -1095,6 +1096,8 @@ struct kvm_x86_ops {
        int (*mem_enc_op)(struct kvm *kvm, void __user *argp);
        int (*mem_enc_reg_region)(struct kvm *kvm, struct kvm_enc_region *argp);
        int (*mem_enc_unreg_region)(struct kvm *kvm, struct kvm_enc_region *argp);
+
+       int (*get_msr_feature)(struct kvm_msr_entry *entry);
 };
 
 struct kvm_arch_async_pf {
@@ -1464,7 +1467,4 @@ static inline int kvm_cpu_get_apicid(int mps_cpu)
 #define put_smstate(type, buf, offset, val)                      \
        *(type *)((buf) + (offset) - 0x7e00) = val
 
-void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm,
-               unsigned long start, unsigned long end);
-
 #endif /* _ASM_X86_KVM_HOST_H */
index 55520ce..7fb1047 100644 (file)
@@ -37,7 +37,12 @@ struct cpu_signature {
 
 struct device;
 
-enum ucode_state { UCODE_ERROR, UCODE_OK, UCODE_NFOUND };
+enum ucode_state {
+       UCODE_OK        = 0,
+       UCODE_UPDATED,
+       UCODE_NFOUND,
+       UCODE_ERROR,
+};
 
 struct microcode_ops {
        enum ucode_state (*request_microcode_user) (int cpu,
@@ -54,7 +59,7 @@ struct microcode_ops {
         * are being called.
         * See also the "Synchronization" section in microcode_core.c.
         */
-       int (*apply_microcode) (int cpu);
+       enum ucode_state (*apply_microcode) (int cpu);
        int (*collect_cpu_info) (int cpu, struct cpu_signature *csig);
 };
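
With apply_microcode() returning enum ucode_state, callers can tell "already current" (UCODE_OK) and "no patch available" (UCODE_NFOUND) apart from "a new revision was written" (UCODE_UPDATED), and only the latter needs a feature recheck. A hedged sketch of how a caller would interpret the result; the helper name is made up, the real handling is in microcode/core.c further down:

    static int example_interpret(enum ucode_state ret)
    {
            switch (ret) {
            case UCODE_OK:          /* CPU already at or above this revision */
            case UCODE_NFOUND:      /* nothing suitable cached for this CPU  */
                    return 0;
            case UCODE_UPDATED:     /* new revision actually applied         */
                    return 1;       /* caller should run microcode_check()   */
            default:                /* UCODE_ERROR                           */
                    return -EINVAL;
            }
    }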
 
index c931b88..1de72ce 100644 (file)
@@ -74,6 +74,7 @@ static inline void *ldt_slot_va(int slot)
        return (void *)(LDT_BASE_ADDR + LDT_SLOT_STRIDE * slot);
 #else
        BUG();
+       return (void *)fix_to_virt(FIX_HOLE);
 #endif
 }
 
index 81a1be3..d0dabea 100644 (file)
@@ -8,6 +8,50 @@
 #include <asm/cpufeatures.h>
 #include <asm/msr-index.h>
 
+/*
+ * Fill the CPU return stack buffer.
+ *
+ * Each entry in the RSB, if used for a speculative 'ret', contains an
+ * infinite 'pause; lfence; jmp' loop to capture speculative execution.
+ *
+ * This is required in various cases for retpoline and IBRS-based
+ * mitigations for the Spectre variant 2 vulnerability. Sometimes to
+ * eliminate potentially bogus entries from the RSB, and sometimes
+ * purely to ensure that it doesn't get empty, which on some CPUs would
+ * allow predictions from other (unwanted!) sources to be used.
+ *
+ * We define a CPP macro such that it can be used from both .S files and
+ * inline assembly. It's possible to do a .macro and then include that
+ * from C via asm(".include <asm/nospec-branch.h>") but let's not go there.
+ */
+
+#define RSB_CLEAR_LOOPS                32      /* To forcibly overwrite all entries */
+#define RSB_FILL_LOOPS         16      /* To avoid underflow */
+
+/*
+ * Google experimented with loop-unrolling and this turned out to be
+ * the optimal version - two calls, each with their own speculation
+ * trap should their return address end up getting used, in a loop.
+ */
+#define __FILL_RETURN_BUFFER(reg, nr, sp)      \
+       mov     $(nr/2), reg;                   \
+771:                                           \
+       call    772f;                           \
+773:   /* speculation trap */                  \
+       pause;                                  \
+       lfence;                                 \
+       jmp     773b;                           \
+772:                                           \
+       call    774f;                           \
+775:   /* speculation trap */                  \
+       pause;                                  \
+       lfence;                                 \
+       jmp     775b;                           \
+774:                                           \
+       dec     reg;                            \
+       jnz     771b;                           \
+       add     $(BITS_PER_LONG/8) * nr, sp;
+
 #ifdef __ASSEMBLY__
 
 /*
        .popsection
 .endm
 
+/*
+ * This should be used immediately before an indirect jump/call. It tells
+ * objtool the subsequent indirect jump/call is vouched safe for retpoline
+ * builds.
+ */
+.macro ANNOTATE_RETPOLINE_SAFE
+       .Lannotate_\@:
+       .pushsection .discard.retpoline_safe
+       _ASM_PTR .Lannotate_\@
+       .popsection
+.endm
+
 /*
  * These are the bare retpoline primitives for indirect jmp and call.
  * Do not use these directly; they only exist to make the ALTERNATIVE
 .macro JMP_NOSPEC reg:req
 #ifdef CONFIG_RETPOLINE
        ANNOTATE_NOSPEC_ALTERNATIVE
-       ALTERNATIVE_2 __stringify(jmp *\reg),                           \
+       ALTERNATIVE_2 __stringify(ANNOTATE_RETPOLINE_SAFE; jmp *\reg),  \
                __stringify(RETPOLINE_JMP \reg), X86_FEATURE_RETPOLINE, \
-               __stringify(lfence; jmp *\reg), X86_FEATURE_RETPOLINE_AMD
+               __stringify(lfence; ANNOTATE_RETPOLINE_SAFE; jmp *\reg), X86_FEATURE_RETPOLINE_AMD
 #else
        jmp     *\reg
 #endif
 .macro CALL_NOSPEC reg:req
 #ifdef CONFIG_RETPOLINE
        ANNOTATE_NOSPEC_ALTERNATIVE
-       ALTERNATIVE_2 __stringify(call *\reg),                          \
+       ALTERNATIVE_2 __stringify(ANNOTATE_RETPOLINE_SAFE; call *\reg), \
                __stringify(RETPOLINE_CALL \reg), X86_FEATURE_RETPOLINE,\
-               __stringify(lfence; call *\reg), X86_FEATURE_RETPOLINE_AMD
+               __stringify(lfence; ANNOTATE_RETPOLINE_SAFE; call *\reg), X86_FEATURE_RETPOLINE_AMD
 #else
        call    *\reg
 #endif
 .endm
 
-/* This clobbers the BX register */
-.macro FILL_RETURN_BUFFER nr:req ftr:req
+ /*
+  * A simpler FILL_RETURN_BUFFER macro. Don't make people use the CPP
+  * monstrosity above, manually.
+  */
+.macro FILL_RETURN_BUFFER reg:req nr:req ftr:req
 #ifdef CONFIG_RETPOLINE
-       ALTERNATIVE "", "call __clear_rsb", \ftr
+       ANNOTATE_NOSPEC_ALTERNATIVE
+       ALTERNATIVE "jmp .Lskip_rsb_\@",                                \
+               __stringify(__FILL_RETURN_BUFFER(\reg,\nr,%_ASM_SP))    \
+               \ftr
+.Lskip_rsb_\@:
 #endif
 .endm
 
        ".long 999b - .\n\t"                                    \
        ".popsection\n\t"
 
+#define ANNOTATE_RETPOLINE_SAFE                                        \
+       "999:\n\t"                                              \
+       ".pushsection .discard.retpoline_safe\n\t"              \
+       _ASM_PTR " 999b\n\t"                                    \
+       ".popsection\n\t"
+
 #if defined(CONFIG_X86_64) && defined(RETPOLINE)
 
 /*
 # define CALL_NOSPEC                                           \
        ANNOTATE_NOSPEC_ALTERNATIVE                             \
        ALTERNATIVE(                                            \
+       ANNOTATE_RETPOLINE_SAFE                                 \
        "call *%[thunk_target]\n",                              \
        "call __x86_indirect_thunk_%V[thunk_target]\n",         \
        X86_FEATURE_RETPOLINE)
@@ -156,26 +226,54 @@ extern char __indirect_thunk_end[];
 static inline void vmexit_fill_RSB(void)
 {
 #ifdef CONFIG_RETPOLINE
-       alternative_input("",
-                         "call __fill_rsb",
-                         X86_FEATURE_RETPOLINE,
-                         ASM_NO_INPUT_CLOBBER(_ASM_BX, "memory"));
+       unsigned long loops;
+
+       asm volatile (ANNOTATE_NOSPEC_ALTERNATIVE
+                     ALTERNATIVE("jmp 910f",
+                                 __stringify(__FILL_RETURN_BUFFER(%0, RSB_CLEAR_LOOPS, %1)),
+                                 X86_FEATURE_RETPOLINE)
+                     "910:"
+                     : "=r" (loops), ASM_CALL_CONSTRAINT
+                     : : "memory" );
 #endif
 }
 
+#define alternative_msr_write(_msr, _val, _feature)            \
+       asm volatile(ALTERNATIVE("",                            \
+                                "movl %[msr], %%ecx\n\t"       \
+                                "movl %[val], %%eax\n\t"       \
+                                "movl $0, %%edx\n\t"           \
+                                "wrmsr",                       \
+                                _feature)                      \
+                    : : [msr] "i" (_msr), [val] "i" (_val)     \
+                    : "eax", "ecx", "edx", "memory")
+
 static inline void indirect_branch_prediction_barrier(void)
 {
-       asm volatile(ALTERNATIVE("",
-                                "movl %[msr], %%ecx\n\t"
-                                "movl %[val], %%eax\n\t"
-                                "movl $0, %%edx\n\t"
-                                "wrmsr",
-                                X86_FEATURE_USE_IBPB)
-                    : : [msr] "i" (MSR_IA32_PRED_CMD),
-                        [val] "i" (PRED_CMD_IBPB)
-                    : "eax", "ecx", "edx", "memory");
+       alternative_msr_write(MSR_IA32_PRED_CMD, PRED_CMD_IBPB,
+                             X86_FEATURE_USE_IBPB);
 }
 
+/*
+ * With retpoline, we must use IBRS to restrict branch prediction
+ * before calling into firmware.
+ *
+ * (Implemented as CPP macros due to header hell.)
+ */
+#define firmware_restrict_branch_speculation_start()                   \
+do {                                                                   \
+       preempt_disable();                                              \
+       alternative_msr_write(MSR_IA32_SPEC_CTRL, SPEC_CTRL_IBRS,       \
+                             X86_FEATURE_USE_IBRS_FW);                 \
+} while (0)
+
+#define firmware_restrict_branch_speculation_end()                     \
+do {                                                                   \
+       alternative_msr_write(MSR_IA32_SPEC_CTRL, 0,                    \
+                             X86_FEATURE_USE_IBRS_FW);                 \
+       preempt_enable();                                               \
+} while (0)
+
 #endif /* __ASSEMBLY__ */
 
 /*
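
On the assembly side, FILL_RETURN_BUFFER now takes an explicit scratch register instead of silently clobbering %rbx through the removed __clear_rsb helper, so each call site names a register it can spare. A sketch of what an invocation looks like; the register and the feature flag are illustrative choices, not taken from this hunk:

    /* Stuff the RSB with benign entries on a path where it may be stale. */
    FILL_RETURN_BUFFER %r12, RSB_CLEAR_LOOPS, X86_FEATURE_RSB_CTXSW
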
index 554841f..c83a2f4 100644 (file)
@@ -7,6 +7,7 @@
 #ifdef CONFIG_PARAVIRT
 #include <asm/pgtable_types.h>
 #include <asm/asm.h>
+#include <asm/nospec-branch.h>
 
 #include <asm/paravirt_types.h>
 
@@ -879,23 +880,27 @@ extern void default_banner(void);
 
 #define INTERRUPT_RETURN                                               \
        PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_iret), CLBR_NONE,       \
-                 jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_iret))
+                 ANNOTATE_RETPOLINE_SAFE;                                      \
+                 jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_iret);)
 
 #define DISABLE_INTERRUPTS(clobbers)                                   \
        PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_irq_disable), clobbers, \
                  PV_SAVE_REGS(clobbers | CLBR_CALLEE_SAVE);            \
+                 ANNOTATE_RETPOLINE_SAFE;                                      \
                  call PARA_INDIRECT(pv_irq_ops+PV_IRQ_irq_disable);    \
                  PV_RESTORE_REGS(clobbers | CLBR_CALLEE_SAVE);)
 
 #define ENABLE_INTERRUPTS(clobbers)                                    \
        PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_irq_enable), clobbers,  \
                  PV_SAVE_REGS(clobbers | CLBR_CALLEE_SAVE);            \
+                 ANNOTATE_RETPOLINE_SAFE;                                      \
                  call PARA_INDIRECT(pv_irq_ops+PV_IRQ_irq_enable);     \
                  PV_RESTORE_REGS(clobbers | CLBR_CALLEE_SAVE);)
 
 #ifdef CONFIG_X86_32
 #define GET_CR0_INTO_EAX                               \
        push %ecx; push %edx;                           \
+       ANNOTATE_RETPOLINE_SAFE;                                \
        call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \
        pop %edx; pop %ecx
 #else  /* !CONFIG_X86_32 */
@@ -917,21 +922,25 @@ extern void default_banner(void);
  */
 #define SWAPGS                                                         \
        PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_swapgs), CLBR_NONE,     \
-                 call PARA_INDIRECT(pv_cpu_ops+PV_CPU_swapgs)          \
+                 ANNOTATE_RETPOLINE_SAFE;                                      \
+                 call PARA_INDIRECT(pv_cpu_ops+PV_CPU_swapgs);         \
                 )
 
 #define GET_CR2_INTO_RAX                               \
-       call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr2)
+       ANNOTATE_RETPOLINE_SAFE;                                \
+       call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr2);
 
 #define USERGS_SYSRET64                                                        \
        PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_usergs_sysret64),       \
                  CLBR_NONE,                                            \
-                 jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_usergs_sysret64))
+                 ANNOTATE_RETPOLINE_SAFE;                                      \
+                 jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_usergs_sysret64);)
 
 #ifdef CONFIG_DEBUG_ENTRY
 #define SAVE_FLAGS(clobbers)                                        \
        PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_save_fl), clobbers, \
                  PV_SAVE_REGS(clobbers | CLBR_CALLEE_SAVE);        \
+                 ANNOTATE_RETPOLINE_SAFE;                                  \
                  call PARA_INDIRECT(pv_irq_ops+PV_IRQ_save_fl);    \
                  PV_RESTORE_REGS(clobbers | CLBR_CALLEE_SAVE);)
 #endif
index f624f1f..180bc0b 100644 (file)
@@ -43,6 +43,7 @@
 #include <asm/desc_defs.h>
 #include <asm/kmap_types.h>
 #include <asm/pgtable_types.h>
+#include <asm/nospec-branch.h>
 
 struct page;
 struct thread_struct;
@@ -392,7 +393,9 @@ int paravirt_disable_iospace(void);
  * offset into the paravirt_patch_template structure, and can therefore be
  * freely converted back into a structure offset.
  */
-#define PARAVIRT_CALL  "call *%c[paravirt_opptr];"
+#define PARAVIRT_CALL                                  \
+       ANNOTATE_RETPOLINE_SAFE                         \
+       "call *%c[paravirt_opptr];"
 
 /*
  * These macros are intended to wrap calls through one of the paravirt
index ba3c523..a06b073 100644 (file)
@@ -526,7 +526,7 @@ static inline bool x86_this_cpu_variable_test_bit(int nr,
 {
        bool oldbit;
 
-       asm volatile("bt "__percpu_arg(2)",%1"
+       asm volatile("btl "__percpu_arg(2)",%1"
                        CC_SET(c)
                        : CC_OUT(c) (oldbit)
                        : "m" (*(unsigned long __percpu *)addr), "Ir" (nr));
index 63c2552..b444d83 100644 (file)
@@ -350,14 +350,14 @@ static inline pmd_t pmd_set_flags(pmd_t pmd, pmdval_t set)
 {
        pmdval_t v = native_pmd_val(pmd);
 
-       return __pmd(v | set);
+       return native_make_pmd(v | set);
 }
 
 static inline pmd_t pmd_clear_flags(pmd_t pmd, pmdval_t clear)
 {
        pmdval_t v = native_pmd_val(pmd);
 
-       return __pmd(v & ~clear);
+       return native_make_pmd(v & ~clear);
 }
 
 static inline pmd_t pmd_mkold(pmd_t pmd)
@@ -409,14 +409,14 @@ static inline pud_t pud_set_flags(pud_t pud, pudval_t set)
 {
        pudval_t v = native_pud_val(pud);
 
-       return __pud(v | set);
+       return native_make_pud(v | set);
 }
 
 static inline pud_t pud_clear_flags(pud_t pud, pudval_t clear)
 {
        pudval_t v = native_pud_val(pud);
 
-       return __pud(v & ~clear);
+       return native_make_pud(v & ~clear);
 }
 
 static inline pud_t pud_mkold(pud_t pud)
index e554667..b3ec519 100644 (file)
@@ -32,6 +32,7 @@ extern pmd_t initial_pg_pmd[];
 static inline void pgtable_cache_init(void) { }
 static inline void check_pgt_cache(void) { }
 void paging_init(void);
+void sync_initial_page_table(void);
 
 /*
  * Define this if things work differently on an i386 and an i486:
index 81462e9..1149d21 100644 (file)
@@ -28,6 +28,7 @@ extern pgd_t init_top_pgt[];
 #define swapper_pg_dir init_top_pgt
 
 extern void paging_init(void);
+static inline void sync_initial_page_table(void) { }
 
 #define pte_ERROR(e)                                   \
        pr_err("%s:%d: bad pte %p(%016lx)\n",           \
index 3696398..246f15b 100644 (file)
@@ -323,6 +323,11 @@ static inline pudval_t native_pud_val(pud_t pud)
 #else
 #include <asm-generic/pgtable-nopud.h>
 
+static inline pud_t native_make_pud(pudval_t val)
+{
+       return (pud_t) { .p4d.pgd = native_make_pgd(val) };
+}
+
 static inline pudval_t native_pud_val(pud_t pud)
 {
        return native_pgd_val(pud.p4d.pgd);
@@ -344,6 +349,11 @@ static inline pmdval_t native_pmd_val(pmd_t pmd)
 #else
 #include <asm-generic/pgtable-nopmd.h>
 
+static inline pmd_t native_make_pmd(pmdval_t val)
+{
+       return (pmd_t) { .pud.p4d.pgd = native_make_pgd(val) };
+}
+
 static inline pmdval_t native_pmd_val(pmd_t pmd)
 {
        return native_pgd_val(pmd.pud.p4d.pgd);
index 1bd9ed8..b0ccd48 100644 (file)
@@ -977,4 +977,5 @@ bool xen_set_default_idle(void);
 
 void stop_this_cpu(void *dummy);
 void df_debug(struct pt_regs *regs, long error_code);
+void microcode_check(void);
 #endif /* _ASM_X86_PROCESSOR_H */
index 4e44250..4cf11d8 100644 (file)
@@ -17,7 +17,7 @@
 #define _REFCOUNT_EXCEPTION                            \
        ".pushsection .text..refcount\n"                \
        "111:\tlea %[counter], %%" _ASM_CX "\n"         \
-       "112:\t" ASM_UD0 "\n"                           \
+       "112:\t" ASM_UD2 "\n"                           \
        ASM_UNREACHABLE                                 \
        ".popsection\n"                                 \
        "113:\n"                                        \
@@ -67,13 +67,13 @@ static __always_inline __must_check
 bool refcount_sub_and_test(unsigned int i, refcount_t *r)
 {
        GEN_BINARY_SUFFIXED_RMWcc(LOCK_PREFIX "subl", REFCOUNT_CHECK_LT_ZERO,
-                                 r->refs.counter, "er", i, "%0", e);
+                                 r->refs.counter, "er", i, "%0", e, "cx");
 }
 
 static __always_inline __must_check bool refcount_dec_and_test(refcount_t *r)
 {
        GEN_UNARY_SUFFIXED_RMWcc(LOCK_PREFIX "decl", REFCOUNT_CHECK_LT_ZERO,
-                                r->refs.counter, "%0", e);
+                                r->refs.counter, "%0", e, "cx");
 }
 
 static __always_inline __must_check
index f91c365..4914a3e 100644 (file)
@@ -2,8 +2,7 @@
 #ifndef _ASM_X86_RMWcc
 #define _ASM_X86_RMWcc
 
-#define __CLOBBERS_MEM         "memory"
-#define __CLOBBERS_MEM_CC_CX   "memory", "cc", "cx"
+#define __CLOBBERS_MEM(clb...) "memory", ## clb
 
 #if !defined(__GCC_ASM_FLAG_OUTPUTS__) && defined(CC_HAVE_ASM_GOTO)
 
@@ -40,18 +39,19 @@ do {                                                                        \
 #endif /* defined(__GCC_ASM_FLAG_OUTPUTS__) || !defined(CC_HAVE_ASM_GOTO) */
 
 #define GEN_UNARY_RMWcc(op, var, arg0, cc)                             \
-       __GEN_RMWcc(op " " arg0, var, cc, __CLOBBERS_MEM)
+       __GEN_RMWcc(op " " arg0, var, cc, __CLOBBERS_MEM())
 
-#define GEN_UNARY_SUFFIXED_RMWcc(op, suffix, var, arg0, cc)            \
+#define GEN_UNARY_SUFFIXED_RMWcc(op, suffix, var, arg0, cc, clobbers...)\
        __GEN_RMWcc(op " " arg0 "\n\t" suffix, var, cc,                 \
-                   __CLOBBERS_MEM_CC_CX)
+                   __CLOBBERS_MEM(clobbers))
 
 #define GEN_BINARY_RMWcc(op, var, vcon, val, arg0, cc)                 \
        __GEN_RMWcc(op __BINARY_RMWcc_ARG arg0, var, cc,                \
-                   __CLOBBERS_MEM, vcon (val))
+                   __CLOBBERS_MEM(), vcon (val))
 
-#define GEN_BINARY_SUFFIXED_RMWcc(op, suffix, var, vcon, val, arg0, cc)        \
+#define GEN_BINARY_SUFFIXED_RMWcc(op, suffix, var, vcon, val, arg0, cc,        \
+                                 clobbers...)                          \
        __GEN_RMWcc(op __BINARY_RMWcc_ARG arg0 "\n\t" suffix, var, cc,  \
-                   __CLOBBERS_MEM_CC_CX, vcon (val))
+                   __CLOBBERS_MEM(clobbers), vcon (val))
 
 #endif /* _ASM_X86_RMWcc */
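
The variadic __CLOBBERS_MEM() lets each suffixed RMWcc user list exactly the extra clobbers it needs rather than always taking "cc" and "cx". As a quick expansion check (illustrative only):

    __CLOBBERS_MEM()            /* -> "memory"               */
    __CLOBBERS_MEM("cc", "cx")  /* -> "memory", "cc", "cx"   */

which is why the refcount.h callers above now pass just "cx", the one register their exception path actually touches.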
index 197c2e6..0994143 100644 (file)
 #define HV_X64_MSR_REENLIGHTENMENT_CONTROL     0x40000106
 
 struct hv_reenlightenment_control {
-       u64 vector:8;
-       u64 reserved1:8;
-       u64 enabled:1;
-       u64 reserved2:15;
-       u64 target_vp:32;
+       __u64 vector:8;
+       __u64 reserved1:8;
+       __u64 enabled:1;
+       __u64 reserved2:15;
+       __u64 target_vp:32;
 };
 
 #define HV_X64_MSR_TSC_EMULATION_CONTROL       0x40000107
 #define HV_X64_MSR_TSC_EMULATION_STATUS                0x40000108
 
 struct hv_tsc_emulation_control {
-       u64 enabled:1;
-       u64 reserved:63;
+       __u64 enabled:1;
+       __u64 reserved:63;
 };
 
 struct hv_tsc_emulation_status {
-       u64 inprogress:1;
-       u64 reserved:63;
+       __u64 inprogress:1;
+       __u64 reserved:63;
 };
 
 #define HV_X64_MSR_HYPERCALL_ENABLE            0x00000001
index 7a2ade4..6cfa9c8 100644 (file)
@@ -26,6 +26,7 @@
 #define KVM_FEATURE_PV_EOI             6
 #define KVM_FEATURE_PV_UNHALT          7
 #define KVM_FEATURE_PV_TLB_FLUSH       9
+#define KVM_FEATURE_ASYNC_PF_VMEXIT    10
 
 /* The last 8 bits are used to indicate how to interpret the flags field
  * in pvclock structure. If no bits are set, all flags are ignored.
index 8ad2e41..7c55387 100644 (file)
@@ -1603,7 +1603,7 @@ static void __init delay_with_tsc(void)
        do {
                rep_nop();
                now = rdtsc();
-       } while ((now - start) < 40000000000UL / HZ &&
+       } while ((now - start) < 40000000000ULL / HZ &&
                time_before_eq(jiffies, end));
 }
 
index 3cc471b..bb6f7a2 100644 (file)
@@ -134,21 +134,40 @@ static void apic_update_vector(struct irq_data *irqd, unsigned int newvec,
 {
        struct apic_chip_data *apicd = apic_chip_data(irqd);
        struct irq_desc *desc = irq_data_to_desc(irqd);
+       bool managed = irqd_affinity_is_managed(irqd);
 
        lockdep_assert_held(&vector_lock);
 
        trace_vector_update(irqd->irq, newvec, newcpu, apicd->vector,
                            apicd->cpu);
 
-       /* Setup the vector move, if required  */
-       if (apicd->vector && cpu_online(apicd->cpu)) {
+       /*
+        * If there is no vector associated or if the associated vector is
+        * the shutdown vector, which is associated to make PCI/MSI
+        * shutdown mode work, then there is nothing to release. Clear out
+        * prev_vector for this and the offlined target case.
+        */
+       apicd->prev_vector = 0;
+       if (!apicd->vector || apicd->vector == MANAGED_IRQ_SHUTDOWN_VECTOR)
+               goto setnew;
+       /*
+        * If the target CPU of the previous vector is online, then mark
+        * the vector as move in progress and store it for cleanup when the
+        * first interrupt on the new vector arrives. If the target CPU is
+        * offline then the regular release mechanism via the cleanup
+        * vector is not possible and the vector can be immediately freed
+        * in the underlying matrix allocator.
+        */
+       if (cpu_online(apicd->cpu)) {
                apicd->move_in_progress = true;
                apicd->prev_vector = apicd->vector;
                apicd->prev_cpu = apicd->cpu;
        } else {
-               apicd->prev_vector = 0;
+               irq_matrix_free(vector_matrix, apicd->cpu, apicd->vector,
+                               managed);
        }
 
+setnew:
        apicd->vector = newvec;
        apicd->cpu = newcpu;
        BUG_ON(!IS_ERR_OR_NULL(per_cpu(vector_irq, newcpu)[newvec]));
index d71c8b5..bfca937 100644 (file)
@@ -300,6 +300,15 @@ retpoline_auto:
                setup_force_cpu_cap(X86_FEATURE_USE_IBPB);
                pr_info("Spectre v2 mitigation: Enabling Indirect Branch Prediction Barrier\n");
        }
+
+       /*
+        * Retpoline means the kernel is safe because it has no indirect
+        * branches. But firmware isn't, so use IBRS to protect that.
+        */
+       if (boot_cpu_has(X86_FEATURE_IBRS)) {
+               setup_force_cpu_cap(X86_FEATURE_USE_IBRS_FW);
+               pr_info("Enabling Restricted Speculation for firmware calls\n");
+       }
 }
 
 #undef pr_fmt
@@ -326,8 +335,9 @@ ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, c
        if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2))
                return sprintf(buf, "Not affected\n");
 
-       return sprintf(buf, "%s%s%s\n", spectre_v2_strings[spectre_v2_enabled],
+       return sprintf(buf, "%s%s%s%s\n", spectre_v2_strings[spectre_v2_enabled],
                       boot_cpu_has(X86_FEATURE_USE_IBPB) ? ", IBPB" : "",
+                      boot_cpu_has(X86_FEATURE_USE_IBRS_FW) ? ", IBRS_FW" : "",
                       spectre_v2_module_string());
 }
 #endif
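
With retpoline active on IBRS-capable hardware, X86_FEATURE_USE_IBRS_FW gets set alongside X86_FEATURE_USE_IBPB, and the sysfs report simply appends the extra token. Reading /sys/devices/system/cpu/vulnerabilities/spectre_v2 on such a system would then show something along the lines of:

    Mitigation: Full generic retpoline, IBPB, IBRS_FW

with the leading mitigation string depending on spectre_v2_enabled.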
index 824aee0..348cf48 100644 (file)
@@ -1749,3 +1749,33 @@ static int __init init_cpu_syscore(void)
        return 0;
 }
 core_initcall(init_cpu_syscore);
+
+/*
+ * The microcode loader calls this upon late microcode load to recheck features,
+ * only when microcode has been updated. Caller holds microcode_mutex and CPU
+ * hotplug lock.
+ */
+void microcode_check(void)
+{
+       struct cpuinfo_x86 info;
+
+       perf_check_microcode();
+
+       /* Reload CPUID max function as it might've changed. */
+       info.cpuid_level = cpuid_eax(0);
+
+       /*
+        * Copy all capability leafs to pick up the synthetic ones so that
+        * memcmp() below doesn't fail on that. The ones coming from CPUID will
+        * get overwritten in get_cpu_cap().
+        */
+       memcpy(&info.x86_capability, &boot_cpu_data.x86_capability, sizeof(info.x86_capability));
+
+       get_cpu_cap(&info);
+
+       if (!memcmp(&info.x86_capability, &boot_cpu_data.x86_capability, sizeof(info.x86_capability)))
+               return;
+
+       pr_warn("x86/CPU: CPU features have changed after loading microcode, but might not take effect.\n");
+       pr_warn("x86/CPU: Please consider either early loading through initrd/built-in or a potential BIOS update.\n");
+}
index bdab7d2..fca759d 100644 (file)
@@ -1804,6 +1804,7 @@ static int rdtgroup_mkdir_ctrl_mon(struct kernfs_node *parent_kn,
                goto out_common_fail;
        }
        closid = ret;
+       ret = 0;
 
        rdtgrp->closid = closid;
        list_add(&rdtgrp->rdtgroup_list, &rdt_all_groups);
index 330b846..a998e1a 100644 (file)
@@ -498,7 +498,7 @@ static unsigned int verify_patch_size(u8 family, u32 patch_size,
        return patch_size;
 }
 
-static int apply_microcode_amd(int cpu)
+static enum ucode_state apply_microcode_amd(int cpu)
 {
        struct cpuinfo_x86 *c = &cpu_data(cpu);
        struct microcode_amd *mc_amd;
@@ -512,7 +512,7 @@ static int apply_microcode_amd(int cpu)
 
        p = find_patch(cpu);
        if (!p)
-               return 0;
+               return UCODE_NFOUND;
 
        mc_amd  = p->data;
        uci->mc = p->data;
@@ -523,13 +523,13 @@ static int apply_microcode_amd(int cpu)
        if (rev >= mc_amd->hdr.patch_id) {
                c->microcode = rev;
                uci->cpu_sig.rev = rev;
-               return 0;
+               return UCODE_OK;
        }
 
        if (__apply_microcode_amd(mc_amd)) {
                pr_err("CPU%d: update failed for patch_level=0x%08x\n",
                        cpu, mc_amd->hdr.patch_id);
-               return -1;
+               return UCODE_ERROR;
        }
        pr_info("CPU%d: new patch_level=0x%08x\n", cpu,
                mc_amd->hdr.patch_id);
@@ -537,7 +537,7 @@ static int apply_microcode_amd(int cpu)
        uci->cpu_sig.rev = mc_amd->hdr.patch_id;
        c->microcode = mc_amd->hdr.patch_id;
 
-       return 0;
+       return UCODE_UPDATED;
 }
 
 static int install_equiv_cpu_table(const u8 *buf)
index 319dd65..aa1b9a4 100644 (file)
@@ -374,7 +374,7 @@ static int collect_cpu_info(int cpu)
 }
 
 struct apply_microcode_ctx {
-       int err;
+       enum ucode_state err;
 };
 
 static void apply_microcode_local(void *arg)
@@ -489,31 +489,30 @@ static void __exit microcode_dev_exit(void)
 /* fake device for request_firmware */
 static struct platform_device  *microcode_pdev;
 
-static int reload_for_cpu(int cpu)
+static enum ucode_state reload_for_cpu(int cpu)
 {
        struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
        enum ucode_state ustate;
-       int err = 0;
 
        if (!uci->valid)
-               return err;
+               return UCODE_OK;
 
        ustate = microcode_ops->request_microcode_fw(cpu, &microcode_pdev->dev, true);
-       if (ustate == UCODE_OK)
-               apply_microcode_on_target(cpu);
-       else
-               if (ustate == UCODE_ERROR)
-                       err = -EINVAL;
-       return err;
+       if (ustate != UCODE_OK)
+               return ustate;
+
+       return apply_microcode_on_target(cpu);
 }
 
 static ssize_t reload_store(struct device *dev,
                            struct device_attribute *attr,
                            const char *buf, size_t size)
 {
+       enum ucode_state tmp_ret = UCODE_OK;
+       bool do_callback = false;
        unsigned long val;
+       ssize_t ret = 0;
        int cpu;
-       ssize_t ret = 0, tmp_ret;
 
        ret = kstrtoul(buf, 0, &val);
        if (ret)
@@ -526,15 +525,21 @@ static ssize_t reload_store(struct device *dev,
        mutex_lock(&microcode_mutex);
        for_each_online_cpu(cpu) {
                tmp_ret = reload_for_cpu(cpu);
-               if (tmp_ret != 0)
+               if (tmp_ret > UCODE_NFOUND) {
                        pr_warn("Error reloading microcode on CPU %d\n", cpu);
 
-               /* save retval of the first encountered reload error */
-               if (!ret)
-                       ret = tmp_ret;
+                       /* set retval for the first encountered reload error */
+                       if (!ret)
+                               ret = -EINVAL;
+               }
+
+               if (tmp_ret == UCODE_UPDATED)
+                       do_callback = true;
        }
-       if (!ret)
-               perf_check_microcode();
+
+       if (!ret && do_callback)
+               microcode_check();
+
        mutex_unlock(&microcode_mutex);
        put_online_cpus();
 
index a15db2b..923054a 100644 (file)
@@ -772,7 +772,7 @@ static int collect_cpu_info(int cpu_num, struct cpu_signature *csig)
        return 0;
 }
 
-static int apply_microcode_intel(int cpu)
+static enum ucode_state apply_microcode_intel(int cpu)
 {
        struct microcode_intel *mc;
        struct ucode_cpu_info *uci;
@@ -782,7 +782,7 @@ static int apply_microcode_intel(int cpu)
 
        /* We should bind the task to the CPU */
        if (WARN_ON(raw_smp_processor_id() != cpu))
-               return -1;
+               return UCODE_ERROR;
 
        uci = ucode_cpu_info + cpu;
        mc = uci->mc;
@@ -790,7 +790,7 @@ static int apply_microcode_intel(int cpu)
                /* Look for a newer patch in our cache: */
                mc = find_patch(uci);
                if (!mc)
-                       return 0;
+                       return UCODE_NFOUND;
        }
 
        /* write microcode via MSR 0x79 */
@@ -801,7 +801,7 @@ static int apply_microcode_intel(int cpu)
        if (rev != mc->hdr.rev) {
                pr_err("CPU%d update to revision 0x%x failed\n",
                       cpu, mc->hdr.rev);
-               return -1;
+               return UCODE_ERROR;
        }
 
        if (rev != prev_rev) {
@@ -818,7 +818,7 @@ static int apply_microcode_intel(int cpu)
        uci->cpu_sig.rev = rev;
        c->microcode = rev;
 
-       return 0;
+       return UCODE_UPDATED;
 }
 
 static enum ucode_state generic_load_microcode(int cpu, void *data, size_t size,
index 04a625f..0f545b3 100644 (file)
@@ -23,6 +23,7 @@
 #include <asm/nops.h>
 #include "../entry/calling.h"
 #include <asm/export.h>
+#include <asm/nospec-branch.h>
 
 #ifdef CONFIG_PARAVIRT
 #include <asm/asm-offsets.h>
@@ -134,6 +135,7 @@ ENTRY(secondary_startup_64)
 
        /* Ensure I am executing from virtual addresses */
        movq    $1f, %rax
+       ANNOTATE_RETPOLINE_SAFE
        jmp     *%rax
 1:
        UNWIND_HINT_EMPTY
index 4e37d1a..bc1a272 100644 (file)
@@ -49,7 +49,7 @@
 
 static int kvmapf = 1;
 
-static int parse_no_kvmapf(char *arg)
+static int __init parse_no_kvmapf(char *arg)
 {
         kvmapf = 0;
         return 0;
@@ -58,7 +58,7 @@ static int parse_no_kvmapf(char *arg)
 early_param("no-kvmapf", parse_no_kvmapf);
 
 static int steal_acc = 1;
-static int parse_no_stealacc(char *arg)
+static int __init parse_no_stealacc(char *arg)
 {
         steal_acc = 0;
         return 0;
@@ -67,7 +67,7 @@ static int parse_no_stealacc(char *arg)
 early_param("no-steal-acc", parse_no_stealacc);
 
 static int kvmclock_vsyscall = 1;
-static int parse_no_kvmclock_vsyscall(char *arg)
+static int __init parse_no_kvmclock_vsyscall(char *arg)
 {
         kvmclock_vsyscall = 0;
         return 0;
@@ -341,10 +341,10 @@ static void kvm_guest_cpu_init(void)
 #endif
                pa |= KVM_ASYNC_PF_ENABLED;
 
-               /* Async page fault support for L1 hypervisor is optional */
-               if (wrmsr_safe(MSR_KVM_ASYNC_PF_EN,
-                       (pa | KVM_ASYNC_PF_DELIVERY_AS_PF_VMEXIT) & 0xffffffff, pa >> 32) < 0)
-                       wrmsrl(MSR_KVM_ASYNC_PF_EN, pa);
+               if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF_VMEXIT))
+                       pa |= KVM_ASYNC_PF_DELIVERY_AS_PF_VMEXIT;
+
+               wrmsrl(MSR_KVM_ASYNC_PF_EN, pa);
                __this_cpu_write(apf_reason.enabled, 1);
                printk(KERN_INFO"KVM setup async PF for cpu %d\n",
                       smp_processor_id());
@@ -545,7 +545,8 @@ static void __init kvm_guest_init(void)
                pv_time_ops.steal_clock = kvm_steal_clock;
        }
 
-       if (kvm_para_has_feature(KVM_FEATURE_PV_TLB_FLUSH))
+       if (kvm_para_has_feature(KVM_FEATURE_PV_TLB_FLUSH) &&
+           !kvm_para_has_feature(KVM_FEATURE_STEAL_TIME))
                pv_mmu_ops.flush_tlb_others = kvm_flush_tlb_others;
 
        if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
@@ -633,7 +634,8 @@ static __init int kvm_setup_pv_tlb_flush(void)
 {
        int cpu;
 
-       if (kvm_para_has_feature(KVM_FEATURE_PV_TLB_FLUSH)) {
+       if (kvm_para_has_feature(KVM_FEATURE_PV_TLB_FLUSH) &&
+           !kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) {
                for_each_possible_cpu(cpu) {
                        zalloc_cpumask_var_node(per_cpu_ptr(&__pv_tlb_mask, cpu),
                                GFP_KERNEL, cpu_to_node(cpu));
index 1ae67e9..4c616be 100644 (file)
@@ -1204,20 +1204,13 @@ void __init setup_arch(char **cmdline_p)
 
        kasan_init();
 
-#ifdef CONFIG_X86_32
-       /* sync back kernel address range */
-       clone_pgd_range(initial_page_table + KERNEL_PGD_BOUNDARY,
-                       swapper_pg_dir     + KERNEL_PGD_BOUNDARY,
-                       KERNEL_PGD_PTRS);
-
        /*
-        * sync back low identity map too.  It is used for example
-        * in the 32-bit EFI stub.
+        * Sync back kernel address range.
+        *
+        * FIXME: Can the later sync in setup_cpu_entry_areas() replace
+        * this call?
         */
-       clone_pgd_range(initial_page_table,
-                       swapper_pg_dir     + KERNEL_PGD_BOUNDARY,
-                       min(KERNEL_PGD_PTRS, KERNEL_PGD_BOUNDARY));
-#endif
+       sync_initial_page_table();
 
        tboot_probe();
 
index 497aa76..ea554f8 100644 (file)
@@ -287,24 +287,15 @@ void __init setup_per_cpu_areas(void)
        /* Setup cpu initialized, callin, callout masks */
        setup_cpu_local_masks();
 
-#ifdef CONFIG_X86_32
        /*
         * Sync back kernel address range again.  We already did this in
         * setup_arch(), but percpu data also needs to be available in
         * the smpboot asm.  We can't reliably pick up percpu mappings
         * using vmalloc_fault(), because exception dispatch needs
         * percpu data.
+        *
+        * FIXME: Can the later sync in setup_cpu_entry_areas() replace
+        * this call?
         */
-       clone_pgd_range(initial_page_table + KERNEL_PGD_BOUNDARY,
-                       swapper_pg_dir     + KERNEL_PGD_BOUNDARY,
-                       KERNEL_PGD_PTRS);
-
-       /*
-        * sync back low identity map too.  It is used for example
-        * in the 32-bit EFI stub.
-        */
-       clone_pgd_range(initial_page_table,
-                       swapper_pg_dir     + KERNEL_PGD_BOUNDARY,
-                       min(KERNEL_PGD_PTRS, KERNEL_PGD_BOUNDARY));
-#endif
+       sync_initial_page_table();
 }
index 9eee25d..ff99e2b 100644 (file)
@@ -1437,6 +1437,7 @@ static void remove_siblinginfo(int cpu)
        cpumask_clear(topology_sibling_cpumask(cpu));
        cpumask_clear(topology_core_cpumask(cpu));
        c->cpu_core_id = 0;
+       c->booted_cores = 0;
        cpumask_clear_cpu(cpu, cpu_sibling_setup_mask);
        recompute_smt_state();
 }
index 1f9188f..feb28fe 100644 (file)
@@ -5,7 +5,6 @@
 #include <asm/unwind.h>
 #include <asm/orc_types.h>
 #include <asm/orc_lookup.h>
-#include <asm/sections.h>
 
 #define orc_warn(fmt, ...) \
        printk_deferred_once(KERN_WARNING pr_fmt("WARNING: " fmt), ##__VA_ARGS__)
@@ -148,7 +147,7 @@ static struct orc_entry *orc_find(unsigned long ip)
        }
 
        /* vmlinux .init slow lookup: */
-       if (ip >= (unsigned long)_sinittext && ip < (unsigned long)_einittext)
+       if (init_kernel_text(ip))
                return __orc_find(__start_orc_unwind_ip, __start_orc_unwind,
                                  __stop_orc_unwind_ip - __start_orc_unwind_ip, ip);
 
index a0c5a69..b671fc2 100644 (file)
@@ -607,7 +607,8 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
                             (1 << KVM_FEATURE_PV_EOI) |
                             (1 << KVM_FEATURE_CLOCKSOURCE_STABLE_BIT) |
                             (1 << KVM_FEATURE_PV_UNHALT) |
-                            (1 << KVM_FEATURE_PV_TLB_FLUSH);
+                            (1 << KVM_FEATURE_PV_TLB_FLUSH) |
+                            (1 << KVM_FEATURE_ASYNC_PF_VMEXIT);
 
                if (sched_info_on())
                        entry->eax |= (1 << KVM_FEATURE_STEAL_TIME);
index 924ac8c..391dda8 100644 (file)
@@ -2002,14 +2002,13 @@ void kvm_lapic_set_base(struct kvm_vcpu *vcpu, u64 value)
 
 void kvm_lapic_reset(struct kvm_vcpu *vcpu, bool init_event)
 {
-       struct kvm_lapic *apic;
+       struct kvm_lapic *apic = vcpu->arch.apic;
        int i;
 
-       apic_debug("%s\n", __func__);
+       if (!apic)
+               return;
 
-       ASSERT(vcpu);
-       apic = vcpu->arch.apic;
-       ASSERT(apic != NULL);
+       apic_debug("%s\n", __func__);
 
        /* Stop the timer in case it's a reset to an active apic */
        hrtimer_cancel(&apic->lapic_timer.timer);
@@ -2165,7 +2164,6 @@ int kvm_create_lapic(struct kvm_vcpu *vcpu)
         */
        vcpu->arch.apic_base = MSR_IA32_APICBASE_ENABLE;
        static_key_slow_inc(&apic_sw_disabled.key); /* sw disabled at reset */
-       kvm_lapic_reset(vcpu, false);
        kvm_iodevice_init(&apic->dev, &apic_mmio_ops);
 
        return 0;
@@ -2569,7 +2567,6 @@ void kvm_apic_accept_events(struct kvm_vcpu *vcpu)
 
        pe = xchg(&apic->pending_events, 0);
        if (test_bit(KVM_APIC_INIT, &pe)) {
-               kvm_lapic_reset(vcpu, true);
                kvm_vcpu_reset(vcpu, true);
                if (kvm_vcpu_is_bsp(apic->vcpu))
                        vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
index 46ff304..f551962 100644 (file)
@@ -3029,7 +3029,7 @@ static int kvm_handle_bad_page(struct kvm_vcpu *vcpu, gfn_t gfn, kvm_pfn_t pfn)
                return RET_PF_RETRY;
        }
 
-       return -EFAULT;
+       return RET_PF_EMULATE;
 }
 
 static void transparent_hugepage_adjust(struct kvm_vcpu *vcpu,
index b3e488a..be9c839 100644 (file)
@@ -49,6 +49,7 @@
 #include <asm/debugreg.h>
 #include <asm/kvm_para.h>
 #include <asm/irq_remapping.h>
+#include <asm/microcode.h>
 #include <asm/nospec-branch.h>
 
 #include <asm/virtext.h>
@@ -178,6 +179,8 @@ struct vcpu_svm {
        uint64_t sysenter_eip;
        uint64_t tsc_aux;
 
+       u64 msr_decfg;
+
        u64 next_rip;
 
        u64 host_user_msrs[NR_HOST_SAVE_USER_MSRS];
@@ -300,6 +303,8 @@ module_param(vgif, int, 0444);
 static int sev = IS_ENABLED(CONFIG_AMD_MEM_ENCRYPT_ACTIVE_BY_DEFAULT);
 module_param(sev, int, 0444);
 
+static u8 rsm_ins_bytes[] = "\x0f\xaa";
+
 static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
 static void svm_flush_tlb(struct kvm_vcpu *vcpu, bool invalidate_gpa);
 static void svm_complete_interrupts(struct vcpu_svm *svm);
@@ -1383,6 +1388,7 @@ static void init_vmcb(struct vcpu_svm *svm)
        set_intercept(svm, INTERCEPT_SKINIT);
        set_intercept(svm, INTERCEPT_WBINVD);
        set_intercept(svm, INTERCEPT_XSETBV);
+       set_intercept(svm, INTERCEPT_RSM);
 
        if (!kvm_mwait_in_guest()) {
                set_intercept(svm, INTERCEPT_MONITOR);
@@ -1902,6 +1908,7 @@ static void svm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
        u32 dummy;
        u32 eax = 1;
 
+       vcpu->arch.microcode_version = 0x01000065;
        svm->spec_ctrl = 0;
 
        if (!init_event) {
@@ -3699,6 +3706,12 @@ static int emulate_on_interception(struct vcpu_svm *svm)
        return emulate_instruction(&svm->vcpu, 0) == EMULATE_DONE;
 }
 
+static int rsm_interception(struct vcpu_svm *svm)
+{
+       return x86_emulate_instruction(&svm->vcpu, 0, 0,
+                                      rsm_ins_bytes, 2) == EMULATE_DONE;
+}
+
 static int rdpmc_interception(struct vcpu_svm *svm)
 {
        int err;
@@ -3860,6 +3873,22 @@ static int cr8_write_interception(struct vcpu_svm *svm)
        return 0;
 }
 
+static int svm_get_msr_feature(struct kvm_msr_entry *msr)
+{
+       msr->data = 0;
+
+       switch (msr->index) {
+       case MSR_F10H_DECFG:
+               if (boot_cpu_has(X86_FEATURE_LFENCE_RDTSC))
+                       msr->data |= MSR_F10H_DECFG_LFENCE_SERIALIZE;
+               break;
+       default:
+               return 1;
+       }
+
+       return 0;
+}
+
 static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 {
        struct vcpu_svm *svm = to_svm(vcpu);
@@ -3935,9 +3964,6 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 
                msr_info->data = svm->spec_ctrl;
                break;
-       case MSR_IA32_UCODE_REV:
-               msr_info->data = 0x01000065;
-               break;
        case MSR_F15H_IC_CFG: {
 
                int family, model;
@@ -3955,6 +3981,9 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
                        msr_info->data = 0x1E;
                }
                break;
+       case MSR_F10H_DECFG:
+               msr_info->data = svm->msr_decfg;
+               break;
        default:
                return kvm_get_msr_common(vcpu, msr_info);
        }
@@ -4133,6 +4162,24 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
        case MSR_VM_IGNNE:
                vcpu_unimpl(vcpu, "unimplemented wrmsr: 0x%x data 0x%llx\n", ecx, data);
                break;
+       case MSR_F10H_DECFG: {
+               struct kvm_msr_entry msr_entry;
+
+               msr_entry.index = msr->index;
+               if (svm_get_msr_feature(&msr_entry))
+                       return 1;
+
+               /* Check the supported bits */
+               if (data & ~msr_entry.data)
+                       return 1;
+
+               /* Don't allow the guest to change a bit, #GP */
+               if (!msr->host_initiated && (data ^ msr_entry.data))
+                       return 1;
+
+               svm->msr_decfg = data;
+               break;
+       }
        case MSR_IA32_APICBASE:
                if (kvm_vcpu_apicv_active(vcpu))
                        avic_update_vapic_bar(to_svm(vcpu), data);
@@ -4541,7 +4588,7 @@ static int (*const svm_exit_handlers[])(struct vcpu_svm *svm) = {
        [SVM_EXIT_MWAIT]                        = mwait_interception,
        [SVM_EXIT_XSETBV]                       = xsetbv_interception,
        [SVM_EXIT_NPF]                          = npf_interception,
-       [SVM_EXIT_RSM]                          = emulate_on_interception,
+       [SVM_EXIT_RSM]                          = rsm_interception,
        [SVM_EXIT_AVIC_INCOMPLETE_IPI]          = avic_incomplete_ipi_interception,
        [SVM_EXIT_AVIC_UNACCELERATED_ACCESS]    = avic_unaccelerated_access_interception,
 };
@@ -5355,7 +5402,7 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
         * being speculatively taken.
         */
        if (svm->spec_ctrl)
-               wrmsrl(MSR_IA32_SPEC_CTRL, svm->spec_ctrl);
+               native_wrmsrl(MSR_IA32_SPEC_CTRL, svm->spec_ctrl);
 
        asm volatile (
                "push %%" _ASM_BP "; \n\t"
@@ -5464,11 +5511,11 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
         * If the L02 MSR bitmap does not intercept the MSR, then we need to
         * save it.
         */
-       if (!msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL))
-               rdmsrl(MSR_IA32_SPEC_CTRL, svm->spec_ctrl);
+       if (unlikely(!msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL)))
+               svm->spec_ctrl = native_read_msr(MSR_IA32_SPEC_CTRL);
 
        if (svm->spec_ctrl)
-               wrmsrl(MSR_IA32_SPEC_CTRL, 0);
+               native_wrmsrl(MSR_IA32_SPEC_CTRL, 0);
 
        /* Eliminate branch target predictions from guest mode */
        vmexit_fill_RSB();
@@ -6236,16 +6283,18 @@ e_free:
 
 static int sev_launch_measure(struct kvm *kvm, struct kvm_sev_cmd *argp)
 {
+       void __user *measure = (void __user *)(uintptr_t)argp->data;
        struct kvm_sev_info *sev = &kvm->arch.sev_info;
        struct sev_data_launch_measure *data;
        struct kvm_sev_launch_measure params;
+       void __user *p = NULL;
        void *blob = NULL;
        int ret;
 
        if (!sev_guest(kvm))
                return -ENOTTY;
 
-       if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data, sizeof(params)))
+       if (copy_from_user(&params, measure, sizeof(params)))
                return -EFAULT;
 
        data = kzalloc(sizeof(*data), GFP_KERNEL);
@@ -6256,17 +6305,13 @@ static int sev_launch_measure(struct kvm *kvm, struct kvm_sev_cmd *argp)
        if (!params.len)
                goto cmd;
 
-       if (params.uaddr) {
+       p = (void __user *)(uintptr_t)params.uaddr;
+       if (p) {
                if (params.len > SEV_FW_BLOB_MAX_SIZE) {
                        ret = -EINVAL;
                        goto e_free;
                }
 
-               if (!access_ok(VERIFY_WRITE, params.uaddr, params.len)) {
-                       ret = -EFAULT;
-                       goto e_free;
-               }
-
                ret = -ENOMEM;
                blob = kmalloc(params.len, GFP_KERNEL);
                if (!blob)
@@ -6290,13 +6335,13 @@ cmd:
                goto e_free_blob;
 
        if (blob) {
-               if (copy_to_user((void __user *)(uintptr_t)params.uaddr, blob, params.len))
+               if (copy_to_user(p, blob, params.len))
                        ret = -EFAULT;
        }
 
 done:
        params.len = data->len;
-       if (copy_to_user((void __user *)(uintptr_t)argp->data, &params, sizeof(params)))
+       if (copy_to_user(measure, &params, sizeof(params)))
                ret = -EFAULT;
 e_free_blob:
        kfree(blob);
@@ -6597,7 +6642,7 @@ static int sev_launch_secret(struct kvm *kvm, struct kvm_sev_cmd *argp)
        struct page **pages;
        void *blob, *hdr;
        unsigned long n;
-       int ret;
+       int ret, offset;
 
        if (!sev_guest(kvm))
                return -ENOTTY;
@@ -6623,6 +6668,10 @@ static int sev_launch_secret(struct kvm *kvm, struct kvm_sev_cmd *argp)
        if (!data)
                goto e_unpin_memory;
 
+       offset = params.guest_uaddr & (PAGE_SIZE - 1);
+       data->guest_address = __sme_page_pa(pages[0]) + offset;
+       data->guest_len = params.guest_len;
+
        blob = psp_copy_user_blob(params.trans_uaddr, params.trans_len);
        if (IS_ERR(blob)) {
                ret = PTR_ERR(blob);
@@ -6637,8 +6686,8 @@ static int sev_launch_secret(struct kvm *kvm, struct kvm_sev_cmd *argp)
                ret = PTR_ERR(hdr);
                goto e_free_blob;
        }
-       data->trans_address = __psp_pa(blob);
-       data->trans_len = params.trans_len;
+       data->hdr_address = __psp_pa(hdr);
+       data->hdr_len = params.hdr_len;
 
        data->handle = sev->handle;
        ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_UPDATE_SECRET, data, &argp->error);
@@ -6821,6 +6870,7 @@ static struct kvm_x86_ops svm_x86_ops __ro_after_init = {
        .vcpu_unblocking = svm_vcpu_unblocking,
 
        .update_bp_intercept = update_bp_intercept,
+       .get_msr_feature = svm_get_msr_feature,
        .get_msr = svm_get_msr,
        .set_msr = svm_set_msr,
        .get_segment_base = svm_get_segment_base,
index 3dec126..051dab7 100644 (file)
@@ -51,6 +51,7 @@
 #include <asm/apic.h>
 #include <asm/irq_remapping.h>
 #include <asm/mmu_context.h>
+#include <asm/microcode.h>
 #include <asm/nospec-branch.h>
 
 #include "trace.h"
@@ -3226,6 +3227,11 @@ static inline bool vmx_feature_control_msr_valid(struct kvm_vcpu *vcpu,
        return !(val & ~valid_bits);
 }
 
+static int vmx_get_msr_feature(struct kvm_msr_entry *msr)
+{
+       return 1;
+}
+
 /*
  * Reads an msr value (of 'msr_index') into 'pdata'.
  * Returns 0 on success, non-0 otherwise.
@@ -4485,7 +4491,8 @@ static int vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
                vmcs_set_bits(SECONDARY_VM_EXEC_CONTROL,
                              SECONDARY_EXEC_DESC);
                hw_cr4 &= ~X86_CR4_UMIP;
-       } else
+       } else if (!is_guest_mode(vcpu) ||
+                  !nested_cpu_has2(get_vmcs12(vcpu), SECONDARY_EXEC_DESC))
                vmcs_clear_bits(SECONDARY_VM_EXEC_CONTROL,
                                SECONDARY_EXEC_DESC);
 
@@ -5765,6 +5772,7 @@ static void vmx_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
        vmx->rmode.vm86_active = 0;
        vmx->spec_ctrl = 0;
 
+       vcpu->arch.microcode_version = 0x100000000ULL;
        vmx->vcpu.arch.regs[VCPU_REGS_RDX] = get_rdx_init_val();
        kvm_set_cr8(vcpu, 0);
 
@@ -9452,7 +9460,7 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
         * being speculatively taken.
         */
        if (vmx->spec_ctrl)
-               wrmsrl(MSR_IA32_SPEC_CTRL, vmx->spec_ctrl);
+               native_wrmsrl(MSR_IA32_SPEC_CTRL, vmx->spec_ctrl);
 
        vmx->__launched = vmx->loaded_vmcs->launched;
        asm(
@@ -9587,11 +9595,11 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
         * If the L02 MSR bitmap does not intercept the MSR, then we need to
         * save it.
         */
-       if (!msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL))
-               rdmsrl(MSR_IA32_SPEC_CTRL, vmx->spec_ctrl);
+       if (unlikely(!msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL)))
+               vmx->spec_ctrl = native_read_msr(MSR_IA32_SPEC_CTRL);
 
        if (vmx->spec_ctrl)
-               wrmsrl(MSR_IA32_SPEC_CTRL, 0);
+               native_wrmsrl(MSR_IA32_SPEC_CTRL, 0);
 
        /* Eliminate branch target predictions from guest mode */
        vmexit_fill_RSB();
@@ -11199,7 +11207,12 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
        if (ret)
                return ret;
 
-       if (vmcs12->guest_activity_state == GUEST_ACTIVITY_HLT)
+       /*
+        * If we're entering a halted L2 vcpu and the L2 vcpu won't be woken
+        * by event injection, halt vcpu.
+        */
+       if ((vmcs12->guest_activity_state == GUEST_ACTIVITY_HLT) &&
+           !(vmcs12->vm_entry_intr_info_field & INTR_INFO_VALID_MASK))
                return kvm_vcpu_halt(vcpu);
 
        vmx->nested.nested_run_pending = 1;
@@ -12290,6 +12303,7 @@ static struct kvm_x86_ops vmx_x86_ops __ro_after_init = {
        .vcpu_put = vmx_vcpu_put,
 
        .update_bp_intercept = update_exception_bitmap,
+       .get_msr_feature = vmx_get_msr_feature,
        .get_msr = vmx_get_msr,
        .set_msr = vmx_set_msr,
        .get_segment_base = vmx_get_segment_base,
index c8a0b54..18b5ca7 100644 (file)
@@ -1049,6 +1049,45 @@ static u32 emulated_msrs[] = {
 
 static unsigned num_emulated_msrs;
 
+/*
+ * List of msr numbers which are used to expose MSR-based features that
+ * can be used by a hypervisor to validate requested CPU features.
+ */
+static u32 msr_based_features[] = {
+       MSR_F10H_DECFG,
+       MSR_IA32_UCODE_REV,
+};
+
+static unsigned int num_msr_based_features;
+
+static int kvm_get_msr_feature(struct kvm_msr_entry *msr)
+{
+       switch (msr->index) {
+       case MSR_IA32_UCODE_REV:
+               rdmsrl(msr->index, msr->data);
+               break;
+       default:
+               if (kvm_x86_ops->get_msr_feature(msr))
+                       return 1;
+       }
+       return 0;
+}
+
+static int do_get_msr_feature(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
+{
+       struct kvm_msr_entry msr;
+       int r;
+
+       msr.index = index;
+       r = kvm_get_msr_feature(&msr);
+       if (r)
+               return r;
+
+       *data = msr.data;
+
+       return 0;
+}
+
 bool kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer)
 {
        if (efer & efer_reserved_bits)
@@ -2222,7 +2261,6 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 
        switch (msr) {
        case MSR_AMD64_NB_CFG:
-       case MSR_IA32_UCODE_REV:
        case MSR_IA32_UCODE_WRITE:
        case MSR_VM_HSAVE_PA:
        case MSR_AMD64_PATCH_LOADER:
@@ -2230,6 +2268,10 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
        case MSR_AMD64_DC_CFG:
                break;
 
+       case MSR_IA32_UCODE_REV:
+               if (msr_info->host_initiated)
+                       vcpu->arch.microcode_version = data;
+               break;
        case MSR_EFER:
                return set_efer(vcpu, data);
        case MSR_K7_HWCR:
@@ -2525,7 +2567,7 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
                msr_info->data = 0;
                break;
        case MSR_IA32_UCODE_REV:
-               msr_info->data = 0x100000000ULL;
+               msr_info->data = vcpu->arch.microcode_version;
                break;
        case MSR_MTRRcap:
        case 0x200 ... 0x2ff:
@@ -2680,13 +2722,11 @@ static int __msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs *msrs,
                    int (*do_msr)(struct kvm_vcpu *vcpu,
                                  unsigned index, u64 *data))
 {
-       int i, idx;
+       int i;
 
-       idx = srcu_read_lock(&vcpu->kvm->srcu);
        for (i = 0; i < msrs->nmsrs; ++i)
                if (do_msr(vcpu, entries[i].index, &entries[i].data))
                        break;
-       srcu_read_unlock(&vcpu->kvm->srcu, idx);
 
        return i;
 }
@@ -2785,6 +2825,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
        case KVM_CAP_SET_BOOT_CPU_ID:
        case KVM_CAP_SPLIT_IRQCHIP:
        case KVM_CAP_IMMEDIATE_EXIT:
+       case KVM_CAP_GET_MSR_FEATURES:
                r = 1;
                break;
        case KVM_CAP_ADJUST_CLOCK:
@@ -2899,6 +2940,31 @@ long kvm_arch_dev_ioctl(struct file *filp,
                        goto out;
                r = 0;
                break;
+       case KVM_GET_MSR_FEATURE_INDEX_LIST: {
+               struct kvm_msr_list __user *user_msr_list = argp;
+               struct kvm_msr_list msr_list;
+               unsigned int n;
+
+               r = -EFAULT;
+               if (copy_from_user(&msr_list, user_msr_list, sizeof(msr_list)))
+                       goto out;
+               n = msr_list.nmsrs;
+               msr_list.nmsrs = num_msr_based_features;
+               if (copy_to_user(user_msr_list, &msr_list, sizeof(msr_list)))
+                       goto out;
+               r = -E2BIG;
+               if (n < msr_list.nmsrs)
+                       goto out;
+               r = -EFAULT;
+               if (copy_to_user(user_msr_list->indices, &msr_based_features,
+                                num_msr_based_features * sizeof(u32)))
+                       goto out;
+               r = 0;
+               break;
+       }
+       case KVM_GET_MSRS:
+               r = msr_io(NULL, argp, do_get_msr_feature, 1);
+               break;
        }
        default:
                r = -EINVAL;
@@ -3636,12 +3702,18 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
                r = 0;
                break;
        }
-       case KVM_GET_MSRS:
+       case KVM_GET_MSRS: {
+               int idx = srcu_read_lock(&vcpu->kvm->srcu);
                r = msr_io(vcpu, argp, do_get_msr, 1);
+               srcu_read_unlock(&vcpu->kvm->srcu, idx);
                break;
-       case KVM_SET_MSRS:
+       }
+       case KVM_SET_MSRS: {
+               int idx = srcu_read_lock(&vcpu->kvm->srcu);
                r = msr_io(vcpu, argp, do_set_msr, 0);
+               srcu_read_unlock(&vcpu->kvm->srcu, idx);
                break;
+       }
        case KVM_TPR_ACCESS_REPORTING: {
                struct kvm_tpr_access_ctl tac;
 
@@ -4464,6 +4536,19 @@ static void kvm_init_msr_list(void)
                j++;
        }
        num_emulated_msrs = j;
+
+       for (i = j = 0; i < ARRAY_SIZE(msr_based_features); i++) {
+               struct kvm_msr_entry msr;
+
+               msr.index = msr_based_features[i];
+               if (kvm_get_msr_feature(&msr))
+                       continue;
+
+               if (j < i)
+                       msr_based_features[j] = msr_based_features[i];
+               j++;
+       }
+       num_msr_based_features = j;
 }
 
 static int vcpu_mmio_write(struct kvm_vcpu *vcpu, gpa_t addr, int len,
@@ -8017,6 +8102,8 @@ void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
 
 void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
 {
+       kvm_lapic_reset(vcpu, init_event);
+
        vcpu->arch.hflags = 0;
 
        vcpu->arch.smi_pending = 0;
@@ -8460,10 +8547,8 @@ int __x86_set_memory_region(struct kvm *kvm, int id, gpa_t gpa, u32 size)
                        return r;
        }
 
-       if (!size) {
-               r = vm_munmap(old.userspace_addr, old.npages * PAGE_SIZE);
-               WARN_ON(r < 0);
-       }
+       if (!size)
+               vm_munmap(old.userspace_addr, old.npages * PAGE_SIZE);
 
        return 0;
 }
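
The kvm_arch_dev_ioctl() hunk above exposes the new feature-MSR list to userspace via KVM_GET_MSR_FEATURE_INDEX_LIST (and a vCPU-less KVM_GET_MSRS) on the /dev/kvm fd. As a rough illustration only (not part of this merge, and assuming a kernel plus uapi headers that carry these changes), a VMM could enumerate the list like so:

#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

int main(void)
{
        int kvm = open("/dev/kvm", O_RDWR);
        if (kvm < 0)
                return 1;

        /* First call with nmsrs == 0: the kernel writes back the required
         * count and fails with E2BIG, per the handler added above. */
        struct kvm_msr_list probe = { .nmsrs = 0 };
        ioctl(kvm, KVM_GET_MSR_FEATURE_INDEX_LIST, &probe);

        struct kvm_msr_list *list =
                malloc(sizeof(*list) + probe.nmsrs * sizeof(__u32));
        if (!list)
                return 1;
        list->nmsrs = probe.nmsrs;

        /* Second call with enough room: indices[] is filled in. */
        if (ioctl(kvm, KVM_GET_MSR_FEATURE_INDEX_LIST, list) == 0)
                for (__u32 i = 0; i < list->nmsrs; i++)
                        printf("feature MSR index: 0x%x\n", list->indices[i]);

        free(list);
        return 0;
}

The two-call size-probe pattern mirrors the existing KVM_GET_MSR_INDEX_LIST convention, which is exactly the -E2BIG handling visible in the hunk above.
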
index 91e9700..25a972c 100644 (file)
@@ -28,7 +28,6 @@ lib-$(CONFIG_INSTRUCTION_DECODER) += insn.o inat.o insn-eval.o
 lib-$(CONFIG_RANDOMIZE_BASE) += kaslr.o
 lib-$(CONFIG_FUNCTION_ERROR_INJECTION) += error-inject.o
 lib-$(CONFIG_RETPOLINE) += retpoline.o
-OBJECT_FILES_NON_STANDARD_retpoline.o :=y
 
 obj-y += msr.o msr-reg.o msr-reg-export.o hweight.o
 
index 480edc3..c909961 100644 (file)
@@ -7,7 +7,6 @@
 #include <asm/alternative-asm.h>
 #include <asm/export.h>
 #include <asm/nospec-branch.h>
-#include <asm/bitsperlong.h>
 
 .macro THUNK reg
        .section .text.__x86.indirect_thunk
@@ -47,58 +46,3 @@ GENERATE_THUNK(r13)
 GENERATE_THUNK(r14)
 GENERATE_THUNK(r15)
 #endif
-
-/*
- * Fill the CPU return stack buffer.
- *
- * Each entry in the RSB, if used for a speculative 'ret', contains an
- * infinite 'pause; lfence; jmp' loop to capture speculative execution.
- *
- * This is required in various cases for retpoline and IBRS-based
- * mitigations for the Spectre variant 2 vulnerability. Sometimes to
- * eliminate potentially bogus entries from the RSB, and sometimes
- * purely to ensure that it doesn't get empty, which on some CPUs would
- * allow predictions from other (unwanted!) sources to be used.
- *
- * Google experimented with loop-unrolling and this turned out to be
- * the optimal version - two calls, each with their own speculation
- * trap should their return address end up getting used, in a loop.
- */
-.macro STUFF_RSB nr:req sp:req
-       mov     $(\nr / 2), %_ASM_BX
-       .align 16
-771:
-       call    772f
-773:                                           /* speculation trap */
-       pause
-       lfence
-       jmp     773b
-       .align 16
-772:
-       call    774f
-775:                                           /* speculation trap */
-       pause
-       lfence
-       jmp     775b
-       .align 16
-774:
-       dec     %_ASM_BX
-       jnz     771b
-       add     $((BITS_PER_LONG/8) * \nr), \sp
-.endm
-
-#define RSB_FILL_LOOPS         16      /* To avoid underflow */
-
-ENTRY(__fill_rsb)
-       STUFF_RSB RSB_FILL_LOOPS, %_ASM_SP
-       ret
-END(__fill_rsb)
-EXPORT_SYMBOL_GPL(__fill_rsb)
-
-#define RSB_CLEAR_LOOPS                32      /* To forcibly overwrite all entries */
-
-ENTRY(__clear_rsb)
-       STUFF_RSB RSB_CLEAR_LOOPS, %_ASM_SP
-       ret
-END(__clear_rsb)
-EXPORT_SYMBOL_GPL(__clear_rsb)
index b9283cc..476d810 100644 (file)
@@ -163,4 +163,10 @@ void __init setup_cpu_entry_areas(void)
 
        for_each_possible_cpu(cpu)
                setup_cpu_entry_area(cpu);
+
+       /*
+        * This is the last essential update to swapper_pg_dir which needs
+        * to be synchronized to initial_page_table on 32-bit.
+        */
+       sync_initial_page_table();
 }
index 800de81..c88573d 100644 (file)
@@ -1248,10 +1248,6 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code,
        tsk = current;
        mm = tsk->mm;
 
-       /*
-        * Detect and handle instructions that would cause a page fault for
-        * both a tracked kernel page and a userspace page.
-        */
        prefetchw(&mm->mmap_sem);
 
        if (unlikely(kmmio_fault(regs, address)))
index 79cb066..396e1f0 100644 (file)
@@ -453,6 +453,21 @@ static inline void permanent_kmaps_init(pgd_t *pgd_base)
 }
 #endif /* CONFIG_HIGHMEM */
 
+void __init sync_initial_page_table(void)
+{
+       clone_pgd_range(initial_page_table + KERNEL_PGD_BOUNDARY,
+                       swapper_pg_dir     + KERNEL_PGD_BOUNDARY,
+                       KERNEL_PGD_PTRS);
+
+       /*
+        * Sync back the low identity map too; it is used, for example,
+        * in the 32-bit EFI stub.
+        */
+       clone_pgd_range(initial_page_table,
+                       swapper_pg_dir     + KERNEL_PGD_BOUNDARY,
+                       min(KERNEL_PGD_PTRS, KERNEL_PGD_BOUNDARY));
+}
+
 void __init native_pagetable_init(void)
 {
        unsigned long pfn, va;
index 01f682c..40a6085 100644 (file)
@@ -15,6 +15,7 @@
 #include <asm/page.h>
 #include <asm/processor-flags.h>
 #include <asm/msr-index.h>
+#include <asm/nospec-branch.h>
 
        .text
        .code64
@@ -59,6 +60,7 @@ ENTRY(sme_encrypt_execute)
        movq    %rax, %r8               /* Workarea encryption routine */
        addq    $PAGE_SIZE, %r8         /* Workarea intermediate copy buffer */
 
+       ANNOTATE_RETPOLINE_SAFE
        call    *%rax                   /* Call the encryption routine */
 
        pop     %r12
index 174c597..a7a7677 100644 (file)
@@ -460,7 +460,7 @@ static int nmi_setup(void)
                goto fail;
 
        for_each_possible_cpu(cpu) {
-               if (!cpu)
+               if (!IS_ENABLED(CONFIG_SMP) || !cpu)
                        continue;
 
                memcpy(per_cpu(cpu_msrs, cpu).counters,
index 2c67bae..fb1df94 100644 (file)
@@ -79,7 +79,7 @@ static void intel_mid_power_off(void)
 
 static void intel_mid_reboot(void)
 {
-       intel_scu_ipc_simple_command(IPCMSG_COLD_BOOT, 0);
+       intel_scu_ipc_simple_command(IPCMSG_COLD_RESET, 0);
 }
 
 static unsigned long __init intel_mid_calibrate_tsc(void)
index de53bd1..24bb759 100644 (file)
@@ -102,7 +102,7 @@ ENTRY(startup_32)
         * don't we'll eventually crash trying to execute encrypted
         * instructions.
         */
-       bt      $TH_FLAGS_SME_ACTIVE_BIT, pa_tr_flags
+       btl     $TH_FLAGS_SME_ACTIVE_BIT, pa_tr_flags
        jnc     .Ldone
        movl    $MSR_K8_SYSCFG, %ecx
        rdmsr
index c047f42..3c2c253 100644 (file)
@@ -1376,8 +1376,6 @@ asmlinkage __visible void __init xen_start_kernel(void)
 
        if (!xen_initial_domain()) {
                add_preferred_console("xenboot", 0, NULL);
-               add_preferred_console("tty", 0, NULL);
-               add_preferred_console("hvc", 0, NULL);
                if (pci_xen)
                        x86_init.pci.arch_init = pci_xen_init;
        } else {
@@ -1410,6 +1408,10 @@ asmlinkage __visible void __init xen_start_kernel(void)
 
                xen_boot_params_init_edd();
        }
+
+       add_preferred_console("tty", 0, NULL);
+       add_preferred_console("hvc", 0, NULL);
+
 #ifdef CONFIG_PCI
        /* PCI BIOS service won't work from a PV guest. */
        pci_probe &= ~PCI_PROBE_BIOS;
index d9f96cc..1d83152 100644 (file)
@@ -1,12 +1,15 @@
 // SPDX-License-Identifier: GPL-2.0
 #include <linux/types.h>
 #include <linux/tick.h>
+#include <linux/percpu-defs.h>
 
 #include <xen/xen.h>
 #include <xen/interface/xen.h>
 #include <xen/grant_table.h>
 #include <xen/events.h>
 
+#include <asm/cpufeatures.h>
+#include <asm/msr-index.h>
 #include <asm/xen/hypercall.h>
 #include <asm/xen/page.h>
 #include <asm/fixmap.h>
@@ -15,6 +18,8 @@
 #include "mmu.h"
 #include "pmu.h"
 
+static DEFINE_PER_CPU(u64, spec_ctrl);
+
 void xen_arch_pre_suspend(void)
 {
        xen_save_time_memory_area();
@@ -35,6 +40,9 @@ void xen_arch_post_suspend(int cancelled)
 
 static void xen_vcpu_notify_restore(void *data)
 {
+       if (xen_pv_domain() && boot_cpu_has(X86_FEATURE_SPEC_CTRL))
+               wrmsrl(MSR_IA32_SPEC_CTRL, this_cpu_read(spec_ctrl));
+
        /* Boot processor notified via generic timekeeping_resume() */
        if (smp_processor_id() == 0)
                return;
@@ -44,7 +52,15 @@ static void xen_vcpu_notify_restore(void *data)
 
 static void xen_vcpu_notify_suspend(void *data)
 {
+       u64 tmp;
+
        tick_suspend_local();
+
+       if (xen_pv_domain() && boot_cpu_has(X86_FEATURE_SPEC_CTRL)) {
+               rdmsrl(MSR_IA32_SPEC_CTRL, tmp);
+               this_cpu_write(spec_ctrl, tmp);
+               wrmsrl(MSR_IA32_SPEC_CTRL, 0);
+       }
 }
 
 void xen_arch_resume(void)
index 623720a..732631c 100644 (file)
@@ -16,6 +16,7 @@
  */
 
 #include <linux/dma-contiguous.h>
+#include <linux/dma-direct.h>
 #include <linux/gfp.h>
 #include <linux/highmem.h>
 #include <linux/mm.h>
@@ -123,7 +124,7 @@ static void *xtensa_dma_alloc(struct device *dev, size_t size,
                              unsigned long attrs)
 {
        unsigned long ret;
-       unsigned long uncached = 0;
+       unsigned long uncached;
        unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
        struct page *page = NULL;
 
@@ -144,15 +145,27 @@ static void *xtensa_dma_alloc(struct device *dev, size_t size,
        if (!page)
                return NULL;
 
-       ret = (unsigned long)page_address(page);
+       *handle = phys_to_dma(dev, page_to_phys(page));
 
-       /* We currently don't support coherent memory outside KSEG */
+#ifdef CONFIG_MMU
+       if (PageHighMem(page)) {
+               void *p;
 
+               p = dma_common_contiguous_remap(page, size, VM_MAP,
+                                               pgprot_noncached(PAGE_KERNEL),
+                                               __builtin_return_address(0));
+               if (!p) {
+                       if (!dma_release_from_contiguous(dev, page, count))
+                               __free_pages(page, get_order(size));
+               }
+               return p;
+       }
+#endif
+       ret = (unsigned long)page_address(page);
        BUG_ON(ret < XCHAL_KSEG_CACHED_VADDR ||
               ret > XCHAL_KSEG_CACHED_VADDR + XCHAL_KSEG_SIZE - 1);
 
        uncached = ret + XCHAL_KSEG_BYPASS_VADDR - XCHAL_KSEG_CACHED_VADDR;
-       *handle = virt_to_bus((void *)ret);
        __invalidate_dcache_range(ret, size);
 
        return (void *)uncached;
@@ -161,13 +174,20 @@ static void *xtensa_dma_alloc(struct device *dev, size_t size,
 static void xtensa_dma_free(struct device *dev, size_t size, void *vaddr,
                            dma_addr_t dma_handle, unsigned long attrs)
 {
-       unsigned long addr = (unsigned long)vaddr +
-               XCHAL_KSEG_CACHED_VADDR - XCHAL_KSEG_BYPASS_VADDR;
-       struct page *page = virt_to_page(addr);
        unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
-
-       BUG_ON(addr < XCHAL_KSEG_CACHED_VADDR ||
-              addr > XCHAL_KSEG_CACHED_VADDR + XCHAL_KSEG_SIZE - 1);
+       unsigned long addr = (unsigned long)vaddr;
+       struct page *page;
+
+       if (addr >= XCHAL_KSEG_BYPASS_VADDR &&
+           addr - XCHAL_KSEG_BYPASS_VADDR < XCHAL_KSEG_SIZE) {
+               addr += XCHAL_KSEG_CACHED_VADDR - XCHAL_KSEG_BYPASS_VADDR;
+               page = virt_to_page(addr);
+       } else {
+#ifdef CONFIG_MMU
+               dma_common_free_remap(vaddr, size, VM_MAP);
+#endif
+               page = pfn_to_page(PHYS_PFN(dma_to_phys(dev, dma_handle)));
+       }
 
        if (!dma_release_from_contiguous(dev, page, count))
                __free_pages(page, get_order(size));
index d776ec0..34aead7 100644 (file)
@@ -79,19 +79,75 @@ void __init zones_init(void)
        free_area_init_node(0, zones_size, ARCH_PFN_OFFSET, NULL);
 }
 
+#ifdef CONFIG_HIGHMEM
+static void __init free_area_high(unsigned long pfn, unsigned long end)
+{
+       for (; pfn < end; pfn++)
+               free_highmem_page(pfn_to_page(pfn));
+}
+
+static void __init free_highpages(void)
+{
+       unsigned long max_low = max_low_pfn;
+       struct memblock_region *mem, *res;
+
+       reset_all_zones_managed_pages();
+       /* set highmem page free */
+       for_each_memblock(memory, mem) {
+               unsigned long start = memblock_region_memory_base_pfn(mem);
+               unsigned long end = memblock_region_memory_end_pfn(mem);
+
+               /* Ignore complete lowmem entries */
+               if (end <= max_low)
+                       continue;
+
+               if (memblock_is_nomap(mem))
+                       continue;
+
+               /* Truncate partial highmem entries */
+               if (start < max_low)
+                       start = max_low;
+
+               /* Find and exclude any reserved regions */
+               for_each_memblock(reserved, res) {
+                       unsigned long res_start, res_end;
+
+                       res_start = memblock_region_reserved_base_pfn(res);
+                       res_end = memblock_region_reserved_end_pfn(res);
+
+                       if (res_end < start)
+                               continue;
+                       if (res_start < start)
+                               res_start = start;
+                       if (res_start > end)
+                               res_start = end;
+                       if (res_end > end)
+                               res_end = end;
+                       if (res_start != start)
+                               free_area_high(start, res_start);
+                       start = res_end;
+                       if (start == end)
+                               break;
+               }
+
+               /* And now free anything which remains */
+               if (start < end)
+                       free_area_high(start, end);
+       }
+}
+#else
+static void __init free_highpages(void)
+{
+}
+#endif
+
 /*
  * Initialize memory pages.
  */
 
 void __init mem_init(void)
 {
-#ifdef CONFIG_HIGHMEM
-       unsigned long tmp;
-
-       reset_all_zones_managed_pages();
-       for (tmp = max_low_pfn; tmp < max_pfn; tmp++)
-               free_highmem_page(pfn_to_page(tmp));
-#endif
+       free_highpages();
 
        max_mapnr = max_pfn - ARCH_PFN_OFFSET;
        high_memory = (void *)__va(max_low_pfn << PAGE_SHIFT);
index 4117524..c2033a2 100644 (file)
@@ -812,7 +812,6 @@ int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
        struct gendisk *disk;
        struct request_queue *q;
        struct blkcg_gq *blkg;
-       struct module *owner;
        unsigned int major, minor;
        int key_len, part, ret;
        char *body;
@@ -904,9 +903,7 @@ fail_unlock:
        spin_unlock_irq(q->queue_lock);
        rcu_read_unlock();
 fail:
-       owner = disk->fops->owner;
-       put_disk(disk);
-       module_put(owner);
+       put_disk_and_module(disk);
        /*
         * If queue was bypassing, we should retry.  Do so after a
         * short msleep().  It isn't strictly necessary but queue
@@ -931,13 +928,9 @@ EXPORT_SYMBOL_GPL(blkg_conf_prep);
 void blkg_conf_finish(struct blkg_conf_ctx *ctx)
        __releases(ctx->disk->queue->queue_lock) __releases(rcu)
 {
-       struct module *owner;
-
        spin_unlock_irq(ctx->disk->queue->queue_lock);
        rcu_read_unlock();
-       owner = ctx->disk->fops->owner;
-       put_disk(ctx->disk);
-       module_put(owner);
+       put_disk_and_module(ctx->disk);
 }
 EXPORT_SYMBOL_GPL(blkg_conf_finish);
 
index 2d1a7bb..6d82c4f 100644 (file)
@@ -2434,7 +2434,7 @@ blk_qc_t submit_bio(struct bio *bio)
                unsigned int count;
 
                if (unlikely(bio_op(bio) == REQ_OP_WRITE_SAME))
-                       count = queue_logical_block_size(bio->bi_disk->queue);
+                       count = queue_logical_block_size(bio->bi_disk->queue) >> 9;
                else
                        count = bio_sectors(bio);
 
index 3574927..16e83e6 100644 (file)
@@ -712,7 +712,6 @@ static void __blk_mq_requeue_request(struct request *rq)
 
        trace_block_rq_requeue(q, rq);
        wbt_requeue(q->rq_wb, &rq->issue_stat);
-       blk_mq_sched_requeue_request(rq);
 
        if (blk_mq_rq_state(rq) != MQ_RQ_IDLE) {
                blk_mq_rq_update_state(rq, MQ_RQ_IDLE);
@@ -725,6 +724,9 @@ void blk_mq_requeue_request(struct request *rq, bool kick_requeue_list)
 {
        __blk_mq_requeue_request(rq);
 
+       /* this request will be re-inserted into the io scheduler queue */
+       blk_mq_sched_requeue_request(rq);
+
        BUG_ON(blk_queued_rq(rq));
        blk_mq_add_to_requeue_list(rq, true, kick_requeue_list);
 }
index 88a53c1..9656f9e 100644 (file)
@@ -547,7 +547,7 @@ static int exact_lock(dev_t devt, void *data)
 {
        struct gendisk *p = data;
 
-       if (!get_disk(p))
+       if (!get_disk_and_module(p))
                return -1;
        return 0;
 }
@@ -717,6 +717,11 @@ void del_gendisk(struct gendisk *disk)
        blk_integrity_del(disk);
        disk_del_events(disk);
 
+       /*
+        * Block lookups of the disk until all bdevs are unhashed and the
+        * disk is marked as dead (GENHD_FL_UP cleared).
+        */
+       down_write(&disk->lookup_sem);
        /* invalidate stuff */
        disk_part_iter_init(&piter, disk,
                             DISK_PITER_INCL_EMPTY | DISK_PITER_REVERSE);
@@ -731,6 +736,7 @@ void del_gendisk(struct gendisk *disk)
        bdev_unhash_inode(disk_devt(disk));
        set_capacity(disk, 0);
        disk->flags &= ~GENHD_FL_UP;
+       up_write(&disk->lookup_sem);
 
        if (!(disk->flags & GENHD_FL_HIDDEN))
                sysfs_remove_link(&disk_to_dev(disk)->kobj, "bdi");
@@ -809,16 +815,28 @@ struct gendisk *get_gendisk(dev_t devt, int *partno)
 
                spin_lock_bh(&ext_devt_lock);
                part = idr_find(&ext_devt_idr, blk_mangle_minor(MINOR(devt)));
-               if (part && get_disk(part_to_disk(part))) {
+               if (part && get_disk_and_module(part_to_disk(part))) {
                        *partno = part->partno;
                        disk = part_to_disk(part);
                }
                spin_unlock_bh(&ext_devt_lock);
        }
 
-       if (disk && unlikely(disk->flags & GENHD_FL_HIDDEN)) {
-               put_disk(disk);
+       if (!disk)
+               return NULL;
+
+       /*
+        * Synchronize with del_gendisk() so that we do not return a disk
+        * that is being destroyed.
+        */
+       down_read(&disk->lookup_sem);
+       if (unlikely((disk->flags & GENHD_FL_HIDDEN) ||
+                    !(disk->flags & GENHD_FL_UP))) {
+               up_read(&disk->lookup_sem);
+               put_disk_and_module(disk);
                disk = NULL;
+       } else {
+               up_read(&disk->lookup_sem);
        }
        return disk;
 }
@@ -1418,6 +1436,7 @@ struct gendisk *__alloc_disk_node(int minors, int node_id)
                        kfree(disk);
                        return NULL;
                }
+               init_rwsem(&disk->lookup_sem);
                disk->node_id = node_id;
                if (disk_expand_part_tbl(disk, 0)) {
                        free_part_stats(&disk->part0);
@@ -1453,7 +1472,7 @@ struct gendisk *__alloc_disk_node(int minors, int node_id)
 }
 EXPORT_SYMBOL(__alloc_disk_node);
 
-struct kobject *get_disk(struct gendisk *disk)
+struct kobject *get_disk_and_module(struct gendisk *disk)
 {
        struct module *owner;
        struct kobject *kobj;
@@ -1471,17 +1490,30 @@ struct kobject *get_disk(struct gendisk *disk)
        return kobj;
 
 }
-
-EXPORT_SYMBOL(get_disk);
+EXPORT_SYMBOL(get_disk_and_module);
 
 void put_disk(struct gendisk *disk)
 {
        if (disk)
                kobject_put(&disk_to_dev(disk)->kobj);
 }
-
 EXPORT_SYMBOL(put_disk);
 
+/*
+ * This is a counterpart of get_disk_and_module() and thus also of
+ * get_gendisk().
+ */
+void put_disk_and_module(struct gendisk *disk)
+{
+       if (disk) {
+               struct module *owner = disk->fops->owner;
+
+               put_disk(disk);
+               module_put(owner);
+       }
+}
+EXPORT_SYMBOL(put_disk_and_module);
+
 static void set_disk_ro_uevent(struct gendisk *gd, int ro)
 {
        char event[] = "DISK_RO=1";
index 1668506..3884d81 100644 (file)
@@ -225,7 +225,7 @@ static int blk_ioctl_discard(struct block_device *bdev, fmode_t mode,
 
        if (start + len > i_size_read(bdev->bd_inode))
                return -EINVAL;
-       truncate_inode_pages_range(mapping, start, start + len);
+       truncate_inode_pages_range(mapping, start, start + len - 1);
        return blkdev_issue_discard(bdev, start >> 9, len >> 9,
                                    GFP_KERNEL, flags);
 }
index f95c607..0d6d25e 100644 (file)
@@ -833,6 +833,7 @@ static struct elevator_type kyber_sched = {
                .limit_depth = kyber_limit_depth,
                .prepare_request = kyber_prepare_request,
                .finish_request = kyber_finish_request,
+               .requeue_request = kyber_finish_request,
                .completed_request = kyber_completed_request,
                .dispatch_request = kyber_dispatch_request,
                .has_work = kyber_has_work,
index c56f211..8ec0ba9 100644 (file)
@@ -535,13 +535,22 @@ static void dd_insert_requests(struct blk_mq_hw_ctx *hctx,
        spin_unlock(&dd->lock);
 }
 
+/*
+ * Nothing to do here. This is defined only to ensure that the
+ * .finish_request method is called upon request completion.
+ */
+static void dd_prepare_request(struct request *rq, struct bio *bio)
+{
+}
+
 /*
  * For zoned block devices, write unlock the target zone of
  * completed write requests. Do this while holding the zone lock
  * spinlock so that the zone is never unlocked while deadline_fifo_request()
- * while deadline_next_request() are executing.
+ * or deadline_next_request() are executing. This function is called for
+ * all requests, whether or not these requests complete successfully.
  */
-static void dd_completed_request(struct request *rq)
+static void dd_finish_request(struct request *rq)
 {
        struct request_queue *q = rq->q;
 
@@ -756,7 +765,8 @@ static struct elevator_type mq_deadline = {
        .ops.mq = {
                .insert_requests        = dd_insert_requests,
                .dispatch_request       = dd_dispatch_request,
-               .completed_request      = dd_completed_request,
+               .prepare_request        = dd_prepare_request,
+               .finish_request         = dd_finish_request,
                .next_request           = elv_rb_latter_request,
                .former_request         = elv_rb_former_request,
                .bio_merge              = dd_bio_merge,
index 91622db..08dabcd 100644 (file)
@@ -51,6 +51,12 @@ const char *bdevname(struct block_device *bdev, char *buf)
 
 EXPORT_SYMBOL(bdevname);
 
+const char *bio_devname(struct bio *bio, char *buf)
+{
+       return disk_name(bio->bi_disk, bio->bi_partno, buf);
+}
+EXPORT_SYMBOL(bio_devname);
+
 /*
  * There's very little reason to use this, you should really
  * have a struct block_device just about everywhere and use
index 9ed51d0..e4929ee 100644 (file)
@@ -490,7 +490,7 @@ static int opal_discovery0_end(struct opal_dev *dev)
 
        if (!found_com_id) {
                pr_debug("Could not find OPAL comid for device. Returning early\n");
-               return -EOPNOTSUPP;;
+               return -EOPNOTSUPP;
        }
 
        dev->comid = comid;
index e5aa62f..3aaf6af 100644 (file)
@@ -1758,7 +1758,7 @@ static struct kobject *floppy_find(dev_t dev, int *part, void *data)
        if (unit[drive].type->code == FD_NODRIVE)
                return NULL;
        *part = 0;
-       return get_disk(unit[drive].gendisk);
+       return get_disk_and_module(unit[drive].gendisk);
 }
 
 static int __init amiga_floppy_probe(struct platform_device *pdev)
index 8bc3b9f..dfb2c26 100644 (file)
@@ -1917,7 +1917,7 @@ static struct kobject *floppy_find(dev_t dev, int *part, void *data)
        if (drive >= FD_MAX_UNITS || type > NUM_DISK_MINORS)
                return NULL;
        *part = 0;
-       return get_disk(unit[drive].disk);
+       return get_disk_and_module(unit[drive].disk);
 }
 
 static int __init atari_floppy_init (void)
index 8028a3a..deea78e 100644 (file)
@@ -456,7 +456,7 @@ static struct kobject *brd_probe(dev_t dev, int *part, void *data)
 
        mutex_lock(&brd_devices_mutex);
        brd = brd_init_one(MINOR(dev) / max_part, &new);
-       kobj = brd ? get_disk(brd->brd_disk) : NULL;
+       kobj = brd ? get_disk_and_module(brd->brd_disk) : NULL;
        mutex_unlock(&brd_devices_mutex);
 
        if (new)
index eae484a..8ec7235 100644 (file)
@@ -4505,7 +4505,7 @@ static struct kobject *floppy_find(dev_t dev, int *part, void *data)
        if (((*part >> 2) & 0x1f) >= ARRAY_SIZE(floppy_type))
                return NULL;
        *part = 0;
-       return get_disk(disks[drive]);
+       return get_disk_and_module(disks[drive]);
 }
 
 static int __init do_floppy_init(void)
index d5fe720..87855b5 100644 (file)
@@ -1922,7 +1922,7 @@ static struct kobject *loop_probe(dev_t dev, int *part, void *data)
        if (err < 0)
                kobj = NULL;
        else
-               kobj = get_disk(lo->lo_disk);
+               kobj = get_disk_and_module(lo->lo_disk);
        mutex_unlock(&loop_index_mutex);
 
        *part = 0;
index 5f2a424..86258b0 100644 (file)
@@ -1591,7 +1591,7 @@ again:
                        if (new_index < 0) {
                                mutex_unlock(&nbd_index_mutex);
                                printk(KERN_ERR "nbd: failed to add new device\n");
-                               return ret;
+                               return new_index;
                        }
                        nbd = idr_find(&nbd_index_idr, new_index);
                }
index 531a091..c61d20c 100644 (file)
@@ -1122,7 +1122,7 @@ static int pkt_start_recovery(struct packet_data *pkt)
        pkt->sector = new_sector;
 
        bio_reset(pkt->bio);
-       bio_set_set(pkt->bio, pd->bdev);
+       bio_set_dev(pkt->bio, pd->bdev);
        bio_set_op_attrs(pkt->bio, REQ_OP_WRITE, 0);
        pkt->bio->bi_iter.bi_sector = new_sector;
        pkt->bio->bi_iter.bi_size = pkt->frames * CD_FRAMESIZE;
index 84434d3..64e066e 100644 (file)
@@ -799,7 +799,7 @@ static struct kobject *floppy_find(dev_t dev, int *part, void *data)
                return NULL;
 
        *part = 0;
-       return get_disk(swd->unit[drive].disk);
+       return get_disk_and_module(swd->unit[drive].disk);
 }
 
 static int swim_add_floppy(struct swim_priv *swd, enum drive_location location)
index 41c95c9..8f9130a 100644 (file)
@@ -332,7 +332,7 @@ static const struct block_device_operations z2_fops =
 static struct kobject *z2_find(dev_t dev, int *part, void *data)
 {
        *part = 0;
-       return get_disk(z2ram_gendisk);
+       return get_disk_and_module(z2ram_gendisk);
 }
 
 static struct request_queue *z2_queue;
index c8e9ae6..fa4ce83 100644 (file)
@@ -21,6 +21,7 @@
  *
  */
 
+#include <linux/dmi.h>
 #include <linux/module.h>
 #include <linux/usb.h>
 #include <linux/usb/quirks.h>
@@ -383,6 +384,21 @@ static const struct usb_device_id blacklist_table[] = {
        { }     /* Terminating entry */
 };
 
+/* The Bluetooth USB module built into some devices needs to be reset on resume;
+ * this is a problem with the platform (likely shutting off all power), not with
+ * the module itself. So we use a DMI list to match known broken platforms.
+ */
+static const struct dmi_system_id btusb_needs_reset_resume_table[] = {
+       {
+               /* Lenovo Yoga 920 (QCA Rome device 0cf3:e300) */
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+                       DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo YOGA 920"),
+               },
+       },
+       {}
+};
+
 #define BTUSB_MAX_ISOC_FRAMES  10
 
 #define BTUSB_INTR_RUNNING     0
@@ -2955,6 +2971,9 @@ static int btusb_probe(struct usb_interface *intf,
        hdev->send   = btusb_send_frame;
        hdev->notify = btusb_notify;
 
+       if (dmi_check_system(btusb_needs_reset_resume_table))
+               interface_to_usbdev(intf)->quirks |= USB_QUIRK_RESET_RESUME;
+
 #ifdef CONFIG_PM
        err = btusb_config_oob_wake(hdev);
        if (err)
@@ -3041,12 +3060,6 @@ static int btusb_probe(struct usb_interface *intf,
        if (id->driver_info & BTUSB_QCA_ROME) {
                data->setup_on_usb = btusb_setup_qca;
                hdev->set_bdaddr = btusb_set_bdaddr_ath3012;
-
-               /* QCA Rome devices lose their updated firmware over suspend,
-                * but the USB hub doesn't notice any status change.
-                * explicitly request a device reset on resume.
-                */
-               interface_to_usbdev(intf)->quirks |= USB_QUIRK_RESET_RESUME;
        }
 
 #ifdef CONFIG_BT_HCIBTUSB_RTL
index 0438a64..6314dfb 100644 (file)
@@ -922,12 +922,13 @@ static int bcm_get_resources(struct bcm_device *dev)
 
        dev->clk = devm_clk_get(dev->dev, NULL);
 
-       dev->device_wakeup = devm_gpiod_get(dev->dev, "device-wakeup",
-                                           GPIOD_OUT_LOW);
+       dev->device_wakeup = devm_gpiod_get_optional(dev->dev, "device-wakeup",
+                                                    GPIOD_OUT_LOW);
        if (IS_ERR(dev->device_wakeup))
                return PTR_ERR(dev->device_wakeup);
 
-       dev->shutdown = devm_gpiod_get(dev->dev, "shutdown", GPIOD_OUT_LOW);
+       dev->shutdown = devm_gpiod_get_optional(dev->dev, "shutdown",
+                                               GPIOD_OUT_LOW);
        if (IS_ERR(dev->shutdown))
                return PTR_ERR(dev->shutdown);
 
index 4d46003..cdaeeea 100644 (file)
@@ -630,7 +630,7 @@ static int sysc_init_dts_quirks(struct sysc *ddata)
        for (i = 0; i < ARRAY_SIZE(sysc_dts_quirks); i++) {
                prop = of_get_property(np, sysc_dts_quirks[i].name, &len);
                if (!prop)
-                       break;
+                       continue;
 
                ddata->cfg.quirks |= sysc_dts_quirks[i].mask;
        }
index 4d1dc8b..f95b9c7 100644 (file)
@@ -457,7 +457,7 @@ static int st33zp24_recv(struct tpm_chip *chip, unsigned char *buf,
                            size_t count)
 {
        int size = 0;
-       int expected;
+       u32 expected;
 
        if (!chip)
                return -EBUSY;
@@ -474,7 +474,7 @@ static int st33zp24_recv(struct tpm_chip *chip, unsigned char *buf,
        }
 
        expected = be32_to_cpu(*(__be32 *)(buf + 2));
-       if (expected > count) {
+       if (expected > count || expected < TPM_HEADER_SIZE) {
                size = -EIO;
                goto out;
        }
index 76df4fb..9e80a95 100644 (file)
@@ -1190,6 +1190,10 @@ int tpm_get_random(struct tpm_chip *chip, u8 *out, size_t max)
                        break;
 
                recd = be32_to_cpu(tpm_cmd.params.getrandom_out.rng_data_len);
+               if (recd > num_bytes) {
+                       total = -EFAULT;
+                       break;
+               }
 
                rlength = be32_to_cpu(tpm_cmd.header.out.length);
                if (rlength < offsetof(struct tpm_getrandom_out, rng_data) +
index c17e753..a700f8f 100644 (file)
@@ -683,6 +683,10 @@ static int tpm2_unseal_cmd(struct tpm_chip *chip,
        if (!rc) {
                data_len = be16_to_cpup(
                        (__be16 *) &buf.data[TPM_HEADER_SIZE + 4]);
+               if (data_len < MIN_KEY_SIZE ||  data_len > MAX_KEY_SIZE + 1) {
+                       rc = -EFAULT;
+                       goto out;
+               }
 
                rlength = be32_to_cpu(((struct tpm2_cmd *)&buf)
                                        ->header.out.length);
index c1dd39e..6116cd0 100644 (file)
@@ -473,7 +473,8 @@ static int recv_data(struct tpm_chip *chip, u8 *buf, size_t count)
 static int tpm_tis_i2c_recv(struct tpm_chip *chip, u8 *buf, size_t count)
 {
        int size = 0;
-       int expected, status;
+       int status;
+       u32 expected;
 
        if (count < TPM_HEADER_SIZE) {
                size = -EIO;
@@ -488,7 +489,7 @@ static int tpm_tis_i2c_recv(struct tpm_chip *chip, u8 *buf, size_t count)
        }
 
        expected = be32_to_cpu(*(__be32 *)(buf + 2));
-       if ((size_t) expected > count) {
+       if (((size_t) expected > count) || (expected < TPM_HEADER_SIZE)) {
                size = -EIO;
                goto out;
        }
index c642877..caa86b1 100644 (file)
@@ -281,7 +281,11 @@ static int i2c_nuvoton_recv(struct tpm_chip *chip, u8 *buf, size_t count)
        struct device *dev = chip->dev.parent;
        struct i2c_client *client = to_i2c_client(dev);
        s32 rc;
-       int expected, status, burst_count, retries, size = 0;
+       int status;
+       int burst_count;
+       int retries;
+       int size = 0;
+       u32 expected;
 
        if (count < TPM_HEADER_SIZE) {
                i2c_nuvoton_ready(chip);    /* return to idle */
@@ -323,7 +327,7 @@ static int i2c_nuvoton_recv(struct tpm_chip *chip, u8 *buf, size_t count)
                 * to machine native
                 */
                expected = be32_to_cpu(*(__be32 *) (buf + 2));
-               if (expected > count) {
+               if (expected > count || expected < size) {
                        dev_err(dev, "%s() expected > count\n", __func__);
                        size = -EIO;
                        continue;
index 183a5f5..da074e3 100644 (file)
@@ -270,7 +270,8 @@ static int tpm_tis_recv(struct tpm_chip *chip, u8 *buf, size_t count)
 {
        struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev);
        int size = 0;
-       int expected, status;
+       int status;
+       u32 expected;
 
        if (count < TPM_HEADER_SIZE) {
                size = -EIO;
@@ -285,7 +286,7 @@ static int tpm_tis_recv(struct tpm_chip *chip, u8 *buf, size_t count)
        }
 
        expected = be32_to_cpu(*(__be32 *) (buf + 2));
-       if (expected > count) {
+       if (expected > count || expected < TPM_HEADER_SIZE) {
                size = -EIO;
                goto out;
        }
index 4927355..471b428 100644 (file)
@@ -251,9 +251,14 @@ static irqreturn_t timer_irq_handler(int irq, void *dev_id)
        int irq_reenable = clockevent_state_periodic(evt);
 
        /*
-        * Any write to CTRL reg ACks the interrupt, we rewrite the
-        * Count when [N]ot [H]alted bit.
-        * And re-arm it if perioid by [I]nterrupt [E]nable bit
+        * 1. ACK the interrupt
+        *    - For ARC700, any write to the CTRL reg ACKs it, so just rewrite
+        *      the Count-when-[N]ot-[H]alted bit.
+        *    - For HS3x, it is a bit subtle. On a taken count-down interrupt,
+        *      IP bit [3] is set and needs to be cleared for the ACK.
+        *      The write below can only update the other two bits, hence it
+        *      explicitly clears the IP bit.
+        * 2. Re-arm the interrupt, if periodic, by writing to IE bit [0]
         */
        write_aux_reg(ARC_REG_TIMER0_CTRL, irq_reenable | TIMER_CTRL_NH);
 
index 3ee7e6f..846d18d 100644 (file)
@@ -281,7 +281,7 @@ static int __init __ftm_clk_init(struct device_node *np, char *cnt_name,
 
 static unsigned long __init ftm_clk_init(struct device_node *np)
 {
-       unsigned long freq;
+       long freq;
 
        freq = __ftm_clk_init(np, "ftm-evt-counter-en", "ftm-evt");
        if (freq <= 0)
index a04808a..986b679 100644 (file)
@@ -166,7 +166,7 @@ static int __init __gic_clocksource_init(void)
 
        /* Set clocksource mask. */
        count_width = read_gic_config() & GIC_CONFIG_COUNTBITS;
-       count_width >>= __fls(GIC_CONFIG_COUNTBITS);
+       count_width >>= __ffs(GIC_CONFIG_COUNTBITS);
        count_width *= 4;
        count_width += 32;
        gic_clocksource.mask = CLOCKSOURCE_MASK(count_width);
@@ -205,12 +205,12 @@ static int __init gic_clocksource_of_init(struct device_node *node)
        } else if (of_property_read_u32(node, "clock-frequency",
                                        &gic_frequency)) {
                pr_err("GIC frequency not specified.\n");
-               return -EINVAL;;
+               return -EINVAL;
        }
        gic_timer_irq = irq_of_parse_and_map(node, 0);
        if (!gic_timer_irq) {
                pr_err("GIC timer IRQ not specified.\n");
-               return -EINVAL;;
+               return -EINVAL;
        }
 
        ret = __gic_clocksource_init();
index 2a3fe83..3b56ea3 100644 (file)
@@ -334,7 +334,7 @@ static int __init sun5i_timer_init(struct device_node *node)
        timer_base = of_io_request_and_map(node, 0, of_node_full_name(node));
        if (IS_ERR(timer_base)) {
                pr_err("Can't map registers\n");
-               return PTR_ERR(timer_base);;
+               return PTR_ERR(timer_base);
        }
 
        irq = irq_of_parse_and_map(node, 0);
index 3a88e33..fb586e0 100644 (file)
@@ -44,10 +44,10 @@ config ARM_DT_BL_CPUFREQ
 
 config ARM_SCPI_CPUFREQ
        tristate "SCPI based CPUfreq driver"
-       depends on ARM_BIG_LITTLE_CPUFREQ && ARM_SCPI_PROTOCOL && COMMON_CLK_SCPI
+       depends on ARM_SCPI_PROTOCOL && COMMON_CLK_SCPI
        help
-         This adds the CPUfreq driver support for ARM big.LITTLE platforms
-         using SCPI protocol for CPU power management.
+         This adds the CPUfreq driver support for ARM platforms using SCPI
+         protocol for CPU power management.
 
          This driver uses SCPI Message Protocol driver to interact with the
          firmware providing the CPU DVFS functionality.
index 7b596fa..6bebc1f 100644 (file)
@@ -351,7 +351,13 @@ struct clk *s3c_cpufreq_clk_get(struct device *dev, const char *name)
 static int s3c_cpufreq_init(struct cpufreq_policy *policy)
 {
        policy->clk = clk_arm;
-       return cpufreq_generic_init(policy, ftab, cpu_cur.info->latency);
+
+       policy->cpuinfo.transition_latency = cpu_cur.info->latency;
+
+       if (ftab)
+               return cpufreq_table_validate_and_show(policy, ftab);
+
+       return 0;
 }
 
 static int __init s3c_cpufreq_initclks(void)
index c32a833..d300a16 100644 (file)
@@ -51,15 +51,23 @@ static unsigned int scpi_cpufreq_get_rate(unsigned int cpu)
 static int
 scpi_cpufreq_set_target(struct cpufreq_policy *policy, unsigned int index)
 {
+       unsigned long freq = policy->freq_table[index].frequency;
        struct scpi_data *priv = policy->driver_data;
-       u64 rate = policy->freq_table[index].frequency * 1000;
+       u64 rate = freq * 1000;
        int ret;
 
        ret = clk_set_rate(priv->clk, rate);
-       if (!ret && (clk_get_rate(priv->clk) != rate))
-               ret = -EIO;
 
-       return ret;
+       if (ret)
+               return ret;
+
+       if (clk_get_rate(priv->clk) != rate)
+               return -EIO;
+
+       arch_set_freq_scale(policy->related_cpus, freq,
+                           policy->cpuinfo.max_freq);
+
+       return 0;
 }
 
 static int
index fcfa5b1..b3afb6c 100644 (file)
@@ -211,7 +211,7 @@ static int __sev_platform_shutdown_locked(int *error)
 {
        int ret;
 
-       ret = __sev_do_cmd_locked(SEV_CMD_SHUTDOWN, 0, error);
+       ret = __sev_do_cmd_locked(SEV_CMD_SHUTDOWN, NULL, error);
        if (ret)
                return ret;
 
@@ -271,7 +271,7 @@ static int sev_ioctl_do_reset(struct sev_issue_cmd *argp)
                        return rc;
        }
 
-       return __sev_do_cmd_locked(SEV_CMD_FACTORY_RESET, 0, &argp->error);
+       return __sev_do_cmd_locked(SEV_CMD_FACTORY_RESET, NULL, &argp->error);
 }
 
 static int sev_ioctl_do_platform_status(struct sev_issue_cmd *argp)
@@ -299,7 +299,7 @@ static int sev_ioctl_do_pek_pdh_gen(int cmd, struct sev_issue_cmd *argp)
                        return rc;
        }
 
-       return __sev_do_cmd_locked(cmd, 0, &argp->error);
+       return __sev_do_cmd_locked(cmd, NULL, &argp->error);
 }
 
 static int sev_ioctl_do_pek_csr(struct sev_issue_cmd *argp)
@@ -624,7 +624,7 @@ EXPORT_SYMBOL_GPL(sev_guest_decommission);
 
 int sev_guest_df_flush(int *error)
 {
-       return sev_do_cmd(SEV_CMD_DF_FLUSH, 0, error);
+       return sev_do_cmd(SEV_CMD_DF_FLUSH, NULL, error);
 }
 EXPORT_SYMBOL_GPL(sev_guest_df_flush);
 
index 473af69..ecdc292 100644 (file)
@@ -246,12 +246,6 @@ long dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff, long nr_pages,
 {
        long avail;
 
-       /*
-        * The device driver is allowed to sleep, in order to make the
-        * memory directly accessible.
-        */
-       might_sleep();
-
        if (!dax_dev)
                return -EOPNOTSUPP;
 
index f34430f..8721002 100644 (file)
@@ -279,7 +279,7 @@ static const u32 correrrthrsld[] = {
  * sbridge structs
  */
 
-#define NUM_CHANNELS           4       /* Max channels per MC */
+#define NUM_CHANNELS           6       /* Max channels per MC */
 #define MAX_DIMMS              3       /* Max DIMMS per channel */
 #define KNL_MAX_CHAS           38      /* KNL max num. of Cache Home Agents */
 #define KNL_MAX_CHANNELS       6       /* KNL max num. of PCI channels */
index 564bb7a..84e5a9d 100644 (file)
@@ -241,6 +241,19 @@ struct gpio_desc *of_find_gpio(struct device *dev, const char *con_id,
 
                desc = of_get_named_gpiod_flags(dev->of_node, prop_name, idx,
                                                &of_flags);
+               /*
+                * -EPROBE_DEFER in our case means that we found a
+                * valid GPIO property, but no controller has been
+                * registered so far.
+                *
+                * This means we don't need to look any further for
+                * alternate name conventions, and we should really
+                * preserve the return code for our user to be able to
+                * retry probing later.
+                */
+               if (IS_ERR(desc) && PTR_ERR(desc) == -EPROBE_DEFER)
+                       return desc;
+
                if (!IS_ERR(desc) || (PTR_ERR(desc) != -ENOENT))
                        break;
        }
@@ -250,7 +263,7 @@ struct gpio_desc *of_find_gpio(struct device *dev, const char *con_id,
                desc = of_find_spi_gpio(dev, con_id, &of_flags);
 
        /* Special handling for regulator GPIOs if used */
-       if (IS_ERR(desc))
+       if (IS_ERR(desc) && PTR_ERR(desc) != -EPROBE_DEFER)
                desc = of_find_regulator_gpio(dev, con_id, &of_flags);
 
        if (IS_ERR(desc))
index d5a2eef..74edba1 100644 (file)
@@ -1156,7 +1156,7 @@ static inline void amdgpu_set_ib_value(struct amdgpu_cs_parser *p,
 /*
  * Writeback
  */
-#define AMDGPU_MAX_WB 512      /* Reserve at most 512 WB slots for amdgpu-owned rings. */
+#define AMDGPU_MAX_WB 128      /* Reserve at most 128 WB slots for amdgpu-owned rings. */
 
 struct amdgpu_wb {
        struct amdgpu_bo        *wb_obj;
index 00a50cc..af1b879 100644 (file)
@@ -492,7 +492,7 @@ static int amdgpu_device_wb_init(struct amdgpu_device *adev)
                memset(&adev->wb.used, 0, sizeof(adev->wb.used));
 
                /* clear wb memory */
-               memset((char *)adev->wb.wb, 0, AMDGPU_MAX_WB * sizeof(uint32_t));
+               memset((char *)adev->wb.wb, 0, AMDGPU_MAX_WB * sizeof(uint32_t) * 8);
        }
 
        return 0;
@@ -530,8 +530,9 @@ int amdgpu_device_wb_get(struct amdgpu_device *adev, u32 *wb)
  */
 void amdgpu_device_wb_free(struct amdgpu_device *adev, u32 wb)
 {
+       wb >>= 3;
        if (wb < adev->wb.num_wb)
-               __clear_bit(wb >> 3, adev->wb.used);
+               __clear_bit(wb, adev->wb.used);
 }
 
 /**
@@ -1455,11 +1456,6 @@ static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
        for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
                if (!adev->ip_blocks[i].status.hw)
                        continue;
-               if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
-                       amdgpu_free_static_csa(adev);
-                       amdgpu_device_wb_fini(adev);
-                       amdgpu_device_vram_scratch_fini(adev);
-               }
 
                if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
                        adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE) {
@@ -1486,6 +1482,13 @@ static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
        for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
                if (!adev->ip_blocks[i].status.sw)
                        continue;
+
+               if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
+                       amdgpu_free_static_csa(adev);
+                       amdgpu_device_wb_fini(adev);
+                       amdgpu_device_vram_scratch_fini(adev);
+               }
+
                r = adev->ip_blocks[i].version->funcs->sw_fini((void *)adev);
                /* XXX handle errors */
                if (r) {
@@ -2284,14 +2287,6 @@ int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
                                drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
                        }
                        drm_modeset_unlock_all(dev);
-               } else {
-                       /*
-                        * There is no equivalent atomic helper to turn on
-                        * display, so we defined our own function for this,
-                        * once suspend resume is supported by the atomic
-                        * framework this will be reworked
-                        */
-                       amdgpu_dm_display_resume(adev);
                }
        }
 
@@ -2726,7 +2721,6 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
        if (amdgpu_device_has_dc_support(adev)) {
                if (drm_atomic_helper_resume(adev->ddev, state))
                        dev_info(adev->dev, "drm resume failed:%d\n", r);
-               amdgpu_dm_display_resume(adev);
        } else {
                drm_helper_resume_force_mode(adev->ddev);
        }
index e14ab34..7c2be32 100644 (file)
@@ -75,7 +75,7 @@ static int amdgpu_gtt_mgr_init(struct ttm_mem_type_manager *man,
 static int amdgpu_gtt_mgr_fini(struct ttm_mem_type_manager *man)
 {
        struct amdgpu_gtt_mgr *mgr = man->priv;
-
+       spin_lock(&mgr->lock);
        drm_mm_takedown(&mgr->mm);
        spin_unlock(&mgr->lock);
        kfree(mgr);
index 56bcd59..36483e0 100644 (file)
@@ -257,7 +257,8 @@ int amdgpu_irq_init(struct amdgpu_device *adev)
        r = drm_irq_install(adev->ddev, adev->ddev->pdev->irq);
        if (r) {
                adev->irq.installed = false;
-               flush_work(&adev->hotplug_work);
+               if (!amdgpu_device_has_dc_support(adev))
+                       flush_work(&adev->hotplug_work);
                cancel_work_sync(&adev->reset_work);
                return r;
        }
@@ -282,7 +283,8 @@ void amdgpu_irq_fini(struct amdgpu_device *adev)
                adev->irq.installed = false;
                if (adev->irq.msi_enabled)
                        pci_disable_msi(adev->pdev);
-               flush_work(&adev->hotplug_work);
+               if (!amdgpu_device_has_dc_support(adev))
+                       flush_work(&adev->hotplug_work);
                cancel_work_sync(&adev->reset_work);
        }
 
index 2719937..3b7e7af 100644 (file)
@@ -634,7 +634,7 @@ static int gmc_v9_0_late_init(void *handle)
        for(i = 0; i < AMDGPU_MAX_VMHUBS; ++i)
                BUG_ON(vm_inv_eng[i] > 16);
 
-       if (adev->asic_type == CHIP_VEGA10) {
+       if (adev->asic_type == CHIP_VEGA10 && !amdgpu_sriov_vf(adev)) {
                r = gmc_v9_0_ecc_available(adev);
                if (r == 1) {
                        DRM_INFO("ECC is active.\n");
@@ -682,7 +682,10 @@ static int gmc_v9_0_mc_init(struct amdgpu_device *adev)
        adev->mc.vram_width = amdgpu_atomfirmware_get_vram_width(adev);
        if (!adev->mc.vram_width) {
                /* hbm memory channel size */
-               chansize = 128;
+               if (adev->flags & AMD_IS_APU)
+                       chansize = 64;
+               else
+                       chansize = 128;
 
                tmp = RREG32_SOC15(DF, 0, mmDF_CS_AON0_DramBaseAddress0);
                tmp &= DF_CS_AON0_DramBaseAddress0__IntLvNumChan_MASK;
index e92fb37..91cf95a 100644 (file)
@@ -238,31 +238,27 @@ static uint64_t sdma_v4_0_ring_get_rptr(struct amdgpu_ring *ring)
 static uint64_t sdma_v4_0_ring_get_wptr(struct amdgpu_ring *ring)
 {
        struct amdgpu_device *adev = ring->adev;
-       u64 *wptr = NULL;
-       uint64_t local_wptr = 0;
+       u64 wptr;
 
        if (ring->use_doorbell) {
                /* XXX check if swapping is necessary on BE */
-               wptr = ((u64 *)&adev->wb.wb[ring->wptr_offs]);
-               DRM_DEBUG("wptr/doorbell before shift == 0x%016llx\n", *wptr);
-               *wptr = (*wptr) >> 2;
-               DRM_DEBUG("wptr/doorbell after shift == 0x%016llx\n", *wptr);
+               wptr = READ_ONCE(*((u64 *)&adev->wb.wb[ring->wptr_offs]));
+               DRM_DEBUG("wptr/doorbell before shift == 0x%016llx\n", wptr);
        } else {
                u32 lowbit, highbit;
                int me = (ring == &adev->sdma.instance[0].ring) ? 0 : 1;
 
-               wptr = &local_wptr;
                lowbit = RREG32(sdma_v4_0_get_reg_offset(adev, me, mmSDMA0_GFX_RB_WPTR)) >> 2;
                highbit = RREG32(sdma_v4_0_get_reg_offset(adev, me, mmSDMA0_GFX_RB_WPTR_HI)) >> 2;
 
                DRM_DEBUG("wptr [%i]high== 0x%08x low==0x%08x\n",
                                me, highbit, lowbit);
-               *wptr = highbit;
-               *wptr = (*wptr) << 32;
-               *wptr |= lowbit;
+               wptr = highbit;
+               wptr = wptr << 32;
+               wptr |= lowbit;
        }
 
-       return *wptr;
+       return wptr >> 2;
 }
 
 /**
index b2bfeda..9bab484 100644 (file)
@@ -1618,7 +1618,7 @@ static const struct amdgpu_ring_funcs uvd_v6_0_enc_ring_vm_funcs = {
        .set_wptr = uvd_v6_0_enc_ring_set_wptr,
        .emit_frame_size =
                4 + /* uvd_v6_0_enc_ring_emit_pipeline_sync */
-               6 + /* uvd_v6_0_enc_ring_emit_vm_flush */
+               5 + /* uvd_v6_0_enc_ring_emit_vm_flush */
                5 + 5 + /* uvd_v6_0_enc_ring_emit_fence x2 vm fence */
                1, /* uvd_v6_0_enc_ring_insert_end */
        .emit_ib_size = 5, /* uvd_v6_0_enc_ring_emit_ib */
index 1ce4c98..862835d 100644 (file)
@@ -629,11 +629,13 @@ static int dm_resume(void *handle)
 {
        struct amdgpu_device *adev = handle;
        struct amdgpu_display_manager *dm = &adev->dm;
+       int ret = 0;
 
        /* power on hardware */
        dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
 
-       return 0;
+       ret = amdgpu_dm_display_resume(adev);
+       return ret;
 }
 
 int amdgpu_dm_display_resume(struct amdgpu_device *adev)
index 61e8c3e..639421a 100644 (file)
@@ -718,7 +718,7 @@ static enum link_training_result perform_channel_equalization_sequence(
        uint32_t retries_ch_eq;
        enum dc_lane_count lane_count = lt_settings->link_settings.lane_count;
        union lane_align_status_updated dpcd_lane_status_updated = {{0}};
-       union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX] = {{{0}}};;
+       union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX] = {{{0}}};
 
        hw_tr_pattern = get_supported_tp(link);
 
@@ -1465,7 +1465,7 @@ void decide_link_settings(struct dc_stream_state *stream,
        /* MST doesn't perform link training for now
         * TODO: add MST specific link training routine
         */
-       if (is_mst_supported(link)) {
+       if (stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
                *link_setting = link->verified_link_cap;
                return;
        }
index 261811e..539c3e0 100644 (file)
@@ -197,7 +197,8 @@ bool dc_stream_set_cursor_attributes(
        for (i = 0; i < MAX_PIPES; i++) {
                struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[i];
 
-               if (pipe_ctx->stream != stream || (!pipe_ctx->plane_res.xfm && !pipe_ctx->plane_res.dpp))
+               if (pipe_ctx->stream != stream || (!pipe_ctx->plane_res.xfm &&
+                   !pipe_ctx->plane_res.dpp) || !pipe_ctx->plane_res.ipp)
                        continue;
                if (pipe_ctx->top_pipe && pipe_ctx->plane_state != pipe_ctx->top_pipe->plane_state)
                        continue;
@@ -273,7 +274,8 @@ bool dc_stream_set_cursor_position(
                if (pipe_ctx->stream != stream ||
                                (!pipe_ctx->plane_res.mi  && !pipe_ctx->plane_res.hubp) ||
                                !pipe_ctx->plane_state ||
-                               (!pipe_ctx->plane_res.xfm && !pipe_ctx->plane_res.dpp))
+                               (!pipe_ctx->plane_res.xfm && !pipe_ctx->plane_res.dpp) ||
+                               !pipe_ctx->plane_res.ipp)
                        continue;
 
                if (pipe_ctx->plane_state->address.type
index 4c3223a..adb6e7b 100644 (file)
@@ -162,7 +162,7 @@ static int pp_hw_init(void *handle)
                if(hwmgr->smumgr_funcs->start_smu(pp_handle->hwmgr)) {
                        pr_err("smc start failed\n");
                        hwmgr->smumgr_funcs->smu_fini(pp_handle->hwmgr);
-                       return -EINVAL;;
+                       return -EINVAL;
                }
                if (ret == PP_DPM_DISABLED)
                        goto exit;
index 41e42be..08e8a79 100644 (file)
@@ -2756,10 +2756,13 @@ static int smu7_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
                                    PHM_PlatformCaps_DisableMclkSwitchingForFrameLock);
 
 
-       disable_mclk_switching = ((1 < info.display_count) ||
-                                 disable_mclk_switching_for_frame_lock ||
-                                 smu7_vblank_too_short(hwmgr, mode_info.vblank_time_us) ||
-                                 (mode_info.refresh_rate > 120));
+       if (info.display_count == 0)
+               disable_mclk_switching = false;
+       else
+               disable_mclk_switching = ((1 < info.display_count) ||
+                                         disable_mclk_switching_for_frame_lock ||
+                                         smu7_vblank_too_short(hwmgr, mode_info.vblank_time_us) ||
+                                         (mode_info.refresh_rate > 120));
 
        sclk = smu7_ps->performance_levels[0].engine_clock;
        mclk = smu7_ps->performance_levels[0].memory_clock;
@@ -4534,13 +4537,6 @@ static int smu7_set_power_profile_state(struct pp_hwmgr *hwmgr,
        int tmp_result, result = 0;
        uint32_t sclk_mask = 0, mclk_mask = 0;
 
-       if (hwmgr->chip_id == CHIP_FIJI) {
-               if (request->type == AMD_PP_GFX_PROFILE)
-                       smu7_enable_power_containment(hwmgr);
-               else if (request->type == AMD_PP_COMPUTE_PROFILE)
-                       smu7_disable_power_containment(hwmgr);
-       }
-
        if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_AUTO)
                return -EINVAL;
 
index 2d55dab..5f9c3ef 100644 (file)
@@ -3168,10 +3168,13 @@ static int vega10_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
        disable_mclk_switching_for_vr = PP_CAP(PHM_PlatformCaps_DisableMclkSwitchForVR);
        force_mclk_high = PP_CAP(PHM_PlatformCaps_ForceMclkHigh);
 
-       disable_mclk_switching = (info.display_count > 1) ||
-                                   disable_mclk_switching_for_frame_lock ||
-                                   disable_mclk_switching_for_vr ||
-                                   force_mclk_high;
+       if (info.display_count == 0)
+               disable_mclk_switching = false;
+       else
+               disable_mclk_switching = (info.display_count > 1) ||
+                       disable_mclk_switching_for_frame_lock ||
+                       disable_mclk_switching_for_vr ||
+                       force_mclk_high;
 
        sclk = vega10_ps->performance_levels[0].gfx_clock;
        mclk = vega10_ps->performance_levels[0].mem_clock;
index 5a13ff2..c0530a1 100644 (file)
@@ -121,6 +121,10 @@ int drm_mode_addfb(struct drm_device *dev,
        r.pixel_format = drm_mode_legacy_fb_format(or->bpp, or->depth);
        r.handles[0] = or->handle;
 
+       if (r.pixel_format == DRM_FORMAT_XRGB2101010 &&
+           dev->driver->driver_features & DRIVER_PREFER_XBGR_30BPP)
+               r.pixel_format = DRM_FORMAT_XBGR2101010;
+
        ret = drm_mode_addfb2(dev, &r, file_priv);
        if (ret)
                return ret;
index 4401068..3ab1ace 100644 (file)
@@ -505,6 +505,8 @@ eb_add_vma(struct i915_execbuffer *eb, unsigned int i, struct i915_vma *vma)
                list_add_tail(&vma->exec_link, &eb->unbound);
                if (drm_mm_node_allocated(&vma->node))
                        err = i915_vma_unbind(vma);
+               if (unlikely(err))
+                       vma->exec_flags = NULL;
        }
        return err;
 }
@@ -2410,7 +2412,7 @@ err_request:
        if (out_fence) {
                if (err == 0) {
                        fd_install(out_fence_fd, out_fence->file);
-                       args->rsvd2 &= GENMASK_ULL(0, 31); /* keep in-fence */
+                       args->rsvd2 &= GENMASK_ULL(31, 0); /* keep in-fence */
                        args->rsvd2 |= (u64)out_fence_fd << 32;
                        out_fence_fd = -1;
                } else {
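
The rsvd2 change above is purely an argument-order fix: GENMASK_ULL() takes the high bit first, so GENMASK_ULL(0, 31) evaluates to 0 and wiped the in-fence fd instead of preserving it. A small userspace sketch of the corrected masking, using a simplified stand-in for the macro from <linux/bits.h> (the fd values are made up for illustration):

#include <stdint.h>
#include <stdio.h>

/* simplified stand-in for the kernel's GENMASK_ULL(h, l): bits h..l set */
#define GENMASK_ULL(h, l) \
	((~0ULL << (l)) & (~0ULL >> (63 - (h))))

int main(void)
{
	uint64_t rsvd2 = 0xdeadbeef00000005ULL;	/* bits 31..0 carry the in-fence fd */

	rsvd2 &= GENMASK_ULL(31, 0);		/* keep only the in-fence fd */
	rsvd2 |= (uint64_t)42 << 32;		/* publish the out-fence fd in bits 63..32 */

	/* with the old GENMASK_ULL(0, 31) the mask was 0 and the in-fence fd was lost */
	printf("rsvd2 = %#llx\n", (unsigned long long)rsvd2);
	return 0;
}
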
index e09d18d..a3e93d4 100644 (file)
@@ -476,8 +476,6 @@ void __i915_gem_request_submit(struct drm_i915_gem_request *request)
        GEM_BUG_ON(!irqs_disabled());
        lockdep_assert_held(&engine->timeline->lock);
 
-       trace_i915_gem_request_execute(request);
-
        /* Transfer from per-context onto the global per-engine timeline */
        timeline = engine->timeline;
        GEM_BUG_ON(timeline == request->timeline);
@@ -501,6 +499,8 @@ void __i915_gem_request_submit(struct drm_i915_gem_request *request)
        list_move_tail(&request->link, &timeline->requests);
        spin_unlock(&request->timeline->lock);
 
+       trace_i915_gem_request_execute(request);
+
        wake_up_all(&request->execute);
 }
 
index a2108e3..33eb0c5 100644 (file)
@@ -2027,7 +2027,7 @@ enum i915_power_well_id {
 #define _CNL_PORT_TX_DW5_LN0_AE                0x162454
 #define _CNL_PORT_TX_DW5_LN0_B         0x162654
 #define _CNL_PORT_TX_DW5_LN0_C         0x162C54
-#define _CNL_PORT_TX_DW5_LN0_D         0x162ED4
+#define _CNL_PORT_TX_DW5_LN0_D         0x162E54
 #define _CNL_PORT_TX_DW5_LN0_F         0x162854
 #define CNL_PORT_TX_DW5_GRP(port)      _MMIO_PORT6(port, \
                                                    _CNL_PORT_TX_DW5_GRP_AE, \
@@ -2058,7 +2058,7 @@ enum i915_power_well_id {
 #define _CNL_PORT_TX_DW7_LN0_AE                0x16245C
 #define _CNL_PORT_TX_DW7_LN0_B         0x16265C
 #define _CNL_PORT_TX_DW7_LN0_C         0x162C5C
-#define _CNL_PORT_TX_DW7_LN0_D         0x162EDC
+#define _CNL_PORT_TX_DW7_LN0_D         0x162E5C
 #define _CNL_PORT_TX_DW7_LN0_F         0x16285C
 #define CNL_PORT_TX_DW7_GRP(port)      _MMIO_PORT6(port, \
                                                    _CNL_PORT_TX_DW7_GRP_AE, \
index 522d54f..4a01f62 100644 (file)
@@ -779,11 +779,11 @@ static struct intel_encoder *get_saved_enc(struct drm_i915_private *dev_priv,
 {
        struct intel_encoder *encoder;
 
-       if (WARN_ON(pipe >= ARRAY_SIZE(dev_priv->av_enc_map)))
-               return NULL;
-
        /* MST */
        if (pipe >= 0) {
+               if (WARN_ON(pipe >= ARRAY_SIZE(dev_priv->av_enc_map)))
+                       return NULL;
+
                encoder = dev_priv->av_enc_map[pipe];
                /*
                 * when bootup, audio driver may not know it is
index 3e9bba4..6d8e3a9 100644 (file)
@@ -680,7 +680,7 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev)
        } else {
                dev_info(&pdev->dev,
                         "no iommu, fallback to phys contig buffers for scanout\n");
-               aspace = NULL;;
+               aspace = NULL;
        }
 
        pm_runtime_put_sync(&pdev->dev);
index dd8d435..caddce8 100644 (file)
@@ -4477,6 +4477,7 @@ nv50_display_create(struct drm_device *dev)
        nouveau_display(dev)->fini = nv50_display_fini;
        disp->disp = &nouveau_display(dev)->disp;
        dev->mode_config.funcs = &nv50_disp_func;
+       dev->driver->driver_features |= DRIVER_PREFER_XBGR_30BPP;
        if (nouveau_atomic)
                dev->driver->driver_features |= DRIVER_ATOMIC;
 
index 8d3e3d2..7828a5e 100644 (file)
@@ -1365,6 +1365,10 @@ int radeon_device_init(struct radeon_device *rdev,
        if ((rdev->flags & RADEON_IS_PCI) &&
            (rdev->family <= CHIP_RS740))
                rdev->need_dma32 = true;
+#ifdef CONFIG_PPC64
+       if (rdev->family == CHIP_CEDAR)
+               rdev->need_dma32 = true;
+#endif
 
        dma_bits = rdev->need_dma32 ? 32 : 40;
        r = pci_set_dma_mask(rdev->pdev, DMA_BIT_MASK(dma_bits));
index 326ad06..4b65425 100644 (file)
@@ -47,7 +47,6 @@ static bool radeon_pm_in_vbl(struct radeon_device *rdev);
 static bool radeon_pm_debug_check_in_vbl(struct radeon_device *rdev, bool finish);
 static void radeon_pm_update_profile(struct radeon_device *rdev);
 static void radeon_pm_set_clocks(struct radeon_device *rdev);
-static void radeon_pm_compute_clocks_dpm(struct radeon_device *rdev);
 
 int radeon_pm_get_type_index(struct radeon_device *rdev,
                             enum radeon_pm_state_type ps_type,
@@ -80,8 +79,6 @@ void radeon_pm_acpi_event_handler(struct radeon_device *rdev)
                                radeon_dpm_enable_bapm(rdev, rdev->pm.dpm.ac_power);
                }
                mutex_unlock(&rdev->pm.mutex);
-               /* allow new DPM state to be picked */
-               radeon_pm_compute_clocks_dpm(rdev);
        } else if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
                if (rdev->pm.profile == PM_PROFILE_AUTO) {
                        mutex_lock(&rdev->pm.mutex);
@@ -885,8 +882,7 @@ static struct radeon_ps *radeon_dpm_pick_power_state(struct radeon_device *rdev,
                dpm_state = POWER_STATE_TYPE_INTERNAL_3DPERF;
        /* balanced states don't exist at the moment */
        if (dpm_state == POWER_STATE_TYPE_BALANCED)
-               dpm_state = rdev->pm.dpm.ac_power ?
-                       POWER_STATE_TYPE_PERFORMANCE : POWER_STATE_TYPE_BATTERY;
+               dpm_state = POWER_STATE_TYPE_PERFORMANCE;
 
 restart_search:
        /* Pick the best power state based on current conditions */
index 2c18996..0d95888 100644 (file)
@@ -461,7 +461,7 @@ void drm_sched_hw_job_reset(struct drm_gpu_scheduler *sched, struct drm_sched_jo
 {
        struct drm_sched_job *s_job;
        struct drm_sched_entity *entity, *tmp;
-       int i;;
+       int i;
 
        spin_lock(&sched->job_list_lock);
        list_for_each_entry_reverse(s_job, &sched->ring_mirror_list, node) {
index 3c15cf2..b396011 100644 (file)
@@ -260,7 +260,7 @@ static void sun4i_tcon0_mode_set_common(struct sun4i_tcon *tcon,
                                        const struct drm_display_mode *mode)
 {
        /* Configure the dot clock */
-       clk_set_rate(tcon->dclk, mode->crtc_clock * 1000);
+       clk_set_rate_exclusive(tcon->dclk, mode->crtc_clock * 1000);
 
        /* Set the resolution */
        regmap_write(tcon->regs, SUN4I_TCON0_BASIC0_REG,
@@ -335,6 +335,9 @@ static void sun4i_tcon0_mode_set_lvds(struct sun4i_tcon *tcon,
        regmap_update_bits(tcon->regs, SUN4I_TCON_GCTL_REG,
                           SUN4I_TCON_GCTL_IOMAP_MASK,
                           SUN4I_TCON_GCTL_IOMAP_TCON0);
+
+       /* Enable the output on the pins */
+       regmap_write(tcon->regs, SUN4I_TCON0_IO_TRI_REG, 0xe0000000);
 }
 
 static void sun4i_tcon0_mode_set_rgb(struct sun4i_tcon *tcon,
@@ -418,7 +421,7 @@ static void sun4i_tcon1_mode_set(struct sun4i_tcon *tcon,
        WARN_ON(!tcon->quirks->has_channel_1);
 
        /* Configure the dot clock */
-       clk_set_rate(tcon->sclk1, mode->crtc_clock * 1000);
+       clk_set_rate_exclusive(tcon->sclk1, mode->crtc_clock * 1000);
 
        /* Adjust clock delay */
        clk_delay = sun4i_tcon_get_clk_delay(mode, 1);
index 5720a0d..677ac16 100644 (file)
@@ -197,6 +197,9 @@ static int virtio_gpu_getparam_ioctl(struct drm_device *dev, void *data,
        case VIRTGPU_PARAM_3D_FEATURES:
                value = vgdev->has_virgl_3d == true ? 1 : 0;
                break;
+       case VIRTGPU_PARAM_CAPSET_QUERY_FIX:
+               value = 1;
+               break;
        default:
                return -EINVAL;
        }
@@ -472,7 +475,7 @@ static int virtio_gpu_get_caps_ioctl(struct drm_device *dev,
 {
        struct virtio_gpu_device *vgdev = dev->dev_private;
        struct drm_virtgpu_get_caps *args = data;
-       int size;
+       unsigned size, host_caps_size;
        int i;
        int found_valid = -1;
        int ret;
@@ -481,6 +484,10 @@ static int virtio_gpu_get_caps_ioctl(struct drm_device *dev,
        if (vgdev->num_capsets == 0)
                return -ENOSYS;
 
+       /* don't allow userspace to pass 0 */
+       if (args->size == 0)
+               return -EINVAL;
+
        spin_lock(&vgdev->display_info_lock);
        for (i = 0; i < vgdev->num_capsets; i++) {
                if (vgdev->capsets[i].id == args->cap_set_id) {
@@ -496,11 +503,9 @@ static int virtio_gpu_get_caps_ioctl(struct drm_device *dev,
                return -EINVAL;
        }
 
-       size = vgdev->capsets[found_valid].max_size;
-       if (args->size > size) {
-               spin_unlock(&vgdev->display_info_lock);
-               return -EINVAL;
-       }
+       host_caps_size = vgdev->capsets[found_valid].max_size;
+       /* only copy to user the minimum of the host caps size or the guest caps size */
+       size = min(args->size, host_caps_size);
 
        list_for_each_entry(cache_ent, &vgdev->cap_cache, head) {
                if (cache_ent->id == args->cap_set_id &&
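
The virtio-gpu hunks above stop rejecting capset queries whose size differs from the host's and instead clamp the copy length, so a guest asking for less (or more) than the host exposes simply gets a truncated or short read. A rough userspace stand-in for that clamping (function name and types are illustrative, not the kernel code):

#include <stddef.h>
#include <string.h>

static size_t copy_caps(void *dst, size_t guest_size,
			const void *host_caps, size_t host_caps_size)
{
	/* copy no more than the host has, and no more than the guest asked for */
	size_t n = guest_size < host_caps_size ? guest_size : host_caps_size;

	memcpy(dst, host_caps, n);
	return n;
}
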
index 1d87757..d960790 100644 (file)
@@ -233,6 +233,7 @@ static int octeon_i2c_check_status(struct octeon_i2c *i2c, int final_read)
                return -EOPNOTSUPP;
 
        case STAT_TXDATA_NAK:
+       case STAT_BUS_ERROR:
                return -EIO;
        case STAT_TXADDR_NAK:
        case STAT_RXADDR_NAK:
index a7ef198..9bb9f64 100644 (file)
@@ -43,7 +43,7 @@
 #define TWSI_CTL_AAK           0x04    /* Assert ACK */
 
 /* Status values */
-#define STAT_ERROR             0x00
+#define STAT_BUS_ERROR         0x00
 #define STAT_START             0x08
 #define STAT_REP_START         0x10
 #define STAT_TXADDR_ACK                0x18
index 17fd55a..caa20eb 100644 (file)
@@ -928,7 +928,7 @@ static int exact_lock(dev_t dev, void *data)
 {
        struct gendisk *p = data;
 
-       if (!get_disk(p))
+       if (!get_disk_and_module(p))
                return -1;
        return 0;
 }
index 35a408d..99bc9bd 100644 (file)
@@ -205,7 +205,7 @@ static void intel_flush_svm_range_dev (struct intel_svm *svm, struct intel_svm_d
                         * for example, an "address" value of 0x12345f000 will
                         * flush from 0x123440000 to 0x12347ffff (256KiB). */
                        unsigned long last = address + ((unsigned long)(pages - 1) << VTD_PAGE_SHIFT);
-                       unsigned long mask = __rounddown_pow_of_two(address ^ last);;
+                       unsigned long mask = __rounddown_pow_of_two(address ^ last);
 
                        desc.high = QI_DEV_EIOTLB_ADDR((address & ~mask) | (mask - 1)) | QI_DEV_EIOTLB_SIZE;
                } else {
index 1a46b41..6422846 100644 (file)
@@ -659,11 +659,11 @@ static void do_bio_hook(struct search *s, struct bio *orig_bio)
 static void search_free(struct closure *cl)
 {
        struct search *s = container_of(cl, struct search, cl);
-       bio_complete(s);
 
        if (s->iop.bio)
                bio_put(s->iop.bio);
 
+       bio_complete(s);
        closure_debug_destroy(cl);
        mempool_free(s, s->d->c->search);
 }
index 3128957..4d1d8df 100644 (file)
@@ -1274,7 +1274,7 @@ static int flash_devs_run(struct cache_set *c)
        struct uuid_entry *u;
 
        for (u = c->uuids;
-            u < c->uuids + c->devices_max_used && !ret;
+            u < c->uuids + c->nr_uuids && !ret;
             u++)
                if (UUID_FLASH_ONLY(u))
                        ret = flash_dev_run(c, u);
index e40065b..0a7e99d 100644 (file)
@@ -157,7 +157,7 @@ static void multipath_status(struct seq_file *seq, struct mddev *mddev)
                seq_printf (seq, "%s", rdev && test_bit(In_sync, &rdev->flags) ? "U" : "_");
        }
        rcu_read_unlock();
-       seq_printf (seq, "]");
+       seq_putc(seq, ']');
 }
 
 static int multipath_congested(struct mddev *mddev, int bits)
index bc67ab6..254e44e 100644 (file)
@@ -801,6 +801,9 @@ void md_super_write(struct mddev *mddev, struct md_rdev *rdev,
        struct bio *bio;
        int ff = 0;
 
+       if (!page)
+               return;
+
        if (test_bit(Faulty, &rdev->flags))
                return;
 
@@ -5452,6 +5455,7 @@ int md_run(struct mddev *mddev)
         * the only valid external interface is through the md
         * device.
         */
+       mddev->has_superblocks = false;
        rdev_for_each(rdev, mddev) {
                if (test_bit(Faulty, &rdev->flags))
                        continue;
@@ -5465,6 +5469,9 @@ int md_run(struct mddev *mddev)
                                set_disk_ro(mddev->gendisk, 1);
                }
 
+               if (rdev->sb_page)
+                       mddev->has_superblocks = true;
+
                /* perform some consistency tests on the device.
                 * We don't want the data to overlap the metadata,
                 * Internal Bitmap issues have been handled elsewhere.
@@ -5497,8 +5504,10 @@ int md_run(struct mddev *mddev)
        }
        if (mddev->sync_set == NULL) {
                mddev->sync_set = bioset_create(BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS);
-               if (!mddev->sync_set)
-                       return -ENOMEM;
+               if (!mddev->sync_set) {
+                       err = -ENOMEM;
+                       goto abort;
+               }
        }
 
        spin_lock(&pers_lock);
@@ -5511,7 +5520,8 @@ int md_run(struct mddev *mddev)
                else
                        pr_warn("md: personality for level %s is not loaded!\n",
                                mddev->clevel);
-               return -EINVAL;
+               err = -EINVAL;
+               goto abort;
        }
        spin_unlock(&pers_lock);
        if (mddev->level != pers->level) {
@@ -5524,7 +5534,8 @@ int md_run(struct mddev *mddev)
            pers->start_reshape == NULL) {
                /* This personality cannot handle reshaping... */
                module_put(pers->owner);
-               return -EINVAL;
+               err = -EINVAL;
+               goto abort;
        }
 
        if (pers->sync_request) {
@@ -5593,7 +5604,7 @@ int md_run(struct mddev *mddev)
                mddev->private = NULL;
                module_put(pers->owner);
                bitmap_destroy(mddev);
-               return err;
+               goto abort;
        }
        if (mddev->queue) {
                bool nonrot = true;
@@ -5655,6 +5666,18 @@ int md_run(struct mddev *mddev)
        sysfs_notify_dirent_safe(mddev->sysfs_action);
        sysfs_notify(&mddev->kobj, NULL, "degraded");
        return 0;
+
+abort:
+       if (mddev->bio_set) {
+               bioset_free(mddev->bio_set);
+               mddev->bio_set = NULL;
+       }
+       if (mddev->sync_set) {
+               bioset_free(mddev->sync_set);
+               mddev->sync_set = NULL;
+       }
+
+       return err;
 }
 EXPORT_SYMBOL_GPL(md_run);
 
@@ -8049,6 +8072,7 @@ EXPORT_SYMBOL(md_done_sync);
 bool md_write_start(struct mddev *mddev, struct bio *bi)
 {
        int did_change = 0;
+
        if (bio_data_dir(bi) != WRITE)
                return true;
 
@@ -8081,6 +8105,8 @@ bool md_write_start(struct mddev *mddev, struct bio *bi)
        rcu_read_unlock();
        if (did_change)
                sysfs_notify_dirent_safe(mddev->sysfs_state);
+       if (!mddev->has_superblocks)
+               return true;
        wait_event(mddev->sb_wait,
                   !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags) ||
                   mddev->suspended);
@@ -8543,6 +8569,19 @@ void md_do_sync(struct md_thread *thread)
        set_mask_bits(&mddev->sb_flags, 0,
                      BIT(MD_SB_CHANGE_PENDING) | BIT(MD_SB_CHANGE_DEVS));
 
+       if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
+                       !test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
+                       mddev->delta_disks > 0 &&
+                       mddev->pers->finish_reshape &&
+                       mddev->pers->size &&
+                       mddev->queue) {
+               mddev_lock_nointr(mddev);
+               md_set_array_sectors(mddev, mddev->pers->size(mddev, 0, 0));
+               mddev_unlock(mddev);
+               set_capacity(mddev->gendisk, mddev->array_sectors);
+               revalidate_disk(mddev->gendisk);
+       }
+
        spin_lock(&mddev->lock);
        if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
                /* We completed so min/max setting can be forgotten if used. */
@@ -8569,6 +8608,10 @@ static int remove_and_add_spares(struct mddev *mddev,
        int removed = 0;
        bool remove_some = false;
 
+       if (this && test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
+               /* Mustn't remove devices when resync thread is running */
+               return 0;
+
        rdev_for_each(rdev, mddev) {
                if ((this == NULL || rdev == this) &&
                    rdev->raid_disk >= 0 &&
index 58cd20a..fbc925c 100644 (file)
@@ -468,6 +468,8 @@ struct mddev {
        void (*sync_super)(struct mddev *mddev, struct md_rdev *rdev);
        struct md_cluster_info          *cluster_info;
        unsigned int                    good_device_nr; /* good device num within cluster raid */
+
+       bool    has_superblocks:1;
 };
 
 enum recovery_flags {
index b2eae33..fe872dc 100644 (file)
@@ -1108,7 +1108,7 @@ static void alloc_behind_master_bio(struct r1bio *r1_bio,
 
        bio_copy_data(behind_bio, bio);
 skip_copy:
-       r1_bio->behind_master_bio = behind_bio;;
+       r1_bio->behind_master_bio = behind_bio;
        set_bit(R1BIO_BehindIO, &r1_bio->state);
 
        return;
@@ -1809,6 +1809,17 @@ static int raid1_remove_disk(struct mddev *mddev, struct md_rdev *rdev)
                        struct md_rdev *repl =
                                conf->mirrors[conf->raid_disks + number].rdev;
                        freeze_array(conf, 0);
+                       if (atomic_read(&repl->nr_pending)) {
+                               /* Some queued IO on the retry_list still holds
+                                * repl, so we must not set the replacement to
+                                * NULL here; that could lead to an rdev NULL
+                                * pointer dereference in sync_request_write and
+                                * handle_write_finished.
+                                */
+                               err = -EBUSY;
+                               unfreeze_array(conf);
+                               goto abort;
+                       }
                        clear_bit(Replacement, &repl->flags);
                        p->rdev = repl;
                        conf->mirrors[conf->raid_disks + number].rdev = NULL;
index c7294e7..eb84bc6 100644 (file)
 #define BARRIER_BUCKETS_NR_BITS                (PAGE_SHIFT - ilog2(sizeof(atomic_t)))
 #define BARRIER_BUCKETS_NR             (1<<BARRIER_BUCKETS_NR_BITS)
 
+/* Note: raid1_info.rdev can be set to NULL asynchronously by raid1_remove_disk.
+ * There are three safe ways to access raid1_info.rdev.
+ * 1/ when holding mddev->reconfig_mutex
+ * 2/ when resync/recovery is known to be happening - i.e. in code that is
+ *    called as part of performing resync/recovery.
+ * 3/ while holding rcu_read_lock(), use rcu_dereference to get the pointer
+ *    and if it is non-NULL, increment rdev->nr_pending before dropping the
+ *    RCU lock.
+ * When .rdev is set to NULL, the nr_pending count is checked again and if it
+ * has been incremented, the pointer is put back in .rdev.
+ */
+
 struct raid1_info {
        struct md_rdev  *rdev;
        sector_t        head_position;
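
Rule 3 in the note above is the pattern the hot paths follow; the same note is repeated for raid10 and raid5 below. A sketch of what that access looks like at a call site (the helper name is made up for illustration and error handling is trimmed):

/* illustrative only: pin a mirror's rdev across a window in which
 * raid1_remove_disk() may concurrently be clearing the pointer */
static struct md_rdev *pin_rdev(struct raid1_info *mirror)
{
	struct md_rdev *rdev;

	rcu_read_lock();
	rdev = rcu_dereference(mirror->rdev);
	if (rdev && !test_bit(Faulty, &rdev->flags))
		atomic_inc(&rdev->nr_pending);	/* safe to use after unlock */
	else
		rdev = NULL;
	rcu_read_unlock();

	return rdev;	/* caller releases with rdev_dec_pending() */
}
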
index 99c9207..c5e6c60 100644 (file)
@@ -141,7 +141,7 @@ static void r10bio_pool_free(void *r10_bio, void *data)
 #define RESYNC_WINDOW (1024*1024)
 /* maximum number of concurrent requests, memory permitting */
 #define RESYNC_DEPTH (32*1024*1024/RESYNC_BLOCK_SIZE)
-#define CLUSTER_RESYNC_WINDOW (16 * RESYNC_WINDOW)
+#define CLUSTER_RESYNC_WINDOW (32 * RESYNC_WINDOW)
 #define CLUSTER_RESYNC_WINDOW_SECTORS (CLUSTER_RESYNC_WINDOW >> 9)
 
 /*
@@ -2655,7 +2655,8 @@ static void handle_write_completed(struct r10conf *conf, struct r10bio *r10_bio)
                for (m = 0; m < conf->copies; m++) {
                        int dev = r10_bio->devs[m].devnum;
                        rdev = conf->mirrors[dev].rdev;
-                       if (r10_bio->devs[m].bio == NULL)
+                       if (r10_bio->devs[m].bio == NULL ||
+                               r10_bio->devs[m].bio->bi_end_io == NULL)
                                continue;
                        if (!r10_bio->devs[m].bio->bi_status) {
                                rdev_clear_badblocks(
@@ -2670,7 +2671,8 @@ static void handle_write_completed(struct r10conf *conf, struct r10bio *r10_bio)
                                        md_error(conf->mddev, rdev);
                        }
                        rdev = conf->mirrors[dev].replacement;
-                       if (r10_bio->devs[m].repl_bio == NULL)
+                       if (r10_bio->devs[m].repl_bio == NULL ||
+                               r10_bio->devs[m].repl_bio->bi_end_io == NULL)
                                continue;
 
                        if (!r10_bio->devs[m].repl_bio->bi_status) {
@@ -3782,7 +3784,7 @@ static int raid10_run(struct mddev *mddev)
                if (fc > 1 || fo > 0) {
                        pr_err("only near layout is supported by clustered"
                                " raid10\n");
-                       goto out;
+                       goto out_free_conf;
                }
        }
 
@@ -4830,17 +4832,11 @@ static void raid10_finish_reshape(struct mddev *mddev)
                return;
 
        if (mddev->delta_disks > 0) {
-               sector_t size = raid10_size(mddev, 0, 0);
-               md_set_array_sectors(mddev, size);
                if (mddev->recovery_cp > mddev->resync_max_sectors) {
                        mddev->recovery_cp = mddev->resync_max_sectors;
                        set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
                }
-               mddev->resync_max_sectors = size;
-               if (mddev->queue) {
-                       set_capacity(mddev->gendisk, mddev->array_sectors);
-                       revalidate_disk(mddev->gendisk);
-               }
+               mddev->resync_max_sectors = mddev->array_sectors;
        } else {
                int d;
                rcu_read_lock();
index db2ac22..e2e8840 100644 (file)
@@ -2,6 +2,19 @@
 #ifndef _RAID10_H
 #define _RAID10_H
 
+/* Note: raid10_info.rdev can be set to NULL asynchronously by
+ * raid10_remove_disk.
+ * There are three safe ways to access raid10_info.rdev.
+ * 1/ when holding mddev->reconfig_mutex
+ * 2/ when resync/recovery/reshape is known to be happening - i.e. in code
+ *    that is called as part of performing resync/recovery/reshape.
+ * 3/ while holding rcu_read_lock(), use rcu_dereference to get the pointer
+ *    and if it is non-NULL, increment rdev->nr_pending before dropping the
+ *    RCU lock.
+ * When .rdev is set to NULL, the nr_pending count is checked again and if it
+ * has been incremented, the pointer is put back in .rdev.
+ */
+
 struct raid10_info {
        struct md_rdev  *rdev, *replacement;
        sector_t        head_position;
index 0c76bce..a001808 100644 (file)
@@ -44,6 +44,7 @@ extern void ppl_write_stripe_run(struct r5conf *conf);
 extern void ppl_stripe_write_finished(struct stripe_head *sh);
 extern int ppl_modify_log(struct r5conf *conf, struct md_rdev *rdev, bool add);
 extern void ppl_quiesce(struct r5conf *conf, int quiesce);
+extern int ppl_handle_flush_request(struct r5l_log *log, struct bio *bio);
 
 static inline bool raid5_has_ppl(struct r5conf *conf)
 {
@@ -104,7 +105,7 @@ static inline int log_handle_flush_request(struct r5conf *conf, struct bio *bio)
        if (conf->log)
                ret = r5l_handle_flush_request(conf->log, bio);
        else if (raid5_has_ppl(conf))
-               ret = 0;
+               ret = ppl_handle_flush_request(conf->log, bio);
 
        return ret;
 }
index 2764c22..42890a0 100644 (file)
@@ -693,6 +693,16 @@ void ppl_quiesce(struct r5conf *conf, int quiesce)
        }
 }
 
+int ppl_handle_flush_request(struct r5l_log *log, struct bio *bio)
+{
+       if (bio->bi_iter.bi_size == 0) {
+               bio_endio(bio);
+               return 0;
+       }
+       bio->bi_opf &= ~REQ_PREFLUSH;
+       return -EAGAIN;
+}
+
 void ppl_stripe_write_finished(struct stripe_head *sh)
 {
        struct ppl_io_unit *io;
index 50d0114..b5d2601 100644 (file)
@@ -2196,15 +2196,16 @@ static int grow_one_stripe(struct r5conf *conf, gfp_t gfp)
 static int grow_stripes(struct r5conf *conf, int num)
 {
        struct kmem_cache *sc;
+       size_t namelen = sizeof(conf->cache_name[0]);
        int devs = max(conf->raid_disks, conf->previous_raid_disks);
 
        if (conf->mddev->gendisk)
-               sprintf(conf->cache_name[0],
+               snprintf(conf->cache_name[0], namelen,
                        "raid%d-%s", conf->level, mdname(conf->mddev));
        else
-               sprintf(conf->cache_name[0],
+               snprintf(conf->cache_name[0], namelen,
                        "raid%d-%p", conf->level, conf->mddev);
-       sprintf(conf->cache_name[1], "%s-alt", conf->cache_name[0]);
+       snprintf(conf->cache_name[1], namelen, "%.27s-alt", conf->cache_name[0]);
 
        conf->active_name = 0;
        sc = kmem_cache_create(conf->cache_name[conf->active_name],
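
The grow_stripes() hunk bounds the name formatting with snprintf() against the size of one cache_name slot, and prints the base name with a %.27s precision so the "-alt" suffix and the terminating NUL still fit. A small userspace sketch of that bounding, assuming 32-byte name buffers like the cache_name arrays in struct r5conf:

#include <stdio.h>

int main(void)
{
	char cache_name[2][32];
	size_t namelen = sizeof(cache_name[0]);

	/* an overly long md device name is truncated instead of overflowing */
	snprintf(cache_name[0], namelen, "raid%d-%s", 6,
		 "md_with_a_very_long_device_name");
	/* %.27s leaves room for "-alt" plus the NUL within the 32 bytes */
	snprintf(cache_name[1], namelen, "%.27s-alt", cache_name[0]);

	printf("%s\n%s\n", cache_name[0], cache_name[1]);
	return 0;
}
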
@@ -6764,9 +6765,7 @@ static void free_conf(struct r5conf *conf)
 
        log_exit(conf);
 
-       if (conf->shrinker.nr_deferred)
-               unregister_shrinker(&conf->shrinker);
-
+       unregister_shrinker(&conf->shrinker);
        free_thread_groups(conf);
        shrink_stripes(conf);
        raid5_free_percpu(conf);
@@ -8001,13 +8000,7 @@ static void raid5_finish_reshape(struct mddev *mddev)
 
        if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
 
-               if (mddev->delta_disks > 0) {
-                       md_set_array_sectors(mddev, raid5_size(mddev, 0, 0));
-                       if (mddev->queue) {
-                               set_capacity(mddev->gendisk, mddev->array_sectors);
-                               revalidate_disk(mddev->gendisk);
-                       }
-               } else {
+               if (mddev->delta_disks <= 0) {
                        int d;
                        spin_lock_irq(&conf->device_lock);
                        mddev->degraded = raid5_calc_degraded(conf);
index 2e61238..3f8da26 100644 (file)
@@ -450,6 +450,18 @@ enum {
  * HANDLE gets cleared if stripe_handle leaves nothing locked.
  */
 
+/* Note: disk_info.rdev can be set to NULL asynchronously by raid5_remove_disk.
+ * There are three safe ways to access disk_info.rdev.
+ * 1/ when holding mddev->reconfig_mutex
+ * 2/ when resync/recovery/reshape is known to be happening - i.e. in code that
+ *    is called as part of performing resync/recovery/reshape.
+ * 3/ while holding rcu_read_lock(), use rcu_dereference to get the pointer
+ *    and if it is non-NULL, increment rdev->nr_pending before dropping the RCU
+ *    lock.
+ * When .rdev is set to NULL, the nr_pending count is checked again and if
+ * it has been incremented, the pointer is put back in .rdev.
+ */
+
 struct disk_info {
        struct md_rdev  *rdev, *replacement;
        struct page     *extra_page; /* extra page to use in prexor */
index 145e12b..372c074 100644 (file)
@@ -147,6 +147,8 @@ config DVB_CORE
 config DVB_MMAP
        bool "Enable DVB memory-mapped API (EXPERIMENTAL)"
        depends on DVB_CORE
+       depends on VIDEO_V4L2=y || VIDEO_V4L2=DVB_CORE
+       select VIDEOBUF2_VMALLOC
        default n
        help
          This option enables DVB experimental memory-mapped API, with
index 5df0525..17c32ea 100644 (file)
@@ -3,6 +3,9 @@ config VIDEOBUF2_CORE
        select DMA_SHARED_BUFFER
        tristate
 
+config VIDEOBUF2_V4L2
+       tristate
+
 config VIDEOBUF2_MEMOPS
        tristate
        select FRAME_VECTOR
index 19de5cc..77bebe8 100644 (file)
@@ -1,5 +1,12 @@
+# SPDX-License-Identifier: GPL-2.0
+videobuf2-common-objs := videobuf2-core.o
 
-obj-$(CONFIG_VIDEOBUF2_CORE) += videobuf2-core.o videobuf2-v4l2.o
+ifeq ($(CONFIG_TRACEPOINTS),y)
+  videobuf2-common-objs += vb2-trace.o
+endif
+
+obj-$(CONFIG_VIDEOBUF2_CORE) += videobuf2-common.o
+obj-$(CONFIG_VIDEOBUF2_V4L2) += videobuf2-v4l2.o
 obj-$(CONFIG_VIDEOBUF2_MEMOPS) += videobuf2-memops.o
 obj-$(CONFIG_VIDEOBUF2_VMALLOC) += videobuf2-vmalloc.o
 obj-$(CONFIG_VIDEOBUF2_DMA_CONTIG) += videobuf2-dma-contig.o
diff --git a/drivers/media/common/videobuf2/vb2-trace.c b/drivers/media/common/videobuf2/vb2-trace.c
new file mode 100644 (file)
index 0000000..4c0f39d
--- /dev/null
@@ -0,0 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <media/videobuf2-core.h>
+
+#define CREATE_TRACE_POINTS
+#include <trace/events/vb2.h>
+
+EXPORT_TRACEPOINT_SYMBOL_GPL(vb2_buf_done);
+EXPORT_TRACEPOINT_SYMBOL_GPL(vb2_buf_queue);
+EXPORT_TRACEPOINT_SYMBOL_GPL(vb2_dqbuf);
+EXPORT_TRACEPOINT_SYMBOL_GPL(vb2_qbuf);
index 3a105d8..62b028d 100644 (file)
@@ -4,7 +4,7 @@
 #
 
 dvb-net-$(CONFIG_DVB_NET) := dvb_net.o
-dvb-vb2-$(CONFIG_DVB_MMSP) := dvb_vb2.o
+dvb-vb2-$(CONFIG_DVB_MMAP) := dvb_vb2.o
 
 dvb-core-objs := dvbdev.o dmxdev.o dvb_demux.o                 \
                 dvb_ca_en50221.o dvb_frontend.o                \
index 6d53af0..61a750f 100644 (file)
@@ -128,11 +128,7 @@ static int dvb_dvr_open(struct inode *inode, struct file *file)
        struct dvb_device *dvbdev = file->private_data;
        struct dmxdev *dmxdev = dvbdev->priv;
        struct dmx_frontend *front;
-#ifndef DVB_MMAP
        bool need_ringbuffer = false;
-#else
-       const bool need_ringbuffer = true;
-#endif
 
        dprintk("%s\n", __func__);
 
@@ -144,17 +140,31 @@ static int dvb_dvr_open(struct inode *inode, struct file *file)
                return -ENODEV;
        }
 
-#ifndef DVB_MMAP
+       dmxdev->may_do_mmap = 0;
+
+       /*
+        * The logic here is a little tricky due to the ifdef.
+        *
+        * The ringbuffer is used for both read and mmap.
+        *
+        * It is not needed, however, in two situations:
+        *      - Write devices (access with O_WRONLY);
+        *      - For duplex device nodes, opened with O_RDWR.
+        */
+
        if ((file->f_flags & O_ACCMODE) == O_RDONLY)
                need_ringbuffer = true;
-#else
-       if ((file->f_flags & O_ACCMODE) == O_RDWR) {
+       else if ((file->f_flags & O_ACCMODE) == O_RDWR) {
                if (!(dmxdev->capabilities & DMXDEV_CAP_DUPLEX)) {
+#ifdef CONFIG_DVB_MMAP
+                       dmxdev->may_do_mmap = 1;
+                       need_ringbuffer = true;
+#else
                        mutex_unlock(&dmxdev->mutex);
                        return -EOPNOTSUPP;
+#endif
                }
        }
-#endif
 
        if (need_ringbuffer) {
                void *mem;
@@ -169,8 +179,9 @@ static int dvb_dvr_open(struct inode *inode, struct file *file)
                        return -ENOMEM;
                }
                dvb_ringbuffer_init(&dmxdev->dvr_buffer, mem, DVR_BUFFER_SIZE);
-               dvb_vb2_init(&dmxdev->dvr_vb2_ctx, "dvr",
-                            file->f_flags & O_NONBLOCK);
+               if (dmxdev->may_do_mmap)
+                       dvb_vb2_init(&dmxdev->dvr_vb2_ctx, "dvr",
+                                    file->f_flags & O_NONBLOCK);
                dvbdev->readers--;
        }
 
@@ -200,11 +211,6 @@ static int dvb_dvr_release(struct inode *inode, struct file *file)
 {
        struct dvb_device *dvbdev = file->private_data;
        struct dmxdev *dmxdev = dvbdev->priv;
-#ifndef DVB_MMAP
-       bool need_ringbuffer = false;
-#else
-       const bool need_ringbuffer = true;
-#endif
 
        mutex_lock(&dmxdev->mutex);
 
@@ -213,15 +219,14 @@ static int dvb_dvr_release(struct inode *inode, struct file *file)
                dmxdev->demux->connect_frontend(dmxdev->demux,
                                                dmxdev->dvr_orig_fe);
        }
-#ifndef DVB_MMAP
-       if ((file->f_flags & O_ACCMODE) == O_RDONLY)
-               need_ringbuffer = true;
-#endif
 
-       if (need_ringbuffer) {
-               if (dvb_vb2_is_streaming(&dmxdev->dvr_vb2_ctx))
-                       dvb_vb2_stream_off(&dmxdev->dvr_vb2_ctx);
-               dvb_vb2_release(&dmxdev->dvr_vb2_ctx);
+       if (((file->f_flags & O_ACCMODE) == O_RDONLY) ||
+           dmxdev->may_do_mmap) {
+               if (dmxdev->may_do_mmap) {
+                       if (dvb_vb2_is_streaming(&dmxdev->dvr_vb2_ctx))
+                               dvb_vb2_stream_off(&dmxdev->dvr_vb2_ctx);
+                       dvb_vb2_release(&dmxdev->dvr_vb2_ctx);
+               }
                dvbdev->readers++;
                if (dmxdev->dvr_buffer.data) {
                        void *mem = dmxdev->dvr_buffer.data;
@@ -380,7 +385,8 @@ static void dvb_dmxdev_filter_timer(struct dmxdev_filter *dmxdevfilter)
 
 static int dvb_dmxdev_section_callback(const u8 *buffer1, size_t buffer1_len,
                                       const u8 *buffer2, size_t buffer2_len,
-                                      struct dmx_section_filter *filter)
+                                      struct dmx_section_filter *filter,
+                                      u32 *buffer_flags)
 {
        struct dmxdev_filter *dmxdevfilter = filter->priv;
        int ret;
@@ -399,10 +405,12 @@ static int dvb_dmxdev_section_callback(const u8 *buffer1, size_t buffer1_len,
        dprintk("section callback %*ph\n", 6, buffer1);
        if (dvb_vb2_is_streaming(&dmxdevfilter->vb2_ctx)) {
                ret = dvb_vb2_fill_buffer(&dmxdevfilter->vb2_ctx,
-                                         buffer1, buffer1_len);
+                                         buffer1, buffer1_len,
+                                         buffer_flags);
                if (ret == buffer1_len)
                        ret = dvb_vb2_fill_buffer(&dmxdevfilter->vb2_ctx,
-                                                 buffer2, buffer2_len);
+                                                 buffer2, buffer2_len,
+                                                 buffer_flags);
        } else {
                ret = dvb_dmxdev_buffer_write(&dmxdevfilter->buffer,
                                              buffer1, buffer1_len);
@@ -422,11 +430,12 @@ static int dvb_dmxdev_section_callback(const u8 *buffer1, size_t buffer1_len,
 
 static int dvb_dmxdev_ts_callback(const u8 *buffer1, size_t buffer1_len,
                                  const u8 *buffer2, size_t buffer2_len,
-                                 struct dmx_ts_feed *feed)
+                                 struct dmx_ts_feed *feed,
+                                 u32 *buffer_flags)
 {
        struct dmxdev_filter *dmxdevfilter = feed->priv;
        struct dvb_ringbuffer *buffer;
-#ifdef DVB_MMAP
+#ifdef CONFIG_DVB_MMAP
        struct dvb_vb2_ctx *ctx;
 #endif
        int ret;
@@ -440,20 +449,22 @@ static int dvb_dmxdev_ts_callback(const u8 *buffer1, size_t buffer1_len,
        if (dmxdevfilter->params.pes.output == DMX_OUT_TAP ||
            dmxdevfilter->params.pes.output == DMX_OUT_TSDEMUX_TAP) {
                buffer = &dmxdevfilter->buffer;
-#ifdef DVB_MMAP
+#ifdef CONFIG_DVB_MMAP
                ctx = &dmxdevfilter->vb2_ctx;
 #endif
        } else {
                buffer = &dmxdevfilter->dev->dvr_buffer;
-#ifdef DVB_MMAP
+#ifdef CONFIG_DVB_MMAP
                ctx = &dmxdevfilter->dev->dvr_vb2_ctx;
 #endif
        }
 
        if (dvb_vb2_is_streaming(ctx)) {
-               ret = dvb_vb2_fill_buffer(ctx, buffer1, buffer1_len);
+               ret = dvb_vb2_fill_buffer(ctx, buffer1, buffer1_len,
+                                         buffer_flags);
                if (ret == buffer1_len)
-                       ret = dvb_vb2_fill_buffer(ctx, buffer2, buffer2_len);
+                       ret = dvb_vb2_fill_buffer(ctx, buffer2, buffer2_len,
+                                                 buffer_flags);
        } else {
                if (buffer->error) {
                        spin_unlock(&dmxdevfilter->dev->lock);
@@ -802,6 +813,12 @@ static int dvb_demux_open(struct inode *inode, struct file *file)
        mutex_init(&dmxdevfilter->mutex);
        file->private_data = dmxdevfilter;
 
+#ifdef CONFIG_DVB_MMAP
+       dmxdev->may_do_mmap = 1;
+#else
+       dmxdev->may_do_mmap = 0;
+#endif
+
        dvb_ringbuffer_init(&dmxdevfilter->buffer, NULL, 8192);
        dvb_vb2_init(&dmxdevfilter->vb2_ctx, "demux_filter",
                     file->f_flags & O_NONBLOCK);
@@ -1111,7 +1128,7 @@ static int dvb_demux_do_ioctl(struct file *file,
                mutex_unlock(&dmxdevfilter->mutex);
                break;
 
-#ifdef DVB_MMAP
+#ifdef CONFIG_DVB_MMAP
        case DMX_REQBUFS:
                if (mutex_lock_interruptible(&dmxdevfilter->mutex)) {
                        mutex_unlock(&dmxdev->mutex);
@@ -1160,7 +1177,7 @@ static int dvb_demux_do_ioctl(struct file *file,
                break;
 #endif
        default:
-               ret = -EINVAL;
+               ret = -ENOTTY;
                break;
        }
        mutex_unlock(&dmxdev->mutex);
@@ -1199,13 +1216,16 @@ static __poll_t dvb_demux_poll(struct file *file, poll_table *wait)
        return mask;
 }
 
-#ifdef DVB_MMAP
+#ifdef CONFIG_DVB_MMAP
 static int dvb_demux_mmap(struct file *file, struct vm_area_struct *vma)
 {
        struct dmxdev_filter *dmxdevfilter = file->private_data;
        struct dmxdev *dmxdev = dmxdevfilter->dev;
        int ret;
 
+       if (!dmxdev->may_do_mmap)
+               return -ENOTTY;
+
        if (mutex_lock_interruptible(&dmxdev->mutex))
                return -ERESTARTSYS;
 
@@ -1249,7 +1269,7 @@ static const struct file_operations dvb_demux_fops = {
        .release = dvb_demux_release,
        .poll = dvb_demux_poll,
        .llseek = default_llseek,
-#ifdef DVB_MMAP
+#ifdef CONFIG_DVB_MMAP
        .mmap = dvb_demux_mmap,
 #endif
 };
@@ -1280,7 +1300,7 @@ static int dvb_dvr_do_ioctl(struct file *file,
                ret = dvb_dvr_set_buffer_size(dmxdev, arg);
                break;
 
-#ifdef DVB_MMAP
+#ifdef CONFIG_DVB_MMAP
        case DMX_REQBUFS:
                ret = dvb_vb2_reqbufs(&dmxdev->dvr_vb2_ctx, parg);
                break;
@@ -1304,7 +1324,7 @@ static int dvb_dvr_do_ioctl(struct file *file,
                break;
 #endif
        default:
-               ret = -EINVAL;
+               ret = -ENOTTY;
                break;
        }
        mutex_unlock(&dmxdev->mutex);
@@ -1322,11 +1342,6 @@ static __poll_t dvb_dvr_poll(struct file *file, poll_table *wait)
        struct dvb_device *dvbdev = file->private_data;
        struct dmxdev *dmxdev = dvbdev->priv;
        __poll_t mask = 0;
-#ifndef DVB_MMAP
-       bool need_ringbuffer = false;
-#else
-       const bool need_ringbuffer = true;
-#endif
 
        dprintk("%s\n", __func__);
 
@@ -1337,11 +1352,8 @@ static __poll_t dvb_dvr_poll(struct file *file, poll_table *wait)
 
        poll_wait(file, &dmxdev->dvr_buffer.queue, wait);
 
-#ifndef DVB_MMAP
-       if ((file->f_flags & O_ACCMODE) == O_RDONLY)
-               need_ringbuffer = true;
-#endif
-       if (need_ringbuffer) {
+       if (((file->f_flags & O_ACCMODE) == O_RDONLY) ||
+           dmxdev->may_do_mmap) {
                if (dmxdev->dvr_buffer.error)
                        mask |= (EPOLLIN | EPOLLRDNORM | EPOLLPRI | EPOLLERR);
 
@@ -1353,13 +1365,16 @@ static __poll_t dvb_dvr_poll(struct file *file, poll_table *wait)
        return mask;
 }
 
-#ifdef DVB_MMAP
+#ifdef CONFIG_DVB_MMAP
 static int dvb_dvr_mmap(struct file *file, struct vm_area_struct *vma)
 {
        struct dvb_device *dvbdev = file->private_data;
        struct dmxdev *dmxdev = dvbdev->priv;
        int ret;
 
+       if (!dmxdev->may_do_mmap)
+               return -ENOTTY;
+
        if (dmxdev->exit)
                return -ENODEV;
 
@@ -1381,7 +1396,7 @@ static const struct file_operations dvb_dvr_fops = {
        .release = dvb_dvr_release,
        .poll = dvb_dvr_poll,
        .llseek = default_llseek,
-#ifdef DVB_MMAP
+#ifdef CONFIG_DVB_MMAP
        .mmap = dvb_dvr_mmap,
 #endif
 };
index 210eed0..f450912 100644 (file)
@@ -55,6 +55,17 @@ MODULE_PARM_DESC(dvb_demux_feed_err_pkts,
                dprintk(x);                             \
 } while (0)
 
+#ifdef CONFIG_DVB_DEMUX_SECTION_LOSS_LOG
+#  define dprintk_sect_loss(x...) dprintk(x)
+#else
+#  define dprintk_sect_loss(x...)
+#endif
+
+#define set_buf_flags(__feed, __flag)                  \
+       do {                                            \
+               (__feed)->buffer_flags |= (__flag);     \
+       } while (0)
+
 /******************************************************************************
  * static inlined helper functions
  ******************************************************************************/
@@ -104,31 +115,30 @@ static inline int dvb_dmx_swfilter_payload(struct dvb_demux_feed *feed,
 {
        int count = payload(buf);
        int p;
-#ifdef CONFIG_DVB_DEMUX_SECTION_LOSS_LOG
        int ccok;
        u8 cc;
-#endif
 
        if (count == 0)
                return -1;
 
        p = 188 - count;
 
-#ifdef CONFIG_DVB_DEMUX_SECTION_LOSS_LOG
        cc = buf[3] & 0x0f;
        ccok = ((feed->cc + 1) & 0x0f) == cc;
        feed->cc = cc;
-       if (!ccok)
-               dprintk("missed packet: %d instead of %d!\n",
-                       cc, (feed->cc + 1) & 0x0f);
-#endif
+       if (!ccok) {
+               set_buf_flags(feed, DMX_BUFFER_FLAG_DISCONTINUITY_DETECTED);
+               dprintk_sect_loss("missed packet: %d instead of %d!\n",
+                                 cc, (feed->cc + 1) & 0x0f);
+       }
 
        if (buf[1] & 0x40)      // PUSI ?
                feed->peslen = 0xfffa;
 
        feed->peslen += count;
 
-       return feed->cb.ts(&buf[p], count, NULL, 0, &feed->feed.ts);
+       return feed->cb.ts(&buf[p], count, NULL, 0, &feed->feed.ts,
+                          &feed->buffer_flags);
 }
 
 static int dvb_dmx_swfilter_sectionfilter(struct dvb_demux_feed *feed,
@@ -150,7 +160,7 @@ static int dvb_dmx_swfilter_sectionfilter(struct dvb_demux_feed *feed,
                return 0;
 
        return feed->cb.sec(feed->feed.sec.secbuf, feed->feed.sec.seclen,
-                           NULL, 0, &f->filter);
+                           NULL, 0, &f->filter, &feed->buffer_flags);
 }
 
 static inline int dvb_dmx_swfilter_section_feed(struct dvb_demux_feed *feed)
@@ -169,8 +179,10 @@ static inline int dvb_dmx_swfilter_section_feed(struct dvb_demux_feed *feed)
        if (sec->check_crc) {
                section_syntax_indicator = ((sec->secbuf[1] & 0x80) != 0);
                if (section_syntax_indicator &&
-                   demux->check_crc32(feed, sec->secbuf, sec->seclen))
+                   demux->check_crc32(feed, sec->secbuf, sec->seclen)) {
+                       set_buf_flags(feed, DMX_BUFFER_FLAG_HAD_CRC32_DISCARD);
                        return -1;
+               }
        }
 
        do {
@@ -187,7 +199,6 @@ static void dvb_dmx_swfilter_section_new(struct dvb_demux_feed *feed)
 {
        struct dmx_section_feed *sec = &feed->feed.sec;
 
-#ifdef CONFIG_DVB_DEMUX_SECTION_LOSS_LOG
        if (sec->secbufp < sec->tsfeedp) {
                int n = sec->tsfeedp - sec->secbufp;
 
@@ -197,12 +208,13 @@ static void dvb_dmx_swfilter_section_new(struct dvb_demux_feed *feed)
                 * but just first and last.
                 */
                if (sec->secbuf[0] != 0xff || sec->secbuf[n - 1] != 0xff) {
-                       dprintk("section ts padding loss: %d/%d\n",
-                              n, sec->tsfeedp);
-                       dprintk("pad data: %*ph\n", n, sec->secbuf);
+                       set_buf_flags(feed,
+                                     DMX_BUFFER_FLAG_DISCONTINUITY_DETECTED);
+                       dprintk_sect_loss("section ts padding loss: %d/%d\n",
+                                         n, sec->tsfeedp);
+                       dprintk_sect_loss("pad data: %*ph\n", n, sec->secbuf);
                }
        }
-#endif
 
        sec->tsfeedp = sec->secbufp = sec->seclen = 0;
        sec->secbuf = sec->secbuf_base;
@@ -237,11 +249,10 @@ static int dvb_dmx_swfilter_section_copy_dump(struct dvb_demux_feed *feed,
                return 0;
 
        if (sec->tsfeedp + len > DMX_MAX_SECFEED_SIZE) {
-#ifdef CONFIG_DVB_DEMUX_SECTION_LOSS_LOG
-               dprintk("section buffer full loss: %d/%d\n",
-                       sec->tsfeedp + len - DMX_MAX_SECFEED_SIZE,
-                       DMX_MAX_SECFEED_SIZE);
-#endif
+               set_buf_flags(feed, DMX_BUFFER_FLAG_DISCONTINUITY_DETECTED);
+               dprintk_sect_loss("section buffer full loss: %d/%d\n",
+                                 sec->tsfeedp + len - DMX_MAX_SECFEED_SIZE,
+                                 DMX_MAX_SECFEED_SIZE);
                len = DMX_MAX_SECFEED_SIZE - sec->tsfeedp;
        }
 
@@ -269,12 +280,13 @@ static int dvb_dmx_swfilter_section_copy_dump(struct dvb_demux_feed *feed,
                sec->seclen = seclen;
                sec->crc_val = ~0;
                /* dump [secbuf .. secbuf+seclen) */
-               if (feed->pusi_seen)
+               if (feed->pusi_seen) {
                        dvb_dmx_swfilter_section_feed(feed);
-#ifdef CONFIG_DVB_DEMUX_SECTION_LOSS_LOG
-               else
-                       dprintk("pusi not seen, discarding section data\n");
-#endif
+               } else {
+                       set_buf_flags(feed,
+                                     DMX_BUFFER_FLAG_DISCONTINUITY_DETECTED);
+                       dprintk_sect_loss("pusi not seen, discarding section data\n");
+               }
                sec->secbufp += seclen; /* secbufp and secbuf moving together is */
                sec->secbuf += seclen;  /* redundant but saves pointer arithmetic */
        }
@@ -307,18 +319,22 @@ static int dvb_dmx_swfilter_section_packet(struct dvb_demux_feed *feed,
        }
 
        if (!ccok || dc_i) {
-#ifdef CONFIG_DVB_DEMUX_SECTION_LOSS_LOG
-               if (dc_i)
-                       dprintk("%d frame with disconnect indicator\n",
+               if (dc_i) {
+                       set_buf_flags(feed,
+                                     DMX_BUFFER_FLAG_DISCONTINUITY_INDICATOR);
+                       dprintk_sect_loss("%d frame with disconnect indicator\n",
                                cc);
-               else
-                       dprintk("discontinuity: %d instead of %d. %d bytes lost\n",
+               } else {
+                       set_buf_flags(feed,
+                                     DMX_BUFFER_FLAG_DISCONTINUITY_DETECTED);
+                       dprintk_sect_loss("discontinuity: %d instead of %d. %d bytes lost\n",
                                cc, (feed->cc + 1) & 0x0f, count + 4);
+               }
                /*
-                * those bytes under sume circumstances will again be reported
+                * those bytes under some circumstances will again be reported
                 * in the following dvb_dmx_swfilter_section_new
                 */
-#endif
+
                /*
                 * Discontinuity detected. Reset pusi_seen to
                 * stop feeding of suspicious data until next PUSI=1 arrives
@@ -326,6 +342,7 @@ static int dvb_dmx_swfilter_section_packet(struct dvb_demux_feed *feed,
                 * FIXME: does it make sense if the MPEG-TS is the one
                 *      reporting discontinuity?
                 */
+
                feed->pusi_seen = false;
                dvb_dmx_swfilter_section_new(feed);
        }
@@ -345,11 +362,11 @@ static int dvb_dmx_swfilter_section_packet(struct dvb_demux_feed *feed,
                        dvb_dmx_swfilter_section_new(feed);
                        dvb_dmx_swfilter_section_copy_dump(feed, after,
                                                           after_len);
+               } else if (count > 0) {
+                       set_buf_flags(feed,
+                                     DMX_BUFFER_FLAG_DISCONTINUITY_DETECTED);
+                       dprintk_sect_loss("PUSI=1 but %d bytes lost\n", count);
                }
-#ifdef CONFIG_DVB_DEMUX_SECTION_LOSS_LOG
-               else if (count > 0)
-                       dprintk("PUSI=1 but %d bytes lost\n", count);
-#endif
        } else {
                /* PUSI=0 (is not set), no section boundary */
                dvb_dmx_swfilter_section_copy_dump(feed, &buf[p], count);
@@ -369,7 +386,8 @@ static inline void dvb_dmx_swfilter_packet_type(struct dvb_demux_feed *feed,
                        if (feed->ts_type & TS_PAYLOAD_ONLY)
                                dvb_dmx_swfilter_payload(feed, buf);
                        else
-                               feed->cb.ts(buf, 188, NULL, 0, &feed->feed.ts);
+                               feed->cb.ts(buf, 188, NULL, 0, &feed->feed.ts,
+                                           &feed->buffer_flags);
                }
                /* Used only on full-featured devices */
                if (feed->ts_type & TS_DECODER)
@@ -430,6 +448,11 @@ static void dvb_dmx_swfilter_packet(struct dvb_demux *demux, const u8 *buf)
        }
 
        if (buf[1] & 0x80) {
+               list_for_each_entry(feed, &demux->feed_list, list_head) {
+                       if ((feed->pid != pid) && (feed->pid != 0x2000))
+                               continue;
+                       set_buf_flags(feed, DMX_BUFFER_FLAG_TEI);
+               }
                dprintk_tscheck("TEI detected. PID=0x%x data1=0x%x\n",
                                pid, buf[1]);
                /* data in this packet can't be trusted - drop it unless
@@ -445,6 +468,13 @@ static void dvb_dmx_swfilter_packet(struct dvb_demux *demux, const u8 *buf)
                                                (demux->cnt_storage[pid] + 1) & 0xf;
 
                                if ((buf[3] & 0xf) != demux->cnt_storage[pid]) {
+                                       list_for_each_entry(feed, &demux->feed_list, list_head) {
+                                               if ((feed->pid != pid) && (feed->pid != 0x2000))
+                                                       continue;
+                                               set_buf_flags(feed,
+                                                             DMX_BUFFER_PKT_COUNTER_MISMATCH);
+                                       }
+
                                        dprintk_tscheck("TS packet counter mismatch. PID=0x%x expected 0x%x got 0x%x\n",
                                                        pid, demux->cnt_storage[pid],
                                                        buf[3] & 0xf);
@@ -466,7 +496,8 @@ static void dvb_dmx_swfilter_packet(struct dvb_demux *demux, const u8 *buf)
                if (feed->pid == pid)
                        dvb_dmx_swfilter_packet_type(feed, buf);
                else if (feed->pid == 0x2000)
-                       feed->cb.ts(buf, 188, NULL, 0, &feed->feed.ts);
+                       feed->cb.ts(buf, 188, NULL, 0, &feed->feed.ts,
+                                   &feed->buffer_flags);
        }
 }
 
@@ -585,7 +616,8 @@ void dvb_dmx_swfilter_raw(struct dvb_demux *demux, const u8 *buf, size_t count)
 
        spin_lock_irqsave(&demux->lock, flags);
 
-       demux->feed->cb.ts(buf, count, NULL, 0, &demux->feed->feed.ts);
+       demux->feed->cb.ts(buf, count, NULL, 0, &demux->feed->feed.ts,
+                          &demux->feed->buffer_flags);
 
        spin_unlock_irqrestore(&demux->lock, flags);
 }
@@ -785,6 +817,7 @@ static int dvbdmx_allocate_ts_feed(struct dmx_demux *dmx,
        feed->demux = demux;
        feed->pid = 0xffff;
        feed->peslen = 0xfffa;
+       feed->buffer_flags = 0;
 
        (*ts_feed) = &feed->feed.ts;
        (*ts_feed)->parent = dmx;
@@ -1042,6 +1075,7 @@ static int dvbdmx_allocate_section_feed(struct dmx_demux *demux,
        dvbdmxfeed->cb.sec = callback;
        dvbdmxfeed->demux = dvbdmx;
        dvbdmxfeed->pid = 0xffff;
+       dvbdmxfeed->buffer_flags = 0;
        dvbdmxfeed->feed.sec.secbuf = dvbdmxfeed->feed.sec.secbuf_base;
        dvbdmxfeed->feed.sec.secbufp = dvbdmxfeed->feed.sec.seclen = 0;
        dvbdmxfeed->feed.sec.tsfeedp = 0;
index b6c7eec..ba39f99 100644 (file)
@@ -883,7 +883,8 @@ static void dvb_net_ule(struct net_device *dev, const u8 *buf, size_t buf_len)
 
 static int dvb_net_ts_callback(const u8 *buffer1, size_t buffer1_len,
                               const u8 *buffer2, size_t buffer2_len,
-                              struct dmx_ts_feed *feed)
+                              struct dmx_ts_feed *feed,
+                              u32 *buffer_flags)
 {
        struct net_device *dev = feed->priv;
 
@@ -992,7 +993,7 @@ static void dvb_net_sec(struct net_device *dev,
 
 static int dvb_net_sec_callback(const u8 *buffer1, size_t buffer1_len,
                 const u8 *buffer2, size_t buffer2_len,
-                struct dmx_section_filter *filter)
+                struct dmx_section_filter *filter, u32 *buffer_flags)
 {
        struct net_device *dev = filter->priv;
 
index 763145d..b811adf 100644 (file)
@@ -256,7 +256,8 @@ int dvb_vb2_is_streaming(struct dvb_vb2_ctx *ctx)
 }
 
 int dvb_vb2_fill_buffer(struct dvb_vb2_ctx *ctx,
-                       const unsigned char *src, int len)
+                       const unsigned char *src, int len,
+                       enum dmx_buffer_flags *buffer_flags)
 {
        unsigned long flags = 0;
        void *vbuf = NULL;
@@ -264,15 +265,17 @@ int dvb_vb2_fill_buffer(struct dvb_vb2_ctx *ctx,
        unsigned char *psrc = (unsigned char *)src;
        int ll = 0;
 
-       dprintk(3, "[%s] %d bytes are rcvd\n", ctx->name, len);
-       if (!src) {
-               dprintk(3, "[%s]:NULL pointer src\n", ctx->name);
-               /**normal case: This func is called twice from demux driver
-                * once with valid src pointer, second time with NULL pointer
-                */
+       /*
+        * Normal case: this function is called twice by the demux driver,
+        * once with a valid src pointer, the second time with a NULL pointer.
+        */
+       if (!src || !len)
                return 0;
-       }
        spin_lock_irqsave(&ctx->slock, flags);
+       if (buffer_flags && *buffer_flags) {
+               ctx->flags |= *buffer_flags;
+               *buffer_flags = 0;
+       }
        while (todo) {
                if (!ctx->buf) {
                        if (list_empty(&ctx->dvb_q)) {
@@ -395,6 +398,7 @@ int dvb_vb2_qbuf(struct dvb_vb2_ctx *ctx, struct dmx_buffer *b)
 
 int dvb_vb2_dqbuf(struct dvb_vb2_ctx *ctx, struct dmx_buffer *b)
 {
+       unsigned long flags;
        int ret;
 
        ret = vb2_core_dqbuf(&ctx->vb_q, &b->index, b, ctx->nonblocking);
@@ -402,7 +406,16 @@ int dvb_vb2_dqbuf(struct dvb_vb2_ctx *ctx, struct dmx_buffer *b)
                dprintk(1, "[%s] errno=%d\n", ctx->name, ret);
                return ret;
        }
-       dprintk(5, "[%s] index=%d\n", ctx->name, b->index);
+
+       spin_lock_irqsave(&ctx->slock, flags);
+       b->count = ctx->count++;
+       b->flags = ctx->flags;
+       ctx->flags = 0;
+       spin_unlock_irqrestore(&ctx->slock, flags);
+
+       dprintk(5, "[%s] index=%d, count=%d, flags=%d\n",
+               ctx->name, b->index, ctx->count, b->flags);
+
 
        return 0;
 }
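
Taken together, the dvb_demux and dvb_vb2 hunks plumb per-buffer status out to userspace: dvb_vb2_fill_buffer() accumulates whatever set_buf_flags() raised, and the dequeue path returns those flags plus a running count in the dmx_buffer. A hedged sketch of what a memory-mapped consumer might do with that, assuming the uapi additions from this series in <linux/dvb/dmx.h> (error handling trimmed, helper name illustrative):

#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/dvb/dmx.h>

static int drain_one(int fd)
{
	struct dmx_buffer buf = { 0 };

	if (ioctl(fd, DMX_DQBUF, &buf) < 0)
		return -1;

	if (buf.flags & DMX_BUFFER_FLAG_TEI)
		fprintf(stderr, "buf %u: transport error indicator\n", buf.index);
	if (buf.flags & DMX_BUFFER_FLAG_DISCONTINUITY_DETECTED)
		fprintf(stderr, "buf %u: continuity counter jump\n", buf.index);
	if (buf.flags & DMX_BUFFER_FLAG_HAD_CRC32_DISCARD)
		fprintf(stderr, "buf %u: section dropped on bad CRC\n", buf.index);

	/* buf.count increments per dequeue, so gaps reveal overruns */
	return ioctl(fd, DMX_QBUF, &buf);	/* hand the buffer back */
}
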
index 50bce68..65d157f 100644 (file)
@@ -1262,11 +1262,12 @@ static int m88ds3103_select(struct i2c_mux_core *muxc, u32 chan)
  * New users must use I2C client binding directly!
  */
 struct dvb_frontend *m88ds3103_attach(const struct m88ds3103_config *cfg,
-               struct i2c_adapter *i2c, struct i2c_adapter **tuner_i2c_adapter)
+                                     struct i2c_adapter *i2c,
+                                     struct i2c_adapter **tuner_i2c_adapter)
 {
        struct i2c_client *client;
        struct i2c_board_info board_info;
-       struct m88ds3103_platform_data pdata;
+       struct m88ds3103_platform_data pdata = {};
 
        pdata.clk = cfg->clock;
        pdata.i2c_wr_max = cfg->i2c_wr_max;
@@ -1409,6 +1410,8 @@ static int m88ds3103_probe(struct i2c_client *client,
        case M88DS3103_CHIP_ID:
                break;
        default:
+               ret = -ENODEV;
+               dev_err(&client->dev, "Unknown device. Chip_id=%02x\n", dev->chip_id);
                goto err_kfree;
        }
 
index 3c18519..2476d81 100644 (file)
@@ -505,80 +505,77 @@ static struct i2c_vbi_ram_value vbi_ram_default[] =
        /* FIXME: Current api doesn't handle all VBI types, those not
           yet supported are placed under #if 0 */
 #if 0
-       {0x010, /* Teletext, SECAM, WST System A */
+       [0] = {0x010, /* Teletext, SECAM, WST System A */
                {V4L2_SLICED_TELETEXT_SECAM,6,23,1},
                { 0xaa, 0xaa, 0xff, 0xff, 0xe7, 0x2e, 0x20, 0x26,
                  0xe6, 0xb4, 0x0e, 0x00, 0x00, 0x00, 0x10, 0x00 }
        },
 #endif
-       {0x030, /* Teletext, PAL, WST System B */
+       [1] = {0x030, /* Teletext, PAL, WST System B */
                {V4L2_SLICED_TELETEXT_B,6,22,1},
                { 0xaa, 0xaa, 0xff, 0xff, 0x27, 0x2e, 0x20, 0x2b,
                  0xa6, 0x72, 0x10, 0x00, 0x00, 0x00, 0x10, 0x00 }
        },
 #if 0
-       {0x050, /* Teletext, PAL, WST System C */
+       [2] = {0x050, /* Teletext, PAL, WST System C */
                {V4L2_SLICED_TELETEXT_PAL_C,6,22,1},
                { 0xaa, 0xaa, 0xff, 0xff, 0xe7, 0x2e, 0x20, 0x22,
                  0xa6, 0x98, 0x0d, 0x00, 0x00, 0x00, 0x10, 0x00 }
        },
-       {0x070, /* Teletext, NTSC, WST System B */
+       [3] = {0x070, /* Teletext, NTSC, WST System B */
                {V4L2_SLICED_TELETEXT_NTSC_B,10,21,1},
                { 0xaa, 0xaa, 0xff, 0xff, 0x27, 0x2e, 0x20, 0x23,
                  0x69, 0x93, 0x0d, 0x00, 0x00, 0x00, 0x10, 0x00 }
        },
-       {0x090, /* Tetetext, NTSC NABTS System C */
+       [4] = {0x090, /* Teletext, NTSC NABTS System C */
                {V4L2_SLICED_TELETEXT_NTSC_C,10,21,1},
                { 0xaa, 0xaa, 0xff, 0xff, 0xe7, 0x2e, 0x20, 0x22,
                  0x69, 0x93, 0x0d, 0x00, 0x00, 0x00, 0x15, 0x00 }
        },
-       {0x0b0, /* Teletext, NTSC-J, NABTS System D */
+       [5] = {0x0b0, /* Teletext, NTSC-J, NABTS System D */
                {V4L2_SLICED_TELETEXT_NTSC_D,10,21,1},
                { 0xaa, 0xaa, 0xff, 0xff, 0xa7, 0x2e, 0x20, 0x23,
                  0x69, 0x93, 0x0d, 0x00, 0x00, 0x00, 0x10, 0x00 }
        },
-       {0x0d0, /* Closed Caption, PAL/SECAM */
+       [6] = {0x0d0, /* Closed Caption, PAL/SECAM */
                {V4L2_SLICED_CAPTION_625,22,22,1},
                { 0xaa, 0x2a, 0xff, 0x3f, 0x04, 0x51, 0x6e, 0x02,
                  0xa6, 0x7b, 0x09, 0x00, 0x00, 0x00, 0x27, 0x00 }
        },
 #endif
-       {0x0f0, /* Closed Caption, NTSC */
+       [7] = {0x0f0, /* Closed Caption, NTSC */
                {V4L2_SLICED_CAPTION_525,21,21,1},
                { 0xaa, 0x2a, 0xff, 0x3f, 0x04, 0x51, 0x6e, 0x02,
                  0x69, 0x8c, 0x09, 0x00, 0x00, 0x00, 0x27, 0x00 }
        },
-       {0x110, /* Wide Screen Signal, PAL/SECAM */
+       [8] = {0x110, /* Wide Screen Signal, PAL/SECAM */
                {V4L2_SLICED_WSS_625,23,23,1},
                { 0x5b, 0x55, 0xc5, 0xff, 0x00, 0x71, 0x6e, 0x42,
                  0xa6, 0xcd, 0x0f, 0x00, 0x00, 0x00, 0x3a, 0x00 }
        },
 #if 0
-       {0x130, /* Wide Screen Signal, NTSC C */
+       [9] = {0x130, /* Wide Screen Signal, NTSC C */
                {V4L2_SLICED_WSS_525,20,20,1},
                { 0x38, 0x00, 0x3f, 0x00, 0x00, 0x71, 0x6e, 0x43,
                  0x69, 0x7c, 0x08, 0x00, 0x00, 0x00, 0x39, 0x00 }
        },
-       {0x150, /* Vertical Interval Timecode (VITC), PAL/SECAM */
+       [10] = {0x150, /* Vertical Interval Timecode (VITC), PAL/SECAM */
                {V4l2_SLICED_VITC_625,6,22,0},
                { 0x00, 0x00, 0x00, 0x00, 0x00, 0x8f, 0x6d, 0x49,
                  0xa6, 0x85, 0x08, 0x00, 0x00, 0x00, 0x4c, 0x00 }
        },
-       {0x170, /* Vertical Interval Timecode (VITC), NTSC */
+       [11] = {0x170, /* Vertical Interval Timecode (VITC), NTSC */
                {V4l2_SLICED_VITC_525,10,20,0},
                { 0x00, 0x00, 0x00, 0x00, 0x00, 0x8f, 0x6d, 0x49,
                  0x69, 0x94, 0x08, 0x00, 0x00, 0x00, 0x4c, 0x00 }
        },
 #endif
-       {0x190, /* Video Program System (VPS), PAL */
+       [12] = {0x190, /* Video Program System (VPS), PAL */
                {V4L2_SLICED_VPS,16,16,0},
                { 0xaa, 0xaa, 0xff, 0xff, 0xba, 0xce, 0x2b, 0x0d,
                  0xa6, 0xda, 0x0b, 0x00, 0x00, 0x00, 0x60, 0x00 }
        },
        /* 0x1d0 User programmable */
-
-       /* End of struct */
-       { (u16)-1 }
 };
 
 static int tvp5150_write_inittab(struct v4l2_subdev *sd,
@@ -591,10 +588,10 @@ static int tvp5150_write_inittab(struct v4l2_subdev *sd,
        return 0;
 }
 
-static int tvp5150_vdp_init(struct v4l2_subdev *sd,
-                               const struct i2c_vbi_ram_value *regs)
+static int tvp5150_vdp_init(struct v4l2_subdev *sd)
 {
        unsigned int i;
+       int j;
 
        /* Disable Full Field */
        tvp5150_write(sd, TVP5150_FULL_FIELD_ENA, 0);
@@ -604,14 +601,17 @@ static int tvp5150_vdp_init(struct v4l2_subdev *sd,
                tvp5150_write(sd, i, 0xff);
 
        /* Load Ram Table */
-       while (regs->reg != (u16)-1) {
+       for (j = 0; j < ARRAY_SIZE(vbi_ram_default); j++) {
+               const struct i2c_vbi_ram_value *regs = &vbi_ram_default[j];
+
+               if (!regs->type.vbi_type)
+                       continue;
+
                tvp5150_write(sd, TVP5150_CONF_RAM_ADDR_HIGH, regs->reg >> 8);
                tvp5150_write(sd, TVP5150_CONF_RAM_ADDR_LOW, regs->reg);
 
                for (i = 0; i < 16; i++)
                        tvp5150_write(sd, TVP5150_VDP_CONF_RAM_DATA, regs->values[i]);
-
-               regs++;
        }
        return 0;
 }
@@ -620,19 +620,23 @@ static int tvp5150_vdp_init(struct v4l2_subdev *sd,
 static int tvp5150_g_sliced_vbi_cap(struct v4l2_subdev *sd,
                                struct v4l2_sliced_vbi_cap *cap)
 {
-       const struct i2c_vbi_ram_value *regs = vbi_ram_default;
-       int line;
+       int line, i;
 
        dev_dbg_lvl(sd->dev, 1, debug, "g_sliced_vbi_cap\n");
        memset(cap, 0, sizeof *cap);
 
-       while (regs->reg != (u16)-1 ) {
-               for (line=regs->type.ini_line;line<=regs->type.end_line;line++) {
+       for (i = 0; i < ARRAY_SIZE(vbi_ram_default); i++) {
+               const struct i2c_vbi_ram_value *regs = &vbi_ram_default[i];
+
+               if (!regs->type.vbi_type)
+                       continue;
+
+               for (line = regs->type.ini_line;
+                    line <= regs->type.end_line;
+                    line++) {
                        cap->service_lines[0][line] |= regs->type.vbi_type;
                }
                cap->service_set |= regs->type.vbi_type;
-
-               regs++;
        }
        return 0;
 }
@@ -651,14 +655,13 @@ static int tvp5150_g_sliced_vbi_cap(struct v4l2_subdev *sd,
  *     MSB = field2
  */
 static int tvp5150_set_vbi(struct v4l2_subdev *sd,
-                       const struct i2c_vbi_ram_value *regs,
                        unsigned int type,u8 flags, int line,
                        const int fields)
 {
        struct tvp5150 *decoder = to_tvp5150(sd);
        v4l2_std_id std = decoder->norm;
        u8 reg;
-       int pos = 0;
+       int i, pos = 0;
 
        if (std == V4L2_STD_ALL) {
                dev_err(sd->dev, "VBI can't be configured without knowing number of lines\n");
@@ -671,19 +674,19 @@ static int tvp5150_set_vbi(struct v4l2_subdev *sd,
        if (line < 6 || line > 27)
                return 0;
 
-       while (regs->reg != (u16)-1) {
+       for (i = 0; i < ARRAY_SIZE(vbi_ram_default); i++) {
+               const struct i2c_vbi_ram_value *regs =  &vbi_ram_default[i];
+
+               if (!regs->type.vbi_type)
+                       continue;
+
                if ((type & regs->type.vbi_type) &&
                    (line >= regs->type.ini_line) &&
                    (line <= regs->type.end_line))
                        break;
-
-               regs++;
                pos++;
        }
 
-       if (regs->reg == (u16)-1)
-               return 0;
-
        type = pos | (flags & 0xf0);
        reg = ((line - 6) << 1) + TVP5150_LINE_MODE_INI;
 
@@ -696,8 +699,7 @@ static int tvp5150_set_vbi(struct v4l2_subdev *sd,
        return type;
 }
 
-static int tvp5150_get_vbi(struct v4l2_subdev *sd,
-                       const struct i2c_vbi_ram_value *regs, int line)
+static int tvp5150_get_vbi(struct v4l2_subdev *sd, int line)
 {
        struct tvp5150 *decoder = to_tvp5150(sd);
        v4l2_std_id std = decoder->norm;
@@ -726,8 +728,8 @@ static int tvp5150_get_vbi(struct v4l2_subdev *sd,
                        return 0;
                }
                pos = ret & 0x0f;
-               if (pos < 0x0f)
-                       type |= regs[pos].type.vbi_type;
+               if (pos < ARRAY_SIZE(vbi_ram_default))
+                       type |= vbi_ram_default[pos].type.vbi_type;
        }
 
        return type;
@@ -788,7 +790,7 @@ static int tvp5150_reset(struct v4l2_subdev *sd, u32 val)
        tvp5150_write_inittab(sd, tvp5150_init_default);
 
        /* Initializes VDP registers */
-       tvp5150_vdp_init(sd, vbi_ram_default);
+       tvp5150_vdp_init(sd);
 
        /* Selects decoder input */
        tvp5150_selmux(sd);
@@ -1121,8 +1123,8 @@ static int tvp5150_s_sliced_fmt(struct v4l2_subdev *sd, struct v4l2_sliced_vbi_f
                for (i = 0; i <= 23; i++) {
                        svbi->service_lines[1][i] = 0;
                        svbi->service_lines[0][i] =
-                               tvp5150_set_vbi(sd, vbi_ram_default,
-                                      svbi->service_lines[0][i], 0xf0, i, 3);
+                               tvp5150_set_vbi(sd, svbi->service_lines[0][i],
+                                               0xf0, i, 3);
                }
                /* Enables FIFO */
                tvp5150_write(sd, TVP5150_FIFO_OUT_CTRL, 1);
@@ -1148,7 +1150,7 @@ static int tvp5150_g_sliced_fmt(struct v4l2_subdev *sd, struct v4l2_sliced_vbi_f
 
        for (i = 0; i <= 23; i++) {
                svbi->service_lines[0][i] =
-                       tvp5150_get_vbi(sd, vbi_ram_default, i);
+                       tvp5150_get_vbi(sd, i);
                mask |= svbi->service_lines[0][i];
        }
        svbi->service_set = mask;
index dc8e577..d6816ef 100644 (file)
@@ -324,14 +324,15 @@ static int DvbDmxFilterCallback(u8 *buffer1, size_t buffer1_len,
                }
                return dvbdmxfilter->feed->cb.sec(buffer1, buffer1_len,
                                                  buffer2, buffer2_len,
-                                                 &dvbdmxfilter->filter);
+                                                 &dvbdmxfilter->filter, NULL);
        case DMX_TYPE_TS:
                if (!(dvbdmxfilter->feed->ts_type & TS_PACKET))
                        return 0;
                if (dvbdmxfilter->feed->ts_type & TS_PAYLOAD_ONLY)
                        return dvbdmxfilter->feed->cb.ts(buffer1, buffer1_len,
                                                         buffer2, buffer2_len,
-                                                        &dvbdmxfilter->feed->feed.ts);
+                                                        &dvbdmxfilter->feed->feed.ts,
+                                                        NULL);
                else
                        av7110_p2t_write(buffer1, buffer1_len,
                                         dvbdmxfilter->feed->pid,
index 4daba76..ef1bc17 100644 (file)
@@ -99,7 +99,7 @@ int av7110_record_cb(struct dvb_filter_pes2ts *p2t, u8 *buf, size_t len)
                buf[4] = buf[5] = 0;
        if (dvbdmxfeed->ts_type & TS_PAYLOAD_ONLY)
                return dvbdmxfeed->cb.ts(buf, len, NULL, 0,
-                                        &dvbdmxfeed->feed.ts);
+                                        &dvbdmxfeed->feed.ts, NULL);
        else
                return dvb_filter_pes2ts(p2t, buf, len, 1);
 }
@@ -109,7 +109,7 @@ static int dvb_filter_pes2ts_cb(void *priv, unsigned char *data)
        struct dvb_demux_feed *dvbdmxfeed = (struct dvb_demux_feed *) priv;
 
        dvbdmxfeed->cb.ts(data, 188, NULL, 0,
-                         &dvbdmxfeed->feed.ts);
+                         &dvbdmxfeed->feed.ts, NULL);
        return 0;
 }
 
@@ -814,7 +814,7 @@ static void p_to_t(u8 const *buf, long int length, u16 pid, u8 *counter,
                        memcpy(obuf + l, buf + c, TS_SIZE - l);
                        c = length;
                }
-               feed->cb.ts(obuf, 188, NULL, 0, &feed->feed.ts);
+               feed->cb.ts(obuf, 188, NULL, 0, &feed->feed.ts, NULL);
                pes_start = 0;
        }
 }
index 70521e0..bfaa806 100644 (file)
@@ -1,7 +1,7 @@
 
 config VIDEO_AU0828
        tristate "Auvitek AU0828 support"
-       depends on I2C && INPUT && DVB_CORE && USB
+       depends on I2C && INPUT && DVB_CORE && USB && VIDEO_V4L2
        select I2C_ALGOBIT
        select VIDEO_TVEEPROM
        select VIDEOBUF2_VMALLOC
index a8900f5..44ca66c 100644 (file)
@@ -428,7 +428,7 @@ static int ttusb_dec_audio_pes2ts_cb(void *priv, unsigned char *data)
        struct ttusb_dec *dec = priv;
 
        dec->audio_filter->feed->cb.ts(data, 188, NULL, 0,
-                                      &dec->audio_filter->feed->feed.ts);
+                                      &dec->audio_filter->feed->feed.ts, NULL);
 
        return 0;
 }
@@ -438,7 +438,7 @@ static int ttusb_dec_video_pes2ts_cb(void *priv, unsigned char *data)
        struct ttusb_dec *dec = priv;
 
        dec->video_filter->feed->cb.ts(data, 188, NULL, 0,
-                                      &dec->video_filter->feed->feed.ts);
+                                      &dec->video_filter->feed->feed.ts, NULL);
 
        return 0;
 }
@@ -490,7 +490,7 @@ static void ttusb_dec_process_pva(struct ttusb_dec *dec, u8 *pva, int length)
 
                if (output_pva) {
                        dec->video_filter->feed->cb.ts(pva, length, NULL, 0,
-                               &dec->video_filter->feed->feed.ts);
+                               &dec->video_filter->feed->feed.ts, NULL);
                        return;
                }
 
@@ -551,7 +551,7 @@ static void ttusb_dec_process_pva(struct ttusb_dec *dec, u8 *pva, int length)
        case 0x02:              /* MainAudioStream */
                if (output_pva) {
                        dec->audio_filter->feed->cb.ts(pva, length, NULL, 0,
-                               &dec->audio_filter->feed->feed.ts);
+                               &dec->audio_filter->feed->feed.ts, NULL);
                        return;
                }
 
@@ -589,7 +589,7 @@ static void ttusb_dec_process_filter(struct ttusb_dec *dec, u8 *packet,
 
        if (filter)
                filter->feed->cb.sec(&packet[2], length - 2, NULL, 0,
-                                    &filter->filter);
+                                    &filter->filter, NULL);
 }
 
 static void ttusb_dec_process_packet(struct ttusb_dec *dec)
index bf52fbd..8e37e7c 100644 (file)
@@ -7,6 +7,7 @@ config VIDEO_V4L2
        tristate
        depends on (I2C || I2C=n) && VIDEO_DEV
        select RATIONAL
+       select VIDEOBUF2_V4L2 if VIDEOBUF2_CORE
        default (I2C || I2C=n) && VIDEO_DEV
 
 config VIDEO_ADV_DEBUG
index 80de2cb..7df5458 100644 (file)
@@ -13,7 +13,7 @@ ifeq ($(CONFIG_COMPAT),y)
 endif
 obj-$(CONFIG_V4L2_FWNODE) += v4l2-fwnode.o
 ifeq ($(CONFIG_TRACEPOINTS),y)
-  videodev-objs += vb2-trace.o v4l2-trace.o
+  videodev-objs += v4l2-trace.o
 endif
 videodev-$(CONFIG_MEDIA_CONTROLLER) += v4l2-mc.o
 
@@ -35,4 +35,3 @@ obj-$(CONFIG_VIDEOBUF_DVB) += videobuf-dvb.o
 
 ccflags-y += -I$(srctree)/drivers/media/dvb-frontends
 ccflags-y += -I$(srctree)/drivers/media/tuners
-
diff --git a/drivers/media/v4l2-core/vb2-trace.c b/drivers/media/v4l2-core/vb2-trace.c
deleted file mode 100644 (file)
index 4c0f39d..0000000
+++ /dev/null
@@ -1,10 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#include <media/videobuf2-core.h>
-
-#define CREATE_TRACE_POINTS
-#include <trace/events/vb2.h>
-
-EXPORT_TRACEPOINT_SYMBOL_GPL(vb2_buf_done);
-EXPORT_TRACEPOINT_SYMBOL_GPL(vb2_buf_queue);
-EXPORT_TRACEPOINT_SYMBOL_GPL(vb2_dqbuf);
-EXPORT_TRACEPOINT_SYMBOL_GPL(vb2_qbuf);
index 0a7bdbe..e9c1485 100644 (file)
 #define REG_TO_DCPU_MBOX       0x10
 #define REG_TO_HOST_MBOX       0x14
 
+/* Macros to process offsets returned by the DCPU */
+#define DRAM_MSG_ADDR_OFFSET   0x0
+#define DRAM_MSG_TYPE_OFFSET   0x1c
+#define DRAM_MSG_ADDR_MASK     ((1UL << DRAM_MSG_TYPE_OFFSET) - 1)
+#define DRAM_MSG_TYPE_MASK     ((1UL << \
+                                (BITS_PER_LONG - DRAM_MSG_TYPE_OFFSET)) - 1)
+
 /* Message RAM */
-#define DCPU_MSG_RAM(x)                (0x100 + (x) * sizeof(u32))
+#define DCPU_MSG_RAM_START     0x100
+#define DCPU_MSG_RAM(x)                (DCPU_MSG_RAM_START + (x) * sizeof(u32))
 
 /* DRAM Info Offsets & Masks */
 #define DRAM_INFO_INTERVAL     0x0
@@ -255,6 +263,40 @@ static unsigned int get_msg_chksum(const u32 msg[])
        return sum;
 }
 
+static void __iomem *get_msg_ptr(struct private_data *priv, u32 response,
+                                char *buf, ssize_t *size)
+{
+       unsigned int msg_type;
+       unsigned int offset;
+       void __iomem *ptr = NULL;
+
+       msg_type = (response >> DRAM_MSG_TYPE_OFFSET) & DRAM_MSG_TYPE_MASK;
+       offset = (response >> DRAM_MSG_ADDR_OFFSET) & DRAM_MSG_ADDR_MASK;
+
+       /*
+        * msg_type == 1: the offset is relative to the message RAM
+        * msg_type == 0: the offset is relative to the data RAM (this is the
+        *                previous way of passing data)
+        * msg_type is anything else: there's a critical hardware problem
+        */
+       switch (msg_type) {
+       case 1:
+               ptr = priv->regs + DCPU_MSG_RAM_START + offset;
+               break;
+       case 0:
+               ptr = priv->dmem + offset;
+               break;
+       default:
+               dev_emerg(priv->dev, "invalid message reply from DCPU: %#x\n",
+                       response);
+               if (buf && size)
+                       *size = sprintf(buf,
+                               "FATAL: communication error with DCPU\n");
+       }
+
+       return ptr;
+}
+
 static int __send_command(struct private_data *priv, unsigned int cmd,
                          u32 result[])
 {
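The comment in get_msg_ptr() spells out the DCPU reply encoding: bit 28 selects message RAM versus data RAM, and the low 28 bits carry the offset. A worked decode using the masks defined above, with an assumed example value (not taken from real hardware):

/*
 * Illustration only; the response value 0x10000040 is assumed:
 *   msg_type = (0x10000040 >> DRAM_MSG_TYPE_OFFSET) & DRAM_MSG_TYPE_MASK == 1
 *   offset   = (0x10000040 >> DRAM_MSG_ADDR_OFFSET) & DRAM_MSG_ADDR_MASK == 0x40
 * so the reply sits in message RAM at priv->regs + DCPU_MSG_RAM_START + 0x40.
 */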
@@ -507,7 +549,7 @@ static ssize_t show_info(struct device *dev, struct device_attribute *devattr,
 {
        u32 response[MSG_FIELD_MAX];
        unsigned int info;
-       int ret;
+       ssize_t ret;
 
        ret = generic_show(DPFE_CMD_GET_INFO, response, dev, buf);
        if (ret)
@@ -528,18 +570,19 @@ static ssize_t show_refresh(struct device *dev,
        u32 response[MSG_FIELD_MAX];
        void __iomem *info;
        struct private_data *priv;
-       unsigned int offset;
        u8 refresh, sr_abort, ppre, thermal_offs, tuf;
        u32 mr4;
-       int ret;
+       ssize_t ret;
 
        ret = generic_show(DPFE_CMD_GET_REFRESH, response, dev, buf);
        if (ret)
                return ret;
 
        priv = dev_get_drvdata(dev);
-       offset = response[MSG_ARG0];
-       info = priv->dmem + offset;
+
+       info = get_msg_ptr(priv, response[MSG_ARG0], buf, &ret);
+       if (!info)
+               return ret;
 
        mr4 = readl_relaxed(info + DRAM_INFO_MR4) & DRAM_INFO_MR4_MASK;
 
@@ -561,7 +604,6 @@ static ssize_t store_refresh(struct device *dev, struct device_attribute *attr,
        u32 response[MSG_FIELD_MAX];
        struct private_data *priv;
        void __iomem *info;
-       unsigned int offset;
        unsigned long val;
        int ret;
 
@@ -574,8 +616,10 @@ static ssize_t store_refresh(struct device *dev, struct device_attribute *attr,
        if (ret)
                return ret;
 
-       offset = response[MSG_ARG0];
-       info = priv->dmem + offset;
+       info = get_msg_ptr(priv, response[MSG_ARG0], NULL, NULL);
+       if (!info)
+               return -EIO;
+
        writel_relaxed(val, info + DRAM_INFO_INTERVAL);
 
        return count;
@@ -587,23 +631,25 @@ static ssize_t show_vendor(struct device *dev, struct device_attribute *devattr,
        u32 response[MSG_FIELD_MAX];
        struct private_data *priv;
        void __iomem *info;
-       unsigned int offset;
-       int ret;
+       ssize_t ret;
 
        ret = generic_show(DPFE_CMD_GET_VENDOR, response, dev, buf);
        if (ret)
                return ret;
 
-       offset = response[MSG_ARG0];
        priv = dev_get_drvdata(dev);
-       info = priv->dmem + offset;
+
+       info = get_msg_ptr(priv, response[MSG_ARG0], buf, &ret);
+       if (!info)
+               return ret;
 
        return sprintf(buf, "%#x %#x %#x %#x %#x\n",
                       readl_relaxed(info + DRAM_VENDOR_MR5) & DRAM_VENDOR_MASK,
                       readl_relaxed(info + DRAM_VENDOR_MR6) & DRAM_VENDOR_MASK,
                       readl_relaxed(info + DRAM_VENDOR_MR7) & DRAM_VENDOR_MASK,
                       readl_relaxed(info + DRAM_VENDOR_MR8) & DRAM_VENDOR_MASK,
-                      readl_relaxed(info + DRAM_VENDOR_ERROR));
+                      readl_relaxed(info + DRAM_VENDOR_ERROR) &
+                                    DRAM_VENDOR_MASK);
 }
 
 static int brcmstb_dpfe_resume(struct platform_device *pdev)
index 2dd2db9..337462e 100644 (file)
@@ -133,8 +133,10 @@ static long afu_ioctl(struct file *file, unsigned int cmd,
                if (!rc) {
                        rc = copy_to_user((u64 __user *) args, &irq_offset,
                                        sizeof(irq_offset));
-                       if (rc)
+                       if (rc) {
                                ocxl_afu_irq_free(ctx, irq_offset);
+                               return -EFAULT;
+                       }
                }
                break;
 
@@ -329,7 +331,7 @@ static ssize_t afu_read(struct file *file, char __user *buf, size_t count,
 
        used += sizeof(header);
 
-       rc = (ssize_t) used;
+       rc = used;
        return rc;
 }
 
index 908e4db..42d6aa8 100644 (file)
@@ -848,7 +848,6 @@ int mmc_interrupt_hpi(struct mmc_card *card)
                return 1;
        }
 
-       mmc_claim_host(card->host);
        err = mmc_send_status(card, &status);
        if (err) {
                pr_err("%s: Get card status fail\n", mmc_hostname(card->host));
@@ -890,7 +889,6 @@ int mmc_interrupt_hpi(struct mmc_card *card)
        } while (!err);
 
 out:
-       mmc_release_host(card->host);
        return err;
 }
 
@@ -932,9 +930,7 @@ static int mmc_read_bkops_status(struct mmc_card *card)
        int err;
        u8 *ext_csd;
 
-       mmc_claim_host(card->host);
        err = mmc_get_ext_csd(card, &ext_csd);
-       mmc_release_host(card->host);
        if (err)
                return err;
 
index 3502679..fa41d94 100644 (file)
@@ -487,6 +487,7 @@ static unsigned long exynos_dwmmc_caps[4] = {
 
 static const struct dw_mci_drv_data exynos_drv_data = {
        .caps                   = exynos_dwmmc_caps,
+       .num_caps               = ARRAY_SIZE(exynos_dwmmc_caps),
        .init                   = dw_mci_exynos_priv_init,
        .set_ios                = dw_mci_exynos_set_ios,
        .parse_dt               = dw_mci_exynos_parse_dt,
index 73fd75c..89cdb3d 100644 (file)
@@ -135,6 +135,9 @@ static int dw_mci_hi6220_parse_dt(struct dw_mci *host)
        if (priv->ctrl_id < 0)
                priv->ctrl_id = 0;
 
+       if (priv->ctrl_id >= TIMING_MODE)
+               return -EINVAL;
+
        host->priv = priv;
        return 0;
 }
@@ -207,6 +210,7 @@ static int dw_mci_hi6220_execute_tuning(struct dw_mci_slot *slot, u32 opcode)
 
 static const struct dw_mci_drv_data hi6220_data = {
        .caps                   = dw_mci_hi6220_caps,
+       .num_caps               = ARRAY_SIZE(dw_mci_hi6220_caps),
        .switch_voltage         = dw_mci_hi6220_switch_voltage,
        .set_ios                = dw_mci_hi6220_set_ios,
        .parse_dt               = dw_mci_hi6220_parse_dt,
index a3f1c2b..3392952 100644 (file)
@@ -319,6 +319,7 @@ static const struct dw_mci_drv_data rk2928_drv_data = {
 
 static const struct dw_mci_drv_data rk3288_drv_data = {
        .caps                   = dw_mci_rk3288_dwmmc_caps,
+       .num_caps               = ARRAY_SIZE(dw_mci_rk3288_dwmmc_caps),
        .set_ios                = dw_mci_rk3288_set_ios,
        .execute_tuning         = dw_mci_rk3288_execute_tuning,
        .parse_dt               = dw_mci_rk3288_parse_dt,
index d38e94a..c06b539 100644 (file)
@@ -195,6 +195,7 @@ static unsigned long zx_dwmmc_caps[3] = {
 
 static const struct dw_mci_drv_data zx_drv_data = {
        .caps                   = zx_dwmmc_caps,
+       .num_caps               = ARRAY_SIZE(zx_dwmmc_caps),
        .execute_tuning         = dw_mci_zx_execute_tuning,
        .prepare_hs400_tuning   = dw_mci_zx_prepare_hs400_tuning,
        .parse_dt               = dw_mci_zx_parse_dt,
index 0aa3997..d9b4ace 100644 (file)
@@ -165,6 +165,8 @@ static int dw_mci_regs_show(struct seq_file *s, void *v)
 {
        struct dw_mci *host = s->private;
 
+       pm_runtime_get_sync(host->dev);
+
        seq_printf(s, "STATUS:\t0x%08x\n", mci_readl(host, STATUS));
        seq_printf(s, "RINTSTS:\t0x%08x\n", mci_readl(host, RINTSTS));
        seq_printf(s, "CMD:\t0x%08x\n", mci_readl(host, CMD));
@@ -172,6 +174,8 @@ static int dw_mci_regs_show(struct seq_file *s, void *v)
        seq_printf(s, "INTMASK:\t0x%08x\n", mci_readl(host, INTMASK));
        seq_printf(s, "CLKENA:\t0x%08x\n", mci_readl(host, CLKENA));
 
+       pm_runtime_put_autosuspend(host->dev);
+
        return 0;
 }
 
@@ -2778,12 +2782,57 @@ static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
        return IRQ_HANDLED;
 }
 
+static int dw_mci_init_slot_caps(struct dw_mci_slot *slot)
+{
+       struct dw_mci *host = slot->host;
+       const struct dw_mci_drv_data *drv_data = host->drv_data;
+       struct mmc_host *mmc = slot->mmc;
+       int ctrl_id;
+
+       if (host->pdata->caps)
+               mmc->caps = host->pdata->caps;
+
+       /*
+        * Support MMC_CAP_ERASE by default.
+        * It needs to use trim/discard/erase commands.
+        */
+       mmc->caps |= MMC_CAP_ERASE;
+
+       if (host->pdata->pm_caps)
+               mmc->pm_caps = host->pdata->pm_caps;
+
+       if (host->dev->of_node) {
+               ctrl_id = of_alias_get_id(host->dev->of_node, "mshc");
+               if (ctrl_id < 0)
+                       ctrl_id = 0;
+       } else {
+               ctrl_id = to_platform_device(host->dev)->id;
+       }
+
+       if (drv_data && drv_data->caps) {
+               if (ctrl_id >= drv_data->num_caps) {
+                       dev_err(host->dev, "invalid controller id %d\n",
+                               ctrl_id);
+                       return -EINVAL;
+               }
+               mmc->caps |= drv_data->caps[ctrl_id];
+       }
+
+       if (host->pdata->caps2)
+               mmc->caps2 = host->pdata->caps2;
+
+       /* Process SDIO IRQs through the sdio_irq_work. */
+       if (mmc->caps & MMC_CAP_SDIO_IRQ)
+               mmc->caps2 |= MMC_CAP2_SDIO_IRQ_NOTHREAD;
+
+       return 0;
+}
+
 static int dw_mci_init_slot(struct dw_mci *host)
 {
        struct mmc_host *mmc;
        struct dw_mci_slot *slot;
-       const struct dw_mci_drv_data *drv_data = host->drv_data;
-       int ctrl_id, ret;
+       int ret;
        u32 freq[2];
 
        mmc = mmc_alloc_host(sizeof(struct dw_mci_slot), host->dev);
@@ -2817,38 +2866,13 @@ static int dw_mci_init_slot(struct dw_mci *host)
        if (!mmc->ocr_avail)
                mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
 
-       if (host->pdata->caps)
-               mmc->caps = host->pdata->caps;
-
-       /*
-        * Support MMC_CAP_ERASE by default.
-        * It needs to use trim/discard/erase commands.
-        */
-       mmc->caps |= MMC_CAP_ERASE;
-
-       if (host->pdata->pm_caps)
-               mmc->pm_caps = host->pdata->pm_caps;
-
-       if (host->dev->of_node) {
-               ctrl_id = of_alias_get_id(host->dev->of_node, "mshc");
-               if (ctrl_id < 0)
-                       ctrl_id = 0;
-       } else {
-               ctrl_id = to_platform_device(host->dev)->id;
-       }
-       if (drv_data && drv_data->caps)
-               mmc->caps |= drv_data->caps[ctrl_id];
-
-       if (host->pdata->caps2)
-               mmc->caps2 = host->pdata->caps2;
-
        ret = mmc_of_parse(mmc);
        if (ret)
                goto err_host_allocated;
 
-       /* Process SDIO IRQs through the sdio_irq_work. */
-       if (mmc->caps & MMC_CAP_SDIO_IRQ)
-               mmc->caps2 |= MMC_CAP2_SDIO_IRQ_NOTHREAD;
+       ret = dw_mci_init_slot_caps(slot);
+       if (ret)
+               goto err_host_allocated;
 
        /* Useful defaults if platform data is unset. */
        if (host->use_dma == TRANS_MODE_IDMAC) {
index e3124f0..1424bd4 100644 (file)
@@ -543,6 +543,7 @@ struct dw_mci_slot {
 /**
  * dw_mci driver data - dw-mshc implementation specific driver data.
  * @caps: mmc subsystem specified capabilities of the controller(s).
+ * @num_caps: number of capabilities specified by @caps.
  * @init: early implementation specific initialization.
  * @set_ios: handle bus specific extensions.
  * @parse_dt: parse implementation specific device tree properties.
@@ -554,6 +555,7 @@ struct dw_mci_slot {
  */
 struct dw_mci_drv_data {
        unsigned long   *caps;
+       u32             num_caps;
        int             (*init)(struct dw_mci *host);
        void            (*set_ios)(struct dw_mci *host, struct mmc_ios *ios);
        int             (*parse_dt)(struct dw_mci *host);
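The new @num_caps field lets dw_mci_init_slot_caps() reject an out-of-range controller id before indexing @caps, as seen in the dw_mmc.c hunk above. A hedged sketch of the pattern a host driver now follows (the my_dwmmc_* names are hypothetical; the real drivers in this series use ARRAY_SIZE() in exactly this way):

static unsigned long my_dwmmc_caps[2] = {
	MMC_CAP_CMD23,				/* controller 0 */
	MMC_CAP_CMD23 | MMC_CAP_NONREMOVABLE,	/* controller 1 */
};

static const struct dw_mci_drv_data my_drv_data = {
	.caps		= my_dwmmc_caps,
	.num_caps	= ARRAY_SIZE(my_dwmmc_caps),
};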
index 6d1a983..82c4f05 100644 (file)
@@ -654,9 +654,36 @@ static void byt_read_dsm(struct sdhci_pci_slot *slot)
        slot->chip->rpm_retune = intel_host->d3_retune;
 }
 
-static int byt_emmc_probe_slot(struct sdhci_pci_slot *slot)
+static int intel_execute_tuning(struct mmc_host *mmc, u32 opcode)
+{
+       int err = sdhci_execute_tuning(mmc, opcode);
+       struct sdhci_host *host = mmc_priv(mmc);
+
+       if (err)
+               return err;
+
+       /*
+        * Tuning can leave the IP in an active state (Buffer Read Enable bit
+        * set) which prevents the entry to low power states (i.e. S0i3). Data
+        * reset will clear it.
+        */
+       sdhci_reset(host, SDHCI_RESET_DATA);
+
+       return 0;
+}
+
+static void byt_probe_slot(struct sdhci_pci_slot *slot)
 {
+       struct mmc_host_ops *ops = &slot->host->mmc_host_ops;
+
        byt_read_dsm(slot);
+
+       ops->execute_tuning = intel_execute_tuning;
+}
+
+static int byt_emmc_probe_slot(struct sdhci_pci_slot *slot)
+{
+       byt_probe_slot(slot);
        slot->host->mmc->caps |= MMC_CAP_8_BIT_DATA | MMC_CAP_NONREMOVABLE |
                                 MMC_CAP_HW_RESET | MMC_CAP_1_8V_DDR |
                                 MMC_CAP_CMD_DURING_TFR |
@@ -779,7 +806,7 @@ static int ni_byt_sdio_probe_slot(struct sdhci_pci_slot *slot)
 {
        int err;
 
-       byt_read_dsm(slot);
+       byt_probe_slot(slot);
 
        err = ni_set_max_freq(slot);
        if (err)
@@ -792,7 +819,7 @@ static int ni_byt_sdio_probe_slot(struct sdhci_pci_slot *slot)
 
 static int byt_sdio_probe_slot(struct sdhci_pci_slot *slot)
 {
-       byt_read_dsm(slot);
+       byt_probe_slot(slot);
        slot->host->mmc->caps |= MMC_CAP_POWER_OFF_CARD | MMC_CAP_NONREMOVABLE |
                                 MMC_CAP_WAIT_WHILE_BUSY;
        return 0;
@@ -800,7 +827,7 @@ static int byt_sdio_probe_slot(struct sdhci_pci_slot *slot)
 
 static int byt_sd_probe_slot(struct sdhci_pci_slot *slot)
 {
-       byt_read_dsm(slot);
+       byt_probe_slot(slot);
        slot->host->mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY |
                                 MMC_CAP_AGGRESSIVE_PM | MMC_CAP_CD_WAKE;
        slot->cd_idx = 0;
index f5c87bd..f27f9ba 100644 (file)
@@ -3063,9 +3063,6 @@ static void gfar_process_frame(struct net_device *ndev, struct sk_buff *skb)
        if (ndev->features & NETIF_F_RXCSUM)
                gfar_rx_checksum(skb, fcb);
 
-       /* Tell the skb what kind of packet this is */
-       skb->protocol = eth_type_trans(skb, ndev);
-
        /* There's need to check for NETIF_F_HW_VLAN_CTAG_RX here.
         * Even if vlan rx accel is disabled, on some chips
         * RXFCB_VLN is pseudo randomly set.
@@ -3136,13 +3133,15 @@ int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
                        continue;
                }
 
+               gfar_process_frame(ndev, skb);
+
                /* Increment the number of packets */
                total_pkts++;
                total_bytes += skb->len;
 
                skb_record_rx_queue(skb, rx_queue->qindex);
 
-               gfar_process_frame(ndev, skb);
+               skb->protocol = eth_type_trans(skb, ndev);
 
                /* Send the packet up the stack */
                napi_gro_receive(&rx_queue->grp->napi_rx, skb);
index b032091..8536942 100644 (file)
@@ -1888,6 +1888,14 @@ static void ixgbe_dma_sync_frag(struct ixgbe_ring *rx_ring,
                                     ixgbe_rx_pg_size(rx_ring),
                                     DMA_FROM_DEVICE,
                                     IXGBE_RX_DMA_ATTR);
+       } else if (ring_uses_build_skb(rx_ring)) {
+               unsigned long offset = (unsigned long)(skb->data) & ~PAGE_MASK;
+
+               dma_sync_single_range_for_cpu(rx_ring->dev,
+                                             IXGBE_CB(skb)->dma,
+                                             offset,
+                                             skb_headlen(skb),
+                                             DMA_FROM_DEVICE);
        } else {
                struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
 
index f6963b0..122506d 100644 (file)
@@ -107,20 +107,20 @@ static const struct mlxsw_afk_element_info mlxsw_afk_element_infos[] = {
        MLXSW_AFK_ELEMENT_INFO_U32(VID, 0x10, 8, 12),
        MLXSW_AFK_ELEMENT_INFO_U32(PCP, 0x10, 20, 3),
        MLXSW_AFK_ELEMENT_INFO_U32(TCP_FLAGS, 0x10, 23, 9),
-       MLXSW_AFK_ELEMENT_INFO_U32(IP_TTL_, 0x14, 0, 8),
-       MLXSW_AFK_ELEMENT_INFO_U32(IP_ECN, 0x14, 9, 2),
-       MLXSW_AFK_ELEMENT_INFO_U32(IP_DSCP, 0x14, 11, 6),
-       MLXSW_AFK_ELEMENT_INFO_U32(SRC_IP4, 0x18, 0, 32),
-       MLXSW_AFK_ELEMENT_INFO_U32(DST_IP4, 0x1C, 0, 32),
-       MLXSW_AFK_ELEMENT_INFO_BUF(SRC_IP6_HI, 0x18, 8),
-       MLXSW_AFK_ELEMENT_INFO_BUF(SRC_IP6_LO, 0x20, 8),
-       MLXSW_AFK_ELEMENT_INFO_BUF(DST_IP6_HI, 0x28, 8),
-       MLXSW_AFK_ELEMENT_INFO_BUF(DST_IP6_LO, 0x30, 8),
        MLXSW_AFK_ELEMENT_INFO_U32(DST_L4_PORT, 0x14, 0, 16),
        MLXSW_AFK_ELEMENT_INFO_U32(SRC_L4_PORT, 0x14, 16, 16),
+       MLXSW_AFK_ELEMENT_INFO_U32(IP_TTL_, 0x18, 0, 8),
+       MLXSW_AFK_ELEMENT_INFO_U32(IP_ECN, 0x18, 9, 2),
+       MLXSW_AFK_ELEMENT_INFO_U32(IP_DSCP, 0x18, 11, 6),
+       MLXSW_AFK_ELEMENT_INFO_U32(SRC_IP4, 0x20, 0, 32),
+       MLXSW_AFK_ELEMENT_INFO_U32(DST_IP4, 0x24, 0, 32),
+       MLXSW_AFK_ELEMENT_INFO_BUF(SRC_IP6_HI, 0x20, 8),
+       MLXSW_AFK_ELEMENT_INFO_BUF(SRC_IP6_LO, 0x28, 8),
+       MLXSW_AFK_ELEMENT_INFO_BUF(DST_IP6_HI, 0x30, 8),
+       MLXSW_AFK_ELEMENT_INFO_BUF(DST_IP6_LO, 0x38, 8),
 };
 
-#define MLXSW_AFK_ELEMENT_STORAGE_SIZE 0x38
+#define MLXSW_AFK_ELEMENT_STORAGE_SIZE 0x40
 
 struct mlxsw_afk_element_inst { /* element instance in actual block */
        const struct mlxsw_afk_element_info *info;
index 7c6204f..7884e8a 100644 (file)
@@ -1149,6 +1149,7 @@ mlxsw_sp_port_vlan_create(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
        }
 
        mlxsw_sp_port_vlan->mlxsw_sp_port = mlxsw_sp_port;
+       mlxsw_sp_port_vlan->ref_count = 1;
        mlxsw_sp_port_vlan->vid = vid;
        list_add(&mlxsw_sp_port_vlan->list, &mlxsw_sp_port->vlans_list);
 
@@ -1176,8 +1177,10 @@ mlxsw_sp_port_vlan_get(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
        struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
 
        mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
-       if (mlxsw_sp_port_vlan)
+       if (mlxsw_sp_port_vlan) {
+               mlxsw_sp_port_vlan->ref_count++;
                return mlxsw_sp_port_vlan;
+       }
 
        return mlxsw_sp_port_vlan_create(mlxsw_sp_port, vid);
 }
@@ -1186,6 +1189,9 @@ void mlxsw_sp_port_vlan_put(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
 {
        struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid;
 
+       if (--mlxsw_sp_port_vlan->ref_count != 0)
+               return;
+
        if (mlxsw_sp_port_vlan->bridge_port)
                mlxsw_sp_port_vlan_bridge_leave(mlxsw_sp_port_vlan);
        else if (fid)
@@ -3816,13 +3822,12 @@ static struct devlink_resource_ops mlxsw_sp_resource_kvd_linear_ops = {
        .occ_get = mlxsw_sp_resource_kvd_linear_occ_get,
 };
 
-static struct devlink_resource_size_params mlxsw_sp_kvd_size_params;
-static struct devlink_resource_size_params mlxsw_sp_linear_size_params;
-static struct devlink_resource_size_params mlxsw_sp_hash_single_size_params;
-static struct devlink_resource_size_params mlxsw_sp_hash_double_size_params;
-
 static void
-mlxsw_sp_resource_size_params_prepare(struct mlxsw_core *mlxsw_core)
+mlxsw_sp_resource_size_params_prepare(struct mlxsw_core *mlxsw_core,
+                                     struct devlink_resource_size_params *kvd_size_params,
+                                     struct devlink_resource_size_params *linear_size_params,
+                                     struct devlink_resource_size_params *hash_double_size_params,
+                                     struct devlink_resource_size_params *hash_single_size_params)
 {
        u32 single_size_min = MLXSW_CORE_RES_GET(mlxsw_core,
                                                 KVD_SINGLE_MIN_SIZE);
@@ -3831,37 +3836,35 @@ mlxsw_sp_resource_size_params_prepare(struct mlxsw_core *mlxsw_core)
        u32 kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE);
        u32 linear_size_min = 0;
 
-       /* KVD top resource */
-       mlxsw_sp_kvd_size_params.size_min = kvd_size;
-       mlxsw_sp_kvd_size_params.size_max = kvd_size;
-       mlxsw_sp_kvd_size_params.size_granularity = MLXSW_SP_KVD_GRANULARITY;
-       mlxsw_sp_kvd_size_params.unit = DEVLINK_RESOURCE_UNIT_ENTRY;
-
-       /* Linear part init */
-       mlxsw_sp_linear_size_params.size_min = linear_size_min;
-       mlxsw_sp_linear_size_params.size_max = kvd_size - single_size_min -
-                                              double_size_min;
-       mlxsw_sp_linear_size_params.size_granularity = MLXSW_SP_KVD_GRANULARITY;
-       mlxsw_sp_linear_size_params.unit = DEVLINK_RESOURCE_UNIT_ENTRY;
-
-       /* Hash double part init */
-       mlxsw_sp_hash_double_size_params.size_min = double_size_min;
-       mlxsw_sp_hash_double_size_params.size_max = kvd_size - single_size_min -
-                                                   linear_size_min;
-       mlxsw_sp_hash_double_size_params.size_granularity = MLXSW_SP_KVD_GRANULARITY;
-       mlxsw_sp_hash_double_size_params.unit = DEVLINK_RESOURCE_UNIT_ENTRY;
-
-       /* Hash single part init */
-       mlxsw_sp_hash_single_size_params.size_min = single_size_min;
-       mlxsw_sp_hash_single_size_params.size_max = kvd_size - double_size_min -
-                                                   linear_size_min;
-       mlxsw_sp_hash_single_size_params.size_granularity = MLXSW_SP_KVD_GRANULARITY;
-       mlxsw_sp_hash_single_size_params.unit = DEVLINK_RESOURCE_UNIT_ENTRY;
+       devlink_resource_size_params_init(kvd_size_params, kvd_size, kvd_size,
+                                         MLXSW_SP_KVD_GRANULARITY,
+                                         DEVLINK_RESOURCE_UNIT_ENTRY);
+       devlink_resource_size_params_init(linear_size_params, linear_size_min,
+                                         kvd_size - single_size_min -
+                                         double_size_min,
+                                         MLXSW_SP_KVD_GRANULARITY,
+                                         DEVLINK_RESOURCE_UNIT_ENTRY);
+       devlink_resource_size_params_init(hash_double_size_params,
+                                         double_size_min,
+                                         kvd_size - single_size_min -
+                                         linear_size_min,
+                                         MLXSW_SP_KVD_GRANULARITY,
+                                         DEVLINK_RESOURCE_UNIT_ENTRY);
+       devlink_resource_size_params_init(hash_single_size_params,
+                                         single_size_min,
+                                         kvd_size - double_size_min -
+                                         linear_size_min,
+                                         MLXSW_SP_KVD_GRANULARITY,
+                                         DEVLINK_RESOURCE_UNIT_ENTRY);
 }
 
 static int mlxsw_sp_resources_register(struct mlxsw_core *mlxsw_core)
 {
        struct devlink *devlink = priv_to_devlink(mlxsw_core);
+       struct devlink_resource_size_params hash_single_size_params;
+       struct devlink_resource_size_params hash_double_size_params;
+       struct devlink_resource_size_params linear_size_params;
+       struct devlink_resource_size_params kvd_size_params;
        u32 kvd_size, single_size, double_size, linear_size;
        const struct mlxsw_config_profile *profile;
        int err;
@@ -3870,13 +3873,17 @@ static int mlxsw_sp_resources_register(struct mlxsw_core *mlxsw_core)
        if (!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_SIZE))
                return -EIO;
 
-       mlxsw_sp_resource_size_params_prepare(mlxsw_core);
+       mlxsw_sp_resource_size_params_prepare(mlxsw_core, &kvd_size_params,
+                                             &linear_size_params,
+                                             &hash_double_size_params,
+                                             &hash_single_size_params);
+
        kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE);
        err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD,
                                        true, kvd_size,
                                        MLXSW_SP_RESOURCE_KVD,
                                        DEVLINK_RESOURCE_ID_PARENT_TOP,
-                                       &mlxsw_sp_kvd_size_params,
+                                       &kvd_size_params,
                                        NULL);
        if (err)
                return err;
@@ -3886,7 +3893,7 @@ static int mlxsw_sp_resources_register(struct mlxsw_core *mlxsw_core)
                                        false, linear_size,
                                        MLXSW_SP_RESOURCE_KVD_LINEAR,
                                        MLXSW_SP_RESOURCE_KVD,
-                                       &mlxsw_sp_linear_size_params,
+                                       &linear_size_params,
                                        &mlxsw_sp_resource_kvd_linear_ops);
        if (err)
                return err;
@@ -3904,7 +3911,7 @@ static int mlxsw_sp_resources_register(struct mlxsw_core *mlxsw_core)
                                        false, double_size,
                                        MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE,
                                        MLXSW_SP_RESOURCE_KVD,
-                                       &mlxsw_sp_hash_double_size_params,
+                                       &hash_double_size_params,
                                        NULL);
        if (err)
                return err;
@@ -3914,7 +3921,7 @@ static int mlxsw_sp_resources_register(struct mlxsw_core *mlxsw_core)
                                        false, single_size,
                                        MLXSW_SP_RESOURCE_KVD_HASH_SINGLE,
                                        MLXSW_SP_RESOURCE_KVD,
-                                       &mlxsw_sp_hash_single_size_params,
+                                       &hash_single_size_params,
                                        NULL);
        if (err)
                return err;
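With the conversion above, the devlink_resource_size_params objects live on the stack of mlxsw_sp_resources_register() and are filled through devlink_resource_size_params_init(); this assumes devlink copies the parameters during devlink_resource_register(), so no file-scope statics are needed. A hedged sketch of the same pattern for a single hypothetical resource (name and id are made up for illustration):

static int example_register_resource(struct devlink *devlink, u64 size)
{
	struct devlink_resource_size_params params;

	devlink_resource_size_params_init(&params, 0, size,
					  MLXSW_SP_KVD_GRANULARITY,
					  DEVLINK_RESOURCE_UNIT_ENTRY);

	/* 0x100 is an assumed resource id, not one defined by mlxsw. */
	return devlink_resource_register(devlink, "example", false, size,
					 0x100,
					 DEVLINK_RESOURCE_ID_PARENT_TOP,
					 &params, NULL);
}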
index d5e711d..92194a9 100644 (file)
@@ -199,6 +199,7 @@ struct mlxsw_sp_port_vlan {
        struct list_head list;
        struct mlxsw_sp_port *mlxsw_sp_port;
        struct mlxsw_sp_fid *fid;
+       unsigned int ref_count;
        u16 vid;
        struct mlxsw_sp_bridge_port *bridge_port;
        struct list_head bridge_vlan_node;
index bbd238e..54262af 100644 (file)
@@ -112,11 +112,11 @@ static const int mlxsw_sp_sfgc_bc_packet_types[MLXSW_REG_SFGC_TYPE_MAX] = {
        [MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_NON_IP]     = 1,
        [MLXSW_REG_SFGC_TYPE_IPV4_LINK_LOCAL]                   = 1,
        [MLXSW_REG_SFGC_TYPE_IPV6_ALL_HOST]                     = 1,
+       [MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_IPV6]       = 1,
 };
 
 static const int mlxsw_sp_sfgc_mc_packet_types[MLXSW_REG_SFGC_TYPE_MAX] = {
        [MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_IPV4]       = 1,
-       [MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_IPV6]       = 1,
 };
 
 static const int *mlxsw_sp_packet_type_sfgc_types[] = {
index 917663a..c11c9a6 100644 (file)
@@ -1203,6 +1203,7 @@ static int __mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u8 local_port,
                                     bool dynamic)
 {
        char *sfd_pl;
+       u8 num_rec;
        int err;
 
        sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
@@ -1212,9 +1213,16 @@ static int __mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u8 local_port,
        mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
        mlxsw_reg_sfd_uc_pack(sfd_pl, 0, mlxsw_sp_sfd_rec_policy(dynamic),
                              mac, fid, action, local_port);
+       num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);
        err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
-       kfree(sfd_pl);
+       if (err)
+               goto out;
+
+       if (num_rec != mlxsw_reg_sfd_num_rec_get(sfd_pl))
+               err = -EBUSY;
 
+out:
+       kfree(sfd_pl);
        return err;
 }
 
@@ -1239,6 +1247,7 @@ static int mlxsw_sp_port_fdb_uc_lag_op(struct mlxsw_sp *mlxsw_sp, u16 lag_id,
                                       bool adding, bool dynamic)
 {
        char *sfd_pl;
+       u8 num_rec;
        int err;
 
        sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
@@ -1249,9 +1258,16 @@ static int mlxsw_sp_port_fdb_uc_lag_op(struct mlxsw_sp *mlxsw_sp, u16 lag_id,
        mlxsw_reg_sfd_uc_lag_pack(sfd_pl, 0, mlxsw_sp_sfd_rec_policy(dynamic),
                                  mac, fid, MLXSW_REG_SFD_REC_ACTION_NOP,
                                  lag_vid, lag_id);
+       num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);
        err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
-       kfree(sfd_pl);
+       if (err)
+               goto out;
+
+       if (num_rec != mlxsw_reg_sfd_num_rec_get(sfd_pl))
+               err = -EBUSY;
 
+out:
+       kfree(sfd_pl);
        return err;
 }
 
@@ -1296,6 +1312,7 @@ static int mlxsw_sp_port_mdb_op(struct mlxsw_sp *mlxsw_sp, const char *addr,
                                u16 fid, u16 mid_idx, bool adding)
 {
        char *sfd_pl;
+       u8 num_rec;
        int err;
 
        sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
@@ -1305,7 +1322,15 @@ static int mlxsw_sp_port_mdb_op(struct mlxsw_sp *mlxsw_sp, const char *addr,
        mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
        mlxsw_reg_sfd_mc_pack(sfd_pl, 0, addr, fid,
                              MLXSW_REG_SFD_REC_ACTION_NOP, mid_idx);
+       num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);
        err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
+       if (err)
+               goto out;
+
+       if (num_rec != mlxsw_reg_sfd_num_rec_get(sfd_pl))
+               err = -EBUSY;
+
+out:
        kfree(sfd_pl);
        return err;
 }
index d3e1bc0..3557fe3 100644 (file)
@@ -439,6 +439,17 @@ static void sh_eth_modify(struct net_device *ndev, int enum_index, u32 clear,
                     enum_index);
 }
 
+static void sh_eth_tsu_write(struct sh_eth_private *mdp, u32 data,
+                            int enum_index)
+{
+       iowrite32(data, mdp->tsu_addr + mdp->reg_offset[enum_index]);
+}
+
+static u32 sh_eth_tsu_read(struct sh_eth_private *mdp, int enum_index)
+{
+       return ioread32(mdp->tsu_addr + mdp->reg_offset[enum_index]);
+}
+
 static bool sh_eth_is_gether(struct sh_eth_private *mdp)
 {
        return mdp->reg_offset == sh_eth_offset_gigabit;
index 5bbaf9e..21047d5 100644 (file)
@@ -568,15 +568,4 @@ static inline void *sh_eth_tsu_get_offset(struct sh_eth_private *mdp,
        return mdp->tsu_addr + mdp->reg_offset[enum_index];
 }
 
-static inline void sh_eth_tsu_write(struct sh_eth_private *mdp, u32 data,
-                                   int enum_index)
-{
-       iowrite32(data, mdp->tsu_addr + mdp->reg_offset[enum_index]);
-}
-
-static inline u32 sh_eth_tsu_read(struct sh_eth_private *mdp, int enum_index)
-{
-       return ioread32(mdp->tsu_addr + mdp->reg_offset[enum_index]);
-}
-
 #endif /* #ifndef __SH_ETH_H__ */
index 17e529a..0265d70 100644 (file)
@@ -852,13 +852,6 @@ int netvsc_send(struct net_device *ndev,
        if (unlikely(!net_device || net_device->destroy))
                return -ENODEV;
 
-       /* We may race with netvsc_connect_vsp()/netvsc_init_buf() and get
-        * here before the negotiation with the host is finished and
-        * send_section_map may not be allocated yet.
-        */
-       if (unlikely(!net_device->send_section_map))
-               return -EAGAIN;
-
        nvchan = &net_device->chan_table[packet->q_idx];
        packet->send_buf_index = NETVSC_INVALID_INDEX;
        packet->cp_partial = false;
@@ -866,10 +859,8 @@ int netvsc_send(struct net_device *ndev,
        /* Send control message directly without accessing msd (Multi-Send
         * Data) field which may be changed during data packet processing.
         */
-       if (!skb) {
-               cur_send = packet;
-               goto send_now;
-       }
+       if (!skb)
+               return netvsc_send_pkt(device, packet, net_device, pb, skb);
 
        /* batch packets in send buffer if possible */
        msdp = &nvchan->msd;
@@ -953,7 +944,6 @@ int netvsc_send(struct net_device *ndev,
                }
        }
 
-send_now:
        if (cur_send)
                ret = netvsc_send_pkt(device, cur_send, net_device, pb, skb);
 
@@ -1217,9 +1207,10 @@ int netvsc_poll(struct napi_struct *napi, int budget)
        if (send_recv_completions(ndev, net_device, nvchan) == 0 &&
            work_done < budget &&
            napi_complete_done(napi, work_done) &&
-           hv_end_read(&channel->inbound)) {
+           hv_end_read(&channel->inbound) &&
+           napi_schedule_prep(napi)) {
                hv_begin_read(&channel->inbound);
-               napi_reschedule(napi);
+               __napi_schedule(napi);
        }
 
        /* Driver may overshoot since multiple packets per descriptor */
@@ -1242,7 +1233,7 @@ void netvsc_channel_cb(void *context)
                /* disable interrupts from host */
                hv_begin_read(rbi);
 
-               __napi_schedule(&nvchan->napi);
+               __napi_schedule_irqoff(&nvchan->napi);
        }
 }
 
@@ -1296,7 +1287,6 @@ struct netvsc_device *netvsc_device_add(struct hv_device *device,
                         netvsc_channel_cb, net_device->chan_table);
 
        if (ret != 0) {
-               netif_napi_del(&net_device->chan_table[0].napi);
                netdev_err(ndev, "unable to open channel: %d\n", ret);
                goto cleanup;
        }
@@ -1306,11 +1296,6 @@ struct netvsc_device *netvsc_device_add(struct hv_device *device,
 
        napi_enable(&net_device->chan_table[0].napi);
 
-       /* Writing nvdev pointer unlocks netvsc_send(), make sure chn_table is
-        * populated.
-        */
-       rcu_assign_pointer(net_device_ctx->nvdev, net_device);
-
        /* Connect with the NetVsp */
        ret = netvsc_connect_vsp(device, net_device, device_info);
        if (ret != 0) {
@@ -1319,6 +1304,11 @@ struct netvsc_device *netvsc_device_add(struct hv_device *device,
                goto close;
        }
 
+       /* Writing nvdev pointer unlocks netvsc_send(), make sure chn_table is
+        * populated.
+        */
+       rcu_assign_pointer(net_device_ctx->nvdev, net_device);
+
        return net_device;
 
 close:
@@ -1329,6 +1319,7 @@ close:
        vmbus_close(device->channel);
 
 cleanup:
+       netif_napi_del(&net_device->chan_table[0].napi);
        free_netvsc_device(&net_device->rcu);
 
        return ERR_PTR(ret);
index c5584c2..cdb78ee 100644 (file)
@@ -66,10 +66,36 @@ static int debug = -1;
 module_param(debug, int, S_IRUGO);
 MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
 
-static void netvsc_set_multicast_list(struct net_device *net)
+static void netvsc_change_rx_flags(struct net_device *net, int change)
 {
-       struct net_device_context *net_device_ctx = netdev_priv(net);
-       struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev);
+       struct net_device_context *ndev_ctx = netdev_priv(net);
+       struct net_device *vf_netdev = rtnl_dereference(ndev_ctx->vf_netdev);
+       int inc;
+
+       if (!vf_netdev)
+               return;
+
+       if (change & IFF_PROMISC) {
+               inc = (net->flags & IFF_PROMISC) ? 1 : -1;
+               dev_set_promiscuity(vf_netdev, inc);
+       }
+
+       if (change & IFF_ALLMULTI) {
+               inc = (net->flags & IFF_ALLMULTI) ? 1 : -1;
+               dev_set_allmulti(vf_netdev, inc);
+       }
+}
+
+static void netvsc_set_rx_mode(struct net_device *net)
+{
+       struct net_device_context *ndev_ctx = netdev_priv(net);
+       struct net_device *vf_netdev = rtnl_dereference(ndev_ctx->vf_netdev);
+       struct netvsc_device *nvdev = rtnl_dereference(ndev_ctx->nvdev);
+
+       if (vf_netdev) {
+               dev_uc_sync(vf_netdev, net);
+               dev_mc_sync(vf_netdev, net);
+       }
 
        rndis_filter_update(nvdev);
 }
@@ -91,12 +117,11 @@ static int netvsc_open(struct net_device *net)
                return ret;
        }
 
-       netif_tx_wake_all_queues(net);
-
        rdev = nvdev->extension;
-
-       if (!rdev->link_state)
+       if (!rdev->link_state) {
                netif_carrier_on(net);
+               netif_tx_wake_all_queues(net);
+       }
 
        if (vf_netdev) {
                /* Setting synthetic device up transparently sets
@@ -299,8 +324,19 @@ static u16 netvsc_select_queue(struct net_device *ndev, struct sk_buff *skb,
        rcu_read_lock();
        vf_netdev = rcu_dereference(ndc->vf_netdev);
        if (vf_netdev) {
-               txq = skb_rx_queue_recorded(skb) ? skb_get_rx_queue(skb) : 0;
-               qdisc_skb_cb(skb)->slave_dev_queue_mapping = skb->queue_mapping;
+               const struct net_device_ops *vf_ops = vf_netdev->netdev_ops;
+
+               if (vf_ops->ndo_select_queue)
+                       txq = vf_ops->ndo_select_queue(vf_netdev, skb,
+                                                      accel_priv, fallback);
+               else
+                       txq = fallback(vf_netdev, skb);
+
+               /* Record the queue selected by the VF so that it can
+                * be used for the common case where the VF has more
+                * queues than the synthetic device.
+                */
+               qdisc_skb_cb(skb)->slave_dev_queue_mapping = txq;
        } else {
                txq = netvsc_pick_tx(ndev, skb);
        }
@@ -1576,7 +1612,8 @@ static const struct net_device_ops device_ops = {
        .ndo_open =                     netvsc_open,
        .ndo_stop =                     netvsc_close,
        .ndo_start_xmit =               netvsc_start_xmit,
-       .ndo_set_rx_mode =              netvsc_set_multicast_list,
+       .ndo_change_rx_flags =          netvsc_change_rx_flags,
+       .ndo_set_rx_mode =              netvsc_set_rx_mode,
        .ndo_change_mtu =               netvsc_change_mtu,
        .ndo_validate_addr =            eth_validate_addr,
        .ndo_set_mac_address =          netvsc_set_mac_addr,
@@ -1807,6 +1844,11 @@ static void __netvsc_vf_setup(struct net_device *ndev,
                netdev_warn(vf_netdev,
                            "unable to change mtu to %u\n", ndev->mtu);
 
+       /* set multicast etc flags on VF */
+       dev_change_flags(vf_netdev, ndev->flags | IFF_SLAVE);
+       dev_uc_sync(vf_netdev, ndev);
+       dev_mc_sync(vf_netdev, ndev);
+
        if (netif_running(ndev)) {
                ret = dev_open(vf_netdev);
                if (ret)
index c3ca191..8927c48 100644 (file)
@@ -854,15 +854,19 @@ static void rndis_set_multicast(struct work_struct *w)
 {
        struct rndis_device *rdev
                = container_of(w, struct rndis_device, mcast_work);
+       u32 filter = NDIS_PACKET_TYPE_DIRECTED;
+       unsigned int flags = rdev->ndev->flags;
 
-       if (rdev->ndev->flags & IFF_PROMISC)
-               rndis_filter_set_packet_filter(rdev,
-                                              NDIS_PACKET_TYPE_PROMISCUOUS);
-       else
-               rndis_filter_set_packet_filter(rdev,
-                                              NDIS_PACKET_TYPE_BROADCAST |
-                                              NDIS_PACKET_TYPE_ALL_MULTICAST |
-                                              NDIS_PACKET_TYPE_DIRECTED);
+       if (flags & IFF_PROMISC) {
+               filter = NDIS_PACKET_TYPE_PROMISCUOUS;
+       } else {
+               if (flags & IFF_ALLMULTI)
+                       filter |= NDIS_PACKET_TYPE_ALL_MULTICAST;
+               if (flags & IFF_BROADCAST)
+                       filter |= NDIS_PACKET_TYPE_BROADCAST;
+       }
+
+       rndis_filter_set_packet_filter(rdev, filter);
 }
 
 void rndis_filter_update(struct netvsc_device *nvdev)
@@ -1340,6 +1344,9 @@ void rndis_filter_device_remove(struct hv_device *dev,
 {
        struct rndis_device *rndis_dev = net_dev->extension;
 
+       /* Don't try and setup sub channels if about to halt */
+       cancel_work_sync(&net_dev->subchan_work);
+
        /* Halt and release the rndis device */
        rndis_filter_halt_device(rndis_dev);
 
index e3e29c2..a6f924f 100644 (file)
@@ -819,7 +819,7 @@ void phy_start(struct phy_device *phydev)
                break;
        case PHY_HALTED:
                /* if phy was suspended, bring the physical link up again */
-               phy_resume(phydev);
+               __phy_resume(phydev);
 
                /* make sure interrupts are re-enabled for the PHY */
                if (phy_interrupt_is_valid(phydev)) {
index d39ae77..478405e 100644 (file)
@@ -135,9 +135,7 @@ static int mdio_bus_phy_resume(struct device *dev)
        if (!mdio_bus_phy_may_suspend(phydev))
                goto no_resume;
 
-       mutex_lock(&phydev->lock);
        ret = phy_resume(phydev);
-       mutex_unlock(&phydev->lock);
        if (ret < 0)
                return ret;
 
@@ -1041,9 +1039,7 @@ int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
        if (err)
                goto error;
 
-       mutex_lock(&phydev->lock);
        phy_resume(phydev);
-       mutex_unlock(&phydev->lock);
        phy_led_triggers_register(phydev);
 
        return err;
@@ -1172,7 +1168,7 @@ int phy_suspend(struct phy_device *phydev)
 }
 EXPORT_SYMBOL(phy_suspend);
 
-int phy_resume(struct phy_device *phydev)
+int __phy_resume(struct phy_device *phydev)
 {
        struct phy_driver *phydrv = to_phy_driver(phydev->mdio.dev.driver);
        int ret = 0;
@@ -1189,6 +1185,18 @@ int phy_resume(struct phy_device *phydev)
 
        return ret;
 }
+EXPORT_SYMBOL(__phy_resume);
+
+int phy_resume(struct phy_device *phydev)
+{
+       int ret;
+
+       mutex_lock(&phydev->lock);
+       ret = __phy_resume(phydev);
+       mutex_unlock(&phydev->lock);
+
+       return ret;
+}
 EXPORT_SYMBOL(phy_resume);
 
 int phy_loopback(struct phy_device *phydev, bool enable)
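
The hunks above invert the locking around PHY resume: phy_resume() now takes phydev->lock itself, while the unlocked __phy_resume() is exported for callers such as phy_start() that already run under the lock, and the explicit mutex_lock()/mutex_unlock() pairs around the mdio_bus_phy_resume() and phy_attach_direct() call sites are dropped accordingly. A minimal sketch of this locked/unlocked wrapper pattern, using made-up "example_" names rather than the real PHY API:

#include <linux/bug.h>
#include <linux/mutex.h>

struct example_dev {
	struct mutex lock;
	bool suspended;
};

/* For callers that already hold dev->lock (the phy_start() case above). */
static int __example_resume(struct example_dev *dev)
{
	WARN_ON(!mutex_is_locked(&dev->lock));
	dev->suspended = false;		/* the actual resume work goes here */
	return 0;
}

/* Everyone else calls the wrapper, which takes the lock itself. */
static int example_resume(struct example_dev *dev)
{
	int ret;

	mutex_lock(&dev->lock);
	ret = __example_resume(dev);
	mutex_unlock(&dev->lock);

	return ret;
}
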
index a393c1d..7dc2f34 100644 (file)
@@ -3162,6 +3162,15 @@ ppp_connect_channel(struct channel *pch, int unit)
                goto outl;
 
        ppp_lock(ppp);
+       spin_lock_bh(&pch->downl);
+       if (!pch->chan) {
+               /* Don't connect unregistered channels */
+               spin_unlock_bh(&pch->downl);
+               ppp_unlock(ppp);
+               ret = -ENOTCONN;
+               goto outl;
+       }
+       spin_unlock_bh(&pch->downl);
        if (pch->file.hdrlen > ppp->file.hdrlen)
                ppp->file.hdrlen = pch->file.hdrlen;
        hdrlen = pch->file.hdrlen + 2;  /* for protocol bytes */
index d531954..475088f 100644 (file)
@@ -182,7 +182,6 @@ struct tun_file {
        struct tun_struct *detached;
        struct ptr_ring tx_ring;
        struct xdp_rxq_info xdp_rxq;
-       int xdp_pending_pkts;
 };
 
 struct tun_flow_entry {
@@ -1644,6 +1643,7 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun,
        else
                *skb_xdp = 0;
 
+       preempt_disable();
        rcu_read_lock();
        xdp_prog = rcu_dereference(tun->xdp_prog);
        if (xdp_prog && !*skb_xdp) {
@@ -1663,11 +1663,12 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun,
                case XDP_REDIRECT:
                        get_page(alloc_frag->page);
                        alloc_frag->offset += buflen;
-                       ++tfile->xdp_pending_pkts;
                        err = xdp_do_redirect(tun->dev, &xdp, xdp_prog);
+                       xdp_do_flush_map();
                        if (err)
                                goto err_redirect;
                        rcu_read_unlock();
+                       preempt_enable();
                        return NULL;
                case XDP_TX:
                        xdp_xmit = true;
@@ -1689,6 +1690,7 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun,
        skb = build_skb(buf, buflen);
        if (!skb) {
                rcu_read_unlock();
+               preempt_enable();
                return ERR_PTR(-ENOMEM);
        }
 
@@ -1701,10 +1703,12 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun,
                skb->dev = tun->dev;
                generic_xdp_tx(skb, xdp_prog);
                rcu_read_unlock();
+               preempt_enable();
                return NULL;
        }
 
        rcu_read_unlock();
+       preempt_enable();
 
        return skb;
 
@@ -1712,6 +1716,7 @@ err_redirect:
        put_page(alloc_frag->page);
 err_xdp:
        rcu_read_unlock();
+       preempt_enable();
        this_cpu_inc(tun->pcpu_stats->rx_dropped);
        return NULL;
 }
@@ -1985,11 +1990,6 @@ static ssize_t tun_chr_write_iter(struct kiocb *iocb, struct iov_iter *from)
        result = tun_get_user(tun, tfile, NULL, from,
                              file->f_flags & O_NONBLOCK, false);
 
-       if (tfile->xdp_pending_pkts) {
-               tfile->xdp_pending_pkts = 0;
-               xdp_do_flush_map();
-       }
-
        tun_put(tun);
        return result;
 }
@@ -2382,13 +2382,6 @@ static int tun_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len)
        ret = tun_get_user(tun, tfile, m->msg_control, &m->msg_iter,
                           m->msg_flags & MSG_DONTWAIT,
                           m->msg_flags & MSG_MORE);
-
-       if (tfile->xdp_pending_pkts >= NAPI_POLL_WEIGHT ||
-           !(m->msg_flags & MSG_MORE)) {
-               tfile->xdp_pending_pkts = 0;
-               xdp_do_flush_map();
-       }
-
        tun_put(tun);
        return ret;
 }
@@ -3231,7 +3224,6 @@ static int tun_chr_open(struct inode *inode, struct file * file)
        sock_set_flag(&tfile->sk, SOCK_ZEROCOPY);
 
        memset(&tfile->tx_ring, 0, sizeof(tfile->tx_ring));
-       tfile->xdp_pending_pkts = 0;
 
        return 0;
 }
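
Instead of counting pending XDP_REDIRECT packets in tfile->xdp_pending_pkts and flushing them later from the write paths, the tun changes above keep the whole XDP run inside a preempt_disable()/preempt_enable() section and flush the redirect immediately, since xdp_do_redirect() and xdp_do_flush_map() rely on per-CPU redirect state and must complete on the CPU that ran the program. A hedged, stripped-down sketch of that pattern; the example_ function and its simplified error handling are illustrative only:

#include <linux/filter.h>
#include <linux/netdevice.h>
#include <linux/preempt.h>

static int example_run_xdp(struct net_device *dev, struct bpf_prog *prog,
			   struct xdp_buff *xdp)
{
	int err = 0;
	u32 act;

	preempt_disable();			/* pin to this CPU */
	rcu_read_lock();

	act = bpf_prog_run_xdp(prog, xdp);
	if (act == XDP_REDIRECT) {
		err = xdp_do_redirect(dev, xdp, prog);
		xdp_do_flush_map();		/* flush before we can migrate */
	}

	rcu_read_unlock();
	preempt_enable();

	return err;
}
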
index 05dca3e..fff4b13 100644 (file)
@@ -895,6 +895,12 @@ static const struct usb_device_id  products[] = {
                                      USB_CDC_SUBCLASS_ETHERNET,
                                      USB_CDC_PROTO_NONE),
        .driver_info = (unsigned long)&wwan_info,
+}, {
+       /* Cinterion PLS8 modem by GEMALTO */
+       USB_DEVICE_AND_INTERFACE_INFO(0x1e2d, 0x0061, USB_CLASS_COMM,
+                                     USB_CDC_SUBCLASS_ETHERNET,
+                                     USB_CDC_PROTO_NONE),
+       .driver_info = (unsigned long)&wwan_info,
 }, {
        USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ETHERNET,
                        USB_CDC_PROTO_NONE),
index 958b2e8..86f7196 100644 (file)
@@ -1794,7 +1794,7 @@ static int r8152_tx_agg_fill(struct r8152 *tp, struct tx_agg *agg)
 
                tx_data += len;
                agg->skb_len += len;
-               agg->skb_num++;
+               agg->skb_num += skb_shinfo(skb)->gso_segs ?: 1;
 
                dev_kfree_skb_any(skb);
 
index 426dcf7..2337460 100644 (file)
@@ -2212,8 +2212,9 @@ static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog,
        }
 
        /* Make sure NAPI is not using any XDP TX queues for RX. */
-       for (i = 0; i < vi->max_queue_pairs; i++)
-               napi_disable(&vi->rq[i].napi);
+       if (netif_running(dev))
+               for (i = 0; i < vi->max_queue_pairs; i++)
+                       napi_disable(&vi->rq[i].napi);
 
        netif_set_real_num_rx_queues(dev, curr_qp + xdp_qp);
        err = _virtnet_set_queues(vi, curr_qp + xdp_qp);
@@ -2232,7 +2233,8 @@ static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog,
                }
                if (old_prog)
                        bpf_prog_put(old_prog);
-               virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi);
+               if (netif_running(dev))
+                       virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi);
        }
 
        return 0;
index afeca6b..ab8b3cb 100644 (file)
@@ -574,7 +574,10 @@ static void ppp_timer(struct timer_list *t)
                        ppp_cp_event(proto->dev, proto->pid, TO_GOOD, 0, 0,
                                     0, NULL);
                        proto->restart_counter--;
-               } else
+               } else if (netif_carrier_ok(proto->dev))
+                       ppp_cp_event(proto->dev, proto->pid, TO_GOOD, 0, 0,
+                                    0, NULL);
+               else
                        ppp_cp_event(proto->dev, proto->pid, TO_BAD, 0, 0,
                                     0, NULL);
                break;
index 8328d39..3127bc8 100644 (file)
@@ -2005,7 +2005,10 @@ static void netback_changed(struct xenbus_device *dev,
        case XenbusStateInitialised:
        case XenbusStateReconfiguring:
        case XenbusStateReconfigured:
+               break;
+
        case XenbusStateUnknown:
+               wake_up_all(&module_unload_q);
                break;
 
        case XenbusStateInitWait:
@@ -2136,7 +2139,9 @@ static int xennet_remove(struct xenbus_device *dev)
                xenbus_switch_state(dev, XenbusStateClosing);
                wait_event(module_unload_q,
                           xenbus_read_driver_state(dev->otherend) ==
-                          XenbusStateClosing);
+                          XenbusStateClosing ||
+                          xenbus_read_driver_state(dev->otherend) ==
+                          XenbusStateUnknown);
 
                xenbus_switch_state(dev, XenbusStateClosed);
                wait_event(module_unload_q,
index 10041ac..06f8dcc 100644 (file)
@@ -335,8 +335,7 @@ static int pmem_attach_disk(struct device *dev,
                dev_warn(dev, "unable to guarantee persistence of writes\n");
                fua = 0;
        }
-       wbc = nvdimm_has_cache(nd_region) &&
-               !test_bit(ND_REGION_PERSIST_CACHE, &nd_region->flags);
+       wbc = nvdimm_has_cache(nd_region);
 
        if (!devm_request_mem_region(dev, res->start, resource_size(res),
                                dev_name(&ndns->dev))) {
index 0fe7ea3..817e5e2 100644 (file)
@@ -2844,7 +2844,7 @@ out:
 }
 
 static int nvme_init_ns_head(struct nvme_ns *ns, unsigned nsid,
-               struct nvme_id_ns *id, bool *new)
+               struct nvme_id_ns *id)
 {
        struct nvme_ctrl *ctrl = ns->ctrl;
        bool is_shared = id->nmic & (1 << 0);
@@ -2860,8 +2860,6 @@ static int nvme_init_ns_head(struct nvme_ns *ns, unsigned nsid,
                        ret = PTR_ERR(head);
                        goto out_unlock;
                }
-
-               *new = true;
        } else {
                struct nvme_ns_ids ids;
 
@@ -2873,8 +2871,6 @@ static int nvme_init_ns_head(struct nvme_ns *ns, unsigned nsid,
                        ret = -EINVAL;
                        goto out_unlock;
                }
-
-               *new = false;
        }
 
        list_add_tail(&ns->siblings, &head->list);
@@ -2945,7 +2941,6 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
        struct nvme_id_ns *id;
        char disk_name[DISK_NAME_LEN];
        int node = dev_to_node(ctrl->dev), flags = GENHD_FL_EXT_DEVT;
-       bool new = true;
 
        ns = kzalloc_node(sizeof(*ns), GFP_KERNEL, node);
        if (!ns)
@@ -2971,7 +2966,7 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
        if (id->ncap == 0)
                goto out_free_id;
 
-       if (nvme_init_ns_head(ns, nsid, id, &new))
+       if (nvme_init_ns_head(ns, nsid, id))
                goto out_free_id;
        nvme_setup_streams_ns(ctrl, ns);
        
@@ -3037,8 +3032,7 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
                pr_warn("%s: failed to register lightnvm sysfs group for identification\n",
                        ns->disk->disk_name);
 
-       if (new)
-               nvme_mpath_add_disk(ns->head);
+       nvme_mpath_add_disk(ns->head);
        nvme_mpath_add_disk_links(ns);
        return;
  out_unlink_ns:
index 5dd4cee..a1c58e3 100644 (file)
@@ -493,7 +493,7 @@ EXPORT_SYMBOL_GPL(nvmf_should_reconnect);
  */
 int nvmf_register_transport(struct nvmf_transport_ops *ops)
 {
-       if (!ops->create_ctrl || !ops->module)
+       if (!ops->create_ctrl)
                return -EINVAL;
 
        down_write(&nvmf_transports_rwsem);
index 3b211d9..b7e5c6d 100644 (file)
@@ -198,11 +198,16 @@ void nvme_mpath_add_disk(struct nvme_ns_head *head)
 {
        if (!head->disk)
                return;
-       device_add_disk(&head->subsys->dev, head->disk);
-       if (sysfs_create_group(&disk_to_dev(head->disk)->kobj,
-                       &nvme_ns_id_attr_group))
-               pr_warn("%s: failed to create sysfs group for identification\n",
-                       head->disk->disk_name);
+
+       mutex_lock(&head->subsys->lock);
+       if (!(head->disk->flags & GENHD_FL_UP)) {
+               device_add_disk(&head->subsys->dev, head->disk);
+               if (sysfs_create_group(&disk_to_dev(head->disk)->kobj,
+                               &nvme_ns_id_attr_group))
+                       pr_warn("%s: failed to create sysfs group for identification\n",
+                               head->disk->disk_name);
+       }
+       mutex_unlock(&head->subsys->lock);
 }
 
 void nvme_mpath_add_disk_links(struct nvme_ns *ns)
index 73036d2..5933a5c 100644 (file)
@@ -1459,7 +1459,7 @@ static int nvme_create_queue(struct nvme_queue *nvmeq, int qid)
        nvmeq->cq_vector = qid - 1;
        result = adapter_alloc_cq(dev, qid, nvmeq);
        if (result < 0)
-               return result;
+               goto release_vector;
 
        result = adapter_alloc_sq(dev, qid, nvmeq);
        if (result < 0)
@@ -1473,9 +1473,12 @@ static int nvme_create_queue(struct nvme_queue *nvmeq, int qid)
        return result;
 
  release_sq:
+       dev->online_queues--;
        adapter_delete_sq(dev, qid);
  release_cq:
        adapter_delete_cq(dev, qid);
+ release_vector:
+       nvmeq->cq_vector = -1;
        return result;
 }
 
index 3a51ed5..4d84a73 100644 (file)
@@ -1051,7 +1051,7 @@ static void nvme_rdma_unmap_data(struct nvme_rdma_queue *queue,
        struct nvme_rdma_device *dev = queue->device;
        struct ib_device *ibdev = dev->dev;
 
-       if (!blk_rq_bytes(rq))
+       if (!blk_rq_payload_bytes(rq))
                return;
 
        if (req->mr) {
@@ -1166,7 +1166,7 @@ static int nvme_rdma_map_data(struct nvme_rdma_queue *queue,
 
        c->common.flags |= NVME_CMD_SGL_METABUF;
 
-       if (!blk_rq_bytes(rq))
+       if (!blk_rq_payload_bytes(rq))
                return nvme_rdma_set_sg_null(c);
 
        req->sg_table.sgl = req->first_sgl;
index 0bd7371..a78029e 100644 (file)
@@ -520,9 +520,12 @@ bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq,
                goto fail;
        }
 
-       /* either variant of SGLs is fine, as we don't support metadata */
-       if (unlikely((flags & NVME_CMD_SGL_ALL) != NVME_CMD_SGL_METABUF &&
-                    (flags & NVME_CMD_SGL_ALL) != NVME_CMD_SGL_METASEG)) {
+       /*
+        * For fabrics, PSDT field shall describe metadata pointer (MPTR) that
+        * contains an address of a single contiguous physical buffer that is
+        * byte aligned.
+        */
+       if (unlikely((flags & NVME_CMD_SGL_ALL) != NVME_CMD_SGL_METABUF)) {
                status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
                goto fail;
        }
index 7991ec3..861d150 100644 (file)
@@ -184,7 +184,7 @@ static blk_status_t nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx,
                return BLK_STS_OK;
        }
 
-       if (blk_rq_bytes(req)) {
+       if (blk_rq_payload_bytes(req)) {
                iod->sg_table.sgl = iod->first_sgl;
                if (sg_alloc_table_chained(&iod->sg_table,
                                blk_rq_nr_phys_segments(req),
@@ -193,7 +193,7 @@ static blk_status_t nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx,
 
                iod->req.sg = iod->sg_table.sgl;
                iod->req.sg_cnt = blk_rq_map_sg(req->q, req, iod->sg_table.sgl);
-               iod->req.transfer_len = blk_rq_bytes(req);
+               iod->req.transfer_len = blk_rq_payload_bytes(req);
        }
 
        blk_mq_start_request(req);
index 369d48d..3654472 100644 (file)
@@ -401,6 +401,10 @@ void pci_release_resource(struct pci_dev *dev, int resno)
        struct resource *res = dev->resource + resno;
 
        pci_info(dev, "BAR %d: releasing %pR\n", resno, res);
+
+       if (!res->parent)
+               return;
+
        release_resource(res);
        res->end = resource_size(res) - 1;
        res->start = 0;
index 1fda9d6..4b91ff7 100644 (file)
@@ -716,7 +716,7 @@ static const char * const uart_b_groups[] = {
        "uart_tx_b_x", "uart_rx_b_x", "uart_cts_b_x", "uart_rts_b_x",
 };
 
-static const char * const uart_ao_b_gpioz_groups[] = {
+static const char * const uart_ao_b_z_groups[] = {
        "uart_ao_tx_b_z", "uart_ao_rx_b_z",
        "uart_ao_cts_b_z", "uart_ao_rts_b_z",
 };
@@ -855,7 +855,7 @@ static struct meson_pmx_func meson_axg_periphs_functions[] = {
        FUNCTION(nand),
        FUNCTION(uart_a),
        FUNCTION(uart_b),
-       FUNCTION(uart_ao_b_gpioz),
+       FUNCTION(uart_ao_b_z),
        FUNCTION(i2c0),
        FUNCTION(i2c1),
        FUNCTION(i2c2),
index d1a0131..5e3df19 100644 (file)
@@ -376,6 +376,7 @@ static int intel_hid_remove(struct platform_device *device)
 {
        acpi_handle handle = ACPI_HANDLE(&device->dev);
 
+       device_init_wakeup(&device->dev, false);
        acpi_remove_notify_handler(handle, ACPI_DEVICE_NOTIFY, notify_handler);
        intel_hid_set_enable(&device->dev, false);
        intel_button_array_enable(&device->dev, false);
index b703d6f..c13780b 100644 (file)
@@ -7,6 +7,7 @@
  */
 
 #include <linux/acpi.h>
+#include <linux/dmi.h>
 #include <linux/input.h>
 #include <linux/input/sparse-keymap.h>
 #include <linux/kernel.h>
@@ -97,9 +98,35 @@ out_unknown:
        dev_dbg(&device->dev, "unknown event index 0x%x\n", event);
 }
 
-static int intel_vbtn_probe(struct platform_device *device)
+static void detect_tablet_mode(struct platform_device *device)
 {
+       const char *chassis_type = dmi_get_system_info(DMI_CHASSIS_TYPE);
+       struct intel_vbtn_priv *priv = dev_get_drvdata(&device->dev);
+       acpi_handle handle = ACPI_HANDLE(&device->dev);
        struct acpi_buffer vgbs_output = { ACPI_ALLOCATE_BUFFER, NULL };
+       union acpi_object *obj;
+       acpi_status status;
+       int m;
+
+       if (!(chassis_type && strcmp(chassis_type, "31") == 0))
+               goto out;
+
+       status = acpi_evaluate_object(handle, "VGBS", NULL, &vgbs_output);
+       if (ACPI_FAILURE(status))
+               goto out;
+
+       obj = vgbs_output.pointer;
+       if (!(obj && obj->type == ACPI_TYPE_INTEGER))
+               goto out;
+
+       m = !(obj->integer.value & TABLET_MODE_FLAG);
+       input_report_switch(priv->input_dev, SW_TABLET_MODE, m);
+out:
+       kfree(vgbs_output.pointer);
+}
+
+static int intel_vbtn_probe(struct platform_device *device)
+{
        acpi_handle handle = ACPI_HANDLE(&device->dev);
        struct intel_vbtn_priv *priv;
        acpi_status status;
@@ -122,22 +149,7 @@ static int intel_vbtn_probe(struct platform_device *device)
                return err;
        }
 
-       /*
-        * VGBS being present and returning something means we have
-        * a tablet mode switch.
-        */
-       status = acpi_evaluate_object(handle, "VGBS", NULL, &vgbs_output);
-       if (ACPI_SUCCESS(status)) {
-               union acpi_object *obj = vgbs_output.pointer;
-
-               if (obj && obj->type == ACPI_TYPE_INTEGER) {
-                       int m = !(obj->integer.value & TABLET_MODE_FLAG);
-
-                       input_report_switch(priv->input_dev, SW_TABLET_MODE, m);
-               }
-       }
-
-       kfree(vgbs_output.pointer);
+       detect_tablet_mode(device);
 
        status = acpi_install_notify_handler(handle,
                                             ACPI_DEVICE_NOTIFY,
@@ -154,6 +166,7 @@ static int intel_vbtn_remove(struct platform_device *device)
 {
        acpi_handle handle = ACPI_HANDLE(&device->dev);
 
+       device_init_wakeup(&device->dev, false);
        acpi_remove_notify_handler(handle, ACPI_DEVICE_NOTIFY, notify_handler);
 
        /*
index c0c8945..8796211 100644 (file)
@@ -945,7 +945,7 @@ static int wmi_dev_probe(struct device *dev)
                wblock->char_dev.mode = 0444;
                ret = misc_register(&wblock->char_dev);
                if (ret) {
-                       dev_warn(dev, "failed to register char dev: %d", ret);
+                       dev_warn(dev, "failed to register char dev: %d\n", ret);
                        ret = -ENOMEM;
                        goto probe_misc_failure;
                }
@@ -1048,7 +1048,7 @@ static int wmi_create_device(struct device *wmi_bus_dev,
 
        if (result) {
                dev_warn(wmi_bus_dev,
-                        "%s data block query control method not found",
+                        "%s data block query control method not found\n",
                         method);
                return result;
        }
@@ -1198,7 +1198,7 @@ static int parse_wdg(struct device *wmi_bus_dev, struct acpi_device *device)
 
                retval = device_add(&wblock->dev.dev);
                if (retval) {
-                       dev_err(wmi_bus_dev, "failed to register %pULL\n",
+                       dev_err(wmi_bus_dev, "failed to register %pUL\n",
                                wblock->gblock.guid);
                        if (debug_event)
                                wmi_method_enable(wblock, 0);
index ca72f33..c8b308c 100644 (file)
@@ -2134,24 +2134,25 @@ int qeth_send_control_data(struct qeth_card *card, int len,
        }
        reply->callback = reply_cb;
        reply->param = reply_param;
-       if (card->state == CARD_STATE_DOWN)
-               reply->seqno = QETH_IDX_COMMAND_SEQNO;
-       else
-               reply->seqno = card->seqno.ipa++;
+
        init_waitqueue_head(&reply->wait_q);
-       spin_lock_irqsave(&card->lock, flags);
-       list_add_tail(&reply->list, &card->cmd_waiter_list);
-       spin_unlock_irqrestore(&card->lock, flags);
 
        while (atomic_cmpxchg(&card->write.irq_pending, 0, 1)) ;
-       qeth_prepare_control_data(card, len, iob);
 
        if (IS_IPA(iob->data)) {
                cmd = __ipa_cmd(iob);
+               cmd->hdr.seqno = card->seqno.ipa++;
+               reply->seqno = cmd->hdr.seqno;
                event_timeout = QETH_IPA_TIMEOUT;
        } else {
+               reply->seqno = QETH_IDX_COMMAND_SEQNO;
                event_timeout = QETH_TIMEOUT;
        }
+       qeth_prepare_control_data(card, len, iob);
+
+       spin_lock_irqsave(&card->lock, flags);
+       list_add_tail(&reply->list, &card->cmd_waiter_list);
+       spin_unlock_irqrestore(&card->lock, flags);
 
        timeout = jiffies + event_timeout;
 
@@ -2933,7 +2934,7 @@ static void qeth_fill_ipacmd_header(struct qeth_card *card,
        memset(cmd, 0, sizeof(struct qeth_ipa_cmd));
        cmd->hdr.command = command;
        cmd->hdr.initiator = IPA_CMD_INITIATOR_HOST;
-       cmd->hdr.seqno = card->seqno.ipa;
+       /* cmd->hdr.seqno is set by qeth_send_control_data() */
        cmd->hdr.adapter_type = qeth_get_ipa_adp_type(card->info.link_type);
        cmd->hdr.rel_adapter_no = (__u8) card->info.portno;
        if (card->options.layer2)
@@ -3898,10 +3899,12 @@ EXPORT_SYMBOL_GPL(qeth_get_elements_for_frags);
 int qeth_get_elements_no(struct qeth_card *card,
                     struct sk_buff *skb, int extra_elems, int data_offset)
 {
-       int elements = qeth_get_elements_for_range(
-                               (addr_t)skb->data + data_offset,
-                               (addr_t)skb->data + skb_headlen(skb)) +
-                       qeth_get_elements_for_frags(skb);
+       addr_t end = (addr_t)skb->data + skb_headlen(skb);
+       int elements = qeth_get_elements_for_frags(skb);
+       addr_t start = (addr_t)skb->data + data_offset;
+
+       if (start != end)
+               elements += qeth_get_elements_for_range(start, end);
 
        if ((elements + extra_elems) > QETH_MAX_BUFFER_ELEMENTS(card)) {
                QETH_DBF_MESSAGE(2, "Invalid size of IP packet "
index bdd45f4..498fe9a 100644 (file)
@@ -40,8 +40,40 @@ struct qeth_ipaddr {
                        unsigned int pfxlen;
                } a6;
        } u;
-
 };
+
+static inline bool qeth_l3_addr_match_ip(struct qeth_ipaddr *a1,
+                                        struct qeth_ipaddr *a2)
+{
+       if (a1->proto != a2->proto)
+               return false;
+       if (a1->proto == QETH_PROT_IPV6)
+               return ipv6_addr_equal(&a1->u.a6.addr, &a2->u.a6.addr);
+       return a1->u.a4.addr == a2->u.a4.addr;
+}
+
+static inline bool qeth_l3_addr_match_all(struct qeth_ipaddr *a1,
+                                         struct qeth_ipaddr *a2)
+{
+       /* Assumes that the pair was obtained via qeth_l3_addr_find_by_ip(),
+        * so 'proto' and 'addr' match for sure.
+        *
+        * For ucast:
+        * -    'mac' is always 0.
+        * -    'mask'/'pfxlen' for RXIP/VIPA is always 0. For NORMAL, matching
+        *      values are required to avoid mixups in takeover eligibility.
+        *
+        * For mcast,
+        * -    'mac' is mapped from the IP, and thus always matches.
+        * -    'mask'/'pfxlen' is always 0.
+        */
+       if (a1->type != a2->type)
+               return false;
+       if (a1->proto == QETH_PROT_IPV6)
+               return a1->u.a6.pfxlen == a2->u.a6.pfxlen;
+       return a1->u.a4.mask == a2->u.a4.mask;
+}
+
 static inline  u64 qeth_l3_ipaddr_hash(struct qeth_ipaddr *addr)
 {
        u64  ret = 0;
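
qeth_l3_addr_match_ip() and qeth_l3_addr_match_all() split the comparison in two: the hash walks only need the cheap protocol-plus-address check, while add/delete additionally require type and mask/prefix length to agree before an entry counts as the same registration. A hedged sketch of how the pair is meant to be combined; the example_ wrapper is illustrative, not driver code, and assumes qeth_l3_find_addr_by_ip() from qeth_l3_main.c below:

static int example_check_registration(struct qeth_card *card,
				      struct qeth_ipaddr *query)
{
	struct qeth_ipaddr *addr = qeth_l3_find_addr_by_ip(card, query);

	if (!addr)
		return -ENOENT;		/* no entry with this IP at all */
	if (!qeth_l3_addr_match_all(addr, query))
		return -EADDRINUSE;	/* same IP, conflicting type/mask */

	return 0;			/* exact match, safe to reuse */
}
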
index b0c888e..962a04b 100644 (file)
@@ -67,6 +67,24 @@ void qeth_l3_ipaddr_to_string(enum qeth_prot_versions proto, const __u8 *addr,
                qeth_l3_ipaddr6_to_string(addr, buf);
 }
 
+static struct qeth_ipaddr *qeth_l3_find_addr_by_ip(struct qeth_card *card,
+                                                  struct qeth_ipaddr *query)
+{
+       u64 key = qeth_l3_ipaddr_hash(query);
+       struct qeth_ipaddr *addr;
+
+       if (query->is_multicast) {
+               hash_for_each_possible(card->ip_mc_htable, addr, hnode, key)
+                       if (qeth_l3_addr_match_ip(addr, query))
+                               return addr;
+       } else {
+               hash_for_each_possible(card->ip_htable,  addr, hnode, key)
+                       if (qeth_l3_addr_match_ip(addr, query))
+                               return addr;
+       }
+       return NULL;
+}
+
 static void qeth_l3_convert_addr_to_bits(u8 *addr, u8 *bits, int len)
 {
        int i, j;
@@ -120,34 +138,6 @@ static bool qeth_l3_is_addr_covered_by_ipato(struct qeth_card *card,
        return rc;
 }
 
-inline int
-qeth_l3_ipaddrs_is_equal(struct qeth_ipaddr *addr1, struct qeth_ipaddr *addr2)
-{
-       return addr1->proto == addr2->proto &&
-              !memcmp(&addr1->u, &addr2->u, sizeof(addr1->u)) &&
-              ether_addr_equal_64bits(addr1->mac, addr2->mac);
-}
-
-static struct qeth_ipaddr *
-qeth_l3_ip_from_hash(struct qeth_card *card, struct qeth_ipaddr *tmp_addr)
-{
-       struct qeth_ipaddr *addr;
-
-       if (tmp_addr->is_multicast) {
-               hash_for_each_possible(card->ip_mc_htable,  addr,
-                               hnode, qeth_l3_ipaddr_hash(tmp_addr))
-                       if (qeth_l3_ipaddrs_is_equal(tmp_addr, addr))
-                               return addr;
-       } else {
-               hash_for_each_possible(card->ip_htable,  addr,
-                               hnode, qeth_l3_ipaddr_hash(tmp_addr))
-                       if (qeth_l3_ipaddrs_is_equal(tmp_addr, addr))
-                               return addr;
-       }
-
-       return NULL;
-}
-
 int qeth_l3_delete_ip(struct qeth_card *card, struct qeth_ipaddr *tmp_addr)
 {
        int rc = 0;
@@ -162,23 +152,18 @@ int qeth_l3_delete_ip(struct qeth_card *card, struct qeth_ipaddr *tmp_addr)
                QETH_CARD_HEX(card, 4, ((char *)&tmp_addr->u.a6.addr) + 8, 8);
        }
 
-       addr = qeth_l3_ip_from_hash(card, tmp_addr);
-       if (!addr)
+       addr = qeth_l3_find_addr_by_ip(card, tmp_addr);
+       if (!addr || !qeth_l3_addr_match_all(addr, tmp_addr))
                return -ENOENT;
 
        addr->ref_counter--;
-       if (addr->ref_counter > 0 && (addr->type == QETH_IP_TYPE_NORMAL ||
-                                     addr->type == QETH_IP_TYPE_RXIP))
+       if (addr->type == QETH_IP_TYPE_NORMAL && addr->ref_counter > 0)
                return rc;
        if (addr->in_progress)
                return -EINPROGRESS;
 
-       if (!qeth_card_hw_is_reachable(card)) {
-               addr->disp_flag = QETH_DISP_ADDR_DELETE;
-               return 0;
-       }
-
-       rc = qeth_l3_deregister_addr_entry(card, addr);
+       if (qeth_card_hw_is_reachable(card))
+               rc = qeth_l3_deregister_addr_entry(card, addr);
 
        hash_del(&addr->hnode);
        kfree(addr);
@@ -190,6 +175,7 @@ int qeth_l3_add_ip(struct qeth_card *card, struct qeth_ipaddr *tmp_addr)
 {
        int rc = 0;
        struct qeth_ipaddr *addr;
+       char buf[40];
 
        QETH_CARD_TEXT(card, 4, "addip");
 
@@ -200,8 +186,20 @@ int qeth_l3_add_ip(struct qeth_card *card, struct qeth_ipaddr *tmp_addr)
                QETH_CARD_HEX(card, 4, ((char *)&tmp_addr->u.a6.addr) + 8, 8);
        }
 
-       addr = qeth_l3_ip_from_hash(card, tmp_addr);
-       if (!addr) {
+       addr = qeth_l3_find_addr_by_ip(card, tmp_addr);
+       if (addr) {
+               if (tmp_addr->type != QETH_IP_TYPE_NORMAL)
+                       return -EADDRINUSE;
+               if (qeth_l3_addr_match_all(addr, tmp_addr)) {
+                       addr->ref_counter++;
+                       return 0;
+               }
+               qeth_l3_ipaddr_to_string(tmp_addr->proto, (u8 *)&tmp_addr->u,
+                                        buf);
+               dev_warn(&card->gdev->dev,
+                        "Registering IP address %s failed\n", buf);
+               return -EADDRINUSE;
+       } else {
                addr = qeth_l3_get_addr_buffer(tmp_addr->proto);
                if (!addr)
                        return -ENOMEM;
@@ -241,19 +239,15 @@ int qeth_l3_add_ip(struct qeth_card *card, struct qeth_ipaddr *tmp_addr)
                                (rc == IPA_RC_LAN_OFFLINE)) {
                        addr->disp_flag = QETH_DISP_ADDR_DO_NOTHING;
                        if (addr->ref_counter < 1) {
-                               qeth_l3_delete_ip(card, addr);
+                               qeth_l3_deregister_addr_entry(card, addr);
+                               hash_del(&addr->hnode);
                                kfree(addr);
                        }
                } else {
                        hash_del(&addr->hnode);
                        kfree(addr);
                }
-       } else {
-               if (addr->type == QETH_IP_TYPE_NORMAL ||
-                   addr->type == QETH_IP_TYPE_RXIP)
-                       addr->ref_counter++;
        }
-
        return rc;
 }
 
@@ -321,11 +315,7 @@ static void qeth_l3_recover_ip(struct qeth_card *card)
        spin_lock_bh(&card->ip_lock);
 
        hash_for_each_safe(card->ip_htable, i, tmp, addr, hnode) {
-               if (addr->disp_flag == QETH_DISP_ADDR_DELETE) {
-                       qeth_l3_deregister_addr_entry(card, addr);
-                       hash_del(&addr->hnode);
-                       kfree(addr);
-               } else if (addr->disp_flag == QETH_DISP_ADDR_ADD) {
+               if (addr->disp_flag == QETH_DISP_ADDR_ADD) {
                        if (addr->proto == QETH_PROT_IPV4) {
                                addr->in_progress = 1;
                                spin_unlock_bh(&card->ip_lock);
@@ -643,12 +633,7 @@ int qeth_l3_add_vipa(struct qeth_card *card, enum qeth_prot_versions proto,
                return -ENOMEM;
 
        spin_lock_bh(&card->ip_lock);
-
-       if (qeth_l3_ip_from_hash(card, ipaddr))
-               rc = -EEXIST;
-       else
-               rc = qeth_l3_add_ip(card, ipaddr);
-
+       rc = qeth_l3_add_ip(card, ipaddr);
        spin_unlock_bh(&card->ip_lock);
 
        kfree(ipaddr);
@@ -713,12 +698,7 @@ int qeth_l3_add_rxip(struct qeth_card *card, enum qeth_prot_versions proto,
                return -ENOMEM;
 
        spin_lock_bh(&card->ip_lock);
-
-       if (qeth_l3_ip_from_hash(card, ipaddr))
-               rc = -EEXIST;
-       else
-               rc = qeth_l3_add_ip(card, ipaddr);
-
+       rc = qeth_l3_add_ip(card, ipaddr);
        spin_unlock_bh(&card->ip_lock);
 
        kfree(ipaddr);
@@ -1239,8 +1219,9 @@ qeth_l3_add_mc_to_hash(struct qeth_card *card, struct in_device *in4_dev)
                tmp->u.a4.addr = be32_to_cpu(im4->multiaddr);
                tmp->is_multicast = 1;
 
-               ipm = qeth_l3_ip_from_hash(card, tmp);
+               ipm = qeth_l3_find_addr_by_ip(card, tmp);
                if (ipm) {
+                       /* for mcast, by-IP match means full match */
                        ipm->disp_flag = QETH_DISP_ADDR_DO_NOTHING;
                } else {
                        ipm = qeth_l3_get_addr_buffer(QETH_PROT_IPV4);
@@ -1319,8 +1300,9 @@ static void qeth_l3_add_mc6_to_hash(struct qeth_card *card,
                       sizeof(struct in6_addr));
                tmp->is_multicast = 1;
 
-               ipm = qeth_l3_ip_from_hash(card, tmp);
+               ipm = qeth_l3_find_addr_by_ip(card, tmp);
                if (ipm) {
+                       /* for mcast, by-IP match means full match */
                        ipm->disp_flag = QETH_DISP_ADDR_DO_NOTHING;
                        continue;
                }
@@ -2450,11 +2432,12 @@ static void qeth_tso_fill_header(struct qeth_card *card,
 static int qeth_l3_get_elements_no_tso(struct qeth_card *card,
                        struct sk_buff *skb, int extra_elems)
 {
-       addr_t tcpdptr = (addr_t)tcp_hdr(skb) + tcp_hdrlen(skb);
-       int elements = qeth_get_elements_for_range(
-                               tcpdptr,
-                               (addr_t)skb->data + skb_headlen(skb)) +
-                               qeth_get_elements_for_frags(skb);
+       addr_t start = (addr_t)tcp_hdr(skb) + tcp_hdrlen(skb);
+       addr_t end = (addr_t)skb->data + skb_headlen(skb);
+       int elements = qeth_get_elements_for_frags(skb);
+
+       if (start != end)
+               elements += qeth_get_elements_for_range(start, end);
 
        if ((elements + extra_elems) > QETH_MAX_BUFFER_ELEMENTS(card)) {
                QETH_DBF_MESSAGE(2,
index 53f7275..750f931 100644 (file)
@@ -348,7 +348,7 @@ static int imx_gpc_old_dt_init(struct device *dev, struct regmap *regmap,
                if (i == 1) {
                        domain->supply = devm_regulator_get(dev, "pu");
                        if (IS_ERR(domain->supply))
-                               return PTR_ERR(domain->supply);;
+                               return PTR_ERR(domain->supply);
 
                        ret = imx_pgc_get_clocks(dev, domain);
                        if (ret)
@@ -470,13 +470,21 @@ static int imx_gpc_probe(struct platform_device *pdev)
 
 static int imx_gpc_remove(struct platform_device *pdev)
 {
+       struct device_node *pgc_node;
        int ret;
 
+       pgc_node = of_get_child_by_name(pdev->dev.of_node, "pgc");
+
+       /* bail out if DT too old and doesn't provide the necessary info */
+       if (!of_property_read_bool(pdev->dev.of_node, "#power-domain-cells") &&
+           !pgc_node)
+               return 0;
+
        /*
         * If the old DT binding is used the toplevel driver needs to
         * de-register the power domains
         */
-       if (!of_get_child_by_name(pdev->dev.of_node, "pgc")) {
+       if (!pgc_node) {
                of_genpd_del_provider(pdev->dev.of_node);
 
                ret = pm_genpd_remove(&imx_gpc_domains[GPC_PGC_DOMAIN_PU].base);
index e30e29a..45657e2 100644 (file)
@@ -338,11 +338,12 @@ static int vaddr_get_pfn(struct mm_struct *mm, unsigned long vaddr,
 {
        struct page *page[1];
        struct vm_area_struct *vma;
+       struct vm_area_struct *vmas[1];
        int ret;
 
        if (mm == current->mm) {
-               ret = get_user_pages_fast(vaddr, 1, !!(prot & IOMMU_WRITE),
-                                         page);
+               ret = get_user_pages_longterm(vaddr, 1, !!(prot & IOMMU_WRITE),
+                                             page, vmas);
        } else {
                unsigned int flags = 0;
 
@@ -351,7 +352,18 @@ static int vaddr_get_pfn(struct mm_struct *mm, unsigned long vaddr,
 
                down_read(&mm->mmap_sem);
                ret = get_user_pages_remote(NULL, mm, vaddr, 1, flags, page,
-                                           NULL, NULL);
+                                           vmas, NULL);
+               /*
+                * The lifetime of a vaddr_get_pfn() page pin is
+                * userspace-controlled. In the fs-dax case this could
+                * lead to indefinite stalls in filesystem operations.
+                * Disallow attempts to pin fs-dax pages via this
+                * interface.
+                */
+               if (ret > 0 && vma_is_fsdax(vmas[0])) {
+                       ret = -EOPNOTSUPP;
+                       put_page(page[0]);
+               }
                up_read(&mm->mmap_sem);
        }
 
index aff773b..37460cd 100644 (file)
@@ -226,6 +226,7 @@ config ZIIRAVE_WATCHDOG
 config RAVE_SP_WATCHDOG
        tristate "RAVE SP Watchdog timer"
        depends on RAVE_SP_CORE
+       depends on NVMEM || !NVMEM
        select WATCHDOG_CORE
        help
          Support for the watchdog on RAVE SP device.
@@ -903,6 +904,7 @@ config F71808E_WDT
 config SP5100_TCO
        tristate "AMD/ATI SP5100 TCO Timer/Watchdog"
        depends on X86 && PCI
+       select WATCHDOG_CORE
        ---help---
          Hardware watchdog driver for the AMD/ATI SP5100 chipset. The TCO
          (Total Cost of Ownership) timer is a watchdog timer that will reboot
@@ -1008,6 +1010,7 @@ config WAFER_WDT
 config I6300ESB_WDT
        tristate "Intel 6300ESB Timer/Watchdog"
        depends on PCI
+       select WATCHDOG_CORE
        ---help---
          Hardware driver for the watchdog timer built into the Intel
          6300ESB controller hub.
@@ -1837,6 +1840,7 @@ config WATCHDOG_SUN4V
 config XEN_WDT
        tristate "Xen Watchdog support"
        depends on XEN
+       select WATCHDOG_CORE
        help
          Say Y here to support the hypervisor watchdog capability provided
          by Xen 4.0 and newer.  The watchdog timeout period is normally one
index 1ab4bd1..762378f 100644 (file)
@@ -755,8 +755,8 @@ out:
        mutex_unlock(&irq_mapping_update_lock);
        return irq;
 error_irq:
-       for (; i >= 0; i--)
-               __unbind_from_irq(irq + i);
+       while (nvec--)
+               __unbind_from_irq(irq + nvec);
        mutex_unlock(&irq_mapping_update_lock);
        return ret;
 }
index 156e5ae..b1092fb 100644 (file)
@@ -416,7 +416,7 @@ static int pvcalls_back_connect(struct xenbus_device *dev,
                                        sock);
        if (!map) {
                ret = -EFAULT;
-               sock_release(map->sock);
+               sock_release(sock);
        }
 
 out:
index aedbee3..2f11ca7 100644 (file)
@@ -73,20 +73,25 @@ struct sock_mapping {
                        wait_queue_head_t inflight_conn_req;
                } active;
                struct {
-               /* Socket status */
+               /*
+                * Socket status, needs to be 64-bit aligned due to the
+                * test_and_* functions which have this requirement on arm64.
+                */
 #define PVCALLS_STATUS_UNINITALIZED  0
 #define PVCALLS_STATUS_BIND          1
 #define PVCALLS_STATUS_LISTEN        2
-                       uint8_t status;
+                       uint8_t status __attribute__((aligned(8)));
                /*
                 * Internal state-machine flags.
                 * Only one accept operation can be inflight for a socket.
                 * Only one poll operation can be inflight for a given socket.
+                * flags needs to be 64-bit aligned due to the test_and_*
+                * functions which have this requirement on arm64.
                 */
 #define PVCALLS_FLAG_ACCEPT_INFLIGHT 0
 #define PVCALLS_FLAG_POLL_INFLIGHT   1
 #define PVCALLS_FLAG_POLL_RET        2
-                       uint8_t flags;
+                       uint8_t flags __attribute__((aligned(8)));
                        uint32_t inflight_req_id;
                        struct sock_mapping *accept_map;
                        wait_queue_head_t inflight_accept_req;
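
The aligned(8) annotations above are needed because status and flags are handed to the test_and_set_bit()/test_and_clear_bit() helpers, which access the given address as an unsigned long; on arm64 that access must be naturally aligned, so the byte-sized fields have to sit on a 64-bit boundary. A small illustrative sketch with invented example_ names, not part of pvcalls:

#include <linux/bitops.h>
#include <linux/types.h>

#define EXAMPLE_FLAG_INFLIGHT	0

struct example_state {
	uint8_t status __attribute__((aligned(8)));
	uint8_t flags  __attribute__((aligned(8)));
};

/* Returns true if this caller won the race for the in-flight slot. */
static bool example_claim(struct example_state *st)
{
	return !test_and_set_bit(EXAMPLE_FLAG_INFLIGHT,
				 (unsigned long *)&st->flags);
}
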
index 4a181fc..fe09ef9 100644 (file)
@@ -1058,6 +1058,27 @@ retry:
        return 0;
 }
 
+static struct gendisk *bdev_get_gendisk(struct block_device *bdev, int *partno)
+{
+       struct gendisk *disk = get_gendisk(bdev->bd_dev, partno);
+
+       if (!disk)
+               return NULL;
+       /*
+        * Now that we hold gendisk reference we make sure bdev we looked up is
+        * not stale. If it is, it means device got removed and created before
+        * we looked up gendisk and we fail open in such case. Associating
+        * unhashed bdev with newly created gendisk could lead to two bdevs
+        * (and thus two independent caches) being associated with one device
+        * which is bad.
+        */
+       if (inode_unhashed(bdev->bd_inode)) {
+               put_disk_and_module(disk);
+               return NULL;
+       }
+       return disk;
+}
+
 /**
  * bd_start_claiming - start claiming a block device
  * @bdev: block device of interest
@@ -1094,7 +1115,7 @@ static struct block_device *bd_start_claiming(struct block_device *bdev,
         * @bdev might not have been initialized properly yet, look up
         * and grab the outer block device the hard way.
         */
-       disk = get_gendisk(bdev->bd_dev, &partno);
+       disk = bdev_get_gendisk(bdev, &partno);
        if (!disk)
                return ERR_PTR(-ENXIO);
 
@@ -1111,8 +1132,7 @@ static struct block_device *bd_start_claiming(struct block_device *bdev,
        else
                whole = bdgrab(bdev);
 
-       module_put(disk->fops->owner);
-       put_disk(disk);
+       put_disk_and_module(disk);
        if (!whole)
                return ERR_PTR(-ENOMEM);
 
@@ -1407,10 +1427,10 @@ static void __blkdev_put(struct block_device *bdev, fmode_t mode, int for_part);
 static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part)
 {
        struct gendisk *disk;
-       struct module *owner;
        int ret;
        int partno;
        int perm = 0;
+       bool first_open = false;
 
        if (mode & FMODE_READ)
                perm |= MAY_READ;
@@ -1430,14 +1450,14 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part)
  restart:
 
        ret = -ENXIO;
-       disk = get_gendisk(bdev->bd_dev, &partno);
+       disk = bdev_get_gendisk(bdev, &partno);
        if (!disk)
                goto out;
-       owner = disk->fops->owner;
 
        disk_block_events(disk);
        mutex_lock_nested(&bdev->bd_mutex, for_part);
        if (!bdev->bd_openers) {
+               first_open = true;
                bdev->bd_disk = disk;
                bdev->bd_queue = disk->queue;
                bdev->bd_contains = bdev;
@@ -1463,8 +1483,7 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part)
                                        bdev->bd_queue = NULL;
                                        mutex_unlock(&bdev->bd_mutex);
                                        disk_unblock_events(disk);
-                                       put_disk(disk);
-                                       module_put(owner);
+                                       put_disk_and_module(disk);
                                        goto restart;
                                }
                        }
@@ -1524,15 +1543,15 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part)
                        if (ret)
                                goto out_unlock_bdev;
                }
-               /* only one opener holds refs to the module and disk */
-               put_disk(disk);
-               module_put(owner);
        }
        bdev->bd_openers++;
        if (for_part)
                bdev->bd_part_count++;
        mutex_unlock(&bdev->bd_mutex);
        disk_unblock_events(disk);
+       /* only one opener holds refs to the module and disk */
+       if (!first_open)
+               put_disk_and_module(disk);
        return 0;
 
  out_clear:
@@ -1546,8 +1565,7 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part)
  out_unlock_bdev:
        mutex_unlock(&bdev->bd_mutex);
        disk_unblock_events(disk);
-       put_disk(disk);
-       module_put(owner);
+       put_disk_and_module(disk);
  out:
        bdput(bdev);
 
@@ -1770,8 +1788,6 @@ static void __blkdev_put(struct block_device *bdev, fmode_t mode, int for_part)
                        disk->fops->release(disk, mode);
        }
        if (!bdev->bd_openers) {
-               struct module *owner = disk->fops->owner;
-
                disk_put_part(bdev->bd_part);
                bdev->bd_part = NULL;
                bdev->bd_disk = NULL;
@@ -1779,8 +1795,7 @@ static void __blkdev_put(struct block_device *bdev, fmode_t mode, int for_part)
                        victim = bdev->bd_contains;
                bdev->bd_contains = NULL;
 
-               put_disk(disk);
-               module_put(owner);
+               put_disk_and_module(disk);
        }
        mutex_unlock(&bdev->bd_mutex);
        bdput(bdev);
index 1a462ab..da30877 100644 (file)
@@ -2974,7 +2974,7 @@ static inline void free_fs_info(struct btrfs_fs_info *fs_info)
        kfree(fs_info->super_copy);
        kfree(fs_info->super_for_commit);
        security_free_mnt_opts(&fs_info->security_opts);
-       kfree(fs_info);
+       kvfree(fs_info);
 }
 
 /* tree mod log functions from ctree.c */
@@ -3095,7 +3095,10 @@ btrfs_lookup_inode_extref(struct btrfs_trans_handle *trans,
                          u64 inode_objectid, u64 ref_objectid, int ins_len,
                          int cow);
 
-int btrfs_find_name_in_ext_backref(struct btrfs_path *path,
+int btrfs_find_name_in_backref(struct extent_buffer *leaf, int slot,
+                              const char *name,
+                              int name_len, struct btrfs_inode_ref **ref_ret);
+int btrfs_find_name_in_ext_backref(struct extent_buffer *leaf, int slot,
                                   u64 ref_objectid, const char *name,
                                   int name_len,
                                   struct btrfs_inode_extref **extref_ret);
index 39c968f..65e1a76 100644 (file)
 #include "transaction.h"
 #include "print-tree.h"
 
-static int find_name_in_backref(struct btrfs_path *path, const char *name,
-                        int name_len, struct btrfs_inode_ref **ref_ret)
+int btrfs_find_name_in_backref(struct extent_buffer *leaf, int slot,
+                              const char *name,
+                              int name_len, struct btrfs_inode_ref **ref_ret)
 {
-       struct extent_buffer *leaf;
        struct btrfs_inode_ref *ref;
        unsigned long ptr;
        unsigned long name_ptr;
@@ -33,9 +33,8 @@ static int find_name_in_backref(struct btrfs_path *path, const char *name,
        u32 cur_offset = 0;
        int len;
 
-       leaf = path->nodes[0];
-       item_size = btrfs_item_size_nr(leaf, path->slots[0]);
-       ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
+       item_size = btrfs_item_size_nr(leaf, slot);
+       ptr = btrfs_item_ptr_offset(leaf, slot);
        while (cur_offset < item_size) {
                ref = (struct btrfs_inode_ref *)(ptr + cur_offset);
                len = btrfs_inode_ref_name_len(leaf, ref);
@@ -44,18 +43,19 @@ static int find_name_in_backref(struct btrfs_path *path, const char *name,
                if (len != name_len)
                        continue;
                if (memcmp_extent_buffer(leaf, name, name_ptr, name_len) == 0) {
-                       *ref_ret = ref;
+                       if (ref_ret)
+                               *ref_ret = ref;
                        return 1;
                }
        }
        return 0;
 }
 
-int btrfs_find_name_in_ext_backref(struct btrfs_path *path, u64 ref_objectid,
+int btrfs_find_name_in_ext_backref(struct extent_buffer *leaf, int slot,
+                                  u64 ref_objectid,
                                   const char *name, int name_len,
                                   struct btrfs_inode_extref **extref_ret)
 {
-       struct extent_buffer *leaf;
        struct btrfs_inode_extref *extref;
        unsigned long ptr;
        unsigned long name_ptr;
@@ -63,9 +63,8 @@ int btrfs_find_name_in_ext_backref(struct btrfs_path *path, u64 ref_objectid,
        u32 cur_offset = 0;
        int ref_name_len;
 
-       leaf = path->nodes[0];
-       item_size = btrfs_item_size_nr(leaf, path->slots[0]);
-       ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
+       item_size = btrfs_item_size_nr(leaf, slot);
+       ptr = btrfs_item_ptr_offset(leaf, slot);
 
        /*
         * Search all extended backrefs in this item. We're only
@@ -113,7 +112,9 @@ btrfs_lookup_inode_extref(struct btrfs_trans_handle *trans,
                return ERR_PTR(ret);
        if (ret > 0)
                return NULL;
-       if (!btrfs_find_name_in_ext_backref(path, ref_objectid, name, name_len, &extref))
+       if (!btrfs_find_name_in_ext_backref(path->nodes[0], path->slots[0],
+                                           ref_objectid, name, name_len,
+                                           &extref))
                return NULL;
        return extref;
 }
@@ -155,7 +156,8 @@ static int btrfs_del_inode_extref(struct btrfs_trans_handle *trans,
         * This should always succeed so error here will make the FS
         * readonly.
         */
-       if (!btrfs_find_name_in_ext_backref(path, ref_objectid,
+       if (!btrfs_find_name_in_ext_backref(path->nodes[0], path->slots[0],
+                                           ref_objectid,
                                            name, name_len, &extref)) {
                btrfs_handle_fs_error(root->fs_info, -ENOENT, NULL);
                ret = -EROFS;
@@ -225,7 +227,8 @@ int btrfs_del_inode_ref(struct btrfs_trans_handle *trans,
        } else if (ret < 0) {
                goto out;
        }
-       if (!find_name_in_backref(path, name, name_len, &ref)) {
+       if (!btrfs_find_name_in_backref(path->nodes[0], path->slots[0],
+                                       name, name_len, &ref)) {
                ret = -ENOENT;
                search_ext_refs = 1;
                goto out;
@@ -293,7 +296,9 @@ static int btrfs_insert_inode_extref(struct btrfs_trans_handle *trans,
        ret = btrfs_insert_empty_item(trans, root, path, &key,
                                      ins_len);
        if (ret == -EEXIST) {
-               if (btrfs_find_name_in_ext_backref(path, ref_objectid,
+               if (btrfs_find_name_in_ext_backref(path->nodes[0],
+                                                  path->slots[0],
+                                                  ref_objectid,
                                                   name, name_len, NULL))
                        goto out;
 
@@ -351,7 +356,8 @@ int btrfs_insert_inode_ref(struct btrfs_trans_handle *trans,
        if (ret == -EEXIST) {
                u32 old_size;
 
-               if (find_name_in_backref(path, name, name_len, &ref))
+               if (btrfs_find_name_in_backref(path->nodes[0], path->slots[0],
+                                              name, name_len, &ref))
                        goto out;
 
                old_size = btrfs_item_size_nr(path->nodes[0], path->slots[0]);
@@ -365,7 +371,9 @@ int btrfs_insert_inode_ref(struct btrfs_trans_handle *trans,
                ret = 0;
        } else if (ret < 0) {
                if (ret == -EOVERFLOW) {
-                       if (find_name_in_backref(path, name, name_len, &ref))
+                       if (btrfs_find_name_in_backref(path->nodes[0],
+                                                      path->slots[0],
+                                                      name, name_len, &ref))
                                ret = -EEXIST;
                        else
                                ret = -EMLINK;
index a79299a..f534701 100644 (file)
@@ -2043,12 +2043,15 @@ static noinline int add_pending_csums(struct btrfs_trans_handle *trans,
                             struct inode *inode, struct list_head *list)
 {
        struct btrfs_ordered_sum *sum;
+       int ret;
 
        list_for_each_entry(sum, list, list) {
                trans->adding_csums = true;
-               btrfs_csum_file_blocks(trans,
+               ret = btrfs_csum_file_blocks(trans,
                       BTRFS_I(inode)->root->fs_info->csum_root, sum);
                trans->adding_csums = false;
+               if (ret)
+                       return ret;
        }
        return 0;
 }
@@ -3062,7 +3065,11 @@ static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent)
                goto out;
        }
 
-       add_pending_csums(trans, inode, &ordered_extent->list);
+       ret = add_pending_csums(trans, inode, &ordered_extent->list);
+       if (ret) {
+               btrfs_abort_transaction(trans, ret);
+               goto out;
+       }
 
        btrfs_ordered_update_i_size(inode, 0, ordered_extent);
        ret = btrfs_update_inode_fallback(trans, root, inode);
index f0c3f00..cd2298d 100644 (file)
@@ -3268,8 +3268,22 @@ static int relocate_file_extent_cluster(struct inode *inode,
                        nr++;
                }
 
-               btrfs_set_extent_delalloc(inode, page_start, page_end, 0, NULL,
-                                         0);
+               ret = btrfs_set_extent_delalloc(inode, page_start, page_end, 0,
+                                               NULL, 0);
+               if (ret) {
+                       unlock_page(page);
+                       put_page(page);
+                       btrfs_delalloc_release_metadata(BTRFS_I(inode),
+                                                        PAGE_SIZE);
+                       btrfs_delalloc_release_extents(BTRFS_I(inode),
+                                                      PAGE_SIZE);
+
+                       clear_extent_bits(&BTRFS_I(inode)->io_tree,
+                                         page_start, page_end,
+                                         EXTENT_LOCKED | EXTENT_BOUNDARY);
+                       goto out;
+
+               }
                set_page_dirty(page);
 
                unlock_extent(&BTRFS_I(inode)->io_tree,
index f306c60..484e2af 100644 (file)
@@ -5005,6 +5005,9 @@ static int send_hole(struct send_ctx *sctx, u64 end)
        u64 len;
        int ret = 0;
 
+       if (sctx->flags & BTRFS_SEND_FLAG_NO_FILE_DATA)
+               return send_update_extent(sctx, offset, end - offset);
+
        p = fs_path_alloc();
        if (!p)
                return -ENOMEM;
index 6e71a2a..4b81794 100644 (file)
@@ -1545,7 +1545,7 @@ static struct dentry *btrfs_mount_root(struct file_system_type *fs_type,
         * it for searching for existing supers, so this lets us do that and
         * then open_ctree will properly initialize everything later.
         */
-       fs_info = kzalloc(sizeof(struct btrfs_fs_info), GFP_KERNEL);
+       fs_info = kvzalloc(sizeof(struct btrfs_fs_info), GFP_KERNEL);
        if (!fs_info) {
                error = -ENOMEM;
                goto error_sec_opts;
index a8bafed..d11c70b 100644 (file)
@@ -423,7 +423,7 @@ static ssize_t btrfs_nodesize_show(struct kobject *kobj,
 {
        struct btrfs_fs_info *fs_info = to_fs_info(kobj);
 
-       return snprintf(buf, PAGE_SIZE, "%u\n", fs_info->super_copy->nodesize);
+       return snprintf(buf, PAGE_SIZE, "%u\n", fs_info->nodesize);
 }
 
 BTRFS_ATTR(, nodesize, btrfs_nodesize_show);
@@ -433,8 +433,7 @@ static ssize_t btrfs_sectorsize_show(struct kobject *kobj,
 {
        struct btrfs_fs_info *fs_info = to_fs_info(kobj);
 
-       return snprintf(buf, PAGE_SIZE, "%u\n",
-                       fs_info->super_copy->sectorsize);
+       return snprintf(buf, PAGE_SIZE, "%u\n", fs_info->sectorsize);
 }
 
 BTRFS_ATTR(, sectorsize, btrfs_sectorsize_show);
@@ -444,8 +443,7 @@ static ssize_t btrfs_clone_alignment_show(struct kobject *kobj,
 {
        struct btrfs_fs_info *fs_info = to_fs_info(kobj);
 
-       return snprintf(buf, PAGE_SIZE, "%u\n",
-                       fs_info->super_copy->sectorsize);
+       return snprintf(buf, PAGE_SIZE, "%u\n", fs_info->sectorsize);
 }
 
 BTRFS_ATTR(, clone_alignment, btrfs_clone_alignment_show);
index 04f0714..9220f00 100644 (file)
@@ -1722,19 +1722,23 @@ static void update_super_roots(struct btrfs_fs_info *fs_info)
 
        super = fs_info->super_copy;
 
+       /* update latest btrfs_super_block::chunk_root refs */
        root_item = &fs_info->chunk_root->root_item;
-       super->chunk_root = root_item->bytenr;
-       super->chunk_root_generation = root_item->generation;
-       super->chunk_root_level = root_item->level;
+       btrfs_set_super_chunk_root(super, root_item->bytenr);
+       btrfs_set_super_chunk_root_generation(super, root_item->generation);
+       btrfs_set_super_chunk_root_level(super, root_item->level);
 
+       /* update latest btrfs_super_block::root refs */
        root_item = &fs_info->tree_root->root_item;
-       super->root = root_item->bytenr;
-       super->generation = root_item->generation;
-       super->root_level = root_item->level;
+       btrfs_set_super_root(super, root_item->bytenr);
+       btrfs_set_super_generation(super, root_item->generation);
+       btrfs_set_super_root_level(super, root_item->level);
+
        if (btrfs_test_opt(fs_info, SPACE_CACHE))
-               super->cache_generation = root_item->generation;
+               btrfs_set_super_cache_generation(super, root_item->generation);
        if (test_bit(BTRFS_FS_UPDATE_UUID_TREE_GEN, &fs_info->flags))
-               super->uuid_tree_generation = root_item->generation;
+               btrfs_set_super_uuid_tree_generation(super,
+                                                    root_item->generation);
 }
 
 int btrfs_transaction_in_commit(struct btrfs_fs_info *info)
index 4fd19b4..4344577 100644 (file)
@@ -967,7 +967,9 @@ static noinline int backref_in_log(struct btrfs_root *log,
        ptr = btrfs_item_ptr_offset(path->nodes[0], path->slots[0]);
 
        if (key->type == BTRFS_INODE_EXTREF_KEY) {
-               if (btrfs_find_name_in_ext_backref(path, ref_objectid,
+               if (btrfs_find_name_in_ext_backref(path->nodes[0],
+                                                  path->slots[0],
+                                                  ref_objectid,
                                                   name, namelen, NULL))
                        match = 1;
 
@@ -1191,7 +1193,8 @@ static int extref_get_fields(struct extent_buffer *eb, unsigned long ref_ptr,
        read_extent_buffer(eb, *name, (unsigned long)&extref->name,
                           *namelen);
 
-       *index = btrfs_inode_extref_index(eb, extref);
+       if (index)
+               *index = btrfs_inode_extref_index(eb, extref);
        if (parent_objectid)
                *parent_objectid = btrfs_inode_extref_parent(eb, extref);
 
@@ -1212,11 +1215,101 @@ static int ref_get_fields(struct extent_buffer *eb, unsigned long ref_ptr,
 
        read_extent_buffer(eb, *name, (unsigned long)(ref + 1), *namelen);
 
-       *index = btrfs_inode_ref_index(eb, ref);
+       if (index)
+               *index = btrfs_inode_ref_index(eb, ref);
 
        return 0;
 }
 
+/*
+ * Take an inode reference item from the log tree and iterate all names from the
+ * inode reference item in the subvolume tree with the same key (if it exists).
+ * For any name that is not in the inode reference item from the log tree, do a
+ * proper unlink of that name (that is, remove its entry from the inode
+ * reference item and both dir index keys).
+ */
+static int unlink_old_inode_refs(struct btrfs_trans_handle *trans,
+                                struct btrfs_root *root,
+                                struct btrfs_path *path,
+                                struct btrfs_inode *inode,
+                                struct extent_buffer *log_eb,
+                                int log_slot,
+                                struct btrfs_key *key)
+{
+       int ret;
+       unsigned long ref_ptr;
+       unsigned long ref_end;
+       struct extent_buffer *eb;
+
+again:
+       btrfs_release_path(path);
+       ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
+       if (ret > 0) {
+               ret = 0;
+               goto out;
+       }
+       if (ret < 0)
+               goto out;
+
+       eb = path->nodes[0];
+       ref_ptr = btrfs_item_ptr_offset(eb, path->slots[0]);
+       ref_end = ref_ptr + btrfs_item_size_nr(eb, path->slots[0]);
+       while (ref_ptr < ref_end) {
+               char *name = NULL;
+               int namelen;
+               u64 parent_id;
+
+               if (key->type == BTRFS_INODE_EXTREF_KEY) {
+                       ret = extref_get_fields(eb, ref_ptr, &namelen, &name,
+                                               NULL, &parent_id);
+               } else {
+                       parent_id = key->offset;
+                       ret = ref_get_fields(eb, ref_ptr, &namelen, &name,
+                                            NULL);
+               }
+               if (ret)
+                       goto out;
+
+               if (key->type == BTRFS_INODE_EXTREF_KEY)
+                       ret = btrfs_find_name_in_ext_backref(log_eb, log_slot,
+                                                            parent_id, name,
+                                                            namelen, NULL);
+               else
+                       ret = btrfs_find_name_in_backref(log_eb, log_slot, name,
+                                                        namelen, NULL);
+
+               if (!ret) {
+                       struct inode *dir;
+
+                       btrfs_release_path(path);
+                       dir = read_one_inode(root, parent_id);
+                       if (!dir) {
+                               ret = -ENOENT;
+                               kfree(name);
+                               goto out;
+                       }
+                       ret = btrfs_unlink_inode(trans, root, BTRFS_I(dir),
+                                                inode, name, namelen);
+                       kfree(name);
+                       iput(dir);
+                       if (ret)
+                               goto out;
+                       goto again;
+               }
+
+               kfree(name);
+               ref_ptr += namelen;
+               if (key->type == BTRFS_INODE_EXTREF_KEY)
+                       ref_ptr += sizeof(struct btrfs_inode_extref);
+               else
+                       ref_ptr += sizeof(struct btrfs_inode_ref);
+       }
+       ret = 0;
+ out:
+       btrfs_release_path(path);
+       return ret;
+}
+
 /*
  * replay one inode back reference item found in the log tree.
  * eb, slot and key refer to the buffer and key found in the log tree.
@@ -1345,6 +1438,19 @@ static noinline int add_inode_ref(struct btrfs_trans_handle *trans,
                }
        }
 
+       /*
+        * Before we overwrite the inode reference item in the subvolume tree
+        * with the item from the log tree, we must unlink all names from the
+        * parent directory that are in the subvolume's tree inode reference
+        * item, otherwise we end up with an inconsistent subvolume tree where
+        * dir index entries exist for a name but there is no inode reference
+        * item with the same name.
+        */
+       ret = unlink_old_inode_refs(trans, root, path, BTRFS_I(inode), eb, slot,
+                                   key);
+       if (ret)
+               goto out;
+
        /* finally write the back reference in the inode */
        ret = overwrite_item(trans, root, path, eb, slot, key);
 out:
@@ -5853,7 +5959,7 @@ int btrfs_log_new_name(struct btrfs_trans_handle *trans,
         * this will force the logging code to walk the dentry chain
         * up for the file
         */
-       if (S_ISREG(inode->vfs_inode.i_mode))
+       if (!S_ISDIR(inode->vfs_inode.i_mode))
                inode->last_unlink_trans = trans->transid;
 
        /*
index 2ceb924..b2d05c6 100644 (file)
@@ -4829,10 +4829,13 @@ static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
        ndevs = min(ndevs, devs_max);
 
        /*
-        * the primary goal is to maximize the number of stripes, so use as many
-        * devices as possible, even if the stripes are not maximum sized.
+        * The primary goal is to maximize the number of stripes, so use as
+        * many devices as possible, even if the stripes are not maximum sized.
+        *
+        * The DUP profile stores more than one stripe per device; the
+        * max_avail is the total size, so we have to adjust.
         */
-       stripe_size = devices_info[ndevs-1].max_avail;
+       stripe_size = div_u64(devices_info[ndevs - 1].max_avail, dev_stripes);
        num_stripes = ndevs * dev_stripes;
 
        /*
@@ -4867,8 +4870,6 @@ static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
                        stripe_size = devices_info[ndevs-1].max_avail;
        }
 
-       stripe_size = div_u64(stripe_size, dev_stripes);
-
        /* align to BTRFS_STRIPE_LEN */
        stripe_size = round_down(stripe_size, BTRFS_STRIPE_LEN);
 
index 6582c45..0e5bd3e 100644 (file)
@@ -3964,6 +3964,32 @@ void ceph_put_fmode(struct ceph_inode_info *ci, int fmode)
                ceph_check_caps(ci, 0, NULL);
 }
 
+/*
+ * For a soon-to-be unlinked file, drop the AUTH_RDCACHE caps. If it
+ * looks like the link count will hit 0, drop any other caps (other
+ * than PIN) we don't specifically want (due to the file still being
+ * open).
+ */
+int ceph_drop_caps_for_unlink(struct inode *inode)
+{
+       struct ceph_inode_info *ci = ceph_inode(inode);
+       int drop = CEPH_CAP_LINK_SHARED | CEPH_CAP_LINK_EXCL;
+
+       spin_lock(&ci->i_ceph_lock);
+       if (inode->i_nlink == 1) {
+               drop |= ~(__ceph_caps_wanted(ci) | CEPH_CAP_PIN);
+
+               ci->i_ceph_flags |= CEPH_I_NODELAY;
+               if (__ceph_caps_dirty(ci)) {
+                       struct ceph_mds_client *mdsc =
+                               ceph_inode_to_client(inode)->mdsc;
+                       __cap_delay_requeue_front(mdsc, ci);
+               }
+       }
+       spin_unlock(&ci->i_ceph_lock);
+       return drop;
+}
+
 /*
  * Helpers for embedding cap and dentry lease releases into mds
  * requests.
index 0c43468..f1d9c6c 100644 (file)
@@ -1002,26 +1002,6 @@ static int ceph_link(struct dentry *old_dentry, struct inode *dir,
        return err;
 }
 
-/*
- * For a soon-to-be unlinked file, drop the AUTH_RDCACHE caps.  If it
- * looks like the link count will hit 0, drop any other caps (other
- * than PIN) we don't specifically want (due to the file still being
- * open).
- */
-static int drop_caps_for_unlink(struct inode *inode)
-{
-       struct ceph_inode_info *ci = ceph_inode(inode);
-       int drop = CEPH_CAP_LINK_SHARED | CEPH_CAP_LINK_EXCL;
-
-       spin_lock(&ci->i_ceph_lock);
-       if (inode->i_nlink == 1) {
-               drop |= ~(__ceph_caps_wanted(ci) | CEPH_CAP_PIN);
-               ci->i_ceph_flags |= CEPH_I_NODELAY;
-       }
-       spin_unlock(&ci->i_ceph_lock);
-       return drop;
-}
-
 /*
  * rmdir and unlink differ only by the metadata op code
  */
@@ -1056,7 +1036,7 @@ static int ceph_unlink(struct inode *dir, struct dentry *dentry)
        set_bit(CEPH_MDS_R_PARENT_LOCKED, &req->r_req_flags);
        req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
        req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
-       req->r_inode_drop = drop_caps_for_unlink(inode);
+       req->r_inode_drop = ceph_drop_caps_for_unlink(inode);
        err = ceph_mdsc_do_request(mdsc, dir, req);
        if (!err && !req->r_reply_info.head->is_dentry)
                d_delete(dentry);
@@ -1104,8 +1084,10 @@ static int ceph_rename(struct inode *old_dir, struct dentry *old_dentry,
        req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
        /* release LINK_RDCACHE on source inode (mds will lock it) */
        req->r_old_inode_drop = CEPH_CAP_LINK_SHARED | CEPH_CAP_LINK_EXCL;
-       if (d_really_is_positive(new_dentry))
-               req->r_inode_drop = drop_caps_for_unlink(d_inode(new_dentry));
+       if (d_really_is_positive(new_dentry)) {
+               req->r_inode_drop =
+                       ceph_drop_caps_for_unlink(d_inode(new_dentry));
+       }
        err = ceph_mdsc_do_request(mdsc, old_dir, req);
        if (!err && !req->r_reply_info.head->is_dentry) {
                /*
index a62d2a9..fb2bc9c 100644 (file)
@@ -225,6 +225,7 @@ static int parse_fsopt_token(char *c, void *private)
                        return -ENOMEM;
                break;
        case Opt_mds_namespace:
+               kfree(fsopt->mds_namespace);
                fsopt->mds_namespace = kstrndup(argstr[0].from,
                                                argstr[0].to-argstr[0].from,
                                                GFP_KERNEL);
@@ -232,6 +233,7 @@ static int parse_fsopt_token(char *c, void *private)
                        return -ENOMEM;
                break;
        case Opt_fscache_uniq:
+               kfree(fsopt->fscache_uniq);
                fsopt->fscache_uniq = kstrndup(argstr[0].from,
                                               argstr[0].to-argstr[0].from,
                                               GFP_KERNEL);
@@ -711,14 +713,17 @@ static int __init init_caches(void)
                goto bad_dentry;
 
        ceph_file_cachep = KMEM_CACHE(ceph_file_info, SLAB_MEM_SPREAD);
-
        if (!ceph_file_cachep)
                goto bad_file;
 
-       if ((error = ceph_fscache_register()))
-               goto bad_file;
+       error = ceph_fscache_register();
+       if (error)
+               goto bad_fscache;
 
        return 0;
+
+bad_fscache:
+       kmem_cache_destroy(ceph_file_cachep);
 bad_file:
        kmem_cache_destroy(ceph_dentry_cachep);
 bad_dentry:
@@ -836,7 +841,6 @@ static struct dentry *ceph_real_mount(struct ceph_fs_client *fsc)
        int err;
        unsigned long started = jiffies;  /* note the start time */
        struct dentry *root;
-       int first = 0;   /* first vfsmount for this super_block */
 
        dout("mount start %p\n", fsc);
        mutex_lock(&fsc->client->mount_mutex);
@@ -861,17 +865,17 @@ static struct dentry *ceph_real_mount(struct ceph_fs_client *fsc)
                        path = fsc->mount_options->server_path + 1;
                        dout("mount opening path %s\n", path);
                }
+
+               err = ceph_fs_debugfs_init(fsc);
+               if (err < 0)
+                       goto out;
+
                root = open_root_dentry(fsc, path, started);
                if (IS_ERR(root)) {
                        err = PTR_ERR(root);
                        goto out;
                }
                fsc->sb->s_root = dget(root);
-               first = 1;
-
-               err = ceph_fs_debugfs_init(fsc);
-               if (err < 0)
-                       goto fail;
        } else {
                root = dget(fsc->sb->s_root);
        }
@@ -881,11 +885,6 @@ static struct dentry *ceph_real_mount(struct ceph_fs_client *fsc)
        mutex_unlock(&fsc->client->mount_mutex);
        return root;
 
-fail:
-       if (first) {
-               dput(fsc->sb->s_root);
-               fsc->sb->s_root = NULL;
-       }
 out:
        mutex_unlock(&fsc->client->mount_mutex);
        return ERR_PTR(err);
index 21b2e5b..1c2086e 100644 (file)
@@ -987,7 +987,7 @@ extern void ceph_check_caps(struct ceph_inode_info *ci, int flags,
                            struct ceph_mds_session *session);
 extern void ceph_check_delayed_caps(struct ceph_mds_client *mdsc);
 extern void ceph_flush_dirty_caps(struct ceph_mds_client *mdsc);
-
+extern int  ceph_drop_caps_for_unlink(struct inode *inode);
 extern int ceph_encode_inode_release(void **p, struct inode *inode,
                                     int mds, int drop, int unless, int force);
 extern int ceph_encode_dentry_release(void **p, struct dentry *dn,
index a0ca9e4..1357ef5 100644 (file)
@@ -1274,8 +1274,7 @@ do_blockdev_direct_IO(struct kiocb *iocb, struct inode *inode,
         */
        if (dio->is_async && iov_iter_rw(iter) == WRITE) {
                retval = 0;
-               if ((iocb->ki_filp->f_flags & O_DSYNC) ||
-                   IS_SYNC(iocb->ki_filp->f_mapping->host))
+               if (iocb->ki_flags & IOCB_DSYNC)
                        retval = dio_set_defer_completion(dio);
                else if (!dio->inode->i_sb->s_dio_done_wq) {
                        /*
index 2435af5..a50d781 100644 (file)
@@ -572,7 +572,7 @@ out:
 }
 
 static bool
-validate_bitmap_values(unsigned long mask)
+validate_bitmap_values(unsigned int mask)
 {
        return (mask & ~RCA4_TYPE_MASK_ALL) == 0;
 }
@@ -596,17 +596,15 @@ __be32 nfs4_callback_recallany(void *argp, void *resp,
                goto out;
 
        status = cpu_to_be32(NFS4_OK);
-       if (test_bit(RCA4_TYPE_MASK_RDATA_DLG, (const unsigned long *)
-                    &args->craa_type_mask))
+       if (args->craa_type_mask & BIT(RCA4_TYPE_MASK_RDATA_DLG))
                flags = FMODE_READ;
-       if (test_bit(RCA4_TYPE_MASK_WDATA_DLG, (const unsigned long *)
-                    &args->craa_type_mask))
+       if (args->craa_type_mask & BIT(RCA4_TYPE_MASK_WDATA_DLG))
                flags |= FMODE_WRITE;
-       if (test_bit(RCA4_TYPE_MASK_FILE_LAYOUT, (const unsigned long *)
-                    &args->craa_type_mask))
-               pnfs_recall_all_layouts(cps->clp);
        if (flags)
                nfs_expire_unused_delegation_types(cps->clp, flags);
+
+       if (args->craa_type_mask & BIT(RCA4_TYPE_MASK_FILE_LAYOUT))
+               pnfs_recall_all_layouts(cps->clp);
 out:
        dprintk("%s: exit with status = %d\n", __func__, ntohl(status));
        return status;
index 49f848f..7327930 100644 (file)
@@ -873,7 +873,7 @@ static void nfs3_nlm_release_call(void *data)
        }
 }
 
-const struct nlmclnt_operations nlmclnt_fl_close_lock_ops = {
+static const struct nlmclnt_operations nlmclnt_fl_close_lock_ops = {
        .nlmclnt_alloc_call = nfs3_nlm_alloc_call,
        .nlmclnt_unlock_prepare = nfs3_nlm_unlock_prepare,
        .nlmclnt_release_call = nfs3_nlm_release_call,
index 04612c2..9796314 100644 (file)
@@ -868,8 +868,10 @@ static int nfs4_set_client(struct nfs_server *server,
        if (IS_ERR(clp))
                return PTR_ERR(clp);
 
-       if (server->nfs_client == clp)
+       if (server->nfs_client == clp) {
+               nfs_put_client(clp);
                return -ELOOP;
+       }
 
        /*
         * Query for the lease time on clientid setup or renewal
@@ -1244,11 +1246,11 @@ int nfs4_update_server(struct nfs_server *server, const char *hostname,
                                clp->cl_proto, clnt->cl_timeout,
                                clp->cl_minorversion, net);
        clear_bit(NFS_MIG_TSM_POSSIBLE, &server->mig_status);
-       nfs_put_client(clp);
        if (error != 0) {
                nfs_server_insert_lists(server);
                return error;
        }
+       nfs_put_client(clp);
 
        if (server->nfs_client->cl_hostname == NULL)
                server->nfs_client->cl_hostname = kstrdup(hostname, GFP_KERNEL);
index fd97552..05c66e0 100644 (file)
@@ -767,7 +767,7 @@ int
 xfs_scrub_agfl(
        struct xfs_scrub_context        *sc)
 {
-       struct xfs_scrub_agfl_info      sai = { 0 };
+       struct xfs_scrub_agfl_info      sai;
        struct xfs_agf                  *agf;
        xfs_agnumber_t                  agno;
        unsigned int                    agflcount;
@@ -795,6 +795,7 @@ xfs_scrub_agfl(
                xfs_scrub_block_set_corrupt(sc, sc->sa.agf_bp);
                goto out;
        }
+       memset(&sai, 0, sizeof(sai));
        sai.sz_entries = agflcount;
        sai.entries = kmem_zalloc(sizeof(xfs_agblock_t) * agflcount, KM_NOFS);
        if (!sai.entries) {
index 3a55d6f..7a39f40 100644 (file)
@@ -23,6 +23,7 @@
 #include "xfs_log_format.h"
 #include "xfs_trans_resv.h"
 #include "xfs_bit.h"
+#include "xfs_shared.h"
 #include "xfs_mount.h"
 #include "xfs_defer.h"
 #include "xfs_trans.h"
@@ -456,10 +457,12 @@ xfs_cui_recover(
         * transaction.  Normally, any work that needs to be deferred
         * gets attached to the same defer_ops that scheduled the
         * refcount update.  However, we're in log recovery here, so we
-        * we create our own defer_ops and use that to finish up any
-        * work that doesn't fit.
+        * use the passed in defer_ops to finish up any work that
+        * doesn't fit.  We need to reserve enough blocks to handle a
+        * full btree split on either end of the refcount range.
         */
-       error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate, 0, 0, 0, &tp);
+       error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate,
+                       mp->m_refc_maxlevels * 2, 0, XFS_TRANS_RESERVE, &tp);
        if (error)
                return error;
        cudp = xfs_trans_get_cud(tp, cuip);
index f3b139c..49d3124 100644 (file)
@@ -23,6 +23,7 @@
 #include "xfs_log_format.h"
 #include "xfs_trans_resv.h"
 #include "xfs_bit.h"
+#include "xfs_shared.h"
 #include "xfs_mount.h"
 #include "xfs_defer.h"
 #include "xfs_trans.h"
@@ -470,7 +471,8 @@ xfs_rui_recover(
                }
        }
 
-       error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate, 0, 0, 0, &tp);
+       error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate,
+                       mp->m_rmap_maxlevels, 0, XFS_TRANS_RESERVE, &tp);
        if (error)
                return error;
        rudp = xfs_trans_get_rud(tp, ruip);
index 7aba628..93588ea 100644 (file)
@@ -250,6 +250,7 @@ xfs_parseargs(
                                return -EINVAL;
                        break;
                case Opt_logdev:
+                       kfree(mp->m_logname);
                        mp->m_logname = match_strdup(args);
                        if (!mp->m_logname)
                                return -ENOMEM;
@@ -258,6 +259,7 @@ xfs_parseargs(
                        xfs_warn(mp, "%s option not allowed on this system", p);
                        return -EINVAL;
                case Opt_rtdev:
+                       kfree(mp->m_rtname);
                        mp->m_rtname = match_strdup(args);
                        if (!mp->m_rtname)
                                return -ENOMEM;
index d32b688..d23dcdd 100644 (file)
@@ -56,6 +56,7 @@ struct drm_printer;
 #define DRIVER_ATOMIC                  0x10000
 #define DRIVER_KMS_LEGACY_CONTEXT      0x20000
 #define DRIVER_SYNCOBJ                  0x40000
+#define DRIVER_PREFER_XBGR_30BPP        0x80000
 
 /**
  * struct drm_driver - DRM driver structure
index d0eb659..ce547a2 100644 (file)
@@ -511,6 +511,7 @@ void zero_fill_bio(struct bio *bio);
 extern struct bio_vec *bvec_alloc(gfp_t, int, unsigned long *, mempool_t *);
 extern void bvec_free(mempool_t *, struct bio_vec *, unsigned int);
 extern unsigned int bvec_nr_vecs(unsigned short idx);
+extern const char *bio_devname(struct bio *bio, char *buffer);
 
 #define bio_set_dev(bio, bdev)                         \
 do {                                           \
@@ -529,9 +530,6 @@ do {                                                \
 #define bio_dev(bio) \
        disk_devt((bio)->bi_disk)
 
-#define bio_devname(bio, buf) \
-       __bdevname(bio_dev(bio), (buf))
-
 #ifdef CONFIG_BLK_CGROUP
 int bio_associate_blkcg(struct bio *bio, struct cgroup_subsys_state *blkcg_css);
 void bio_disassociate_task(struct bio *bio);
index d02a4df..d3f264a 100644 (file)
@@ -27,3 +27,8 @@
 #if __has_feature(address_sanitizer)
 #define __SANITIZE_ADDRESS__
 #endif
+
+/* Clang doesn't have a way to turn retpolines off per-function, yet. */
+#ifdef __noretpoline
+#undef __noretpoline
+#endif
index 901c1cc..e2c7f43 100644 (file)
 #define __weak         __attribute__((weak))
 #define __alias(symbol)        __attribute__((alias(#symbol)))
 
+#ifdef RETPOLINE
+#define __noretpoline __attribute__((indirect_branch("keep")))
+#endif
+
 /*
  * it doesn't make sense on ARM (currently the only user of __naked)
  * to trace naked functions because then mcount is called without
index 2a81556..79c4139 100644 (file)
@@ -3198,7 +3198,7 @@ static inline bool vma_is_fsdax(struct vm_area_struct *vma)
        if (!vma_is_dax(vma))
                return false;
        inode = file_inode(vma->vm_file);
-       if (inode->i_mode == S_IFCHR)
+       if (S_ISCHR(inode->i_mode))
                return false; /* device-dax */
        return true;
 }
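
The fix works because i_mode carries the permission bits in addition to the file-type bits, so comparing the whole value against S_IFCHR can never match for a real device node; S_ISCHR() masks with S_IFMT first. A tiny stand-alone illustration of the difference (plain user-space C, not kernel code):

#include <stdio.h>
#include <sys/stat.h>

int main(void)
{
	/* A character-device mode with 0600 permissions. */
	mode_t mode = S_IFCHR | 0600;

	/* Direct comparison fails: the permission bits make the values differ. */
	printf("mode == S_IFCHR: %d\n", mode == S_IFCHR);	/* prints 0 */

	/* S_ISCHR() masks with S_IFMT before comparing, so it matches. */
	printf("S_ISCHR(mode):   %d\n", S_ISCHR(mode) != 0);	/* prints 1 */

	return 0;
}
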
index 5e35310..c826b0b 100644 (file)
@@ -198,6 +198,7 @@ struct gendisk {
        void *private_data;
 
        int flags;
+       struct rw_semaphore lookup_sem;
        struct kobject *slave_dir;
 
        struct timer_rand_state *random;
@@ -600,8 +601,9 @@ extern void delete_partition(struct gendisk *, int);
 extern void printk_all_partitions(void);
 
 extern struct gendisk *__alloc_disk_node(int minors, int node_id);
-extern struct kobject *get_disk(struct gendisk *disk);
+extern struct kobject *get_disk_and_module(struct gendisk *disk);
 extern void put_disk(struct gendisk *disk);
+extern void put_disk_and_module(struct gendisk *disk);
 extern void blk_register_region(dev_t devt, unsigned long range,
                        struct module *module,
                        struct kobject *(*probe)(dev_t, int *, void *),
index 506a981..bc27cf0 100644 (file)
@@ -6,10 +6,10 @@
 #include <linux/types.h>
 
 /* Built-in __init functions needn't be compiled with retpoline */
-#if defined(RETPOLINE) && !defined(MODULE)
-#define __noretpoline __attribute__((indirect_branch("keep")))
+#if defined(__noretpoline) && !defined(MODULE)
+#define __noinitretpoline __noretpoline
 #else
-#define __noretpoline
+#define __noinitretpoline
 #endif
 
 /* These macros are used to mark some functions or 
@@ -47,7 +47,7 @@
 
 /* These are for everybody (although not all archs will actually
    discard it in modules) */
-#define __init         __section(.init.text) __cold  __latent_entropy __noretpoline
+#define __init         __section(.init.text) __cold  __latent_entropy __noinitretpoline
 #define __initdata     __section(.init.data)
 #define __initconst    __section(.init.rodata)
 #define __exitdata     __section(.exit.data)
index b6a29c1..2168cc6 100644 (file)
@@ -151,6 +151,7 @@ extern struct jump_entry __start___jump_table[];
 extern struct jump_entry __stop___jump_table[];
 
 extern void jump_label_init(void);
+extern void jump_label_invalidate_init(void);
 extern void jump_label_lock(void);
 extern void jump_label_unlock(void);
 extern void arch_jump_label_transform(struct jump_entry *entry,
@@ -198,6 +199,8 @@ static __always_inline void jump_label_init(void)
        static_key_initialized = true;
 }
 
+static inline void jump_label_invalidate_init(void) {}
+
 static __always_inline bool static_key_false(struct static_key *key)
 {
        if (unlikely(static_key_count(key) > 0))
index ce51455..3fd2915 100644 (file)
@@ -472,6 +472,7 @@ extern bool parse_option_str(const char *str, const char *option);
 extern char *next_arg(char *args, char **param, char **val);
 
 extern int core_kernel_text(unsigned long addr);
+extern int init_kernel_text(unsigned long addr);
 extern int core_kernel_data(unsigned long addr);
 extern int __kernel_text_address(unsigned long addr);
 extern int kernel_text_address(unsigned long addr);
index ac0062b..6930c63 100644 (file)
@@ -1105,7 +1105,6 @@ static inline void kvm_irq_routing_update(struct kvm *kvm)
 {
 }
 #endif
-void kvm_arch_irq_routing_update(struct kvm *kvm);
 
 static inline int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
 {
@@ -1114,6 +1113,8 @@ static inline int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
 
 #endif /* CONFIG_HAVE_KVM_EVENTFD */
 
+void kvm_arch_irq_routing_update(struct kvm *kvm);
+
 static inline void kvm_make_request(int req, struct kvm_vcpu *vcpu)
 {
        /*
@@ -1272,4 +1273,7 @@ static inline long kvm_arch_vcpu_async_ioctl(struct file *filp,
 }
 #endif /* CONFIG_HAVE_KVM_VCPU_ASYNC_IOCTL */
 
+void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm,
+               unsigned long start, unsigned long end);
+
 #endif
index f25c134..cb3bbed 100644 (file)
@@ -66,6 +66,11 @@ struct mutex {
 #endif
 };
 
+/*
+ * Internal helper function; C doesn't allow us to hide it :/
+ *
+ * DO NOT USE (outside of mutex code).
+ */
 static inline struct task_struct *__mutex_owner(struct mutex *lock)
 {
        return (struct task_struct *)(atomic_long_read(&lock->owner) & ~0x07);
index fbc98e2..e791ebc 100644 (file)
@@ -5,6 +5,7 @@
 
 #ifndef _LINUX_NOSPEC_H
 #define _LINUX_NOSPEC_H
+#include <asm/barrier.h>
 
 /**
  * array_index_mask_nospec() - generate a ~0 mask when index < size, 0 otherwise
@@ -29,26 +30,6 @@ static inline unsigned long array_index_mask_nospec(unsigned long index,
 }
 #endif
 
-/*
- * Warn developers about inappropriate array_index_nospec() usage.
- *
- * Even if the CPU speculates past the WARN_ONCE branch, the
- * sign bit of @index is taken into account when generating the
- * mask.
- *
- * This warning is compiled out when the compiler can infer that
- * @index and @size are less than LONG_MAX.
- */
-#define array_index_mask_nospec_check(index, size)                             \
-({                                                                             \
-       if (WARN_ONCE(index > LONG_MAX || size > LONG_MAX,                      \
-           "array_index_nospec() limited to range of [0, LONG_MAX]\n"))        \
-               _mask = 0;                                                      \
-       else                                                                    \
-               _mask = array_index_mask_nospec(index, size);                   \
-       _mask;                                                                  \
-})
-
 /*
  * array_index_nospec - sanitize an array index after a bounds check
  *
@@ -67,12 +48,11 @@ static inline unsigned long array_index_mask_nospec(unsigned long index,
 ({                                                                     \
        typeof(index) _i = (index);                                     \
        typeof(size) _s = (size);                                       \
-       unsigned long _mask = array_index_mask_nospec_check(_i, _s);    \
+       unsigned long _mask = array_index_mask_nospec(_i, _s);          \
                                                                        \
        BUILD_BUG_ON(sizeof(_i) > sizeof(long));                        \
        BUILD_BUG_ON(sizeof(_s) > sizeof(long));                        \
                                                                        \
-       _i &= _mask;                                                    \
-       _i;                                                             \
+       (typeof(_i)) (_i & _mask);                                      \
 })
 #endif /* _LINUX_NOSPEC_H */
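
With the WARN_ONCE() wrapper removed, array_index_nospec() is again a plain mask-and-AND. The intended usage pattern is unchanged: bounds-check first, then sanitize the index before it is used to address memory. A minimal sketch with a hypothetical table (not taken from this patch):

#include <linux/errno.h>
#include <linux/nospec.h>

#define NR_HANDLERS 16

static int handler_table[NR_HANDLERS];

static int lookup_handler(unsigned int idx)
{
	if (idx >= NR_HANDLERS)
		return -EINVAL;

	/*
	 * Under speculative execution the bounds check above may be
	 * bypassed; the mask forces the index to 0 in that case instead
	 * of letting it address memory out of bounds.
	 */
	idx = array_index_nospec(idx, NR_HANDLERS);

	return handler_table[idx];
}
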
index 6e38c69..5a9b175 100644 (file)
@@ -924,6 +924,7 @@ void phy_device_remove(struct phy_device *phydev);
 int phy_init_hw(struct phy_device *phydev);
 int phy_suspend(struct phy_device *phydev);
 int phy_resume(struct phy_device *phydev);
+int __phy_resume(struct phy_device *phydev);
 int phy_loopback(struct phy_device *phydev, bool enable);
 struct phy_device *phy_attach(struct net_device *dev, const char *bus_id,
                              phy_interface_t interface);
index 9bc1750..d8340e6 100644 (file)
@@ -3288,8 +3288,7 @@ int skb_zerocopy(struct sk_buff *to, struct sk_buff *from,
 void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len);
 int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen);
 void skb_scrub_packet(struct sk_buff *skb, bool xnet);
-unsigned int skb_gso_transport_seglen(const struct sk_buff *skb);
-bool skb_gso_validate_mtu(const struct sk_buff *skb, unsigned int mtu);
+bool skb_gso_validate_network_len(const struct sk_buff *skb, unsigned int mtu);
 bool skb_gso_validate_mac_len(const struct sk_buff *skb, unsigned int len);
 struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features);
 struct sk_buff *skb_vlan_untag(struct sk_buff *skb);
@@ -4107,38 +4106,6 @@ static inline bool skb_head_is_locked(const struct sk_buff *skb)
        return !skb->head_frag || skb_cloned(skb);
 }
 
-/**
- * skb_gso_network_seglen - Return length of individual segments of a gso packet
- *
- * @skb: GSO skb
- *
- * skb_gso_network_seglen is used to determine the real size of the
- * individual segments, including Layer3 (IP, IPv6) and L4 headers (TCP/UDP).
- *
- * The MAC/L2 header is not accounted for.
- */
-static inline unsigned int skb_gso_network_seglen(const struct sk_buff *skb)
-{
-       unsigned int hdr_len = skb_transport_header(skb) -
-                              skb_network_header(skb);
-       return hdr_len + skb_gso_transport_seglen(skb);
-}
-
-/**
- * skb_gso_mac_seglen - Return length of individual segments of a gso packet
- *
- * @skb: GSO skb
- *
- * skb_gso_mac_seglen is used to determine the real size of the
- * individual segments, including MAC/L2, Layer3 (IP, IPv6) and L4
- * headers (TCP/UDP).
- */
-static inline unsigned int skb_gso_mac_seglen(const struct sk_buff *skb)
-{
-       unsigned int hdr_len = skb_transport_header(skb) - skb_mac_header(skb);
-       return hdr_len + skb_gso_transport_seglen(skb);
-}
-
 /* Local Checksum Offload.
  * Compute outer checksum based on the assumption that the
  * inner checksum will be offloaded later.
index c4df6ce..bf00a5a 100644 (file)
@@ -117,7 +117,7 @@ struct dmx_ts_feed {
  *               specified by @filter_value that will be used on the filter
  *               match logic.
  * @filter_mode:  Contains a 16 bytes (128 bits) filter mode.
- * @parent:      Pointer to struct dmx_section_feed.
+ * @parent:      Back-pointer to struct dmx_section_feed.
  * @priv:        Pointer to private data of the API client.
  *
  *
@@ -130,8 +130,9 @@ struct dmx_section_filter {
        u8 filter_value[DMX_MAX_FILTER_SIZE];
        u8 filter_mask[DMX_MAX_FILTER_SIZE];
        u8 filter_mode[DMX_MAX_FILTER_SIZE];
-       struct dmx_section_feed *parent; /* Back-pointer */
-       void *priv; /* Pointer to private data of the API client */
+       struct dmx_section_feed *parent;
+
+       void *priv;
 };
 
 /**
@@ -193,6 +194,10 @@ struct dmx_section_feed {
  * @buffer2:           Pointer to the tail of the filtered TS packets, or NULL.
  * @buffer2_length:    Length of the TS data in buffer2.
  * @source:            Indicates which TS feed is the source of the callback.
+ * @buffer_flags:      Address where buffer flags are stored. Those are
+ *                     used to report discontinuities to users via the DVB
+ *                     memory-mapped API, as defined by
+ *                     &enum dmx_buffer_flags.
  *
  * This function callback prototype, provided by the client of the demux API,
  * is called from the demux code. The function is only called when filtering
@@ -245,7 +250,8 @@ typedef int (*dmx_ts_cb)(const u8 *buffer1,
                         size_t buffer1_length,
                         const u8 *buffer2,
                         size_t buffer2_length,
-                        struct dmx_ts_feed *source);
+                        struct dmx_ts_feed *source,
+                        u32 *buffer_flags);
 
 /**
  * typedef dmx_section_cb - DVB demux TS filter callback function prototype
@@ -261,6 +267,10 @@ typedef int (*dmx_ts_cb)(const u8 *buffer1,
  *                     including headers and CRC.
  * @source:            Indicates which section feed is the source of the
  *                     callback.
+ * @buffer_flags:      Address where buffer flags are stored. Those are
+ *                     used to report discontinuities to users via the DVB
+ *                     memory-mapped API, as defined by
+ *                     &enum dmx_buffer_flags.
  *
  * This function callback prototype, provided by the client of the demux API,
  * is called from the demux code. The function is only called when
@@ -286,7 +296,8 @@ typedef int (*dmx_section_cb)(const u8 *buffer1,
                              size_t buffer1_len,
                              const u8 *buffer2,
                              size_t buffer2_len,
-                             struct dmx_section_filter *source);
+                             struct dmx_section_filter *source,
+                             u32 *buffer_flags);
 
 /*
  * DVB Front-End
index 2f5cb2c..baafa3b 100644 (file)
@@ -163,6 +163,7 @@ struct dmxdev_filter {
  * @demux:             pointer to &struct dmx_demux.
  * @filternum:         number of filters.
  * @capabilities:      demux capabilities as defined by &enum dmx_demux_caps.
+ * @may_do_mmap:       flag used to indicate if the device may do mmap.
  * @exit:              flag to indicate that the demux is being released.
  * @dvr_orig_fe:       pointer to &struct dmx_frontend.
  * @dvr_buffer:                embedded &struct dvb_ringbuffer for DVB output.
@@ -180,6 +181,7 @@ struct dmxdev {
        int filternum;
        int capabilities;
 
+       unsigned int may_do_mmap:1;
        unsigned int exit:1;
 #define DMXDEV_CAP_DUPLEX 1
        struct dmx_frontend *dvr_orig_fe;
index b070920..3b6aeca 100644 (file)
@@ -115,6 +115,8 @@ struct dvb_demux_filter {
  * @pid:       PID to be filtered.
  * @timeout:   feed timeout.
  * @filter:    pointer to &struct dvb_demux_filter.
+ * @buffer_flags: Buffer flags used to report discontinuities to users via the
+ *               DVB memory-mapped API, as defined by &enum dmx_buffer_flags.
  * @ts_type:   type of TS, as defined by &enum ts_filter_type.
  * @pes_type:  type of PES, as defined by &enum dmx_ts_pes.
  * @cc:                MPEG-TS packet continuity counter
@@ -145,6 +147,8 @@ struct dvb_demux_feed {
        ktime_t timeout;
        struct dvb_demux_filter *filter;
 
+       u32 buffer_flags;
+
        enum ts_filter_type ts_type;
        enum dmx_ts_pes pes_type;
 
index 01d1202..8cb8845 100644 (file)
@@ -85,6 +85,12 @@ struct dvb_buffer {
  * @nonblocking:
  *             If different than zero, device is operating on non-blocking
  *             mode.
+ * @flags:     buffer flags as defined by &enum dmx_buffer_flags.
+ *             Filled only at &DMX_DQBUF. &DMX_QBUF should zero this field.
+ * @count:     monotonic counter for filled buffers. Helps to identify
+ *             data stream losses. Filled only at &DMX_DQBUF. &DMX_QBUF should
+ *             zero this field.
+ *
  * @name:      name of the device type. Currently, it can either be
  *             "dvr" or "demux_filter".
  */
@@ -100,10 +106,14 @@ struct dvb_vb2_ctx {
        int     buf_siz;
        int     buf_cnt;
        int     nonblocking;
+
+       enum dmx_buffer_flags flags;
+       u32     count;
+
        char    name[DVB_VB2_NAME_MAX + 1];
 };
 
-#ifndef DVB_MMAP
+#ifndef CONFIG_DVB_MMAP
 static inline int dvb_vb2_init(struct dvb_vb2_ctx *ctx,
                               const char *name, int non_blocking)
 {
@@ -114,7 +124,7 @@ static inline int dvb_vb2_release(struct dvb_vb2_ctx *ctx)
        return 0;
 };
 #define dvb_vb2_is_streaming(ctx) (0)
-#define dvb_vb2_fill_buffer(ctx, file, wait) (0)
+#define dvb_vb2_fill_buffer(ctx, file, wait, flags) (0)
 
 static inline __poll_t dvb_vb2_poll(struct dvb_vb2_ctx *ctx,
                                    struct file *file,
@@ -153,9 +163,13 @@ int dvb_vb2_is_streaming(struct dvb_vb2_ctx *ctx);
  * @ctx:       control struct for VB2 handler
  * @src:       place where the data is stored
  * @len:       number of bytes to be copied from @src
+ * @buffer_flags:
+ *             pointer to buffer flags as defined by &enum dmx_buffer_flags.
+ *             can be NULL.
  */
 int dvb_vb2_fill_buffer(struct dvb_vb2_ctx *ctx,
-                       const unsigned char *src, int len);
+                       const unsigned char *src, int len,
+                       enum dmx_buffer_flags *buffer_flags);
 
 /**
  * dvb_vb2_poll - Wrapper to vb2_core_streamon() for Digital TV
index 8d1c3f2..c83125a 100644 (file)
@@ -253,6 +253,18 @@ struct devlink_resource_size_params {
        enum devlink_resource_unit unit;
 };
 
+static inline void
+devlink_resource_size_params_init(struct devlink_resource_size_params *size_params,
+                                 u64 size_min, u64 size_max,
+                                 u64 size_granularity,
+                                 enum devlink_resource_unit unit)
+{
+       size_params->size_min = size_min;
+       size_params->size_max = size_max;
+       size_params->size_granularity = size_granularity;
+       size_params->unit = unit;
+}
+
 /**
  * struct devlink_resource - devlink resource
  * @name: name of the resource
@@ -274,7 +286,7 @@ struct devlink_resource {
        u64 size_new;
        bool size_valid;
        struct devlink_resource *parent;
-       struct devlink_resource_size_params *size_params;
+       struct devlink_resource_size_params size_params;
        struct list_head list;
        struct list_head resource_list;
        const struct devlink_resource_ops *resource_ops;
@@ -398,7 +410,7 @@ int devlink_resource_register(struct devlink *devlink,
                              u64 resource_size,
                              u64 resource_id,
                              u64 parent_resource_id,
-                             struct devlink_resource_size_params *size_params,
+                             const struct devlink_resource_size_params *size_params,
                              const struct devlink_resource_ops *resource_ops);
 void devlink_resources_unregister(struct devlink *devlink,
                                  struct devlink_resource *resource);
@@ -552,7 +564,7 @@ devlink_resource_register(struct devlink *devlink,
                          u64 resource_size,
                          u64 resource_id,
                          u64 parent_resource_id,
-                         struct devlink_resource_size_params *size_params,
+                         const struct devlink_resource_size_params *size_params,
                          const struct devlink_resource_ops *resource_ops)
 {
        return 0;
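
Since struct devlink_resource now embeds size_params by value, drivers fill a local struct with the new helper and hand it to devlink_resource_register() by const pointer, which copies it into the resource. A hedged sketch of filling the parameters (the limits below are invented for illustration):

#include <net/devlink.h>

/*
 * Describe a hypothetical table resource: between 0 and 1M entries,
 * resizable in steps of 64 entries.
 */
static void example_fill_size_params(struct devlink_resource_size_params *p)
{
	devlink_resource_size_params_init(p, 0, 1 << 20, 64,
					  DEVLINK_RESOURCE_UNIT_ENTRY);
}
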
index c2d1b15..a91f251 100644 (file)
@@ -15,6 +15,7 @@
 
 #define ARC_REG_MCIP_BCR       0x0d0
 #define ARC_REG_MCIP_IDU_BCR   0x0D5
+#define ARC_REG_GFRC_BUILD     0x0D6
 #define ARC_REG_MCIP_CMD       0x600
 #define ARC_REG_MCIP_WDATA     0x601
 #define ARC_REG_MCIP_READBACK  0x602
@@ -36,10 +37,14 @@ struct mcip_cmd {
 #define CMD_SEMA_RELEASE               0x12
 
 #define CMD_DEBUG_SET_MASK             0x34
+#define CMD_DEBUG_READ_MASK            0x35
 #define CMD_DEBUG_SET_SELECT           0x36
+#define CMD_DEBUG_READ_SELECT          0x37
 
 #define CMD_GFRC_READ_LO               0x42
 #define CMD_GFRC_READ_HI               0x43
+#define CMD_GFRC_SET_CORE              0x47
+#define CMD_GFRC_READ_CORE             0x48
 
 #define CMD_IDU_ENABLE                 0x71
 #define CMD_IDU_DISABLE                        0x72
index 91a31ff..9a781f0 100644 (file)
@@ -63,6 +63,7 @@ struct drm_virtgpu_execbuffer {
 };
 
 #define VIRTGPU_PARAM_3D_FEATURES 1 /* do we have 3D features in the hw */
+#define VIRTGPU_PARAM_CAPSET_QUERY_FIX 2 /* do we have the capset fix */
 
 struct drm_virtgpu_getparam {
        __u64 param;
index 20d1490..3c50e07 100644 (file)
@@ -131,7 +131,7 @@ enum {
 #define BLKTRACE_BDEV_SIZE     32
 
 /*
- * User setup structure passed with BLKTRACESTART
+ * User setup structure passed with BLKTRACESETUP
  */
 struct blk_user_trace_setup {
        char name[BLKTRACE_BDEV_SIZE];  /* output */
index 5f3c5a9..b4112f0 100644 (file)
@@ -211,6 +211,32 @@ struct dmx_stc {
        __u64 stc;
 };
 
+/**
+ * enum dmx_buffer_flags - DMX memory-mapped buffer flags
+ *
+ * @DMX_BUFFER_FLAG_HAD_CRC32_DISCARD:
+ *     Indicates that the Kernel discarded one or more frames due to wrong
+ *     CRC32 checksum.
+ * @DMX_BUFFER_FLAG_TEI:
+ *     Indicates that the Kernel has detected a Transport Error indicator
+ *     (TEI) on a filtered pid.
+ * @DMX_BUFFER_PKT_COUNTER_MISMATCH:
+ *     Indicates that the Kernel has detected a packet counter mismatch
+ *     on a filtered pid.
+ * @DMX_BUFFER_FLAG_DISCONTINUITY_DETECTED:
+ *     Indicates that the Kernel has detected one or more frame discontinuities.
+ * @DMX_BUFFER_FLAG_DISCONTINUITY_INDICATOR:
+ *     Received at least one packet with a frame discontinuity indicator.
+ */
+
+enum dmx_buffer_flags {
+       DMX_BUFFER_FLAG_HAD_CRC32_DISCARD               = 1 << 0,
+       DMX_BUFFER_FLAG_TEI                             = 1 << 1,
+       DMX_BUFFER_PKT_COUNTER_MISMATCH                 = 1 << 2,
+       DMX_BUFFER_FLAG_DISCONTINUITY_DETECTED          = 1 << 3,
+       DMX_BUFFER_FLAG_DISCONTINUITY_INDICATOR         = 1 << 4,
+};
+
 /**
  * struct dmx_buffer - dmx buffer info
  *
@@ -220,15 +246,24 @@ struct dmx_stc {
  *             offset from the start of the device memory for this plane,
  *             (or a "cookie" that should be passed to mmap() as offset)
  * @length:    size in bytes of the buffer
+ * @flags:     bit array of buffer flags as defined by &enum dmx_buffer_flags.
+ *             Filled only at &DMX_DQBUF.
+ * @count:     monotonic counter for filled buffers. Helps to identify
+ *             data stream losses. Filled only at &DMX_DQBUF.
  *
  * Contains data exchanged by application and driver using one of the streaming
  * I/O methods.
+ *
+ * Please notice that, for &DMX_QBUF, only @index should be filled.
+ * On &DMX_DQBUF calls, all fields will be filled by the Kernel.
  */
 struct dmx_buffer {
        __u32                   index;
        __u32                   bytesused;
        __u32                   offset;
        __u32                   length;
+       __u32                   flags;
+       __u32                   count;
 };
 
 /**
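
For the memory-mapped demux API the new @flags and @count fields are only meaningful on dequeue. A hedged user-space sketch of inspecting them after a DMX_DQBUF call (the ioctl comes from the same mmap API; error handling kept minimal):

#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/dvb/dmx.h>

/* Dequeue one filled buffer and report any discontinuity-related flags. */
static int dequeue_and_check(int demux_fd)
{
	struct dmx_buffer buf = { 0 };

	if (ioctl(demux_fd, DMX_DQBUF, &buf) < 0) {
		perror("DMX_DQBUF");
		return -1;
	}

	if (buf.flags & DMX_BUFFER_FLAG_TEI)
		fprintf(stderr, "transport error indicator seen\n");
	if (buf.flags & DMX_BUFFER_FLAG_DISCONTINUITY_DETECTED)
		fprintf(stderr, "frame discontinuity detected\n");

	printf("buffer %u: %u bytes used, sequence count %u\n",
	       buf.index, buf.bytesused, buf.count);
	return 0;
}
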
index 0fb5ef9..7b26d4b 100644 (file)
@@ -761,6 +761,7 @@ struct kvm_ppc_resize_hpt {
 #define KVM_TRACE_PAUSE           __KVM_DEPRECATED_MAIN_0x07
 #define KVM_TRACE_DISABLE         __KVM_DEPRECATED_MAIN_0x08
 #define KVM_GET_EMULATED_CPUID   _IOWR(KVMIO, 0x09, struct kvm_cpuid2)
+#define KVM_GET_MSR_FEATURE_INDEX_LIST    _IOWR(KVMIO, 0x0a, struct kvm_msr_list)
 
 /*
  * Extension capability list.
@@ -934,6 +935,7 @@ struct kvm_ppc_resize_hpt {
 #define KVM_CAP_S390_AIS_MIGRATION 150
 #define KVM_CAP_PPC_GET_CPU_CHAR 151
 #define KVM_CAP_S390_BPB 152
+#define KVM_CAP_GET_MSR_FEATURES 153
 
 #ifdef KVM_CAP_IRQ_ROUTING
 
index 3d77fe9..9008f31 100644 (file)
@@ -42,7 +42,7 @@ typedef enum {
        SEV_RET_INVALID_PLATFORM_STATE,
        SEV_RET_INVALID_GUEST_STATE,
        SEV_RET_INAVLID_CONFIG,
-       SEV_RET_INVALID_len,
+       SEV_RET_INVALID_LEN,
        SEV_RET_ALREADY_OWNED,
        SEV_RET_INVALID_CERTIFICATE,
        SEV_RET_POLICY_FAILURE,
index a8100b9..969eaf1 100644 (file)
@@ -89,6 +89,7 @@
 #include <linux/io.h>
 #include <linux/cache.h>
 #include <linux/rodata_test.h>
+#include <linux/jump_label.h>
 
 #include <asm/io.h>
 #include <asm/bugs.h>
@@ -1000,6 +1001,7 @@ static int __ref kernel_init(void *unused)
        /* need to finish all async __init code before freeing the memory */
        async_synchronize_full();
        ftrace_free_init_mem();
+       jump_label_invalidate_init();
        free_initmem();
        mark_readonly();
        system_state = SYSTEM_RUNNING;
index 3c74b16..eb79a34 100644 (file)
@@ -1352,6 +1352,13 @@ static bool is_ctx_reg(struct bpf_verifier_env *env, int regno)
        return reg->type == PTR_TO_CTX;
 }
 
+static bool is_pkt_reg(struct bpf_verifier_env *env, int regno)
+{
+       const struct bpf_reg_state *reg = cur_regs(env) + regno;
+
+       return type_is_pkt_pointer(reg->type);
+}
+
 static int check_pkt_ptr_alignment(struct bpf_verifier_env *env,
                                   const struct bpf_reg_state *reg,
                                   int off, int size, bool strict)
@@ -1412,10 +1419,10 @@ static int check_generic_ptr_alignment(struct bpf_verifier_env *env,
 }
 
 static int check_ptr_alignment(struct bpf_verifier_env *env,
-                              const struct bpf_reg_state *reg,
-                              int off, int size)
+                              const struct bpf_reg_state *reg, int off,
+                              int size, bool strict_alignment_once)
 {
-       bool strict = env->strict_alignment;
+       bool strict = env->strict_alignment || strict_alignment_once;
        const char *pointer_desc = "";
 
        switch (reg->type) {
@@ -1572,9 +1579,9 @@ static void coerce_reg_to_size(struct bpf_reg_state *reg, int size)
  * if t==write && value_regno==-1, some unknown value is stored into memory
  * if t==read && value_regno==-1, don't care what we read from memory
  */
-static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regno, int off,
-                           int bpf_size, enum bpf_access_type t,
-                           int value_regno)
+static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regno,
+                           int off, int bpf_size, enum bpf_access_type t,
+                           int value_regno, bool strict_alignment_once)
 {
        struct bpf_reg_state *regs = cur_regs(env);
        struct bpf_reg_state *reg = regs + regno;
@@ -1586,7 +1593,7 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn
                return size;
 
        /* alignment checks will add in reg->off themselves */
-       err = check_ptr_alignment(env, reg, off, size);
+       err = check_ptr_alignment(env, reg, off, size, strict_alignment_once);
        if (err)
                return err;
 
@@ -1731,21 +1738,23 @@ static int check_xadd(struct bpf_verifier_env *env, int insn_idx, struct bpf_ins
                return -EACCES;
        }
 
-       if (is_ctx_reg(env, insn->dst_reg)) {
-               verbose(env, "BPF_XADD stores into R%d context is not allowed\n",
-                       insn->dst_reg);
+       if (is_ctx_reg(env, insn->dst_reg) ||
+           is_pkt_reg(env, insn->dst_reg)) {
+               verbose(env, "BPF_XADD stores into R%d %s is not allowed\n",
+                       insn->dst_reg, is_ctx_reg(env, insn->dst_reg) ?
+                       "context" : "packet");
                return -EACCES;
        }
 
        /* check whether atomic_add can read the memory */
        err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
-                              BPF_SIZE(insn->code), BPF_READ, -1);
+                              BPF_SIZE(insn->code), BPF_READ, -1, true);
        if (err)
                return err;
 
        /* check whether atomic_add can write into the same memory */
        return check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
-                               BPF_SIZE(insn->code), BPF_WRITE, -1);
+                               BPF_SIZE(insn->code), BPF_WRITE, -1, true);
 }
 
 /* when register 'regno' is passed into function that will read 'access_size'
@@ -2384,7 +2393,8 @@ static int check_helper_call(struct bpf_verifier_env *env, int func_id, int insn
         * is inferred from register state.
         */
        for (i = 0; i < meta.access_size; i++) {
-               err = check_mem_access(env, insn_idx, meta.regno, i, BPF_B, BPF_WRITE, -1);
+               err = check_mem_access(env, insn_idx, meta.regno, i, BPF_B,
+                                      BPF_WRITE, -1, false);
                if (err)
                        return err;
        }
@@ -4628,7 +4638,7 @@ static int do_check(struct bpf_verifier_env *env)
                         */
                        err = check_mem_access(env, insn_idx, insn->src_reg, insn->off,
                                               BPF_SIZE(insn->code), BPF_READ,
-                                              insn->dst_reg);
+                                              insn->dst_reg, false);
                        if (err)
                                return err;
 
@@ -4680,7 +4690,7 @@ static int do_check(struct bpf_verifier_env *env)
                        /* check that memory (dst_reg + off) is writeable */
                        err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
                                               BPF_SIZE(insn->code), BPF_WRITE,
-                                              insn->src_reg);
+                                              insn->src_reg, false);
                        if (err)
                                return err;
 
@@ -4715,7 +4725,7 @@ static int do_check(struct bpf_verifier_env *env)
                        /* check that memory (dst_reg + off) is writeable */
                        err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
                                               BPF_SIZE(insn->code), BPF_WRITE,
-                                              -1);
+                                              -1, false);
                        if (err)
                                return err;
 
index a17fdb6..6a5b61e 100644 (file)
@@ -64,7 +64,7 @@ const struct exception_table_entry *search_exception_tables(unsigned long addr)
        return e;
 }
 
-static inline int init_kernel_text(unsigned long addr)
+int init_kernel_text(unsigned long addr)
 {
        if (addr >= (unsigned long)_sinittext &&
            addr < (unsigned long)_einittext)
index 5187dfe..4c57704 100644 (file)
@@ -16,6 +16,7 @@ struct cpumap {
        unsigned int            available;
        unsigned int            allocated;
        unsigned int            managed;
+       bool                    initialized;
        bool                    online;
        unsigned long           alloc_map[IRQ_MATRIX_SIZE];
        unsigned long           managed_map[IRQ_MATRIX_SIZE];
@@ -81,9 +82,11 @@ void irq_matrix_online(struct irq_matrix *m)
 
        BUG_ON(cm->online);
 
-       bitmap_zero(cm->alloc_map, m->matrix_bits);
-       cm->available = m->alloc_size - (cm->managed + m->systembits_inalloc);
-       cm->allocated = 0;
+       if (!cm->initialized) {
+               cm->available = m->alloc_size;
+               cm->available -= cm->managed + m->systembits_inalloc;
+               cm->initialized = true;
+       }
        m->global_available += cm->available;
        cm->online = true;
        m->online_maps++;
@@ -370,14 +373,16 @@ void irq_matrix_free(struct irq_matrix *m, unsigned int cpu,
        if (WARN_ON_ONCE(bit < m->alloc_start || bit >= m->alloc_end))
                return;
 
-       if (cm->online) {
-               clear_bit(bit, cm->alloc_map);
-               cm->allocated--;
+       clear_bit(bit, cm->alloc_map);
+       cm->allocated--;
+
+       if (cm->online)
                m->total_allocated--;
-               if (!managed) {
-                       cm->available++;
+
+       if (!managed) {
+               cm->available++;
+               if (cm->online)
                        m->global_available++;
-               }
        }
        trace_irq_matrix_free(bit, cpu, m, cm);
 }
index b451709..52a0a7a 100644 (file)
@@ -366,12 +366,15 @@ static void __jump_label_update(struct static_key *key,
 {
        for (; (entry < stop) && (jump_entry_key(entry) == key); entry++) {
                /*
-                * entry->code set to 0 invalidates module init text sections
-                * kernel_text_address() verifies we are not in core kernel
-                * init code, see jump_label_invalidate_module_init().
+                * An entry->code of 0 indicates an entry which has been
+                * disabled because it was in an init text area.
                 */
-               if (entry->code && kernel_text_address(entry->code))
-                       arch_jump_label_transform(entry, jump_label_type(entry));
+               if (entry->code) {
+                       if (kernel_text_address(entry->code))
+                               arch_jump_label_transform(entry, jump_label_type(entry));
+                       else
+                               WARN_ONCE(1, "can't patch jump_label at %pS", (void *)entry->code);
+               }
        }
 }
 
@@ -417,6 +420,19 @@ void __init jump_label_init(void)
        cpus_read_unlock();
 }
 
+/* Disable any jump label entries in __init code */
+void __init jump_label_invalidate_init(void)
+{
+       struct jump_entry *iter_start = __start___jump_table;
+       struct jump_entry *iter_stop = __stop___jump_table;
+       struct jump_entry *iter;
+
+       for (iter = iter_start; iter < iter_stop; iter++) {
+               if (init_kernel_text(iter->code))
+                       iter->code = 0;
+       }
+}
+
 #ifdef CONFIG_MODULES
 
 static enum jump_label_type jump_label_init_type(struct jump_entry *entry)
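
The new hook only pays off once something calls it right before the init text is discarded; that call site is not part of these hunks. A minimal sketch of the assumed placement (the exact spot in init/main.c is an assumption, not shown by this diff):

static int __ref kernel_init(void *unused)
{
        /* ... earlier boot work ... */

        /* assumed placement: neutralize init-text jump entries, then free them */
        jump_label_invalidate_init();
        free_initmem();

        /* ... */
        return 0;
}
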
@@ -633,6 +649,7 @@ static void jump_label_del_module(struct module *mod)
        }
 }
 
+/* Disable any jump label entries in module init code */
 static void jump_label_invalidate_module_init(struct module *mod)
 {
        struct jump_entry *iter_start = mod->jump_entries;
index 4849be5..4dd4274 100644 (file)
@@ -275,8 +275,15 @@ static unsigned long pfn_end(struct dev_pagemap *pgmap)
        return (res->start + resource_size(res)) >> PAGE_SHIFT;
 }
 
+static unsigned long pfn_next(unsigned long pfn)
+{
+       if (pfn % 1024 == 0)
+               cond_resched();
+       return pfn + 1;
+}
+
 #define for_each_device_pfn(pfn, map) \
-       for (pfn = pfn_first(map); pfn < pfn_end(map); pfn++)
+       for (pfn = pfn_first(map); pfn < pfn_end(map); pfn = pfn_next(pfn))
 
 static void devm_memremap_pages_release(void *data)
 {
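
for_each_device_pfn() walks every pfn of a device memory region, which can be very large; folding cond_resched() into pfn_next() makes every such walk preemption-friendly, which is why the hand-rolled counter in devm_memremap_pages() is dropped further below. A rough sketch of the call pattern (illustrative, not a verbatim kernel loop):

        unsigned long pfn;

        for_each_device_pfn(pfn, pgmap) {
                struct page *page = pfn_to_page(pfn);

                /* per-page setup work goes here; the walk now yields the
                 * CPU every 1024 pfns inside pfn_next(), so very large
                 * regions no longer risk soft-lockup warnings
                 */
        }
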
@@ -337,10 +344,10 @@ void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap)
        resource_size_t align_start, align_size, align_end;
        struct vmem_altmap *altmap = pgmap->altmap_valid ?
                        &pgmap->altmap : NULL;
+       struct resource *res = &pgmap->res;
        unsigned long pfn, pgoff, order;
        pgprot_t pgprot = PAGE_KERNEL;
-       int error, nid, is_ram, i = 0;
-       struct resource *res = &pgmap->res;
+       int error, nid, is_ram;
 
        align_start = res->start & ~(SECTION_SIZE - 1);
        align_size = ALIGN(res->start + resource_size(res), SECTION_SIZE)
@@ -409,8 +416,6 @@ void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap)
                list_del(&page->lru);
                page->pgmap = pgmap;
                percpu_ref_get(pgmap->ref);
-               if (!(++i % 1024))
-                       cond_resched();
        }
 
        devm_add_action(dev, devm_memremap_pages_release, pgmap);
index fc11235..f274fbe 100644 (file)
@@ -2397,7 +2397,7 @@ skip:
 
                if (console_lock_spinning_disable_and_check()) {
                        printk_safe_exit_irqrestore(flags);
-                       return;
+                       goto out;
                }
 
                printk_safe_exit_irqrestore(flags);
@@ -2430,6 +2430,7 @@ skip:
        if (retry && console_trylock())
                goto again;
 
+out:
        if (wake_klogd)
                wake_up_klogd();
 }
index 48150ab..4a4fd56 100644 (file)
@@ -1894,6 +1894,12 @@ int timers_dead_cpu(unsigned int cpu)
                raw_spin_lock_irq(&new_base->lock);
                raw_spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);
 
+               /*
+                * The current CPU's base clock might be stale. Update it
+                * before moving the timers over.
+                */
+               forward_timer_base(new_base);
+
                BUG_ON(old_base->running_timer);
 
                for (i = 0; i < WHEEL_SIZE; i++)
index 1b34d21..7f5cdc1 100644 (file)
@@ -1491,12 +1491,12 @@ void debug_dma_alloc_coherent(struct device *dev, size_t size,
        if (unlikely(virt == NULL))
                return;
 
-       entry = dma_entry_alloc();
-       if (!entry)
+       /* handle vmalloc and linear addresses */
+       if (!is_vmalloc_addr(virt) && !virt_addr_valid(virt))
                return;
 
-       /* handle vmalloc and linear addresses */
-       if (!is_vmalloc_addr(virt) && !virt_to_page(virt))
+       entry = dma_entry_alloc();
+       if (!entry)
                return;
 
        entry->type      = dma_debug_coherent;
@@ -1528,7 +1528,7 @@ void debug_dma_free_coherent(struct device *dev, size_t size,
        };
 
        /* handle vmalloc and linear addresses */
-       if (!is_vmalloc_addr(virt) && !virt_to_page(virt))
+       if (!is_vmalloc_addr(virt) && !virt_addr_valid(virt))
                return;
 
        if (is_vmalloc_addr(virt))
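
Two things change in the coherent-allocation path above: the address sanity check now runs before dma_entry_alloc(), so the early return can no longer leak one of the preallocated debug entries, and virt_addr_valid() replaces virt_to_page(), which on most architectures is pure pfn arithmetic and never yields NULL for a bogus linear address. The resulting order, sketched in isolation:

        /* reject addresses that are neither vmalloc nor in the linear map */
        if (!is_vmalloc_addr(virt) && !virt_addr_valid(virt))
                return;

        /* only then consume an entry from the limited debug pool */
        entry = dma_entry_alloc();
        if (!entry)
                return;
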
index 99ec5bc..823b813 100644 (file)
--- a/lib/idr.c
+++ b/lib/idr.c
@@ -36,8 +36,8 @@ int idr_alloc_u32(struct idr *idr, void *ptr, u32 *nextid,
 {
        struct radix_tree_iter iter;
        void __rcu **slot;
-       int base = idr->idr_base;
-       int id = *nextid;
+       unsigned int base = idr->idr_base;
+       unsigned int id = *nextid;
 
        if (WARN_ON_ONCE(radix_tree_is_internal_node(ptr)))
                return -EINVAL;
@@ -204,10 +204,11 @@ int idr_for_each(const struct idr *idr,
 
        radix_tree_for_each_slot(slot, &idr->idr_rt, &iter, 0) {
                int ret;
+               unsigned long id = iter.index + base;
 
-               if (WARN_ON_ONCE(iter.index > INT_MAX))
+               if (WARN_ON_ONCE(id > INT_MAX))
                        break;
-               ret = fn(iter.index + base, rcu_dereference_raw(*slot), data);
+               ret = fn(id, rcu_dereference_raw(*slot), data);
                if (ret)
                        return ret;
        }
@@ -230,8 +231,8 @@ void *idr_get_next(struct idr *idr, int *nextid)
 {
        struct radix_tree_iter iter;
        void __rcu **slot;
-       int base = idr->idr_base;
-       int id = *nextid;
+       unsigned long base = idr->idr_base;
+       unsigned long id = *nextid;
 
        id = (id < base) ? 0 : id - base;
        slot = radix_tree_iter_find(&idr->idr_rt, &iter, id);
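
The common thread in these idr changes is that the rebased id (iter.index + base) is computed in a wide unsigned type and range-checked before anything narrows it to an int. A small stand-alone program (plain C, illustrative names) shows why checking iter.index alone is not enough once a non-zero base is involved:

#include <limits.h>
#include <stdio.h>

int main(void)
{
        unsigned long base  = 1000UL;                      /* analogue of idr->idr_base */
        unsigned long index = (unsigned long)INT_MAX - 10; /* radix tree slot index */
        unsigned long id    = index + base;                /* value handed to the callback */

        /* the old check looked only at 'index', which still passes ... */
        printf("index <= INT_MAX: %s\n", index <= INT_MAX ? "yes" : "no");
        /* ... while the rebased id no longer fits in a signed int */
        printf("id    <= INT_MAX: %s\n", id <= INT_MAX ? "yes" : "no");
        return 0;
}
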
index b4e2234..2efb213 100644 (file)
 #include <linux/if_vlan.h>
 #include <linux/random.h>
 #include <linux/highmem.h>
+#include <linux/sched.h>
 
 /* General test specific settings */
 #define MAX_SUBTESTS   3
-#define MAX_TESTRUNS   10000
+#define MAX_TESTRUNS   1000
 #define MAX_DATA       128
 #define MAX_INSNS      512
 #define MAX_K          0xffffFFFF
@@ -6582,6 +6583,7 @@ static __init int test_bpf(void)
                struct bpf_prog *fp;
                int err;
 
+               cond_resched();
                if (exclude_test(i))
                        continue;
 
index e21aa14..be09a98 100644 (file)
@@ -157,7 +157,7 @@ static void batadv_iv_ogm_orig_free(struct batadv_orig_node *orig_node)
  * Return: 0 on success, a negative error code otherwise.
  */
 static int batadv_iv_ogm_orig_add_if(struct batadv_orig_node *orig_node,
-                                    int max_if_num)
+                                    unsigned int max_if_num)
 {
        void *data_ptr;
        size_t old_size;
@@ -201,7 +201,8 @@ unlock:
  */
 static void
 batadv_iv_ogm_drop_bcast_own_entry(struct batadv_orig_node *orig_node,
-                                  int max_if_num, int del_if_num)
+                                  unsigned int max_if_num,
+                                  unsigned int del_if_num)
 {
        size_t chunk_size;
        size_t if_offset;
@@ -239,7 +240,8 @@ batadv_iv_ogm_drop_bcast_own_entry(struct batadv_orig_node *orig_node,
  */
 static void
 batadv_iv_ogm_drop_bcast_own_sum_entry(struct batadv_orig_node *orig_node,
-                                      int max_if_num, int del_if_num)
+                                      unsigned int max_if_num,
+                                      unsigned int del_if_num)
 {
        size_t if_offset;
        void *data_ptr;
@@ -276,7 +278,8 @@ batadv_iv_ogm_drop_bcast_own_sum_entry(struct batadv_orig_node *orig_node,
  * Return: 0 on success, a negative error code otherwise.
  */
 static int batadv_iv_ogm_orig_del_if(struct batadv_orig_node *orig_node,
-                                    int max_if_num, int del_if_num)
+                                    unsigned int max_if_num,
+                                    unsigned int del_if_num)
 {
        spin_lock_bh(&orig_node->bat_iv.ogm_cnt_lock);
 
@@ -311,7 +314,8 @@ static struct batadv_orig_node *
 batadv_iv_ogm_orig_get(struct batadv_priv *bat_priv, const u8 *addr)
 {
        struct batadv_orig_node *orig_node;
-       int size, hash_added;
+       int hash_added;
+       size_t size;
 
        orig_node = batadv_orig_hash_find(bat_priv, addr);
        if (orig_node)
@@ -893,7 +897,7 @@ batadv_iv_ogm_slide_own_bcast_window(struct batadv_hard_iface *hard_iface)
        u32 i;
        size_t word_index;
        u8 *w;
-       int if_num;
+       unsigned int if_num;
 
        for (i = 0; i < hash->size; i++) {
                head = &hash->table[i];
@@ -1023,7 +1027,7 @@ batadv_iv_ogm_orig_update(struct batadv_priv *bat_priv,
        struct batadv_neigh_node *tmp_neigh_node = NULL;
        struct batadv_neigh_node *router = NULL;
        struct batadv_orig_node *orig_node_tmp;
-       int if_num;
+       unsigned int if_num;
        u8 sum_orig, sum_neigh;
        u8 *neigh_addr;
        u8 tq_avg;
@@ -1182,7 +1186,7 @@ static bool batadv_iv_ogm_calc_tq(struct batadv_orig_node *orig_node,
        u8 total_count;
        u8 orig_eq_count, neigh_rq_count, neigh_rq_inv, tq_own;
        unsigned int neigh_rq_inv_cube, neigh_rq_max_cube;
-       int if_num;
+       unsigned int if_num;
        unsigned int tq_asym_penalty, inv_asym_penalty;
        unsigned int combined_tq;
        unsigned int tq_iface_penalty;
@@ -1702,9 +1706,9 @@ static void batadv_iv_ogm_process(const struct sk_buff *skb, int ogm_offset,
 
        if (is_my_orig) {
                unsigned long *word;
-               int offset;
+               size_t offset;
                s32 bit_pos;
-               s16 if_num;
+               unsigned int if_num;
                u8 *weight;
 
                orig_neigh_node = batadv_iv_ogm_orig_get(bat_priv,
@@ -2729,7 +2733,7 @@ static int batadv_iv_gw_dump_entry(struct sk_buff *msg, u32 portid, u32 seq,
        struct batadv_neigh_ifinfo *router_ifinfo = NULL;
        struct batadv_neigh_node *router;
        struct batadv_gw_node *curr_gw;
-       int ret = -EINVAL;
+       int ret = 0;
        void *hdr;
 
        router = batadv_orig_router_get(gw_node->orig_node, BATADV_IF_DEFAULT);
index 9c3a34b..ec93337 100644 (file)
@@ -928,7 +928,7 @@ static int batadv_v_gw_dump_entry(struct sk_buff *msg, u32 portid, u32 seq,
        struct batadv_neigh_ifinfo *router_ifinfo = NULL;
        struct batadv_neigh_node *router;
        struct batadv_gw_node *curr_gw;
-       int ret = -EINVAL;
+       int ret = 0;
        void *hdr;
 
        router = batadv_orig_router_get(gw_node->orig_node, BATADV_IF_DEFAULT);
index 8ff8134..a2de5a4 100644 (file)
@@ -2161,22 +2161,25 @@ batadv_bla_claim_dump_bucket(struct sk_buff *msg, u32 portid, u32 seq,
 {
        struct batadv_bla_claim *claim;
        int idx = 0;
+       int ret = 0;
 
        rcu_read_lock();
        hlist_for_each_entry_rcu(claim, head, hash_entry) {
                if (idx++ < *idx_skip)
                        continue;
-               if (batadv_bla_claim_dump_entry(msg, portid, seq,
-                                               primary_if, claim)) {
+
+               ret = batadv_bla_claim_dump_entry(msg, portid, seq,
+                                                 primary_if, claim);
+               if (ret) {
                        *idx_skip = idx - 1;
                        goto unlock;
                }
        }
 
-       *idx_skip = idx;
+       *idx_skip = 0;
 unlock:
        rcu_read_unlock();
-       return 0;
+       return ret;
 }
 
 /**
@@ -2391,22 +2394,25 @@ batadv_bla_backbone_dump_bucket(struct sk_buff *msg, u32 portid, u32 seq,
 {
        struct batadv_bla_backbone_gw *backbone_gw;
        int idx = 0;
+       int ret = 0;
 
        rcu_read_lock();
        hlist_for_each_entry_rcu(backbone_gw, head, hash_entry) {
                if (idx++ < *idx_skip)
                        continue;
-               if (batadv_bla_backbone_dump_entry(msg, portid, seq,
-                                                  primary_if, backbone_gw)) {
+
+               ret = batadv_bla_backbone_dump_entry(msg, portid, seq,
+                                                    primary_if, backbone_gw);
+               if (ret) {
                        *idx_skip = idx - 1;
                        goto unlock;
                }
        }
 
-       *idx_skip = idx;
+       *idx_skip = 0;
 unlock:
        rcu_read_unlock();
-       return 0;
+       return ret;
 }
 
 /**
index d815acc..0fddc17 100644 (file)
@@ -288,7 +288,8 @@ batadv_frag_merge_packets(struct hlist_head *chain)
        /* Move the existing MAC header to just before the payload. (Override
         * the fragment header.)
         */
-       skb_pull_rcsum(skb_out, hdr_size);
+       skb_pull(skb_out, hdr_size);
+       skb_out->ip_summed = CHECKSUM_NONE;
        memmove(skb_out->data - ETH_HLEN, skb_mac_header(skb_out), ETH_HLEN);
        skb_set_mac_header(skb_out, -ETH_HLEN);
        skb_reset_network_header(skb_out);
index fd4a263..c405d15 100644 (file)
@@ -763,6 +763,11 @@ int batadv_hardif_enable_interface(struct batadv_hard_iface *hard_iface,
        hard_iface->soft_iface = soft_iface;
        bat_priv = netdev_priv(hard_iface->soft_iface);
 
+       if (bat_priv->num_ifaces >= UINT_MAX) {
+               ret = -ENOSPC;
+               goto err_dev;
+       }
+
        ret = netdev_master_upper_dev_link(hard_iface->net_dev,
                                           soft_iface, NULL, NULL, NULL);
        if (ret)
@@ -876,7 +881,7 @@ void batadv_hardif_disable_interface(struct batadv_hard_iface *hard_iface,
        batadv_hardif_recalc_extra_skbroom(hard_iface->soft_iface);
 
        /* nobody uses this interface anymore */
-       if (!bat_priv->num_ifaces) {
+       if (bat_priv->num_ifaces == 0) {
                batadv_gw_check_client_stop(bat_priv);
 
                if (autodel == BATADV_IF_CLEANUP_AUTO)
@@ -912,7 +917,7 @@ batadv_hardif_add_interface(struct net_device *net_dev)
        if (ret)
                goto free_if;
 
-       hard_iface->if_num = -1;
+       hard_iface->if_num = 0;
        hard_iface->net_dev = net_dev;
        hard_iface->soft_iface = NULL;
        hard_iface->if_status = BATADV_IF_NOT_IN_USE;
index 2a51a0c..716e5b4 100644 (file)
@@ -1569,7 +1569,7 @@ int batadv_orig_dump(struct sk_buff *msg, struct netlink_callback *cb)
  * Return: 0 on success or negative error number in case of failure
  */
 int batadv_orig_hash_add_if(struct batadv_hard_iface *hard_iface,
-                           int max_if_num)
+                           unsigned int max_if_num)
 {
        struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
        struct batadv_algo_ops *bao = bat_priv->algo_ops;
@@ -1611,7 +1611,7 @@ err:
  * Return: 0 on success or negative error number in case of failure
  */
 int batadv_orig_hash_del_if(struct batadv_hard_iface *hard_iface,
-                           int max_if_num)
+                           unsigned int max_if_num)
 {
        struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
        struct batadv_hashtable *hash = bat_priv->orig_hash;
index f3601ab..3b3f59b 100644 (file)
@@ -73,9 +73,9 @@ int batadv_orig_seq_print_text(struct seq_file *seq, void *offset);
 int batadv_orig_dump(struct sk_buff *msg, struct netlink_callback *cb);
 int batadv_orig_hardif_seq_print_text(struct seq_file *seq, void *offset);
 int batadv_orig_hash_add_if(struct batadv_hard_iface *hard_iface,
-                           int max_if_num);
+                           unsigned int max_if_num);
 int batadv_orig_hash_del_if(struct batadv_hard_iface *hard_iface,
-                           int max_if_num);
+                           unsigned int max_if_num);
 struct batadv_orig_node_vlan *
 batadv_orig_node_vlan_new(struct batadv_orig_node *orig_node,
                          unsigned short vid);
index c95e2b2..edeffcb 100644 (file)
@@ -459,13 +459,7 @@ void batadv_interface_rx(struct net_device *soft_iface,
 
        /* skb->dev & skb->pkt_type are set here */
        skb->protocol = eth_type_trans(skb, soft_iface);
-
-       /* should not be necessary anymore as we use skb_pull_rcsum()
-        * TODO: please verify this and remove this TODO
-        * -- Dec 21st 2009, Simon Wunderlich
-        */
-
-       /* skb->ip_summed = CHECKSUM_UNNECESSARY; */
+       skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
 
        batadv_inc_counter(bat_priv, BATADV_CNT_RX);
        batadv_add_counter(bat_priv, BATADV_CNT_RX_BYTES,
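
Both batman-adv hunks above enforce the same rule: when bytes are removed from the front of a CHECKSUM_COMPLETE skb, skb->csum has to be adjusted or the checksum state downgraded. The fragment-merge path opts for skb_pull() plus CHECKSUM_NONE because it goes on to rearrange headers, while the receive path keeps the checksum and repairs it after eth_type_trans(). The two idiomatic shapes, shown in isolation:

        /* pull and fix up the receive checksum in a single step */
        skb_pull_rcsum(skb, hdr_size);

        /* or, when eth_type_trans() has already advanced skb->data past the
         * Ethernet header, subtract those bytes from the checksum afterwards
         */
        skb->protocol = eth_type_trans(skb, soft_iface);
        skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
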
index 4a3b883..476b052 100644 (file)
@@ -167,7 +167,7 @@ struct batadv_hard_iface {
        struct list_head list;
 
        /** @if_num: identificator of the interface */
-       s16 if_num;
+       unsigned int if_num;
 
        /** @if_status: status of the interface for batman-adv */
        char if_status;
@@ -1596,7 +1596,7 @@ struct batadv_priv {
        atomic_t batman_queue_left;
 
        /** @num_ifaces: number of interfaces assigned to this mesh interface */
-       char num_ifaces;
+       unsigned int num_ifaces;
 
        /** @mesh_obj: kobject for sysfs mesh subdirectory */
        struct kobject *mesh_obj;
@@ -2186,15 +2186,16 @@ struct batadv_algo_orig_ops {
         *  orig_node due to a new hard-interface being added into the mesh
         *  (optional)
         */
-       int (*add_if)(struct batadv_orig_node *orig_node, int max_if_num);
+       int (*add_if)(struct batadv_orig_node *orig_node,
+                     unsigned int max_if_num);
 
        /**
         * @del_if: ask the routing algorithm to apply the needed changes to the
         *  orig_node due to an hard-interface being removed from the mesh
         *  (optional)
         */
-       int (*del_if)(struct batadv_orig_node *orig_node, int max_if_num,
-                     int del_if_num);
+       int (*del_if)(struct batadv_orig_node *orig_node,
+                     unsigned int max_if_num, unsigned int del_if_num);
 
 #ifdef CONFIG_BATMAN_ADV_DEBUGFS
        /** @print: print the originator table (optional) */
index 484f541..c2120eb 100644 (file)
@@ -214,7 +214,7 @@ static int br_validate_ipv4(struct net *net, struct sk_buff *skb)
 
        iph = ip_hdr(skb);
        if (unlikely(ip_fast_csum((u8 *)iph, iph->ihl)))
-               goto inhdr_error;
+               goto csum_error;
 
        len = ntohs(iph->tot_len);
        if (skb->len < len) {
@@ -236,6 +236,8 @@ static int br_validate_ipv4(struct net *net, struct sk_buff *skb)
         */
        return 0;
 
+csum_error:
+       __IP_INC_STATS(net, IPSTATS_MIB_CSUMERRORS);
 inhdr_error:
        __IP_INC_STATS(net, IPSTATS_MIB_INHDRERRORS);
 drop:
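
The new csum_error label gives checksum failures their own MIB counter and then falls through, so such packets also count as header errors, matching how the native IPv4 receive path accounts them. The ladder in compressed form (illustrative wrapper, not the full function):

static int example_validate_ipv4(struct net *net, struct sk_buff *skb)
{
        const struct iphdr *iph = ip_hdr(skb);

        if (iph->ihl < 5 || iph->version != 4)
                goto inhdr_error;
        if (ip_fast_csum((u8 *)iph, iph->ihl))
                goto csum_error;
        return 0;

csum_error:
        __IP_INC_STATS(net, IPSTATS_MIB_CSUMERRORS);    /* new counter */
inhdr_error:
        __IP_INC_STATS(net, IPSTATS_MIB_INHDRERRORS);   /* fall-through: also a header error */
        return -1;
}
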
index 5193527..9896f49 100644 (file)
@@ -168,6 +168,8 @@ static struct net_bridge_vlan *br_vlan_get_master(struct net_bridge *br, u16 vid
                masterv = br_vlan_find(vg, vid);
                if (WARN_ON(!masterv))
                        return NULL;
+               refcount_set(&masterv->refcnt, 1);
+               return masterv;
        }
        refcount_inc(&masterv->refcnt);
 
index ce7152a..c5afb42 100644 (file)
@@ -172,18 +172,35 @@ ebt_among_mt(const struct sk_buff *skb, struct xt_action_param *par)
        return true;
 }
 
+static bool poolsize_invalid(const struct ebt_mac_wormhash *w)
+{
+       return w && w->poolsize >= (INT_MAX / sizeof(struct ebt_mac_wormhash_tuple));
+}
+
 static int ebt_among_mt_check(const struct xt_mtchk_param *par)
 {
        const struct ebt_among_info *info = par->matchinfo;
        const struct ebt_entry_match *em =
                container_of(par->matchinfo, const struct ebt_entry_match, data);
-       int expected_length = sizeof(struct ebt_among_info);
+       unsigned int expected_length = sizeof(struct ebt_among_info);
        const struct ebt_mac_wormhash *wh_dst, *wh_src;
        int err;
 
+       if (expected_length > em->match_size)
+               return -EINVAL;
+
        wh_dst = ebt_among_wh_dst(info);
-       wh_src = ebt_among_wh_src(info);
+       if (poolsize_invalid(wh_dst))
+               return -EINVAL;
+
        expected_length += ebt_mac_wormhash_size(wh_dst);
+       if (expected_length > em->match_size)
+               return -EINVAL;
+
+       wh_src = ebt_among_wh_src(info);
+       if (poolsize_invalid(wh_src))
+               return -EINVAL;
+
        expected_length += ebt_mac_wormhash_size(wh_src);
 
        if (em->match_size != EBT_ALIGN(expected_length)) {
index 02c4b40..254ef9f 100644 (file)
@@ -1641,7 +1641,8 @@ static int compat_match_to_user(struct ebt_entry_match *m, void __user **dstptr,
        int off = ebt_compat_match_offset(match, m->match_size);
        compat_uint_t msize = m->match_size - off;
 
-       BUG_ON(off >= m->match_size);
+       if (WARN_ON(off >= m->match_size))
+               return -EINVAL;
 
        if (copy_to_user(cm->u.name, match->name,
            strlen(match->name) + 1) || put_user(msize, &cm->match_size))
@@ -1671,7 +1672,8 @@ static int compat_target_to_user(struct ebt_entry_target *t,
        int off = xt_compat_target_offset(target);
        compat_uint_t tsize = t->target_size - off;
 
-       BUG_ON(off >= t->target_size);
+       if (WARN_ON(off >= t->target_size))
+               return -EINVAL;
 
        if (copy_to_user(cm->u.name, target->name,
            strlen(target->name) + 1) || put_user(tsize, &cm->match_size))
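
These two conversions set the pattern repeated through the rest of the ebtables compat code: a consistency check that malformed userspace input can reach must not take the machine down, so the hard assertion becomes a warn-once-and-fail. In generic form (names are illustrative):

        /* before: attacker-reachable state crashes the kernel */
        BUG_ON(off >= size);

        /* after: one warning with a backtrace, then fail the request */
        if (WARN_ON(off >= size))
                return -EINVAL;
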
@@ -1902,7 +1904,8 @@ static int ebt_buf_add(struct ebt_entries_buf_state *state,
        if (state->buf_kern_start == NULL)
                goto count_only;
 
-       BUG_ON(state->buf_kern_offset + sz > state->buf_kern_len);
+       if (WARN_ON(state->buf_kern_offset + sz > state->buf_kern_len))
+               return -EINVAL;
 
        memcpy(state->buf_kern_start + state->buf_kern_offset, data, sz);
 
@@ -1915,7 +1918,8 @@ static int ebt_buf_add_pad(struct ebt_entries_buf_state *state, unsigned int sz)
 {
        char *b = state->buf_kern_start;
 
-       BUG_ON(b && state->buf_kern_offset > state->buf_kern_len);
+       if (WARN_ON(b && state->buf_kern_offset > state->buf_kern_len))
+               return -EINVAL;
 
        if (b != NULL && sz > 0)
                memset(b + state->buf_kern_offset, 0, sz);
@@ -1992,8 +1996,10 @@ static int compat_mtw_from_user(struct compat_ebt_entry_mwt *mwt,
        pad = XT_ALIGN(size_kern) - size_kern;
 
        if (pad > 0 && dst) {
-               BUG_ON(state->buf_kern_len <= pad);
-               BUG_ON(state->buf_kern_offset - (match_size + off) + size_kern > state->buf_kern_len - pad);
+               if (WARN_ON(state->buf_kern_len <= pad))
+                       return -EINVAL;
+               if (WARN_ON(state->buf_kern_offset - (match_size + off) + size_kern > state->buf_kern_len - pad))
+                       return -EINVAL;
                memset(dst + size_kern, 0, pad);
        }
        return off + match_size;
@@ -2043,7 +2049,8 @@ static int ebt_size_mwt(struct compat_ebt_entry_mwt *match32,
                if (ret < 0)
                        return ret;
 
-               BUG_ON(ret < match32->match_size);
+               if (WARN_ON(ret < match32->match_size))
+                       return -EINVAL;
                growth += ret - match32->match_size;
                growth += ebt_compat_entry_padsize();
 
@@ -2053,7 +2060,9 @@ static int ebt_size_mwt(struct compat_ebt_entry_mwt *match32,
                if (match_kern)
                        match_kern->match_size = ret;
 
-               WARN_ON(type == EBT_COMPAT_TARGET && size_left);
+               if (WARN_ON(type == EBT_COMPAT_TARGET && size_left))
+                       return -EINVAL;
+
                match32 = (struct compat_ebt_entry_mwt *) buf;
        }
 
@@ -2109,6 +2118,15 @@ static int size_entry_mwt(struct ebt_entry *entry, const unsigned char *base,
         *
         * offsets are relative to beginning of struct ebt_entry (i.e., 0).
         */
+       for (i = 0; i < 4 ; ++i) {
+               if (offsets[i] >= *total)
+                       return -EINVAL;
+               if (i == 0)
+                       continue;
+               if (offsets[i-1] > offsets[i])
+                       return -EINVAL;
+       }
+
        for (i = 0, j = 1 ; j < 4 ; j++, i++) {
                struct compat_ebt_entry_mwt *match32;
                unsigned int size;
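
The added loop rejects offset arrays that point past the entry or run backwards before any offset is used for pointer arithmetic. A stand-alone model of the same check (plain C, illustrative names):

#include <stdbool.h>
#include <stddef.h>

static bool offsets_sane(const unsigned int *offsets, size_t n,
                         unsigned int total)
{
        size_t i;

        for (i = 0; i < n; i++) {
                if (offsets[i] >= total)
                        return false;           /* points past the entry */
                if (i > 0 && offsets[i - 1] > offsets[i])
                        return false;           /* must be non-decreasing */
        }
        return true;
}
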
@@ -2140,7 +2158,8 @@ static int size_entry_mwt(struct ebt_entry *entry, const unsigned char *base,
 
        startoff = state->buf_user_offset - startoff;
 
-       BUG_ON(*total < startoff);
+       if (WARN_ON(*total < startoff))
+               return -EINVAL;
        *total -= startoff;
        return 0;
 }
@@ -2267,7 +2286,8 @@ static int compat_do_replace(struct net *net, void __user *user,
        state.buf_kern_len = size64;
 
        ret = compat_copy_entries(entries_tmp, tmp.entries_size, &state);
-       BUG_ON(ret < 0);        /* parses same data again */
+       if (WARN_ON(ret < 0))
+               goto out_unlock;
 
        vfree(entries_tmp);
        tmp.entries_size = size64;
index 1e492ef..4d4c822 100644 (file)
@@ -418,6 +418,7 @@ ceph_parse_options(char *options, const char *dev_name,
                                opt->flags |= CEPH_OPT_FSID;
                        break;
                case Opt_name:
+                       kfree(opt->name);
                        opt->name = kstrndup(argstr[0].from,
                                              argstr[0].to-argstr[0].from,
                                              GFP_KERNEL);
@@ -427,6 +428,9 @@ ceph_parse_options(char *options, const char *dev_name,
                        }
                        break;
                case Opt_secret:
+                       ceph_crypto_key_destroy(opt->key);
+                       kfree(opt->key);
+
                        opt->key = kzalloc(sizeof(*opt->key), GFP_KERNEL);
                        if (!opt->key) {
                                err = -ENOMEM;
@@ -437,6 +441,9 @@ ceph_parse_options(char *options, const char *dev_name,
                                goto out;
                        break;
                case Opt_key:
+                       ceph_crypto_key_destroy(opt->key);
+                       kfree(opt->key);
+
                        opt->key = kzalloc(sizeof(*opt->key), GFP_KERNEL);
                        if (!opt->key) {
                                err = -ENOMEM;
index 8b51f92..e5b8d42 100644 (file)
@@ -6396,6 +6396,7 @@ static int __netdev_upper_dev_link(struct net_device *dev,
                .linking = true,
                .upper_info = upper_info,
        };
+       struct net_device *master_dev;
        int ret = 0;
 
        ASSERT_RTNL();
@@ -6407,11 +6408,14 @@ static int __netdev_upper_dev_link(struct net_device *dev,
        if (netdev_has_upper_dev(upper_dev, dev))
                return -EBUSY;
 
-       if (netdev_has_upper_dev(dev, upper_dev))
-               return -EEXIST;
-
-       if (master && netdev_master_upper_dev_get(dev))
-               return -EBUSY;
+       if (!master) {
+               if (netdev_has_upper_dev(dev, upper_dev))
+                       return -EEXIST;
+       } else {
+               master_dev = netdev_master_upper_dev_get(dev);
+               if (master_dev)
+                       return master_dev == upper_dev ? -EEXIST : -EBUSY;
+       }
 
        ret = call_netdevice_notifiers_info(NETDEV_PRECHANGEUPPER,
                                            &changeupper_info.info);
index 88e8467..1b5bf0d 100644 (file)
@@ -1695,10 +1695,11 @@ static int devlink_dpipe_table_put(struct sk_buff *skb,
                goto nla_put_failure;
 
        if (table->resource_valid) {
-               nla_put_u64_64bit(skb, DEVLINK_ATTR_DPIPE_TABLE_RESOURCE_ID,
-                                 table->resource_id, DEVLINK_ATTR_PAD);
-               nla_put_u64_64bit(skb, DEVLINK_ATTR_DPIPE_TABLE_RESOURCE_UNITS,
-                                 table->resource_units, DEVLINK_ATTR_PAD);
+               if (nla_put_u64_64bit(skb, DEVLINK_ATTR_DPIPE_TABLE_RESOURCE_ID,
+                                     table->resource_id, DEVLINK_ATTR_PAD) ||
+                   nla_put_u64_64bit(skb, DEVLINK_ATTR_DPIPE_TABLE_RESOURCE_UNITS,
+                                     table->resource_units, DEVLINK_ATTR_PAD))
+                       goto nla_put_failure;
        }
        if (devlink_dpipe_matches_put(table, skb))
                goto nla_put_failure;
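
Several of the devlink fixes below follow the same recipe: every nla_put*() return value is checked so that a message which no longer fits aborts with -EMSGSIZE instead of going out with attributes silently missing. A filler written in that style (attribute names from the hunk above, the wrapper itself is illustrative):

static int example_fill_resource(struct sk_buff *msg, u64 id, u64 units)
{
        if (nla_put_u64_64bit(msg, DEVLINK_ATTR_DPIPE_TABLE_RESOURCE_ID,
                              id, DEVLINK_ATTR_PAD) ||
            nla_put_u64_64bit(msg, DEVLINK_ATTR_DPIPE_TABLE_RESOURCE_UNITS,
                              units, DEVLINK_ATTR_PAD))
                return -EMSGSIZE;

        return 0;
}
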
@@ -2332,7 +2333,7 @@ devlink_resource_validate_children(struct devlink_resource *resource)
        list_for_each_entry(child_resource, &resource->resource_list, list)
                parts_size += child_resource->size_new;
 
-       if (parts_size > resource->size)
+       if (parts_size > resource->size_new)
                size_valid = false;
 out:
        resource->size_valid = size_valid;
@@ -2345,17 +2346,17 @@ devlink_resource_validate_size(struct devlink_resource *resource, u64 size,
        u64 reminder;
        int err = 0;
 
-       if (size > resource->size_params->size_max) {
+       if (size > resource->size_params.size_max) {
                NL_SET_ERR_MSG_MOD(extack, "Size larger than maximum");
                err = -EINVAL;
        }
 
-       if (size < resource->size_params->size_min) {
+       if (size < resource->size_params.size_min) {
                NL_SET_ERR_MSG_MOD(extack, "Size smaller than minimum");
                err = -EINVAL;
        }
 
-       div64_u64_rem(size, resource->size_params->size_granularity, &reminder);
+       div64_u64_rem(size, resource->size_params.size_granularity, &reminder);
        if (reminder) {
                NL_SET_ERR_MSG_MOD(extack, "Wrong granularity");
                err = -EINVAL;
@@ -2394,20 +2395,22 @@ static int devlink_nl_cmd_resource_set(struct sk_buff *skb,
        return 0;
 }
 
-static void
+static int
 devlink_resource_size_params_put(struct devlink_resource *resource,
                                 struct sk_buff *skb)
 {
        struct devlink_resource_size_params *size_params;
 
-       size_params = resource->size_params;
-       nla_put_u64_64bit(skb, DEVLINK_ATTR_RESOURCE_SIZE_GRAN,
-                         size_params->size_granularity, DEVLINK_ATTR_PAD);
-       nla_put_u64_64bit(skb, DEVLINK_ATTR_RESOURCE_SIZE_MAX,
-                         size_params->size_max, DEVLINK_ATTR_PAD);
-       nla_put_u64_64bit(skb, DEVLINK_ATTR_RESOURCE_SIZE_MIN,
-                         size_params->size_min, DEVLINK_ATTR_PAD);
-       nla_put_u8(skb, DEVLINK_ATTR_RESOURCE_UNIT, size_params->unit);
+       size_params = &resource->size_params;
+       if (nla_put_u64_64bit(skb, DEVLINK_ATTR_RESOURCE_SIZE_GRAN,
+                             size_params->size_granularity, DEVLINK_ATTR_PAD) ||
+           nla_put_u64_64bit(skb, DEVLINK_ATTR_RESOURCE_SIZE_MAX,
+                             size_params->size_max, DEVLINK_ATTR_PAD) ||
+           nla_put_u64_64bit(skb, DEVLINK_ATTR_RESOURCE_SIZE_MIN,
+                             size_params->size_min, DEVLINK_ATTR_PAD) ||
+           nla_put_u8(skb, DEVLINK_ATTR_RESOURCE_UNIT, size_params->unit))
+               return -EMSGSIZE;
+       return 0;
 }
 
 static int devlink_resource_put(struct devlink *devlink, struct sk_buff *skb,
@@ -2431,10 +2434,12 @@ static int devlink_resource_put(struct devlink *devlink, struct sk_buff *skb,
                nla_put_u64_64bit(skb, DEVLINK_ATTR_RESOURCE_SIZE_NEW,
                                  resource->size_new, DEVLINK_ATTR_PAD);
        if (resource->resource_ops && resource->resource_ops->occ_get)
-               nla_put_u64_64bit(skb, DEVLINK_ATTR_RESOURCE_OCC,
-                                 resource->resource_ops->occ_get(devlink),
-                                 DEVLINK_ATTR_PAD);
-       devlink_resource_size_params_put(resource, skb);
+               if (nla_put_u64_64bit(skb, DEVLINK_ATTR_RESOURCE_OCC,
+                                     resource->resource_ops->occ_get(devlink),
+                                     DEVLINK_ATTR_PAD))
+                       goto nla_put_failure;
+       if (devlink_resource_size_params_put(resource, skb))
+               goto nla_put_failure;
        if (list_empty(&resource->resource_list))
                goto out;
 
@@ -3173,7 +3178,7 @@ int devlink_resource_register(struct devlink *devlink,
                              u64 resource_size,
                              u64 resource_id,
                              u64 parent_resource_id,
-                             struct devlink_resource_size_params *size_params,
+                             const struct devlink_resource_size_params *size_params,
                              const struct devlink_resource_ops *resource_ops)
 {
        struct devlink_resource *resource;
@@ -3216,7 +3221,8 @@ int devlink_resource_register(struct devlink *devlink,
        resource->id = resource_id;
        resource->resource_ops = resource_ops;
        resource->size_valid = true;
-       resource->size_params = size_params;
+       memcpy(&resource->size_params, size_params,
+              sizeof(resource->size_params));
        INIT_LIST_HEAD(&resource->resource_list);
        list_add_tail(&resource->list, resource_list);
 out:
index 494e6a5..3f89c76 100644 (file)
@@ -2520,11 +2520,14 @@ static int set_phy_tunable(struct net_device *dev, void __user *useraddr)
 static int ethtool_get_fecparam(struct net_device *dev, void __user *useraddr)
 {
        struct ethtool_fecparam fecparam = { ETHTOOL_GFECPARAM };
+       int rc;
 
        if (!dev->ethtool_ops->get_fecparam)
                return -EOPNOTSUPP;
 
-       dev->ethtool_ops->get_fecparam(dev, &fecparam);
+       rc = dev->ethtool_ops->get_fecparam(dev, &fecparam);
+       if (rc)
+               return rc;
 
        if (copy_to_user(useraddr, &fecparam, sizeof(fecparam)))
                return -EFAULT;
index 96d36b8..715c134 100644 (file)
@@ -4893,7 +4893,7 @@ EXPORT_SYMBOL_GPL(skb_scrub_packet);
  *
  * The MAC/L2 or network (IP, IPv6) headers are not accounted for.
  */
-unsigned int skb_gso_transport_seglen(const struct sk_buff *skb)
+static unsigned int skb_gso_transport_seglen(const struct sk_buff *skb)
 {
        const struct skb_shared_info *shinfo = skb_shinfo(skb);
        unsigned int thlen = 0;
@@ -4915,7 +4915,40 @@ unsigned int skb_gso_transport_seglen(const struct sk_buff *skb)
         */
        return thlen + shinfo->gso_size;
 }
-EXPORT_SYMBOL_GPL(skb_gso_transport_seglen);
+
+/**
+ * skb_gso_network_seglen - Return length of individual segments of a gso packet
+ *
+ * @skb: GSO skb
+ *
+ * skb_gso_network_seglen is used to determine the real size of the
+ * individual segments, including Layer3 (IP, IPv6) and L4 headers (TCP/UDP).
+ *
+ * The MAC/L2 header is not accounted for.
+ */
+static unsigned int skb_gso_network_seglen(const struct sk_buff *skb)
+{
+       unsigned int hdr_len = skb_transport_header(skb) -
+                              skb_network_header(skb);
+
+       return hdr_len + skb_gso_transport_seglen(skb);
+}
+
+/**
+ * skb_gso_mac_seglen - Return length of individual segments of a gso packet
+ *
+ * @skb: GSO skb
+ *
+ * skb_gso_mac_seglen is used to determine the real size of the
+ * individual segments, including MAC/L2, Layer3 (IP, IPv6) and L4
+ * headers (TCP/UDP).
+ */
+static unsigned int skb_gso_mac_seglen(const struct sk_buff *skb)
+{
+       unsigned int hdr_len = skb_transport_header(skb) - skb_mac_header(skb);
+
+       return hdr_len + skb_gso_transport_seglen(skb);
+}
 
 /**
  * skb_gso_size_check - check the skb size, considering GSO_BY_FRAGS
@@ -4957,19 +4990,20 @@ static inline bool skb_gso_size_check(const struct sk_buff *skb,
 }
 
 /**
- * skb_gso_validate_mtu - Return in case such skb fits a given MTU
+ * skb_gso_validate_network_len - Will a split GSO skb fit into a given MTU?
  *
  * @skb: GSO skb
  * @mtu: MTU to validate against
  *
- * skb_gso_validate_mtu validates if a given skb will fit a wanted MTU
- * once split.
+ * skb_gso_validate_network_len validates if a given skb will fit a
+ * wanted MTU once split. It considers L3 headers, L4 headers, and the
+ * payload.
  */
-bool skb_gso_validate_mtu(const struct sk_buff *skb, unsigned int mtu)
+bool skb_gso_validate_network_len(const struct sk_buff *skb, unsigned int mtu)
 {
        return skb_gso_size_check(skb, skb_gso_network_seglen(skb), mtu);
 }
-EXPORT_SYMBOL_GPL(skb_gso_validate_mtu);
+EXPORT_SYMBOL_GPL(skb_gso_validate_network_len);
 
 /**
  * skb_gso_validate_mac_len - Will a split GSO skb fit in a given length?
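
With the seglen helpers made static, callers phrase the question as a simple fits-or-not check; the ip_forward, flowtable and xfrm hunks that follow all convert to this shape. A minimal sketch of such a caller:

static bool example_exceeds_mtu(const struct sk_buff *skb, unsigned int mtu)
{
        if (skb->len <= mtu)
                return false;

        /* a GSO skb is acceptable as long as each resulting segment fits */
        if (skb_is_gso(skb))
                return !skb_gso_validate_network_len(skb, mtu);

        return true;
}
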
index 2dd21c3..b54b948 100644 (file)
@@ -55,7 +55,7 @@ static bool ip_exceeds_mtu(const struct sk_buff *skb, unsigned int mtu)
        if (skb->ignore_df)
                return false;
 
-       if (skb_is_gso(skb) && skb_gso_validate_mtu(skb, mtu))
+       if (skb_is_gso(skb) && skb_gso_validate_network_len(skb, mtu))
                return false;
 
        return true;
index 95fd225..2fa2ef2 100644 (file)
@@ -973,9 +973,6 @@ static void __gre_tunnel_init(struct net_device *dev)
 
        t_hlen = tunnel->hlen + sizeof(struct iphdr);
 
-       dev->needed_headroom    = LL_MAX_HEADER + t_hlen + 4;
-       dev->mtu                = ETH_DATA_LEN - t_hlen - 4;
-
        dev->features           |= GRE_FEATURES;
        dev->hw_features        |= GRE_FEATURES;
 
@@ -1294,8 +1291,6 @@ static int erspan_tunnel_init(struct net_device *dev)
                       erspan_hdr_len(tunnel->erspan_ver);
        t_hlen = tunnel->hlen + sizeof(struct iphdr);
 
-       dev->needed_headroom = LL_MAX_HEADER + t_hlen + 4;
-       dev->mtu = ETH_DATA_LEN - t_hlen - 4;
        dev->features           |= GRE_FEATURES;
        dev->hw_features        |= GRE_FEATURES;
        dev->priv_flags         |= IFF_LIVE_ADDR_CHANGE;
index e8e675b..66340ab 100644 (file)
@@ -248,7 +248,7 @@ static int ip_finish_output_gso(struct net *net, struct sock *sk,
 
        /* common case: seglen is <= mtu
         */
-       if (skb_gso_validate_mtu(skb, mtu))
+       if (skb_gso_validate_network_len(skb, mtu))
                return ip_finish_output2(net, sk, skb);
 
        /* Slowpath -  GSO segment length exceeds the egress MTU.
index b2117d8..602597d 100644 (file)
@@ -694,16 +694,9 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
                }
        }
 
-       if (tunnel->fwmark) {
-               ip_tunnel_init_flow(&fl4, protocol, dst, tnl_params->saddr,
-                                   tunnel->parms.o_key, RT_TOS(tos),
-                                   tunnel->parms.link, tunnel->fwmark);
-       }
-       else {
-               ip_tunnel_init_flow(&fl4, protocol, dst, tnl_params->saddr,
-                                   tunnel->parms.o_key, RT_TOS(tos),
-                                   tunnel->parms.link, skb->mark);
-       }
+       ip_tunnel_init_flow(&fl4, protocol, dst, tnl_params->saddr,
+                           tunnel->parms.o_key, RT_TOS(tos), tunnel->parms.link,
+                           tunnel->fwmark);
 
        if (ip_tunnel_encap(skb, tunnel, &protocol, &fl4) < 0)
                goto tx_error;
index 08b3e48..0fc88fa 100644 (file)
@@ -232,7 +232,6 @@ clusterip_config_init(struct net *net, const struct ipt_clusterip_tgt_info *i,
        c->hash_mode = i->hash_mode;
        c->hash_initval = i->hash_initval;
        refcount_set(&c->refcount, 1);
-       refcount_set(&c->entries, 1);
 
        spin_lock_bh(&cn->lock);
        if (__clusterip_config_find(net, ip)) {
@@ -263,8 +262,10 @@ clusterip_config_init(struct net *net, const struct ipt_clusterip_tgt_info *i,
 
        c->notifier.notifier_call = clusterip_netdev_event;
        err = register_netdevice_notifier(&c->notifier);
-       if (!err)
+       if (!err) {
+               refcount_set(&c->entries, 1);
                return c;
+       }
 
 #ifdef CONFIG_PROC_FS
        proc_remove(c->pde);
@@ -273,7 +274,7 @@ err:
        spin_lock_bh(&cn->lock);
        list_del_rcu(&c->list);
        spin_unlock_bh(&cn->lock);
-       kfree(c);
+       clusterip_config_put(c);
 
        return ERR_PTR(err);
 }
@@ -496,12 +497,15 @@ static int clusterip_tg_check(const struct xt_tgchk_param *par)
                                return PTR_ERR(config);
                }
        }
-       cipinfo->config = config;
 
        ret = nf_ct_netns_get(par->net, par->family);
-       if (ret < 0)
+       if (ret < 0) {
                pr_info("cannot load conntrack support for proto=%u\n",
                        par->family);
+               clusterip_config_entry_put(par->net, config);
+               clusterip_config_put(config);
+               return ret;
+       }
 
        if (!par->net->xt.clusterip_deprecated_warning) {
                pr_info("ipt_CLUSTERIP is deprecated and it will removed soon, "
@@ -509,6 +513,7 @@ static int clusterip_tg_check(const struct xt_tgchk_param *par)
                par->net->xt.clusterip_deprecated_warning = true;
        }
 
+       cipinfo->config = config;
        return ret;
 }
 
index 25d2975..0cd46bf 100644 (file)
@@ -111,6 +111,7 @@ static int nf_flow_dnat_ip(const struct flow_offload *flow, struct sk_buff *skb,
        default:
                return -1;
        }
+       csum_replace4(&iph->check, addr, new_addr);
 
        return nf_flow_nat_ip_l4proto(skb, iph, thoff, addr, new_addr);
 }
@@ -185,7 +186,7 @@ static bool __nf_flow_exceeds_mtu(const struct sk_buff *skb, unsigned int mtu)
        if ((ip_hdr(skb)->frag_off & htons(IP_DF)) == 0)
                return false;
 
-       if (skb_is_gso(skb) && skb_gso_validate_mtu(skb, mtu))
+       if (skb_is_gso(skb) && skb_gso_validate_network_len(skb, mtu))
                return false;
 
        return true;
index 6a7b3cb..e74ee83 100644 (file)
@@ -128,10 +128,11 @@ static int ip_rt_redirect_silence __read_mostly   = ((HZ / 50) << (9 + 1));
 static int ip_rt_error_cost __read_mostly      = HZ;
 static int ip_rt_error_burst __read_mostly     = 5 * HZ;
 static int ip_rt_mtu_expires __read_mostly     = 10 * 60 * HZ;
-static int ip_rt_min_pmtu __read_mostly                = 512 + 20 + 20;
+static u32 ip_rt_min_pmtu __read_mostly                = 512 + 20 + 20;
 static int ip_rt_min_advmss __read_mostly      = 256;
 
 static int ip_rt_gc_timeout __read_mostly      = RT_GC_TIMEOUT;
+
 /*
  *     Interface to generic destination cache.
  */
@@ -931,14 +932,23 @@ out_put_peer:
 
 static int ip_error(struct sk_buff *skb)
 {
-       struct in_device *in_dev = __in_dev_get_rcu(skb->dev);
        struct rtable *rt = skb_rtable(skb);
+       struct net_device *dev = skb->dev;
+       struct in_device *in_dev;
        struct inet_peer *peer;
        unsigned long now;
        struct net *net;
        bool send;
        int code;
 
+       if (netif_is_l3_master(skb->dev)) {
+               dev = __dev_get_by_index(dev_net(skb->dev), IPCB(skb)->iif);
+               if (!dev)
+                       goto out;
+       }
+
+       in_dev = __in_dev_get_rcu(dev);
+
        /* IP on this device is disabled. */
        if (!in_dev)
                goto out;
@@ -2810,6 +2820,7 @@ void ip_rt_multicast_event(struct in_device *in_dev)
 static int ip_rt_gc_interval __read_mostly  = 60 * HZ;
 static int ip_rt_gc_min_interval __read_mostly = HZ / 2;
 static int ip_rt_gc_elasticity __read_mostly   = 8;
+static int ip_min_valid_pmtu __read_mostly     = IPV4_MIN_MTU;
 
 static int ipv4_sysctl_rtcache_flush(struct ctl_table *__ctl, int write,
                                        void __user *buffer,
@@ -2925,7 +2936,8 @@ static struct ctl_table ipv4_route_table[] = {
                .data           = &ip_rt_min_pmtu,
                .maxlen         = sizeof(int),
                .mode           = 0644,
-               .proc_handler   = proc_dointvec,
+               .proc_handler   = proc_dointvec_minmax,
+               .extra1         = &ip_min_valid_pmtu,
        },
        {
                .procname       = "min_adv_mss",
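
ip_rt_min_pmtu is now clamped at write time: proc_dointvec_minmax rejects values outside the [.extra1, .extra2] window, so userspace can no longer push the minimum PMTU below IPV4_MIN_MTU. The same pattern in isolation, with purely illustrative names (the route.c entry above only sets a lower bound):

static int example_floor = IPV4_MIN_MTU;        /* 68 */
static int example_ceiling = 65535;
static int example_value = IPV4_MIN_MTU;

static struct ctl_table example_table[] = {
        {
                .procname       = "example_min_pmtu",
                .data           = &example_value,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = proc_dointvec_minmax,
                .extra1         = &example_floor,
                .extra2         = &example_ceiling,
        },
        { }
};
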
index 7c84357..faddf4f 100644 (file)
@@ -6,7 +6,7 @@
  * The algorithm is described in:
  * "TCP-Illinois: A Loss and Delay-Based Congestion Control Algorithm
  *  for High-Speed Networks"
- * http://www.ifp.illinois.edu/~srikant/Papers/liubassri06perf.pdf
+ * http://tamerbasar.csl.illinois.edu/LiuBasarSrikantPerfEvalArtJun2008.pdf
  *
  * Implemented from description in paper and ns-2 simulation.
  * Copyright (C) 2007 Stephen Hemminger <shemminger@linux-foundation.org>
index 06b9c47..451ef30 100644 (file)
@@ -1968,11 +1968,6 @@ void tcp_enter_loss(struct sock *sk)
        /* F-RTO RFC5682 sec 3.1 step 1: retransmit SND.UNA if no previous
         * loss recovery is underway except recurring timeout(s) on
         * the same SND.UNA (sec 3.2). Disable F-RTO on path MTU probing
-        *
-        * In theory F-RTO can be used repeatedly during loss recovery.
-        * In practice this interacts badly with broken middle-boxes that
-        * falsely raise the receive window, which results in repeated
-        * timeouts and stop-and-go behavior.
         */
        tp->frto = net->ipv4.sysctl_tcp_frto &&
                   (new_recovery || icsk->icsk_retransmits) &&
@@ -2628,18 +2623,14 @@ static void tcp_process_loss(struct sock *sk, int flag, bool is_dupack,
            tcp_try_undo_loss(sk, false))
                return;
 
-       /* The ACK (s)acks some never-retransmitted data meaning not all
-        * the data packets before the timeout were lost. Therefore we
-        * undo the congestion window and state. This is essentially
-        * the operation in F-RTO (RFC5682 section 3.1 step 3.b). Since
-        * a retransmitted skb is permantly marked, we can apply such an
-        * operation even if F-RTO was not used.
-        */
-       if ((flag & FLAG_ORIG_SACK_ACKED) &&
-           tcp_try_undo_loss(sk, tp->undo_marker))
-               return;
-
        if (tp->frto) { /* F-RTO RFC5682 sec 3.1 (sack enhanced version). */
+               /* Step 3.b. A timeout is spurious if not all data are
+                * lost, i.e., never-retransmitted data are (s)acked.
+                */
+               if ((flag & FLAG_ORIG_SACK_ACKED) &&
+                   tcp_try_undo_loss(sk, true))
+                       return;
+
                if (after(tp->snd_nxt, tp->high_seq)) {
                        if (flag & FLAG_DATA_SACKED || is_dupack)
                                tp->frto = 0; /* Step 3.a. loss was real */
@@ -3998,6 +3989,7 @@ void tcp_reset(struct sock *sk)
        /* This barrier is coupled with smp_rmb() in tcp_poll() */
        smp_wmb();
 
+       tcp_write_queue_purge(sk);
        tcp_done(sk);
 
        if (!sock_flag(sk, SOCK_DEAD))
index 94b8702..be980c1 100644 (file)
@@ -30,7 +30,8 @@ static int xfrm4_tunnel_check_size(struct sk_buff *skb)
 
        mtu = dst_mtu(skb_dst(skb));
        if ((!skb_is_gso(skb) && skb->len > mtu) ||
-           (skb_is_gso(skb) && skb_gso_network_seglen(skb) > ip_skb_dst_mtu(skb->sk, skb))) {
+           (skb_is_gso(skb) &&
+            !skb_gso_validate_network_len(skb, ip_skb_dst_mtu(skb->sk, skb)))) {
                skb->protocol = htons(ETH_P_IP);
 
                if (skb->sk)
index a6eb0e6..2c7f09c 100644 (file)
@@ -412,7 +412,7 @@ static bool ip6_pkt_too_big(const struct sk_buff *skb, unsigned int mtu)
        if (skb->ignore_df)
                return false;
 
-       if (skb_is_gso(skb) && skb_gso_validate_mtu(skb, mtu))
+       if (skb_is_gso(skb) && skb_gso_validate_network_len(skb, mtu))
                return false;
 
        return true;
index 1124f31..56c4967 100644 (file)
@@ -1982,14 +1982,14 @@ static int ip6_tnl_newlink(struct net *src_net, struct net_device *dev,
 {
        struct net *net = dev_net(dev);
        struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
-       struct ip6_tnl *nt, *t;
        struct ip_tunnel_encap ipencap;
+       struct ip6_tnl *nt, *t;
+       int err;
 
        nt = netdev_priv(dev);
 
        if (ip6_tnl_netlink_encap_parms(data, &ipencap)) {
-               int err = ip6_tnl_encap_setup(nt, &ipencap);
-
+               err = ip6_tnl_encap_setup(nt, &ipencap);
                if (err < 0)
                        return err;
        }
@@ -2005,7 +2005,11 @@ static int ip6_tnl_newlink(struct net *src_net, struct net_device *dev,
                        return -EEXIST;
        }
 
-       return ip6_tnl_create2(dev);
+       err = ip6_tnl_create2(dev);
+       if (!err && tb[IFLA_MTU])
+               ip6_tnl_change_mtu(dev, nla_get_u32(tb[IFLA_MTU]));
+
+       return err;
 }
 
 static int ip6_tnl_changelink(struct net_device *dev, struct nlattr *tb[],
index d95ceca..531d695 100644 (file)
 int ip6_route_me_harder(struct net *net, struct sk_buff *skb)
 {
        const struct ipv6hdr *iph = ipv6_hdr(skb);
+       struct sock *sk = sk_to_full_sk(skb->sk);
        unsigned int hh_len;
        struct dst_entry *dst;
        struct flowi6 fl6 = {
-               .flowi6_oif = skb->sk ? skb->sk->sk_bound_dev_if : 0,
+               .flowi6_oif = sk ? sk->sk_bound_dev_if : 0,
                .flowi6_mark = skb->mark,
-               .flowi6_uid = sock_net_uid(net, skb->sk),
+               .flowi6_uid = sock_net_uid(net, sk),
                .daddr = iph->daddr,
                .saddr = iph->saddr,
        };
        int err;
 
-       dst = ip6_route_output(net, skb->sk, &fl6);
+       dst = ip6_route_output(net, sk, &fl6);
        err = dst->error;
        if (err) {
                IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTNOROUTES);
@@ -50,7 +51,7 @@ int ip6_route_me_harder(struct net *net, struct sk_buff *skb)
        if (!(IP6CB(skb)->flags & IP6SKB_XFRM_TRANSFORMED) &&
            xfrm_decode_session(skb, flowi6_to_flowi(&fl6), AF_INET6) == 0) {
                skb_dst_set(skb, NULL);
-               dst = xfrm_lookup(net, dst, flowi6_to_flowi(&fl6), skb->sk, 0);
+               dst = xfrm_lookup(net, dst, flowi6_to_flowi(&fl6), sk, 0);
                if (IS_ERR(dst))
                        return PTR_ERR(dst);
                skb_dst_set(skb, dst);
index 910a273..d12f511 100644 (file)
@@ -48,10 +48,6 @@ static bool rpfilter_lookup_reverse6(struct net *net, const struct sk_buff *skb,
        }
 
        fl6.flowi6_mark = flags & XT_RPFILTER_VALID_MARK ? skb->mark : 0;
-       if ((flags & XT_RPFILTER_LOOSE) == 0) {
-               fl6.flowi6_oif = dev->ifindex;
-               lookup_flags |= RT6_LOOKUP_F_IFACE;
-       }
 
        rt = (void *)ip6_route_lookup(net, &fl6, skb, lookup_flags);
        if (rt->dst.error)
index d346705..207cb35 100644 (file)
@@ -178,7 +178,7 @@ static bool __nf_flow_exceeds_mtu(const struct sk_buff *skb, unsigned int mtu)
        if (skb->len <= mtu)
                return false;
 
-       if (skb_is_gso(skb) && skb_gso_validate_mtu(skb, mtu))
+       if (skb_is_gso(skb) && skb_gso_validate_network_len(skb, mtu))
                return false;
 
        return true;
index bed57ee..6b7f075 100644 (file)
@@ -99,6 +99,10 @@ static bool nf_nat_ipv6_manip_pkt(struct sk_buff *skb,
            !l4proto->manip_pkt(skb, &nf_nat_l3proto_ipv6, iphdroff, hdroff,
                                target, maniptype))
                return false;
+
+       /* must reload, offset might have changed */
+       ipv6h = (void *)skb->data + iphdroff;
+
 manip_addr:
        if (maniptype == NF_NAT_MANIP_SRC)
                ipv6h->saddr = target->src.u3.in6;
index 3230b3d..36be3cf 100644 (file)
@@ -180,7 +180,6 @@ void nft_fib6_eval(const struct nft_expr *expr, struct nft_regs *regs,
        }
 
        *dest = 0;
- again:
        rt = (void *)ip6_route_lookup(nft_net(pkt), &fl6, pkt->skb,
                                      lookup_flags);
        if (rt->dst.error)
@@ -190,15 +189,8 @@ void nft_fib6_eval(const struct nft_expr *expr, struct nft_regs *regs,
        if (rt->rt6i_flags & (RTF_REJECT | RTF_ANYCAST | RTF_LOCAL))
                goto put_rt_err;
 
-       if (oif && oif != rt->rt6i_idev->dev) {
-               /* multipath route? Try again with F_IFACE */
-               if ((lookup_flags & RT6_LOOKUP_F_IFACE) == 0) {
-                       lookup_flags |= RT6_LOOKUP_F_IFACE;
-                       fl6.flowi6_oif = oif->ifindex;
-                       ip6_rt_put(rt);
-                       goto again;
-               }
-       }
+       if (oif && oif != rt->rt6i_idev->dev)
+               goto put_rt_err;
 
        switch (priv->result) {
        case NFT_FIB_RESULT_OIF:
index 182db07..a9c4ac6 100644 (file)
@@ -1578,6 +1578,13 @@ static int ipip6_newlink(struct net *src_net, struct net_device *dev,
        if (err < 0)
                return err;
 
+       if (tb[IFLA_MTU]) {
+               u32 mtu = nla_get_u32(tb[IFLA_MTU]);
+
+               if (mtu >= IPV6_MIN_MTU && mtu <= 0xFFF8 - dev->hard_header_len)
+                       dev->mtu = mtu;
+       }
+
 #ifdef CONFIG_IPV6_SIT_6RD
        if (ipip6_netlink_6rd_parms(data, &ip6rd))
                err = ipip6_tunnel_update_6rd(nt, &ip6rd);
index 8ae87d4..5959ce9 100644 (file)
@@ -82,7 +82,7 @@ static int xfrm6_tunnel_check_size(struct sk_buff *skb)
 
        if ((!skb_is_gso(skb) && skb->len > mtu) ||
            (skb_is_gso(skb) &&
-            skb_gso_network_seglen(skb) > ip6_skb_dst_mtu(skb))) {
+            !skb_gso_validate_network_len(skb, ip6_skb_dst_mtu(skb)))) {
                skb->dev = dst->dev;
                skb->protocol = htons(ETH_P_IPV6);
 
index 194a748..83421c6 100644 (file)
@@ -136,51 +136,6 @@ l2tp_session_id_hash_2(struct l2tp_net *pn, u32 session_id)
 
 }
 
-/* Lookup the tunnel socket, possibly involving the fs code if the socket is
- * owned by userspace.  A struct sock returned from this function must be
- * released using l2tp_tunnel_sock_put once you're done with it.
- */
-static struct sock *l2tp_tunnel_sock_lookup(struct l2tp_tunnel *tunnel)
-{
-       int err = 0;
-       struct socket *sock = NULL;
-       struct sock *sk = NULL;
-
-       if (!tunnel)
-               goto out;
-
-       if (tunnel->fd >= 0) {
-               /* Socket is owned by userspace, who might be in the process
-                * of closing it.  Look the socket up using the fd to ensure
-                * consistency.
-                */
-               sock = sockfd_lookup(tunnel->fd, &err);
-               if (sock)
-                       sk = sock->sk;
-       } else {
-               /* Socket is owned by kernelspace */
-               sk = tunnel->sock;
-               sock_hold(sk);
-       }
-
-out:
-       return sk;
-}
-
-/* Drop a reference to a tunnel socket obtained via. l2tp_tunnel_sock_put */
-static void l2tp_tunnel_sock_put(struct sock *sk)
-{
-       struct l2tp_tunnel *tunnel = l2tp_sock_to_tunnel(sk);
-       if (tunnel) {
-               if (tunnel->fd >= 0) {
-                       /* Socket is owned by userspace */
-                       sockfd_put(sk->sk_socket);
-               }
-               sock_put(sk);
-       }
-       sock_put(sk);
-}
-
 /* Session hash list.
  * The session_id SHOULD be random according to RFC2661, but several
  * L2TP implementations (Cisco and Microsoft) use incrementing
@@ -193,6 +148,13 @@ l2tp_session_id_hash(struct l2tp_tunnel *tunnel, u32 session_id)
        return &tunnel->session_hlist[hash_32(session_id, L2TP_HASH_BITS)];
 }
 
+void l2tp_tunnel_free(struct l2tp_tunnel *tunnel)
+{
+       sock_put(tunnel->sock);
+       /* the tunnel is freed in the socket destructor */
+}
+EXPORT_SYMBOL(l2tp_tunnel_free);
+
 /* Lookup a tunnel. A new reference is held on the returned tunnel. */
 struct l2tp_tunnel *l2tp_tunnel_get(const struct net *net, u32 tunnel_id)
 {
@@ -345,13 +307,11 @@ int l2tp_session_register(struct l2tp_session *session,
                        }
 
                l2tp_tunnel_inc_refcount(tunnel);
-               sock_hold(tunnel->sock);
                hlist_add_head_rcu(&session->global_hlist, g_head);
 
                spin_unlock_bh(&pn->l2tp_session_hlist_lock);
        } else {
                l2tp_tunnel_inc_refcount(tunnel);
-               sock_hold(tunnel->sock);
        }
 
        hlist_add_head(&session->hlist, head);
@@ -969,7 +929,7 @@ int l2tp_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
 {
        struct l2tp_tunnel *tunnel;
 
-       tunnel = l2tp_sock_to_tunnel(sk);
+       tunnel = l2tp_tunnel(sk);
        if (tunnel == NULL)
                goto pass_up;
 
@@ -977,13 +937,10 @@ int l2tp_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
                 tunnel->name, skb->len);
 
        if (l2tp_udp_recv_core(tunnel, skb, tunnel->recv_payload_hook))
-               goto pass_up_put;
+               goto pass_up;
 
-       sock_put(sk);
        return 0;
 
-pass_up_put:
-       sock_put(sk);
 pass_up:
        return 1;
 }
@@ -1207,14 +1164,12 @@ EXPORT_SYMBOL_GPL(l2tp_xmit_skb);
 static void l2tp_tunnel_destruct(struct sock *sk)
 {
        struct l2tp_tunnel *tunnel = l2tp_tunnel(sk);
-       struct l2tp_net *pn;
 
        if (tunnel == NULL)
                goto end;
 
        l2tp_info(tunnel, L2TP_MSG_CONTROL, "%s: closing...\n", tunnel->name);
 
-
        /* Disable udp encapsulation */
        switch (tunnel->encap) {
        case L2TP_ENCAPTYPE_UDP:
@@ -1231,18 +1186,11 @@ static void l2tp_tunnel_destruct(struct sock *sk)
        sk->sk_destruct = tunnel->old_sk_destruct;
        sk->sk_user_data = NULL;
 
-       /* Remove the tunnel struct from the tunnel list */
-       pn = l2tp_pernet(tunnel->l2tp_net);
-       spin_lock_bh(&pn->l2tp_tunnel_list_lock);
-       list_del_rcu(&tunnel->list);
-       spin_unlock_bh(&pn->l2tp_tunnel_list_lock);
-
-       tunnel->sock = NULL;
-       l2tp_tunnel_dec_refcount(tunnel);
-
        /* Call the original destructor */
        if (sk->sk_destruct)
                (*sk->sk_destruct)(sk);
+
+       kfree_rcu(tunnel, rcu);
 end:
        return;
 }
@@ -1303,49 +1251,43 @@ EXPORT_SYMBOL_GPL(l2tp_tunnel_closeall);
 /* Tunnel socket destroy hook for UDP encapsulation */
 static void l2tp_udp_encap_destroy(struct sock *sk)
 {
-       struct l2tp_tunnel *tunnel = l2tp_sock_to_tunnel(sk);
-       if (tunnel) {
-               l2tp_tunnel_closeall(tunnel);
-               sock_put(sk);
-       }
+       struct l2tp_tunnel *tunnel = l2tp_tunnel(sk);
+
+       if (tunnel)
+               l2tp_tunnel_delete(tunnel);
 }
 
 /* Workqueue tunnel deletion function */
 static void l2tp_tunnel_del_work(struct work_struct *work)
 {
-       struct l2tp_tunnel *tunnel = NULL;
-       struct socket *sock = NULL;
-       struct sock *sk = NULL;
-
-       tunnel = container_of(work, struct l2tp_tunnel, del_work);
+       struct l2tp_tunnel *tunnel = container_of(work, struct l2tp_tunnel,
+                                                 del_work);
+       struct sock *sk = tunnel->sock;
+       struct socket *sock = sk->sk_socket;
+       struct l2tp_net *pn;
 
        l2tp_tunnel_closeall(tunnel);
 
-       sk = l2tp_tunnel_sock_lookup(tunnel);
-       if (!sk)
-               goto out;
-
-       sock = sk->sk_socket;
-
-       /* If the tunnel socket was created by userspace, then go through the
-        * inet layer to shut the socket down, and let userspace close it.
-        * Otherwise, if we created the socket directly within the kernel, use
+       /* If the tunnel socket was created within the kernel, use
         * the sk API to release it here.
-        * In either case the tunnel resources are freed in the socket
-        * destructor when the tunnel socket goes away.
         */
-       if (tunnel->fd >= 0) {
-               if (sock)
-                       inet_shutdown(sock, 2);
-       } else {
+       if (tunnel->fd < 0) {
                if (sock) {
                        kernel_sock_shutdown(sock, SHUT_RDWR);
                        sock_release(sock);
                }
        }
 
-       l2tp_tunnel_sock_put(sk);
-out:
+       /* Remove the tunnel struct from the tunnel list */
+       pn = l2tp_pernet(tunnel->l2tp_net);
+       spin_lock_bh(&pn->l2tp_tunnel_list_lock);
+       list_del_rcu(&tunnel->list);
+       spin_unlock_bh(&pn->l2tp_tunnel_list_lock);
+
+       /* drop initial ref */
+       l2tp_tunnel_dec_refcount(tunnel);
+
+       /* drop workqueue ref */
        l2tp_tunnel_dec_refcount(tunnel);
 }
 
@@ -1598,13 +1540,22 @@ int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32
                sk->sk_user_data = tunnel;
        }
 
+       /* Bump the reference count. The tunnel context is deleted
+        * only when this drops to zero. A reference is also held on
+        * the tunnel socket to ensure that it is not released while
+        * the tunnel is extant. Must be done before sk_destruct is
+        * set.
+        */
+       refcount_set(&tunnel->ref_count, 1);
+       sock_hold(sk);
+       tunnel->sock = sk;
+       tunnel->fd = fd;
+
        /* Hook on the tunnel socket destructor so that we can cleanup
         * if the tunnel socket goes away.
         */
        tunnel->old_sk_destruct = sk->sk_destruct;
        sk->sk_destruct = &l2tp_tunnel_destruct;
-       tunnel->sock = sk;
-       tunnel->fd = fd;
        lockdep_set_class_and_name(&sk->sk_lock.slock, &l2tp_socket_class, "l2tp_sock");
 
        sk->sk_allocation = GFP_ATOMIC;
@@ -1614,11 +1565,6 @@ int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32
 
        /* Add tunnel to our list */
        INIT_LIST_HEAD(&tunnel->list);
-
-       /* Bump the reference count. The tunnel context is deleted
-        * only when this drops to zero. Must be done before list insertion
-        */
-       refcount_set(&tunnel->ref_count, 1);
        spin_lock_bh(&pn->l2tp_tunnel_list_lock);
        list_add_rcu(&tunnel->list, &pn->l2tp_tunnel_list);
        spin_unlock_bh(&pn->l2tp_tunnel_list_lock);
@@ -1659,8 +1605,6 @@ void l2tp_session_free(struct l2tp_session *session)
 
        if (tunnel) {
                BUG_ON(tunnel->magic != L2TP_TUNNEL_MAGIC);
-               sock_put(tunnel->sock);
-               session->tunnel = NULL;
                l2tp_tunnel_dec_refcount(tunnel);
        }
 
index 9bbee90..a1aa955 100644 (file)
@@ -214,27 +214,8 @@ static inline void *l2tp_session_priv(struct l2tp_session *session)
        return &session->priv[0];
 }
 
-static inline struct l2tp_tunnel *l2tp_sock_to_tunnel(struct sock *sk)
-{
-       struct l2tp_tunnel *tunnel;
-
-       if (sk == NULL)
-               return NULL;
-
-       sock_hold(sk);
-       tunnel = (struct l2tp_tunnel *)(sk->sk_user_data);
-       if (tunnel == NULL) {
-               sock_put(sk);
-               goto out;
-       }
-
-       BUG_ON(tunnel->magic != L2TP_TUNNEL_MAGIC);
-
-out:
-       return tunnel;
-}
-
 struct l2tp_tunnel *l2tp_tunnel_get(const struct net *net, u32 tunnel_id);
+void l2tp_tunnel_free(struct l2tp_tunnel *tunnel);
 
 struct l2tp_session *l2tp_session_get(const struct net *net,
                                      struct l2tp_tunnel *tunnel,
@@ -283,7 +264,7 @@ static inline void l2tp_tunnel_inc_refcount(struct l2tp_tunnel *tunnel)
 static inline void l2tp_tunnel_dec_refcount(struct l2tp_tunnel *tunnel)
 {
        if (refcount_dec_and_test(&tunnel->ref_count))
-               kfree_rcu(tunnel, rcu);
+               l2tp_tunnel_free(tunnel);
 }
 
 /* Session reference counts. Incremented when code obtains a reference
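
The l2tp changes above collapse the old per-call socket lookups into a single ownership rule: the tunnel takes one reference on its socket when it is created, l2tp_tunnel_dec_refcount() now funnels into l2tp_tunnel_free(), and that function's only job is to drop the socket reference so the socket destructor can release the tunnel via kfree_rcu(). The standalone C program below is a rough sketch of that ownership chain using C11 atomics; the xsock/xtunnel types and helpers are invented for the example and are not the kernel API.

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

/* Illustrative stand-ins for the refcounted socket and tunnel objects;
 * these are not the kernel types. */
struct xsock {
        atomic_int ref;
};

struct xtunnel {
        atomic_int ref;
        struct xsock *sock;     /* the tunnel owns one reference on this */
};

static void xsock_put(struct xsock *sk)
{
        if (atomic_fetch_sub(&sk->ref, 1) == 1) {
                printf("socket freed\n");
                free(sk);
        }
}

/* Mirrors l2tp_tunnel_free(): the tunnel's last act is to drop the socket
 * reference it has held since creation. (The real code then frees the
 * tunnel struct from the socket destructor; here we free it directly.) */
static void xtunnel_free(struct xtunnel *t)
{
        xsock_put(t->sock);
        printf("tunnel freed\n");
        free(t);
}

static void xtunnel_put(struct xtunnel *t)
{
        if (atomic_fetch_sub(&t->ref, 1) == 1)
                xtunnel_free(t);
}

int main(void)
{
        struct xsock *sk = calloc(1, sizeof(*sk));
        struct xtunnel *t = calloc(1, sizeof(*t));

        if (!sk || !t)
                return 1;

        atomic_store(&sk->ref, 1);      /* creator's reference */
        atomic_store(&t->ref, 1);       /* initial tunnel reference */

        atomic_fetch_add(&sk->ref, 1);  /* tunnel's sock_hold() at create time */
        t->sock = sk;

        atomic_fetch_add(&t->ref, 1);   /* e.g. the del_work reference */
        xtunnel_put(t);                 /* drop workqueue ref */
        xtunnel_put(t);                 /* drop initial ref: tunnel freed, socket ref dropped */
        xsock_put(sk);                  /* creator's ref: socket freed last */
        return 0;
}

Because the socket can never go away while any tunnel reference remains, the later hunks can drop the per-call sock_hold()/sock_put() pairs without risk of touching a freed socket.
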
index 4614585..a9c05b2 100644 (file)
@@ -234,17 +234,13 @@ static void l2tp_ip_close(struct sock *sk, long timeout)
 static void l2tp_ip_destroy_sock(struct sock *sk)
 {
        struct sk_buff *skb;
-       struct l2tp_tunnel *tunnel = l2tp_sock_to_tunnel(sk);
+       struct l2tp_tunnel *tunnel = sk->sk_user_data;
 
        while ((skb = __skb_dequeue_tail(&sk->sk_write_queue)) != NULL)
                kfree_skb(skb);
 
-       if (tunnel) {
-               l2tp_tunnel_closeall(tunnel);
-               sock_put(sk);
-       }
-
-       sk_refcnt_debug_dec(sk);
+       if (tunnel)
+               l2tp_tunnel_delete(tunnel);
 }
 
 static int l2tp_ip_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
index efea58b..9573691 100644 (file)
@@ -248,16 +248,14 @@ static void l2tp_ip6_close(struct sock *sk, long timeout)
 
 static void l2tp_ip6_destroy_sock(struct sock *sk)
 {
-       struct l2tp_tunnel *tunnel = l2tp_sock_to_tunnel(sk);
+       struct l2tp_tunnel *tunnel = sk->sk_user_data;
 
        lock_sock(sk);
        ip6_flush_pending_frames(sk);
        release_sock(sk);
 
-       if (tunnel) {
-               l2tp_tunnel_closeall(tunnel);
-               sock_put(sk);
-       }
+       if (tunnel)
+               l2tp_tunnel_delete(tunnel);
 
        inet6_destroy_sock(sk);
 }
index 0c4f49a..977bca6 100644 (file)
@@ -416,20 +416,28 @@ abort:
  * Session (and tunnel control) socket create/destroy.
  *****************************************************************************/
 
+static void pppol2tp_put_sk(struct rcu_head *head)
+{
+       struct pppol2tp_session *ps;
+
+       ps = container_of(head, typeof(*ps), rcu);
+       sock_put(ps->__sk);
+}
+
 /* Called by l2tp_core when a session socket is being closed.
  */
 static void pppol2tp_session_close(struct l2tp_session *session)
 {
-       struct sock *sk;
-
-       BUG_ON(session->magic != L2TP_SESSION_MAGIC);
+       struct pppol2tp_session *ps;
 
-       sk = pppol2tp_session_get_sock(session);
-       if (sk) {
-               if (sk->sk_socket)
-                       inet_shutdown(sk->sk_socket, SEND_SHUTDOWN);
-               sock_put(sk);
-       }
+       ps = l2tp_session_priv(session);
+       mutex_lock(&ps->sk_lock);
+       ps->__sk = rcu_dereference_protected(ps->sk,
+                                            lockdep_is_held(&ps->sk_lock));
+       RCU_INIT_POINTER(ps->sk, NULL);
+       if (ps->__sk)
+               call_rcu(&ps->rcu, pppol2tp_put_sk);
+       mutex_unlock(&ps->sk_lock);
 }
 
 /* Really kill the session socket. (Called from sock_put() if
@@ -449,14 +457,6 @@ static void pppol2tp_session_destruct(struct sock *sk)
        }
 }
 
-static void pppol2tp_put_sk(struct rcu_head *head)
-{
-       struct pppol2tp_session *ps;
-
-       ps = container_of(head, typeof(*ps), rcu);
-       sock_put(ps->__sk);
-}
-
 /* Called when the PPPoX socket (session) is closed.
  */
 static int pppol2tp_release(struct socket *sock)
@@ -480,26 +480,17 @@ static int pppol2tp_release(struct socket *sock)
        sock_orphan(sk);
        sock->sk = NULL;
 
+       /* If the socket is associated with a session,
+        * l2tp_session_delete will call pppol2tp_session_close which
+        * will drop the session's ref on the socket.
+        */
        session = pppol2tp_sock_to_session(sk);
-
-       if (session != NULL) {
-               struct pppol2tp_session *ps;
-
+       if (session) {
                l2tp_session_delete(session);
-
-               ps = l2tp_session_priv(session);
-               mutex_lock(&ps->sk_lock);
-               ps->__sk = rcu_dereference_protected(ps->sk,
-                                                    lockdep_is_held(&ps->sk_lock));
-               RCU_INIT_POINTER(ps->sk, NULL);
-               mutex_unlock(&ps->sk_lock);
-               call_rcu(&ps->rcu, pppol2tp_put_sk);
-
-               /* Rely on the sock_put() call at the end of the function for
-                * dropping the reference held by pppol2tp_sock_to_session().
-                * The last reference will be dropped by pppol2tp_put_sk().
-                */
+               /* drop the ref obtained by pppol2tp_sock_to_session */
+               sock_put(sk);
        }
+
        release_sock(sk);
 
        /* This will delete the session context via
@@ -796,6 +787,7 @@ static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr,
 
 out_no_ppp:
        /* This is how we get the session context from the socket. */
+       sock_hold(sk);
        sk->sk_user_data = session;
        rcu_assign_pointer(ps->sk, sk);
        mutex_unlock(&ps->sk_lock);
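
On the pppol2tp side, pppol2tp_session_close() now detaches ps->sk under ps->sk_lock and hands the old pointer to call_rcu(), so the matching sock_put() only runs once any RCU reader that might still see the pointer has finished; pppol2tp_connect() takes the corresponding sock_hold() before publishing the socket. The single-threaded sketch below shows only the detach-then-defer shape of that pattern: the deferral list stands in for call_rcu(), no grace-period or locking semantics are modeled, and every name is illustrative.

#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

/* Stand-in for struct rcu_head + call_rcu(): callbacks are queued here and
 * run later from flush_deferred(), which plays the role of "after the grace
 * period". No real grace-period tracking or locking is modeled. */
struct deferred_head {
        struct deferred_head *next;
        void (*func)(struct deferred_head *);
};

static struct deferred_head *deferred_list;

static void defer(struct deferred_head *head,
                  void (*func)(struct deferred_head *))
{
        head->func = func;
        head->next = deferred_list;
        deferred_list = head;
}

static void flush_deferred(void)
{
        while (deferred_list) {
                struct deferred_head *head = deferred_list;

                deferred_list = head->next;
                head->func(head);
        }
}

/* Shaped like pppol2tp_session: the published pointer is cleared first and
 * the reference it carried is only dropped from the deferred callback. */
struct session_priv {
        int *sk;                        /* stands in for the PPPoX socket */
        int *detached_sk;               /* __sk in the real structure */
        struct deferred_head rcu;
};

static void put_sk_cb(struct deferred_head *head)
{
        struct session_priv *ps = container_of(head, struct session_priv, rcu);

        printf("dropping socket reference %p\n", (void *)ps->detached_sk);
        free(ps->detached_sk);          /* the sketch's sock_put() */
}

static void session_close(struct session_priv *ps)
{
        ps->detached_sk = ps->sk;       /* remember what to release */
        ps->sk = NULL;                  /* unpublish the pointer */
        if (ps->detached_sk)
                defer(&ps->rcu, put_sk_cb);
}

int main(void)
{
        struct session_priv ps = { .sk = malloc(sizeof(int)) };

        session_close(&ps);
        flush_deferred();               /* "after the grace period" */
        return 0;
}
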
index de7d107..d017432 100644 (file)
@@ -3965,7 +3965,7 @@ static bool ieee80211_invoke_fast_rx(struct ieee80211_rx_data *rx,
        if ((hdr->frame_control & cpu_to_le16(IEEE80211_FCTL_FROMDS |
                                              IEEE80211_FCTL_TODS)) !=
            fast_rx->expected_ds_bits)
-               goto drop;
+               return false;
 
        /* assign the key to drop unencrypted frames (later)
         * and strip the IV/MIC if necessary
index 7643178..933c67b 100644 (file)
@@ -3569,6 +3569,14 @@ void __ieee80211_subif_start_xmit(struct sk_buff *skb,
        if (!IS_ERR_OR_NULL(sta)) {
                struct ieee80211_fast_tx *fast_tx;
 
+               /* We need a bit of data queued to build aggregates properly, so
+                * instruct the TCP stack to allow more than a single ms of data
+                * to be queued in the stack. The value is a bit-shift of 1
+                * second, so 8 is ~4ms of queued data. Only affects local TCP
+                * sockets.
+                */
+               sk_pacing_shift_update(skb->sk, 8);
+
                fast_tx = rcu_dereference(sta->fast_tx);
 
                if (fast_tx &&
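
The new comment leans on a small piece of arithmetic: sk_pacing_shift expresses the queueing budget as one second shifted right by the given value, so a shift of 8 allows about 1 s / 256, roughly 3.9 ms of data. A throwaway C check of that claim (nothing here is mac80211 code):

#include <stdio.h>

int main(void)
{
        /* Queueing budget in nanoseconds for a given sk_pacing_shift:
         * one second shifted right by the value. */
        for (unsigned int shift = 6; shift <= 10; shift++) {
                unsigned long long ns = 1000000000ULL >> shift;

                printf("shift %2u -> %6.3f ms of queued data\n",
                       shift, ns / 1e6);
        }
        return 0;
}

Running it prints about 3.906 ms for shift 8, matching the "~4ms" figure in the comment.
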
index e545a3c..7a4de6d 100644 (file)
@@ -122,7 +122,7 @@ bool mpls_pkt_too_big(const struct sk_buff *skb, unsigned int mtu)
        if (skb->len <= mtu)
                return false;
 
-       if (skb_is_gso(skb) && skb_gso_validate_mtu(skb, mtu))
+       if (skb_is_gso(skb) && skb_gso_validate_network_len(skb, mtu))
                return false;
 
        return true;
index 3e17d32..58d5d05 100644 (file)
@@ -260,7 +260,7 @@ static int ip_vs_ftp_out(struct ip_vs_app *app, struct ip_vs_conn *cp,
                buf_len = strlen(buf);
 
                ct = nf_ct_get(skb, &ctinfo);
-               if (ct && (ct->status & IPS_NAT_MASK)) {
+               if (ct) {
                        bool mangled;
 
                        /* If mangling fails this function will return 0
index 8b9fe30..558593e 100644 (file)
@@ -5037,9 +5037,9 @@ static int nf_tables_newflowtable(struct net *net, struct sock *nlsk,
 {
        const struct nfgenmsg *nfmsg = nlmsg_data(nlh);
        const struct nf_flowtable_type *type;
+       struct nft_flowtable *flowtable, *ft;
        u8 genmask = nft_genmask_next(net);
        int family = nfmsg->nfgen_family;
-       struct nft_flowtable *flowtable;
        struct nft_table *table;
        struct nft_ctx ctx;
        int err, i, k;
@@ -5099,6 +5099,22 @@ static int nf_tables_newflowtable(struct net *net, struct sock *nlsk,
                goto err3;
 
        for (i = 0; i < flowtable->ops_len; i++) {
+               if (!flowtable->ops[i].dev)
+                       continue;
+
+               list_for_each_entry(ft, &table->flowtables, list) {
+                       for (k = 0; k < ft->ops_len; k++) {
+                               if (!ft->ops[k].dev)
+                                       continue;
+
+                               if (flowtable->ops[i].dev == ft->ops[k].dev &&
+                                   flowtable->ops[i].pf == ft->ops[k].pf) {
+                                       err = -EBUSY;
+                                       goto err4;
+                               }
+                       }
+               }
+
                err = nf_register_net_hook(net, &flowtable->ops[i]);
                if (err < 0)
                        goto err4;
@@ -5120,7 +5136,7 @@ err5:
        i = flowtable->ops_len;
 err4:
        for (k = i - 1; k >= 0; k--)
-               nf_unregister_net_hook(net, &flowtable->ops[i]);
+               nf_unregister_net_hook(net, &flowtable->ops[k]);
 
        kfree(flowtable->ops);
 err3:
@@ -5145,6 +5161,11 @@ static int nf_tables_delflowtable(struct net *net, struct sock *nlsk,
        struct nft_table *table;
        struct nft_ctx ctx;
 
+       if (!nla[NFTA_FLOWTABLE_TABLE] ||
+           (!nla[NFTA_FLOWTABLE_NAME] &&
+            !nla[NFTA_FLOWTABLE_HANDLE]))
+               return -EINVAL;
+
        table = nf_tables_table_lookup(net, nla[NFTA_FLOWTABLE_TABLE],
                                       family, genmask);
        if (IS_ERR(table))
index 50615d5..9cf089b 100644 (file)
@@ -114,5 +114,6 @@ static struct rpmsg_driver qcom_smd_qrtr_driver = {
 
 module_rpmsg_driver(qcom_smd_qrtr_driver);
 
+MODULE_ALIAS("rpmsg:IPCRTR");
 MODULE_DESCRIPTION("Qualcomm IPC-Router SMD interface driver");
 MODULE_LICENSE("GPL v2");
index c061d6e..2257118 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2006 Oracle.  All rights reserved.
+ * Copyright (c) 2006, 2018 Oracle.  All rights reserved.
  *
  * This software is available to you under a choice of one of two
  * licenses.  You may choose to be licensed under the terms of the GNU
@@ -142,12 +142,20 @@ int rds_tcp_accept_one(struct socket *sock)
        if (ret)
                goto out;
 
-       new_sock->type = sock->type;
-       new_sock->ops = sock->ops;
        ret = sock->ops->accept(sock, new_sock, O_NONBLOCK, true);
        if (ret < 0)
                goto out;
 
+       /* sock_create_lite() does not get a hold on the owner module so we
+        * need to do it here.  Note that sock_release() uses sock->ops to
+        * determine if it needs to decrement the reference count.  So set
+        * sock->ops after calling accept() in case that fails.  And there's
+        * no need to do try_module_get() as the listener should have a hold
+        * already.
+        */
+       new_sock->ops = sock->ops;
+       __module_get(new_sock->ops->owner);
+
        ret = rds_tcp_keepalive(new_sock);
        if (ret < 0)
                goto out;
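
The comment added to rds_tcp_accept_one() encodes an ordering rule: sock_release() decides whether to drop a module reference by looking at sock->ops, so ->ops must only be copied over (and the module reference taken with __module_get()) after the accept() call that can fail. The sketch below shows the same shape in plain C with invented types: the fallible step runs first, and the owner field that the release path keys off is only set once the matching reference is held.

#include <stdio.h>
#include <stdlib.h>

/* Invented stand-ins: 'owner' plays the module, 'handle' plays the socket. */
struct owner {
        int refs;
};

struct handle {
        struct owner *owner;    /* if set, handle_release() drops one ref */
};

static void owner_get(struct owner *o) { o->refs++; }
static void owner_put(struct owner *o) { o->refs--; }

/* Mirrors the rule from sock_release(): a reference is only dropped when
 * ->owner (sock->ops in the real code) has been filled in. */
static void handle_release(struct handle *h)
{
        if (!h)
                return;
        if (h->owner)
                owner_put(h->owner);
        free(h);
}

/* The step that may fail, like ->accept() in rds_tcp_accept_one(). */
static int fallible_step(int should_fail)
{
        return should_fail ? -1 : 0;
}

static struct handle *handle_setup(struct owner *o, int should_fail)
{
        struct handle *h = calloc(1, sizeof(*h));

        if (!h)
                return NULL;

        /* Do the fallible work first ... */
        if (fallible_step(should_fail) < 0) {
                /* ->owner is still unset, so this release does not drop a
                 * reference that was never taken. */
                handle_release(h);
                return NULL;
        }

        /* ... then take the reference and record the owner, so the
         * accounting matches what handle_release() will later undo. */
        owner_get(o);
        h->owner = o;
        return h;
}

int main(void)
{
        struct owner module = { .refs = 1 };

        handle_release(handle_setup(&module, 0));       /* success path */
        handle_setup(&module, 1);                       /* failure path */
        printf("module refs: %d (expect 1)\n", module.refs);
        return 0;
}
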
index 229172d..03225a8 100644 (file)
@@ -188,7 +188,8 @@ static int tbf_enqueue(struct sk_buff *skb, struct Qdisc *sch,
        int ret;
 
        if (qdisc_pkt_len(skb) > q->max_size) {
-               if (skb_is_gso(skb) && skb_gso_mac_seglen(skb) <= q->max_size)
+               if (skb_is_gso(skb) &&
+                   skb_gso_validate_mac_len(skb, q->max_size))
                        return tbf_segment(skb, sch, to_free);
                return qdisc_drop(skb, sch, to_free);
        }
index 26684e0..2c6f4e0 100644 (file)
@@ -1398,8 +1398,10 @@ static int smc_create(struct net *net, struct socket *sock, int protocol,
        smc->use_fallback = false; /* assume rdma capability first */
        rc = sock_create_kern(net, PF_INET, SOCK_STREAM,
                              IPPROTO_TCP, &smc->clcsock);
-       if (rc)
+       if (rc) {
                sk_common_release(sk);
+               goto out;
+       }
        smc->sk.sk_sndbuf = max(smc->clcsock->sk->sk_sndbuf, SMC_BUF_MIN_SIZE);
        smc->sk.sk_rcvbuf = max(smc->clcsock->sk->sk_rcvbuf, SMC_BUF_MIN_SIZE);
 
index 3cd086e..b42395d 100644 (file)
@@ -269,7 +269,7 @@ static void smc_cdc_rx_handler(struct ib_wc *wc, void *buf)
 
        if (wc->byte_len < offsetof(struct smc_cdc_msg, reserved))
                return; /* short message */
-       if (cdc->len != sizeof(*cdc))
+       if (cdc->len != SMC_WR_TX_SIZE)
                return; /* invalid message */
        smc_cdc_msg_recv(cdc, link, wc->wr_id);
 }
index 702ce5f..f76f60e 100644 (file)
@@ -177,6 +177,7 @@ static int smc_lgr_create(struct smc_sock *smc,
        lnk = &lgr->lnk[SMC_SINGLE_LINK];
        /* initialize link */
        lnk->state = SMC_LNK_ACTIVATING;
+       lnk->link_id = SMC_SINGLE_LINK;
        lnk->smcibdev = smcibdev;
        lnk->ibport = ibport;
        lnk->path_mtu = smcibdev->pattr[ibport - 1].active_mtu;
@@ -468,7 +469,7 @@ create:
                rc = smc_link_determine_gid(conn->lgr);
        }
        conn->local_tx_ctrl.common.type = SMC_CDC_MSG_TYPE;
-       conn->local_tx_ctrl.len = sizeof(struct smc_cdc_msg);
+       conn->local_tx_ctrl.len = SMC_WR_TX_SIZE;
 #ifndef KERNEL_HAS_ATOMIC64
        spin_lock_init(&conn->acurs_lock);
 #endif
index 54e8d6d..ea4b219 100644 (file)
@@ -206,7 +206,7 @@ int smc_llc_send_confirm_link(struct smc_link *link, u8 mac[],
        memcpy(confllc->sender_mac, mac, ETH_ALEN);
        memcpy(confllc->sender_gid, gid, SMC_GID_SIZE);
        hton24(confllc->sender_qp_num, link->roce_qp->qp_num);
-       /* confllc->link_num = SMC_SINGLE_LINK; already done by memset above */
+       confllc->link_num = link->link_id;
        memcpy(confllc->link_uid, lgr->id, SMC_LGR_ID_SIZE);
        confllc->max_links = SMC_LLC_ADD_LNK_MAX_LINKS; /* enforce peer resp. */
        /* send llc message */
index 03086cc..d7a7bef 100644 (file)
@@ -189,6 +189,7 @@ struct tipc_group *tipc_group_create(struct net *net, u32 portid,
        grp->loopback = mreq->flags & TIPC_GROUP_LOOPBACK;
        grp->events = mreq->flags & TIPC_GROUP_MEMBER_EVTS;
        grp->open = group_is_open;
+       *grp->open = false;
        filter |= global ? TIPC_SUB_CLUSTER_SCOPE : TIPC_SUB_NODE_SCOPE;
        if (tipc_topsrv_kern_subscr(net, portid, type, 0, ~0,
                                    filter, &grp->subid))
index f934771..8b04e60 100644 (file)
@@ -473,6 +473,7 @@ static int tipc_sk_create(struct net *net, struct socket *sock,
        sk->sk_write_space = tipc_write_space;
        sk->sk_destruct = tipc_sock_destruct;
        tsk->conn_timeout = CONN_TIMEOUT_DEFAULT;
+       tsk->group_is_open = true;
        atomic_set(&tsk->dupl_rcvcnt, 0);
 
        /* Start out with safe limits until we receive an advertised window */
index e9b4b53..d824d54 100644 (file)
@@ -45,17 +45,27 @@ MODULE_AUTHOR("Mellanox Technologies");
 MODULE_DESCRIPTION("Transport Layer Security Support");
 MODULE_LICENSE("Dual BSD/GPL");
 
+enum {
+       TLSV4,
+       TLSV6,
+       TLS_NUM_PROTS,
+};
+
 enum {
        TLS_BASE_TX,
        TLS_SW_TX,
        TLS_NUM_CONFIG,
 };
 
-static struct proto tls_prots[TLS_NUM_CONFIG];
+static struct proto *saved_tcpv6_prot;
+static DEFINE_MUTEX(tcpv6_prot_mutex);
+static struct proto tls_prots[TLS_NUM_PROTS][TLS_NUM_CONFIG];
 
 static inline void update_sk_prot(struct sock *sk, struct tls_context *ctx)
 {
-       sk->sk_prot = &tls_prots[ctx->tx_conf];
+       int ip_ver = sk->sk_family == AF_INET6 ? TLSV6 : TLSV4;
+
+       sk->sk_prot = &tls_prots[ip_ver][ctx->tx_conf];
 }
 
 int wait_on_pending_writer(struct sock *sk, long *timeo)
@@ -453,8 +463,21 @@ static int tls_setsockopt(struct sock *sk, int level, int optname,
        return do_tls_setsockopt(sk, optname, optval, optlen);
 }
 
+static void build_protos(struct proto *prot, struct proto *base)
+{
+       prot[TLS_BASE_TX] = *base;
+       prot[TLS_BASE_TX].setsockopt    = tls_setsockopt;
+       prot[TLS_BASE_TX].getsockopt    = tls_getsockopt;
+       prot[TLS_BASE_TX].close         = tls_sk_proto_close;
+
+       prot[TLS_SW_TX] = prot[TLS_BASE_TX];
+       prot[TLS_SW_TX].sendmsg         = tls_sw_sendmsg;
+       prot[TLS_SW_TX].sendpage        = tls_sw_sendpage;
+}
+
 static int tls_init(struct sock *sk)
 {
+       int ip_ver = sk->sk_family == AF_INET6 ? TLSV6 : TLSV4;
        struct inet_connection_sock *icsk = inet_csk(sk);
        struct tls_context *ctx;
        int rc = 0;
@@ -479,6 +502,17 @@ static int tls_init(struct sock *sk)
        ctx->getsockopt = sk->sk_prot->getsockopt;
        ctx->sk_proto_close = sk->sk_prot->close;
 
+       /* Build IPv6 TLS whenever the address of tcpv6_prot changes */
+       if (ip_ver == TLSV6 &&
+           unlikely(sk->sk_prot != smp_load_acquire(&saved_tcpv6_prot))) {
+               mutex_lock(&tcpv6_prot_mutex);
+               if (likely(sk->sk_prot != saved_tcpv6_prot)) {
+                       build_protos(tls_prots[TLSV6], sk->sk_prot);
+                       smp_store_release(&saved_tcpv6_prot, sk->sk_prot);
+               }
+               mutex_unlock(&tcpv6_prot_mutex);
+       }
+
        ctx->tx_conf = TLS_BASE_TX;
        update_sk_prot(sk, ctx);
 out:
@@ -493,21 +527,9 @@ static struct tcp_ulp_ops tcp_tls_ulp_ops __read_mostly = {
        .init                   = tls_init,
 };
 
-static void build_protos(struct proto *prot, struct proto *base)
-{
-       prot[TLS_BASE_TX] = *base;
-       prot[TLS_BASE_TX].setsockopt    = tls_setsockopt;
-       prot[TLS_BASE_TX].getsockopt    = tls_getsockopt;
-       prot[TLS_BASE_TX].close         = tls_sk_proto_close;
-
-       prot[TLS_SW_TX] = prot[TLS_BASE_TX];
-       prot[TLS_SW_TX].sendmsg         = tls_sw_sendmsg;
-       prot[TLS_SW_TX].sendpage        = tls_sw_sendpage;
-}
-
 static int __init tls_register(void)
 {
-       build_protos(tls_prots, &tcp_prot);
+       build_protos(tls_prots[TLSV4], &tcp_prot);
 
        tcp_register_ulp(&tcp_tls_ulp_ops);
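
net/tls now keeps a proto table per address family and builds the IPv6 entries lazily: the fast path compares sk->sk_prot against saved_tcpv6_prot with smp_load_acquire(), and the slow path re-checks under tcpv6_prot_mutex before rebuilding and publishing with smp_store_release(). Below is a loose userspace analogue of that check, lock, re-check, publish sequence using C11 atomics and a pthread mutex; the proto contents are dummies and the memory-ordering calls only approximate the kernel's smp_* helpers.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

struct proto {
        const char *name;
};

/* One derived table per "family", rebuilt whenever the base it was derived
 * from changes (compare: tls_prots[TLSV6] and saved_tcpv6_prot). */
static struct proto derived_v6[2];
static _Atomic(const struct proto *) saved_v6_base;
static pthread_mutex_t v6_mutex = PTHREAD_MUTEX_INITIALIZER;

static void build_derived(struct proto *dst, const struct proto *base)
{
        dst[0] = *base;         /* BASE variant */
        dst[1] = *base;         /* SW variant; the real code overrides methods */
}

static void maybe_rebuild_v6(const struct proto *base)
{
        /* Fast path: acquire-load and compare, no lock taken. */
        if (atomic_load_explicit(&saved_v6_base, memory_order_acquire) == base)
                return;

        pthread_mutex_lock(&v6_mutex);
        /* Re-check under the mutex: another thread may have done the work. */
        if (atomic_load_explicit(&saved_v6_base, memory_order_relaxed) != base) {
                build_derived(derived_v6, base);
                /* Publish only after the table is fully written. */
                atomic_store_explicit(&saved_v6_base, base,
                                      memory_order_release);
        }
        pthread_mutex_unlock(&v6_mutex);
}

int main(void)
{
        struct proto tcpv6 = { "tcpv6" };

        maybe_rebuild_v6(&tcpv6);       /* builds the table */
        maybe_rebuild_v6(&tcpv6);       /* fast path, nothing to do */
        printf("derived table based on: %s\n", derived_v6[0].name);
        return 0;
}

The re-check under the mutex is what turns a racing second caller into a no-op instead of a redundant rebuild.
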
 
index 1abcc4f..4172204 100644 (file)
@@ -34,9 +34,10 @@ config CFG80211
 
          When built as a module it will be called cfg80211.
 
+if CFG80211
+
 config NL80211_TESTMODE
        bool "nl80211 testmode command"
-       depends on CFG80211
        help
          The nl80211 testmode command helps implementing things like
          factory calibration or validation tools for wireless chips.
@@ -51,7 +52,6 @@ config NL80211_TESTMODE
 
 config CFG80211_DEVELOPER_WARNINGS
        bool "enable developer warnings"
-       depends on CFG80211
        default n
        help
          This option enables some additional warnings that help
@@ -68,7 +68,7 @@ config CFG80211_DEVELOPER_WARNINGS
 
 config CFG80211_CERTIFICATION_ONUS
        bool "cfg80211 certification onus"
-       depends on CFG80211 && EXPERT
+       depends on EXPERT
        default n
        ---help---
          You should disable this option unless you are both capable
@@ -159,7 +159,6 @@ config CFG80211_REG_RELAX_NO_IR
 
 config CFG80211_DEFAULT_PS
        bool "enable powersave by default"
-       depends on CFG80211
        default y
        help
          This option enables powersave mode by default.
@@ -170,7 +169,6 @@ config CFG80211_DEFAULT_PS
 
 config CFG80211_DEBUGFS
        bool "cfg80211 DebugFS entries"
-       depends on CFG80211
        depends on DEBUG_FS
        ---help---
          You can enable this if you want debugfs entries for cfg80211.
@@ -180,7 +178,6 @@ config CFG80211_DEBUGFS
 config CFG80211_CRDA_SUPPORT
        bool "support CRDA" if EXPERT
        default y
-       depends on CFG80211
        help
          You should enable this option unless you know for sure you have no
          need for it, for example when using internal regdb (above) or the
@@ -190,7 +187,6 @@ config CFG80211_CRDA_SUPPORT
 
 config CFG80211_WEXT
        bool "cfg80211 wireless extensions compatibility" if !CFG80211_WEXT_EXPORT
-       depends on CFG80211
        select WEXT_CORE
        default y if CFG80211_WEXT_EXPORT
        help
@@ -199,11 +195,12 @@ config CFG80211_WEXT
 
 config CFG80211_WEXT_EXPORT
        bool
-       depends on CFG80211
        help
          Drivers should select this option if they require cfg80211's
          wext compatibility symbols to be exported.
 
+endif # CFG80211
+
 config LIB80211
        tristate
        default n
index 8e70291..e87d6c4 100644 (file)
@@ -217,7 +217,7 @@ bool xfrm_dev_offload_ok(struct sk_buff *skb, struct xfrm_state *x)
                if (skb->len <= mtu)
                        goto ok;
 
-               if (skb_is_gso(skb) && skb_gso_validate_mtu(skb, mtu))
+               if (skb_is_gso(skb) && skb_gso_validate_network_len(skb, mtu))
                        goto ok;
        }
 
index 0e349b8..ba942e3 100644 (file)
@@ -1,4 +1,5 @@
 # SPDX-License-Identifier: GPL-2.0
+ifndef CROSS_COMPILE
 hostprogs-$(CONFIG_SAMPLE_SECCOMP) := bpf-fancy dropper bpf-direct
 
 HOSTCFLAGS_bpf-fancy.o += -I$(objtree)/usr/include
@@ -16,7 +17,6 @@ HOSTCFLAGS_bpf-direct.o += -idirafter $(objtree)/include
 bpf-direct-objs := bpf-direct.o
 
 # Try to match the kernel target.
-ifndef CROSS_COMPILE
 ifndef CONFIG_64BIT
 
 # s390 has -m31 flag to build 31 bit binaries
@@ -35,12 +35,4 @@ HOSTLOADLIBES_bpf-fancy += $(MFLAG)
 HOSTLOADLIBES_dropper += $(MFLAG)
 endif
 always := $(hostprogs-m)
-else
-# MIPS system calls are defined based on the -mabi that is passed
-# to the toolchain which may or may not be a valid option
-# for the host toolchain. So disable tests if target architecture
-# is MIPS but the host isn't.
-ifndef CONFIG_MIPS
-always := $(hostprogs-m)
-endif
 endif
index 47cddf3..4f2b25d 100644 (file)
@@ -256,6 +256,8 @@ __objtool_obj := $(objtree)/tools/objtool/objtool
 
 objtool_args = $(if $(CONFIG_UNWINDER_ORC),orc generate,check)
 
+objtool_args += $(if $(part-of-module), --module,)
+
 ifndef CONFIG_FRAME_POINTER
 objtool_args += --no-fp
 endif
@@ -264,6 +266,12 @@ objtool_args += --no-unreachable
 else
 objtool_args += $(call cc-ifversion, -lt, 0405, --no-unreachable)
 endif
+ifdef CONFIG_RETPOLINE
+ifneq ($(RETPOLINE_CFLAGS),)
+  objtool_args += --retpoline
+endif
+endif
+
 
 ifdef CONFIG_MODVERSIONS
 objtool_o = $(@D)/.tmp_$(@F)
index 1249b72..8fd6437 100644 (file)
@@ -56,10 +56,10 @@ statement S;
 p << r.p;
 @@
 
-coccilib.org.print_todo(p[0], "WARNING opportunity for kmemdep")
+coccilib.org.print_todo(p[0], "WARNING opportunity for kmemdup")
 
 @script:python depends on report@
 p << r.p;
 @@
 
-coccilib.report.print_report(p[0], "WARNING opportunity for kmemdep")
+coccilib.report.print_report(p[0], "WARNING opportunity for kmemdup")
index 9ee9bf7..6579265 100644 (file)
@@ -595,7 +595,7 @@ static void optimize_result(void)
                 * original char code */
                if (!best_table_len[i]) {
 
-                       /* find the token with the breates profit value */
+                       /* find the token with the best profit value */
                        best = find_best_token();
                        if (token_profit[best] == 0)
                                break;
index 5c12dc9..df26c7b 100644 (file)
@@ -178,7 +178,7 @@ static int conf_set_sym_val(struct symbol *sym, int def, int def_flags, char *p)
        case S_HEX:
        done:
                if (sym_string_valid(sym, p)) {
-                       sym->def[def].val = strdup(p);
+                       sym->def[def].val = xstrdup(p);
                        sym->flags |= def_flags;
                } else {
                        if (def != S_DEF_AUTO)
index 2858738..240880a 100644 (file)
@@ -101,7 +101,7 @@ static struct message *message__new(const char *msg, char *option,
        if (self->files == NULL)
                goto out_fail;
 
-       self->msg = strdup(msg);
+       self->msg = xstrdup(msg);
        if (self->msg == NULL)
                goto out_fail_msg;
 
index 4e23feb..2d5ec2d 100644 (file)
@@ -115,6 +115,7 @@ int file_write_dep(const char *name);
 void *xmalloc(size_t size);
 void *xcalloc(size_t nmemb, size_t size);
 void *xrealloc(void *p, size_t size);
+char *xstrdup(const char *s);
 
 struct gstr {
        size_t len;
index a10bd9d..6c0bcd9 100755 (executable)
@@ -55,7 +55,8 @@ EOF
            echo " *** required header files."                            1>&2
            echo " *** 'make menuconfig' requires the ncurses libraries." 1>&2
            echo " *** "                                                  1>&2
-           echo " *** Install ncurses (ncurses-devel) and try again."    1>&2
+           echo " *** Install ncurses (ncurses-devel or libncurses-dev " 1>&2
+           echo " *** depending on your distribution) and try again."    1>&2
            echo " *** "                                                  1>&2
            exit 1
        fi
index 9922285..36cd3e1 100644 (file)
@@ -212,6 +212,7 @@ void menu_add_option(int token, char *arg)
                        sym_defconfig_list = current_entry->sym;
                else if (sym_defconfig_list != current_entry->sym)
                        zconf_error("trying to redefine defconfig symbol");
+               sym_defconfig_list->flags |= SYMBOL_AUTO;
                break;
        case T_OPT_ENV:
                prop_add_env(arg);
index cca9663..2220bc4 100644 (file)
@@ -183,7 +183,7 @@ static void sym_validate_range(struct symbol *sym)
                sprintf(str, "%lld", val2);
        else
                sprintf(str, "0x%llx", val2);
-       sym->curr.val = strdup(str);
+       sym->curr.val = xstrdup(str);
 }
 
 static void sym_set_changed(struct symbol *sym)
@@ -849,7 +849,7 @@ struct symbol *sym_lookup(const char *name, int flags)
                                   : !(symbol->flags & (SYMBOL_CONST|SYMBOL_CHOICE))))
                                return symbol;
                }
-               new_name = strdup(name);
+               new_name = xstrdup(name);
        } else {
                new_name = NULL;
                hash = 0;
index b98a79e..c6f6e21 100644 (file)
@@ -154,3 +154,14 @@ void *xrealloc(void *p, size_t size)
        fprintf(stderr, "Out of memory.\n");
        exit(1);
 }
+
+char *xstrdup(const char *s)
+{
+       char *p;
+
+       p = strdup(s);
+       if (p)
+               return p;
+       fprintf(stderr, "Out of memory.\n");
+       exit(1);
+}
index 02de6fe..88b650e 100644 (file)
@@ -332,16 +332,12 @@ void zconf_nextfile(const char *name)
                                "Inclusion path:\n  current file : '%s'\n",
                                zconf_curname(), zconf_lineno(),
                                zconf_curname());
-                       iter = current_file->parent;
-                       while (iter && \
-                              strcmp(iter->name,current_file->name)) {
-                               fprintf(stderr, "  included from: '%s:%d'\n",
-                                       iter->name, iter->lineno-1);
+                       iter = current_file;
+                       do {
                                iter = iter->parent;
-                       }
-                       if (iter)
                                fprintf(stderr, "  included from: '%s:%d'\n",
-                                       iter->name, iter->lineno+1);
+                                       iter->name, iter->lineno - 1);
+                       } while (strcmp(iter->name, current_file->name));
                        exit(1);
                }
        }
index 4be9805..ad6305b 100644 (file)
@@ -127,7 +127,7 @@ no_mainmenu_stmt: /* empty */
         * later regardless of whether it comes from the 'prompt' in
         * mainmenu_stmt or here
         */
-       menu_add_prompt(P_MENU, strdup("Linux Kernel Configuration"), NULL);
+       menu_add_prompt(P_MENU, xstrdup("Linux Kernel Configuration"), NULL);
 };
 
 
@@ -276,6 +276,7 @@ choice: T_CHOICE word_opt T_EOL
        sym->flags |= SYMBOL_AUTO;
        menu_add_entry(sym);
        menu_add_expr(P_CHOICE, NULL, NULL);
+       free($2);
        printd(DEBUG_PARSE, "%s:%d:choice\n", zconf_curname(), zconf_lineno());
 };
 
index c0d129d..be56a11 100755 (executable)
@@ -246,7 +246,7 @@ else
 fi;
 
 # final build of init/
-${MAKE} -f "${srctree}/scripts/Makefile.build" obj=init GCC_PLUGINS_CFLAGS="${GCC_PLUGINS_CFLAGS}"
+${MAKE} -f "${srctree}/scripts/Makefile.build" obj=init
 
 archive_builtin
 
index 0b3026d..8a77620 100644 (file)
@@ -889,7 +889,7 @@ static int snd_ctl_elem_read(struct snd_card *card,
 
        index_offset = snd_ctl_get_ioff(kctl, &control->id);
        vd = &kctl->vd[index_offset];
-       if (!(vd->access & SNDRV_CTL_ELEM_ACCESS_READ) && kctl->get == NULL)
+       if (!(vd->access & SNDRV_CTL_ELEM_ACCESS_READ) || kctl->get == NULL)
                return -EPERM;
 
        snd_ctl_build_ioff(&control->id, kctl, index_offset);
index c71dcac..96143df 100644 (file)
@@ -181,7 +181,7 @@ static const struct kernel_param_ops param_ops_xint = {
 };
 #define param_check_xint param_check_int
 
-static int power_save = CONFIG_SND_HDA_POWER_SAVE_DEFAULT;
+static int power_save = -1;
 module_param(power_save, xint, 0644);
 MODULE_PARM_DESC(power_save, "Automatic power-saving timeout "
                 "(in second, 0 = disable).");
@@ -2186,6 +2186,24 @@ out_free:
        return err;
 }
 
+#ifdef CONFIG_PM
+/* On some boards, setting power_save to a non-zero value leads to clicking /
+ * popping sounds whenever we enter/leave powersaving mode. Ideally we would
+ * figure out how to avoid these sounds, but that is not always feasible.
+ * So we keep a list of devices where we disable powersaving, as it's known
+ * to cause problems on these devices.
+ */
+static struct snd_pci_quirk power_save_blacklist[] = {
+       /* https://bugzilla.redhat.com/show_bug.cgi?id=1525104 */
+       SND_PCI_QUIRK(0x1849, 0x0c0c, "Asrock B85M-ITX", 0),
+       /* https://bugzilla.redhat.com/show_bug.cgi?id=1525104 */
+       SND_PCI_QUIRK(0x1043, 0x8733, "Asus Prime X370-Pro", 0),
+       /* https://bugzilla.kernel.org/show_bug.cgi?id=198611 */
+       SND_PCI_QUIRK(0x17aa, 0x2227, "Lenovo X1 Carbon 3rd Gen", 0),
+       {}
+};
+#endif /* CONFIG_PM */
+
 /* number of codec slots for each chipset: 0 = default slots (i.e. 4) */
 static unsigned int azx_max_codecs[AZX_NUM_DRIVERS] = {
        [AZX_DRIVER_NVIDIA] = 8,
@@ -2198,6 +2216,7 @@ static int azx_probe_continue(struct azx *chip)
        struct hdac_bus *bus = azx_bus(chip);
        struct pci_dev *pci = chip->pci;
        int dev = chip->dev_index;
+       int val;
        int err;
 
        hda->probe_continued = 1;
@@ -2278,7 +2297,22 @@ static int azx_probe_continue(struct azx *chip)
 
        chip->running = 1;
        azx_add_card_list(chip);
-       snd_hda_set_power_save(&chip->bus, power_save * 1000);
+
+       val = power_save;
+#ifdef CONFIG_PM
+       if (val == -1) {
+               const struct snd_pci_quirk *q;
+
+               val = CONFIG_SND_HDA_POWER_SAVE_DEFAULT;
+               q = snd_pci_quirk_lookup(chip->pci, power_save_blacklist);
+               if (q && val) {
+                       dev_info(chip->card->dev, "device %04x:%04x is on the power_save blacklist, forcing power_save to 0\n",
+                                q->subvendor, q->subdevice);
+                       val = 0;
+               }
+       }
+#endif /* CONFIG_PM */
+       snd_hda_set_power_save(&chip->bus, val * 1000);
        if (azx_has_pm_runtime(chip) || hda->use_vga_switcheroo)
                pm_runtime_put_autosuspend(&pci->dev);
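
With power_save defaulting to -1, azx_probe_continue() resolves the effective value at probe time: start from the Kconfig default, then force 0 if the PCI subsystem IDs match an entry in the blacklist above. The plain-C sketch below reproduces that table-lookup-with-default shape; the quirk struct and lookup helper are simplified stand-ins rather than the ALSA snd_pci_quirk API, though the IDs are the ones listed in the hunk.

#include <stdio.h>

struct quirk {
        unsigned short subvendor;
        unsigned short subdevice;
        const char *name;
        int value;
};

/* Terminated by an all-zero entry, like snd_pci_quirk tables. */
static const struct quirk power_save_blacklist[] = {
        { 0x1849, 0x0c0c, "Asrock B85M-ITX", 0 },
        { 0x1043, 0x8733, "Asus Prime X370-Pro", 0 },
        { 0x17aa, 0x2227, "Lenovo X1 Carbon 3rd Gen", 0 },
        { 0, 0, NULL, 0 }
};

static const struct quirk *quirk_lookup(const struct quirk *tbl,
                                        unsigned short subvendor,
                                        unsigned short subdevice)
{
        for (; tbl->name; tbl++)
                if (tbl->subvendor == subvendor && tbl->subdevice == subdevice)
                        return tbl;
        return NULL;
}

int main(void)
{
        int power_save = -1;            /* module parameter default */
        int kconfig_default = 1;        /* stands in for CONFIG_SND_HDA_POWER_SAVE_DEFAULT */
        unsigned short subvendor = 0x1849, subdevice = 0x0c0c;

        if (power_save == -1) {
                const struct quirk *q;
                int val = kconfig_default;

                q = quirk_lookup(power_save_blacklist, subvendor, subdevice);
                if (q && val) {
                        printf("%s is on the power_save blacklist, forcing 0\n",
                               q->name);
                        val = 0;
                }
                power_save = val;
        }
        printf("effective power_save: %d\n", power_save);
        return 0;
}
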
 
index ce28f7c..b9c93fa 100644 (file)
@@ -4997,13 +4997,14 @@ static void alc_fixup_tpt470_dock(struct hda_codec *codec,
 
        if (action == HDA_FIXUP_ACT_PRE_PROBE) {
                spec->parse_flags = HDA_PINCFG_NO_HP_FIXUP;
+               snd_hda_apply_pincfgs(codec, pincfgs);
+       } else if (action == HDA_FIXUP_ACT_INIT) {
                /* Enable DOCK device */
                snd_hda_codec_write(codec, 0x17, 0,
                            AC_VERB_SET_CONFIG_DEFAULT_BYTES_3, 0);
                /* Enable DOCK device */
                snd_hda_codec_write(codec, 0x19, 0,
                            AC_VERB_SET_CONFIG_DEFAULT_BYTES_3, 0);
-               snd_hda_apply_pincfgs(codec, pincfgs);
        }
 }
 
index 5025204..754e632 100644 (file)
@@ -3325,4 +3325,51 @@ AU0828_DEVICE(0x2040, 0x7270, "Hauppauge", "HVR-950Q"),
        }
 },
 
+{
+       /*
+        * Bowers & Wilkins PX headphones only support the 48 kHz sample rate
+        * even though they advertise more. The capture interface doesn't work
+        * even on Windows.
+        */
+       USB_DEVICE(0x19b5, 0x0021),
+       .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) {
+               .ifnum = QUIRK_ANY_INTERFACE,
+               .type = QUIRK_COMPOSITE,
+               .data = (const struct snd_usb_audio_quirk[]) {
+                       {
+                               .ifnum = 0,
+                               .type = QUIRK_AUDIO_STANDARD_MIXER,
+                       },
+                       /* Capture */
+                       {
+                               .ifnum = 1,
+                               .type = QUIRK_IGNORE_INTERFACE,
+                       },
+                       /* Playback */
+                       {
+                               .ifnum = 2,
+                               .type = QUIRK_AUDIO_FIXED_ENDPOINT,
+                               .data = &(const struct audioformat) {
+                                       .formats = SNDRV_PCM_FMTBIT_S16_LE,
+                                       .channels = 2,
+                                       .iface = 2,
+                                       .altsetting = 1,
+                                       .altset_idx = 1,
+                                       .attributes = UAC_EP_CS_ATTR_FILL_MAX |
+                                               UAC_EP_CS_ATTR_SAMPLE_RATE,
+                                       .endpoint = 0x03,
+                                       .ep_attr = USB_ENDPOINT_XFER_ISOC,
+                                       .rates = SNDRV_PCM_RATE_48000,
+                                       .rate_min = 48000,
+                                       .rate_max = 48000,
+                                       .nr_rates = 1,
+                                       .rate_table = (unsigned int[]) {
+                                               48000
+                                       }
+                               }
+                       },
+               }
+       }
+},
+
 #undef USB_DEVICE_VENDOR_SPEC
index a095150..4ed9d0c 100644 (file)
@@ -50,6 +50,7 @@
 /*standard module options for ALSA. This module supports only one card*/
 static int hdmi_card_index = SNDRV_DEFAULT_IDX1;
 static char *hdmi_card_id = SNDRV_DEFAULT_STR1;
+static bool single_port;
 
 module_param_named(index, hdmi_card_index, int, 0444);
 MODULE_PARM_DESC(index,
@@ -57,6 +58,9 @@ MODULE_PARM_DESC(index,
 module_param_named(id, hdmi_card_id, charp, 0444);
 MODULE_PARM_DESC(id,
                "ID string for INTEL Intel HDMI Audio controller.");
+module_param(single_port, bool, 0444);
+MODULE_PARM_DESC(single_port,
+               "Single-port mode (for compatibility)");
 
 /*
  * ELD SA bits in the CEA Speaker Allocation data block
@@ -1579,7 +1583,11 @@ static irqreturn_t display_pipe_interrupt_handler(int irq, void *dev_id)
 static void notify_audio_lpe(struct platform_device *pdev, int port)
 {
        struct snd_intelhad_card *card_ctx = platform_get_drvdata(pdev);
-       struct snd_intelhad *ctx = &card_ctx->pcm_ctx[port];
+       struct snd_intelhad *ctx;
+
+       ctx = &card_ctx->pcm_ctx[single_port ? 0 : port];
+       if (single_port)
+               ctx->port = port;
 
        schedule_work(&ctx->hdmi_audio_wq);
 }
@@ -1743,6 +1751,7 @@ static int hdmi_lpe_audio_probe(struct platform_device *pdev)
 {
        struct snd_card *card;
        struct snd_intelhad_card *card_ctx;
+       struct snd_intelhad *ctx;
        struct snd_pcm *pcm;
        struct intel_hdmi_lpe_audio_pdata *pdata;
        int irq;
@@ -1787,6 +1796,21 @@ static int hdmi_lpe_audio_probe(struct platform_device *pdev)
 
        platform_set_drvdata(pdev, card_ctx);
 
+       card_ctx->num_pipes = pdata->num_pipes;
+       card_ctx->num_ports = single_port ? 1 : pdata->num_ports;
+
+       for_each_port(card_ctx, port) {
+               ctx = &card_ctx->pcm_ctx[port];
+               ctx->card_ctx = card_ctx;
+               ctx->dev = card_ctx->dev;
+               ctx->port = single_port ? -1 : port;
+               ctx->pipe = -1;
+
+               spin_lock_init(&ctx->had_spinlock);
+               mutex_init(&ctx->mutex);
+               INIT_WORK(&ctx->hdmi_audio_wq, had_audio_wq);
+       }
+
        dev_dbg(&pdev->dev, "%s: mmio_start = 0x%x, mmio_end = 0x%x\n",
                __func__, (unsigned int)res_mmio->start,
                (unsigned int)res_mmio->end);
@@ -1816,19 +1840,12 @@ static int hdmi_lpe_audio_probe(struct platform_device *pdev)
        init_channel_allocations();
 
        card_ctx->num_pipes = pdata->num_pipes;
-       card_ctx->num_ports = pdata->num_ports;
+       card_ctx->num_ports = single_port ? 1 : pdata->num_ports;
 
        for_each_port(card_ctx, port) {
-               struct snd_intelhad *ctx = &card_ctx->pcm_ctx[port];
                int i;
 
-               ctx->card_ctx = card_ctx;
-               ctx->dev = card_ctx->dev;
-               ctx->port = port;
-               ctx->pipe = -1;
-
-               INIT_WORK(&ctx->hdmi_audio_wq, had_audio_wq);
-
+               ctx = &card_ctx->pcm_ctx[port];
                ret = snd_pcm_new(card, INTEL_HAD, port, MAX_PB_STREAMS,
                                  MAX_CAP_STREAMS, &pcm);
                if (ret)
index a5684d0..5898c22 100755 (executable)
@@ -33,7 +33,7 @@ import resource
 import struct
 import re
 import subprocess
-from collections import defaultdict
+from collections import defaultdict, namedtuple
 
 VMX_EXIT_REASONS = {
     'EXCEPTION_NMI':        0,
@@ -228,6 +228,7 @@ IOCTL_NUMBERS = {
 }
 
 ENCODING = locale.getpreferredencoding(False)
+TRACE_FILTER = re.compile(r'^[^\(]*$')
 
 
 class Arch(object):
@@ -260,6 +261,11 @@ class Arch(object):
                     return ArchX86(SVM_EXIT_REASONS)
                 return
 
+    def tracepoint_is_child(self, field):
+        if (TRACE_FILTER.match(field)):
+            return None
+        return field.split('(', 1)[0]
+
 
 class ArchX86(Arch):
     def __init__(self, exit_reasons):
@@ -267,6 +273,10 @@ class ArchX86(Arch):
         self.ioctl_numbers = IOCTL_NUMBERS
         self.exit_reasons = exit_reasons
 
+    def debugfs_is_child(self, field):
+        """ Returns name of parent if 'field' is a child, None otherwise """
+        return None
+
 
 class ArchPPC(Arch):
     def __init__(self):
@@ -282,6 +292,10 @@ class ArchPPC(Arch):
         self.ioctl_numbers['SET_FILTER'] = 0x80002406 | char_ptr_size << 16
         self.exit_reasons = {}
 
+    def debugfs_is_child(self, field):
+        """ Returns name of parent if 'field' is a child, None otherwise """
+        return None
+
 
 class ArchA64(Arch):
     def __init__(self):
@@ -289,6 +303,10 @@ class ArchA64(Arch):
         self.ioctl_numbers = IOCTL_NUMBERS
         self.exit_reasons = AARCH64_EXIT_REASONS
 
+    def debugfs_is_child(self, field):
+        """ Returns name of parent if 'field' is a child, None otherwise """
+        return None
+
 
 class ArchS390(Arch):
     def __init__(self):
@@ -296,6 +314,12 @@ class ArchS390(Arch):
         self.ioctl_numbers = IOCTL_NUMBERS
         self.exit_reasons = None
 
+    def debugfs_is_child(self, field):
+        """ Returns name of parent if 'field' is a child, None otherwise """
+        if field.startswith('instruction_'):
+            return 'exit_instruction'
+
+
 ARCH = Arch.get_arch()
 
 
@@ -331,9 +355,6 @@ class perf_event_attr(ctypes.Structure):
 PERF_TYPE_TRACEPOINT = 2
 PERF_FORMAT_GROUP = 1 << 3
 
-PATH_DEBUGFS_TRACING = '/sys/kernel/debug/tracing'
-PATH_DEBUGFS_KVM = '/sys/kernel/debug/kvm'
-
 
 class Group(object):
     """Represents a perf event group."""
@@ -376,8 +397,8 @@ class Event(object):
         self.syscall = self.libc.syscall
         self.name = name
         self.fd = None
-        self.setup_event(group, trace_cpu, trace_pid, trace_point,
-                         trace_filter, trace_set)
+        self._setup_event(group, trace_cpu, trace_pid, trace_point,
+                          trace_filter, trace_set)
 
     def __del__(self):
         """Closes the event's file descriptor.
@@ -390,7 +411,7 @@ class Event(object):
         if self.fd:
             os.close(self.fd)
 
-    def perf_event_open(self, attr, pid, cpu, group_fd, flags):
+    def _perf_event_open(self, attr, pid, cpu, group_fd, flags):
         """Wrapper for the sys_perf_evt_open() syscall.
 
         Used to set up performance events, returns a file descriptor or -1
@@ -409,7 +430,7 @@ class Event(object):
                             ctypes.c_int(pid), ctypes.c_int(cpu),
                             ctypes.c_int(group_fd), ctypes.c_long(flags))
 
-    def setup_event_attribute(self, trace_set, trace_point):
+    def _setup_event_attribute(self, trace_set, trace_point):
         """Returns an initialized ctype perf_event_attr struct."""
 
         id_path = os.path.join(PATH_DEBUGFS_TRACING, 'events', trace_set,
@@ -419,8 +440,8 @@ class Event(object):
         event_attr.config = int(open(id_path).read())
         return event_attr
 
-    def setup_event(self, group, trace_cpu, trace_pid, trace_point,
-                    trace_filter, trace_set):
+    def _setup_event(self, group, trace_cpu, trace_pid, trace_point,
+                     trace_filter, trace_set):
         """Sets up the perf event in Linux.
 
         Issues the syscall to register the event in the kernel and
@@ -428,7 +449,7 @@ class Event(object):
 
         """
 
-        event_attr = self.setup_event_attribute(trace_set, trace_point)
+        event_attr = self._setup_event_attribute(trace_set, trace_point)
 
         # First event will be group leader.
         group_leader = -1
@@ -437,8 +458,8 @@ class Event(object):
         if group.events:
             group_leader = group.events[0].fd
 
-        fd = self.perf_event_open(event_attr, trace_pid,
-                                  trace_cpu, group_leader, 0)
+        fd = self._perf_event_open(event_attr, trace_pid,
+                                   trace_cpu, group_leader, 0)
         if fd == -1:
             err = ctypes.get_errno()
             raise OSError(err, os.strerror(err),
@@ -475,6 +496,10 @@ class Event(object):
 
 class Provider(object):
     """Encapsulates functionalities used by all providers."""
+    def __init__(self, pid):
+        self.child_events = False
+        self.pid = pid
+
     @staticmethod
     def is_field_wanted(fields_filter, field):
         """Indicate whether field is valid according to fields_filter."""
@@ -500,12 +525,12 @@ class TracepointProvider(Provider):
     """
     def __init__(self, pid, fields_filter):
         self.group_leaders = []
-        self.filters = self.get_filters()
+        self.filters = self._get_filters()
         self.update_fields(fields_filter)
-        self.pid = pid
+        super(TracepointProvider, self).__init__(pid)
 
     @staticmethod
-    def get_filters():
+    def _get_filters():
         """Returns a dict of trace events, their filter ids and
         the values that can be filtered.
 
@@ -521,8 +546,8 @@ class TracepointProvider(Provider):
             filters['kvm_exit'] = ('exit_reason', ARCH.exit_reasons)
         return filters
 
-    def get_available_fields(self):
-        """Returns a list of available event's of format 'event name(filter
+    def _get_available_fields(self):
+        """Returns a list of available events of format 'event name(filter
         name)'.
 
         All available events have directories under
@@ -549,11 +574,12 @@ class TracepointProvider(Provider):
 
     def update_fields(self, fields_filter):
         """Refresh fields, applying fields_filter"""
-        self.fields = [field for field in self.get_available_fields()
-                       if self.is_field_wanted(fields_filter, field)]
+        self.fields = [field for field in self._get_available_fields()
+                       if self.is_field_wanted(fields_filter, field) or
+                       ARCH.tracepoint_is_child(field)]
 
     @staticmethod
-    def get_online_cpus():
+    def _get_online_cpus():
         """Returns a list of cpu id integers."""
         def parse_int_list(list_string):
             """Returns an int list from a string of comma separated integers and
@@ -575,17 +601,17 @@ class TracepointProvider(Provider):
             cpu_string = cpu_list.readline()
             return parse_int_list(cpu_string)
 
-    def setup_traces(self):
+    def _setup_traces(self):
         """Creates all event and group objects needed to be able to retrieve
         data."""
-        fields = self.get_available_fields()
+        fields = self._get_available_fields()
         if self._pid > 0:
             # Fetch list of all threads of the monitored pid, as qemu
             # starts a thread for each vcpu.
             path = os.path.join('/proc', str(self._pid), 'task')
             groupids = self.walkdir(path)[1]
         else:
-            groupids = self.get_online_cpus()
+            groupids = self._get_online_cpus()
 
         # The constant is needed as a buffer for python libs, std
         # streams and other files that the script opens.
@@ -663,7 +689,7 @@ class TracepointProvider(Provider):
         # The garbage collector will get rid of all Event/Group
         # objects and open files after removing the references.
         self.group_leaders = []
-        self.setup_traces()
+        self._setup_traces()
         self.fields = self._fields
 
     def read(self, by_guest=0):
@@ -671,8 +697,12 @@ class TracepointProvider(Provider):
         ret = defaultdict(int)
         for group in self.group_leaders:
             for name, val in group.read().items():
-                if name in self._fields:
-                    ret[name] += val
+                if name not in self._fields:
+                    continue
+                parent = ARCH.tracepoint_is_child(name)
+                if parent:
+                    name += ' ' + parent
+                ret[name] += val
         return ret
 
     def reset(self):
@@ -690,11 +720,11 @@ class DebugfsProvider(Provider):
         self._baseline = {}
         self.do_read = True
         self.paths = []
-        self.pid = pid
+        super(DebugfsProvider, self).__init__(pid)
         if include_past:
-            self.restore()
+            self._restore()
 
-    def get_available_fields(self):
+    def _get_available_fields(self):
         """"Returns a list of available fields.
 
         The fields are all available KVM debugfs files
@@ -704,8 +734,9 @@ class DebugfsProvider(Provider):
 
     def update_fields(self, fields_filter):
         """Refresh fields, applying fields_filter"""
-        self._fields = [field for field in self.get_available_fields()
-                        if self.is_field_wanted(fields_filter, field)]
+        self._fields = [field for field in self._get_available_fields()
+                        if self.is_field_wanted(fields_filter, field) or
+                        ARCH.debugfs_is_child(field)]
 
     @property
     def fields(self):
@@ -758,7 +789,7 @@ class DebugfsProvider(Provider):
                     paths.append(dir)
         for path in paths:
             for field in self._fields:
-                value = self.read_field(field, path)
+                value = self._read_field(field, path)
                 key = path + field
                 if reset == 1:
                     self._baseline[key] = value
@@ -766,20 +797,21 @@ class DebugfsProvider(Provider):
                     self._baseline[key] = 0
                 if self._baseline.get(key, -1) == -1:
                     self._baseline[key] = value
-                increment = (results.get(field, 0) + value -
-                             self._baseline.get(key, 0))
-                if by_guest:
-                    pid = key.split('-')[0]
-                    if pid in results:
-                        results[pid] += increment
-                    else:
-                        results[pid] = increment
+                parent = ARCH.debugfs_is_child(field)
+                if parent:
+                    field = field + ' ' + parent
+                else:
+                    if by_guest:
+                        field = key.split('-')[0]    # set 'field' to 'pid'
+                increment = value - self._baseline.get(key, 0)
+                if field in results:
+                    results[field] += increment
                 else:
                     results[field] = increment
 
         return results
 
-    def read_field(self, field, path):
+    def _read_field(self, field, path):
         """Returns the value of a single field from a specific VM."""
         try:
             return int(open(os.path.join(PATH_DEBUGFS_KVM,
@@ -794,12 +826,15 @@ class DebugfsProvider(Provider):
         self._baseline = {}
         self.read(1)
 
-    def restore(self):
+    def _restore(self):
         """Reset field counters"""
         self._baseline = {}
         self.read(2)
 
 
+EventStat = namedtuple('EventStat', ['value', 'delta'])
+
+
 class Stats(object):
     """Manages the data providers and the data they provide.
 
@@ -808,13 +843,13 @@ class Stats(object):
 
     """
     def __init__(self, options):
-        self.providers = self.get_providers(options)
+        self.providers = self._get_providers(options)
         self._pid_filter = options.pid
         self._fields_filter = options.fields
         self.values = {}
+        self._child_events = False
 
-    @staticmethod
-    def get_providers(options):
+    def _get_providers(self, options):
         """Returns a list of data providers depending on the passed options."""
         providers = []
 
@@ -826,7 +861,7 @@ class Stats(object):
 
         return providers
 
-    def update_provider_filters(self):
+    def _update_provider_filters(self):
         """Propagates fields filters to providers."""
         # As we reset the counters when updating the fields we can
         # also clear the cache of old values.
@@ -847,7 +882,7 @@ class Stats(object):
     def fields_filter(self, fields_filter):
         if fields_filter != self._fields_filter:
             self._fields_filter = fields_filter
-            self.update_provider_filters()
+            self._update_provider_filters()
 
     @property
     def pid_filter(self):
@@ -861,16 +896,33 @@ class Stats(object):
             for provider in self.providers:
                 provider.pid = self._pid_filter
 
+    @property
+    def child_events(self):
+        return self._child_events
+
+    @child_events.setter
+    def child_events(self, val):
+        self._child_events = val
+        for provider in self.providers:
+            provider.child_events = val
+
     def get(self, by_guest=0):
         """Returns a dict with field -> (value, delta to last value) of all
-        provider data."""
+        provider data.
+        Key formats:
+          * plain: 'key' is event name
+          * child-parent: 'key' is in format '<child> <parent>'
+          * pid: 'key' is the pid of the guest, and the record contains the
+               aggregated event data
+        These formats are generated by the providers, and handled in class TUI.
+        """
         for provider in self.providers:
             new = provider.read(by_guest=by_guest)
-            for key in new if by_guest else provider.fields:
-                oldval = self.values.get(key, (0, 0))[0]
+            for key in new:
+                oldval = self.values.get(key, EventStat(0, 0)).value
                 newval = new.get(key, 0)
                 newdelta = newval - oldval
-                self.values[key] = (newval, newdelta)
+                self.values[key] = EventStat(newval, newdelta)
         return self.values
 
     def toggle_display_guests(self, to_pid):
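
To make the docstring above concrete, a values dict handed to the TUI could look like the sketch
below (event names and pid are illustrative only); which of the three key layouts appears depends
on by_guest and on whether the provider reports child events:

    from collections import namedtuple
    EventStat = namedtuple('EventStat', ['value', 'delta'])

    values = {
        'kvm_exit': EventStat(value=4711, delta=12),             # plain event name
        'kvm_exit(EPT_MISCONFIG) kvm_exit': EventStat(3, 1),     # '<child> <parent>'
        '12345': EventStat(value=9000, delta=42),                # pid key (by_guest mode)
    }
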
@@ -899,10 +951,10 @@ class Stats(object):
         self.get(to_pid)
         return 0
 
+
 DELAY_DEFAULT = 3.0
 MAX_GUEST_NAME_LEN = 48
 MAX_REGEX_LEN = 44
-DEFAULT_REGEX = r'^[^\(]*$'
 SORT_DEFAULT = 0
 
 
@@ -969,7 +1021,7 @@ class Tui(object):
 
         return res
 
-    def print_all_gnames(self, row):
+    def _print_all_gnames(self, row):
         """Print a list of all running guests along with their pids."""
         self.screen.addstr(row, 2, '%8s  %-60s' %
                            ('Pid', 'Guest Name (fuzzy list, might be '
@@ -1032,19 +1084,13 @@ class Tui(object):
 
         return name
 
-    def update_drilldown(self):
-        """Sets or removes a filter that only allows fields without braces."""
-        if not self.stats.fields_filter:
-            self.stats.fields_filter = DEFAULT_REGEX
-
-        elif self.stats.fields_filter == DEFAULT_REGEX:
-            self.stats.fields_filter = None
-
-    def update_pid(self, pid):
+    def _update_pid(self, pid):
         """Propagates pid selection to stats object."""
+        self.screen.addstr(4, 1, 'Updating pid filter...')
+        self.screen.refresh()
         self.stats.pid_filter = pid
 
-    def refresh_header(self, pid=None):
+    def _refresh_header(self, pid=None):
         """Refreshes the header."""
         if pid is None:
             pid = self.stats.pid_filter
@@ -1059,8 +1105,7 @@ class Tui(object):
                                .format(pid, gname), curses.A_BOLD)
         else:
             self.screen.addstr(0, 0, 'kvm statistics - summary', curses.A_BOLD)
-        if self.stats.fields_filter and self.stats.fields_filter \
-           != DEFAULT_REGEX:
+        if self.stats.fields_filter:
             regex = self.stats.fields_filter
             if len(regex) > MAX_REGEX_LEN:
                 regex = regex[:MAX_REGEX_LEN] + '...'
@@ -1075,56 +1120,99 @@ class Tui(object):
         self.screen.addstr(4, 1, 'Collecting data...')
         self.screen.refresh()
 
-    def refresh_body(self, sleeptime):
+    def _refresh_body(self, sleeptime):
+        def is_child_field(field):
+            return field.find('(') != -1
+
+        def insert_child(sorted_items, child, values, parent):
+            num = len(sorted_items)
+            for i in range(0, num):
+                # only add child if parent is present
+                if parent.startswith(sorted_items[i][0]):
+                    sorted_items.insert(i + 1, ('  ' + child, values))
+
+        def get_sorted_events(self, stats):
+            """ separate parent and child events """
+            if self._sorting == SORT_DEFAULT:
+                def sortkey((_k, v)):
+                    # sort by (delta value, overall value)
+                    return (v.delta, v.value)
+            else:
+                def sortkey((_k, v)):
+                    # sort by overall value
+                    return v.value
+
+            childs = []
+            sorted_items = []
+            # we can't rule out child events appearing before their parents,
+            # even when sorted - separate all children out first and add them in later
+            for key, values in sorted(stats.items(), key=sortkey,
+                                      reverse=True):
+                if values == (0, 0):
+                    continue
+                if key.find(' ') != -1:
+                    if not self.stats.child_events:
+                        continue
+                    childs.insert(0, (key, values))
+                else:
+                    sorted_items.append((key, values))
+            if self.stats.child_events:
+                for key, values in childs:
+                    (child, parent) = key.split(' ')
+                    insert_child(sorted_items, child, values, parent)
+
+            return sorted_items
+
         row = 3
         self.screen.move(row, 0)
         self.screen.clrtobot()
         stats = self.stats.get(self._display_guests)
-
-        def sortCurAvg(x):
-            # sort by current events if available
-            if stats[x][1]:
-                return (-stats[x][1], -stats[x][0])
+        total = 0.
+        ctotal = 0.
+        for key, values in stats.items():
+            if self._display_guests:
+                if self.get_gname_from_pid(key):
+                    total += values.value
+                continue
+            if not key.find(' ') != -1:
+                total += values.value
             else:
-                return (0, -stats[x][0])
+                ctotal += values.value
+        if total == 0.:
+            # we don't have any fields, or all non-child events are filtered
+            total = ctotal
 
-        def sortTotal(x):
-            # sort by totals
-            return (0, -stats[x][0])
-        total = 0.
-        for key in stats.keys():
-            if key.find('(') is -1:
-                total += stats[key][0]
-        if self._sorting == SORT_DEFAULT:
-            sortkey = sortCurAvg
-        else:
-            sortkey = sortTotal
+        # print events
         tavg = 0
-        for key in sorted(stats.keys(), key=sortkey):
-            if row >= self.screen.getmaxyx()[0] - 1:
-                break
-            values = stats[key]
-            if not values[0] and not values[1]:
+        tcur = 0
+        for key, values in get_sorted_events(self, stats):
+            if row >= self.screen.getmaxyx()[0] - 1 or values == (0, 0):
                 break
-            if values[0] is not None:
-                cur = int(round(values[1] / sleeptime)) if values[1] else ''
-                if self._display_guests:
-                    key = self.get_gname_from_pid(key)
-                self.screen.addstr(row, 1, '%-40s %10d%7.1f %8s' %
-                                   (key, values[0], values[0] * 100 / total,
-                                    cur))
-                if cur is not '' and key.find('(') is -1:
-                    tavg += cur
+            if self._display_guests:
+                key = self.get_gname_from_pid(key)
+                if not key:
+                    continue
+            cur = int(round(values.delta / sleeptime)) if values.delta else ''
+            if key[0] != ' ':
+                if values.delta:
+                    tcur += values.delta
+                ptotal = values.value
+                ltotal = total
+            else:
+                ltotal = ptotal
+            self.screen.addstr(row, 1, '%-40s %10d%7.1f %8s' % (key,
+                               values.value,
+                               values.value * 100 / float(ltotal), cur))
             row += 1
         if row == 3:
             self.screen.addstr(4, 1, 'No matching events reported yet')
-        else:
+        if row > 4:
+            tavg = int(round(tcur / sleeptime)) if tcur > 0 else ''
             self.screen.addstr(row, 1, '%-40s %10d        %8s' %
-                               ('Total', total, tavg if tavg else ''),
-                               curses.A_BOLD)
+                               ('Total', total, tavg), curses.A_BOLD)
         self.screen.refresh()
 
-    def show_msg(self, text):
+    def _show_msg(self, text):
         """Display message centered text and exit on key press"""
         hint = 'Press any key to continue'
         curses.cbreak()
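
One caveat on the sort keys in the hunk above: def sortkey((_k, v)) relies on Python 2
tuple-parameter unpacking, which PEP 3113 removed in Python 3 (the script is still Python 2 at
this point, cf. the file() call in assign_globals() further down). A Python-3-compatible sketch of
the same ordering, with illustrative sample data:

    from collections import namedtuple

    EventStat = namedtuple('EventStat', ['value', 'delta'])
    stats = {'kvm_exit': EventStat(4711, 12),        # illustrative sample data
             'kvm_entry': EventStat(4710, 9)}

    def sortkey_default(item):                       # sort by (CurAvg/s, Total)
        _key, stat = item
        return (stat.delta, stat.value)

    def sortkey_total(item):                         # sort by Total only
        _key, stat = item
        return stat.value

    parents = sorted(stats.items(), key=sortkey_default, reverse=True)

Child entries are then spliced back in directly after their parent row, as insert_child() above
does.
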
@@ -1139,16 +1227,16 @@ class Tui(object):
                            curses.A_STANDOUT)
         self.screen.getkey()
 
-    def show_help_interactive(self):
+    def _show_help_interactive(self):
         """Display help with list of interactive commands"""
         msg = ('   b     toggle events by guests (debugfs only, honors'
                ' filters)',
                '   c     clear filter',
                '   f     filter by regular expression',
-               '   g     filter by guest name',
+               '   g     filter by guest name/PID',
                '   h     display interactive commands reference',
                '   o     toggle sorting order (Total vs CurAvg/s)',
-               '   p     filter by PID',
+               '   p     filter by guest name/PID',
                '   q     quit',
                '   r     reset stats',
                '   s     set update interval',
@@ -1165,14 +1253,15 @@ class Tui(object):
             self.screen.addstr(row, 0, line)
             row += 1
         self.screen.getkey()
-        self.refresh_header()
+        self._refresh_header()
 
-    def show_filter_selection(self):
+    def _show_filter_selection(self):
         """Draws filter selection mask.
 
         Asks for a valid regex and sets the fields filter accordingly.
 
         """
+        msg = ''
         while True:
             self.screen.erase()
             self.screen.addstr(0, 0,
@@ -1181,61 +1270,25 @@ class Tui(object):
             self.screen.addstr(2, 0,
                                "Current regex: {0}"
                                .format(self.stats.fields_filter))
+            self.screen.addstr(5, 0, msg)
             self.screen.addstr(3, 0, "New regex: ")
             curses.echo()
             regex = self.screen.getstr().decode(ENCODING)
             curses.noecho()
             if len(regex) == 0:
-                self.stats.fields_filter = DEFAULT_REGEX
-                self.refresh_header()
+                self.stats.fields_filter = ''
+                self._refresh_header()
                 return
             try:
                 re.compile(regex)
                 self.stats.fields_filter = regex
-                self.refresh_header()
+                self._refresh_header()
                 return
             except re.error:
+                msg = '"' + regex + '": Not a valid regular expression'
                 continue
 
-    def show_vm_selection_by_pid(self):
-        """Draws PID selection mask.
-
-        Asks for a pid until a valid pid or 0 has been entered.
-
-        """
-        msg = ''
-        while True:
-            self.screen.erase()
-            self.screen.addstr(0, 0,
-                               'Show statistics for specific pid.',
-                               curses.A_BOLD)
-            self.screen.addstr(1, 0,
-                               'This might limit the shown data to the trace '
-                               'statistics.')
-            self.screen.addstr(5, 0, msg)
-            self.print_all_gnames(7)
-
-            curses.echo()
-            self.screen.addstr(3, 0, "Pid [0 or pid]: ")
-            pid = self.screen.getstr().decode(ENCODING)
-            curses.noecho()
-
-            try:
-                if len(pid) > 0:
-                    pid = int(pid)
-                    if pid != 0 and not os.path.isdir(os.path.join('/proc/',
-                                                                   str(pid))):
-                        msg = '"' + str(pid) + '": Not a running process'
-                        continue
-                else:
-                    pid = 0
-                self.refresh_header(pid)
-                self.update_pid(pid)
-                break
-            except ValueError:
-                msg = '"' + str(pid) + '": Not a valid pid'
-
-    def show_set_update_interval(self):
+    def _show_set_update_interval(self):
         """Draws update interval selection mask."""
         msg = ''
         while True:
@@ -1265,60 +1318,67 @@ class Tui(object):
 
             except ValueError:
                 msg = '"' + str(val) + '": Invalid value'
-        self.refresh_header()
+        self._refresh_header()
 
-    def show_vm_selection_by_guest_name(self):
+    def _show_vm_selection_by_guest(self):
         """Draws guest selection mask.
 
-        Asks for a guest name until a valid guest name or '' is entered.
+        Asks for a guest name or pid until a valid guest name or '' is entered.
 
         """
         msg = ''
         while True:
             self.screen.erase()
             self.screen.addstr(0, 0,
-                               'Show statistics for specific guest.',
+                               'Show statistics for specific guest or pid.',
                                curses.A_BOLD)
             self.screen.addstr(1, 0,
                                'This might limit the shown data to the trace '
                                'statistics.')
             self.screen.addstr(5, 0, msg)
-            self.print_all_gnames(7)
+            self._print_all_gnames(7)
             curses.echo()
-            self.screen.addstr(3, 0, "Guest [ENTER or guest]: ")
-            gname = self.screen.getstr().decode(ENCODING)
+            curses.curs_set(1)
+            self.screen.addstr(3, 0, "Guest or pid [ENTER exits]: ")
+            guest = self.screen.getstr().decode(ENCODING)
             curses.noecho()
 
-            if not gname:
-                self.refresh_header(0)
-                self.update_pid(0)
+            pid = 0
+            if not guest or guest == '0':
                 break
-            else:
-                pids = []
-                try:
-                    pids = self.get_pid_from_gname(gname)
-                except:
-                    msg = '"' + gname + '": Internal error while searching, ' \
-                          'use pid filter instead'
-                    continue
-                if len(pids) == 0:
-                    msg = '"' + gname + '": Not an active guest'
+            if guest.isdigit():
+                if not os.path.isdir(os.path.join('/proc/', guest)):
+                    msg = '"' + guest + '": Not a running process'
                     continue
-                if len(pids) > 1:
-                    msg = '"' + gname + '": Multiple matches found, use pid ' \
-                          'filter instead'
-                    continue
-                self.refresh_header(pids[0])
-                self.update_pid(pids[0])
+                pid = int(guest)
                 break
+            pids = []
+            try:
+                pids = self.get_pid_from_gname(guest)
+            except:
+                msg = '"' + guest + '": Internal error while searching, ' \
+                      'use pid filter instead'
+                continue
+            if len(pids) == 0:
+                msg = '"' + guest + '": Not an active guest'
+                continue
+            if len(pids) > 1:
+                msg = '"' + guest + '": Multiple matches found, use pid ' \
+                      'filter instead'
+                continue
+            pid = pids[0]
+            break
+        curses.curs_set(0)
+        self._refresh_header(pid)
+        self._update_pid(pid)
 
     def show_stats(self):
         """Refreshes the screen and processes user input."""
         sleeptime = self._delay_initial
-        self.refresh_header()
+        self._refresh_header()
         start = 0.0  # result based on init value never appears on screen
         while True:
-            self.refresh_body(time.time() - start)
+            self._refresh_body(time.time() - start)
             curses.halfdelay(int(sleeptime * 10))
             start = time.time()
             sleeptime = self._delay_regular
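
The merged 'g'/'p' dialog in the hunk above accepts either a pid or a guest name. Its decision
logic boils down to roughly the following sketch; get_pid_from_gname() is the helper already
present elsewhere in the script, passed in here only to keep the example self-contained:

    import os

    def resolve_guest(entry, get_pid_from_gname):
        """Map a pid-or-name entry to a pid; None means 'try again'."""
        if not entry or entry == '0':
            return 0                                    # clear the filter
        if entry.isdigit():
            if not os.path.isdir(os.path.join('/proc', entry)):
                return None                             # not a running process
            return int(entry)
        pids = get_pid_from_gname(entry)
        if len(pids) != 1:
            return None                                 # unknown or ambiguous guest name
        return pids[0]
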
@@ -1327,47 +1387,39 @@ class Tui(object):
                 if char == 'b':
                     self._display_guests = not self._display_guests
                     if self.stats.toggle_display_guests(self._display_guests):
-                        self.show_msg(['Command not available with tracepoints'
-                                       ' enabled', 'Restart with debugfs only '
-                                       '(see option \'-d\') and try again!'])
+                        self._show_msg(['Command not available with '
+                                        'tracepoints enabled', 'Restart with '
+                                        'debugfs only (see option \'-d\') and '
+                                        'try again!'])
                         self._display_guests = not self._display_guests
-                    self.refresh_header()
+                    self._refresh_header()
                 if char == 'c':
-                    self.stats.fields_filter = DEFAULT_REGEX
-                    self.refresh_header(0)
-                    self.update_pid(0)
+                    self.stats.fields_filter = ''
+                    self._refresh_header(0)
+                    self._update_pid(0)
                 if char == 'f':
                     curses.curs_set(1)
-                    self.show_filter_selection()
+                    self._show_filter_selection()
                     curses.curs_set(0)
                     sleeptime = self._delay_initial
-                if char == 'g':
-                    curses.curs_set(1)
-                    self.show_vm_selection_by_guest_name()
-                    curses.curs_set(0)
+                if char == 'g' or char == 'p':
+                    self._show_vm_selection_by_guest()
                     sleeptime = self._delay_initial
                 if char == 'h':
-                    self.show_help_interactive()
+                    self._show_help_interactive()
                 if char == 'o':
                     self._sorting = not self._sorting
-                if char == 'p':
-                    curses.curs_set(1)
-                    self.show_vm_selection_by_pid()
-                    curses.curs_set(0)
-                    sleeptime = self._delay_initial
                 if char == 'q':
                     break
                 if char == 'r':
                     self.stats.reset()
                 if char == 's':
                     curses.curs_set(1)
-                    self.show_set_update_interval()
+                    self._show_set_update_interval()
                     curses.curs_set(0)
                     sleeptime = self._delay_initial
                 if char == 'x':
-                    self.update_drilldown()
-                    # prevents display of current values on next refresh
-                    self.stats.get(self._display_guests)
+                    self.stats.child_events = not self.stats.child_events
             except KeyboardInterrupt:
                 break
             except curses.error:
@@ -1380,9 +1432,9 @@ def batch(stats):
         s = stats.get()
         time.sleep(1)
         s = stats.get()
-        for key in sorted(s.keys()):
-            values = s[key]
-            print('%-42s%10d%10d' % (key, values[0], values[1]))
+        for key, values in sorted(s.items()):
+            print('%-42s%10d%10d' % (key.split(' ')[0], values.value,
+                  values.delta))
     except KeyboardInterrupt:
         pass
 
@@ -1392,14 +1444,14 @@ def log(stats):
     keys = sorted(stats.get().keys())
 
     def banner():
-        for k in keys:
-            print(k, end=' ')
+        for key in keys:
+            print(key.split(' ')[0], end=' ')
         print()
 
     def statline():
         s = stats.get()
-        for k in keys:
-            print(' %9d' % s[k][1], end=' ')
+        for key in keys:
+            print(' %9d' % s[key].delta, end=' ')
         print()
     line = 0
     banner_repeat = 20
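
Because child events are encoded as '<child> <parent>' keys, batch and log output print only the
child name, which is what the key.split(' ')[0] above is for, e.g. (key is illustrative):

    key = 'kvm_exit(EPT_MISCONFIG) kvm_exit'      # illustrative '<child> <parent>' key
    print('%-42s%10d%10d' % (key.split(' ')[0], 3, 1))
    # prints only the child name, padded to 42 columns, followed by value and delta
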
@@ -1504,7 +1556,7 @@ Press any other key to refresh statistics immediately.
                          )
     optparser.add_option('-f', '--fields',
                          action='store',
-                         default=DEFAULT_REGEX,
+                         default='',
                          dest='fields',
                          help='''fields to display (regex)
                                  "-f help" for a list of available events''',
@@ -1539,17 +1591,6 @@ Press any other key to refresh statistics immediately.
 
 def check_access(options):
     """Exits if the current user can't access all needed directories."""
-    if not os.path.exists('/sys/kernel/debug'):
-        sys.stderr.write('Please enable CONFIG_DEBUG_FS in your kernel.')
-        sys.exit(1)
-
-    if not os.path.exists(PATH_DEBUGFS_KVM):
-        sys.stderr.write("Please make sure, that debugfs is mounted and "
-                         "readable by the current user:\n"
-                         "('mount -t debugfs debugfs /sys/kernel/debug')\n"
-                         "Also ensure, that the kvm modules are loaded.\n")
-        sys.exit(1)
-
     if not os.path.exists(PATH_DEBUGFS_TRACING) and (options.tracepoints or
                                                      not options.debugfs):
         sys.stderr.write("Please enable CONFIG_TRACING in your kernel "
@@ -1567,7 +1608,33 @@ def check_access(options):
     return options
 
 
+def assign_globals():
+    global PATH_DEBUGFS_KVM
+    global PATH_DEBUGFS_TRACING
+
+    debugfs = ''
+    for line in file('/proc/mounts'):
+        if line.split(' ')[0] == 'debugfs':
+            debugfs = line.split(' ')[1]
+            break
+    if debugfs == '':
+        sys.stderr.write("Please make sure that CONFIG_DEBUG_FS is enabled in "
+                         "your kernel, mounted and\nreadable by the current "
+                         "user:\n"
+                         "('mount -t debugfs debugfs /sys/kernel/debug')\n")
+        sys.exit(1)
+
+    PATH_DEBUGFS_KVM = os.path.join(debugfs, 'kvm')
+    PATH_DEBUGFS_TRACING = os.path.join(debugfs, 'tracing')
+
+    if not os.path.exists(PATH_DEBUGFS_KVM):
+        sys.stderr.write("Please make sure that CONFIG_KVM is enabled in "
+                         "your kernel and that the modules are loaded.\n")
+        sys.exit(1)
+
+
 def main():
+    assign_globals()
     options = get_options()
     options = check_access(options)
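
assign_globals() above derives the debugfs mount point from /proc/mounts instead of assuming
/sys/kernel/debug; note that it uses the Python-2-only file() builtin. A Python-3-compatible
sketch of the same lookup (illustrative, not part of the patch):

    import os
    import sys

    def find_debugfs():
        """Return the debugfs mount point, or exit if debugfs is not mounted."""
        with open('/proc/mounts') as mounts:
            for line in mounts:
                fields = line.split(' ')
                if fields[0] == 'debugfs':
                    return fields[1]
        sys.stderr.write("debugfs not mounted; try "
                         "'mount -t debugfs debugfs /sys/kernel/debug'\n")
        sys.exit(1)

    PATH_DEBUGFS_KVM = os.path.join(find_debugfs(), 'kvm')
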
 
index b5b3810..0811d86 100644 (file)
@@ -35,13 +35,13 @@ INTERACTIVE COMMANDS
 
 *f*::  filter by regular expression
 
-*g*::  filter by guest name
+*g*::  filter by guest name/PID
 
 *h*::  display interactive commands reference
 
 *o*::   toggle sorting order (Total vs CurAvg/s)
 
-*p*::  filter by PID
+*p*::  filter by guest name/PID
 
 *q*::  quit
 
index 57254f5..694abc6 100644 (file)
@@ -29,7 +29,7 @@
 #include "builtin.h"
 #include "check.h"
 
-bool no_fp, no_unreachable;
+bool no_fp, no_unreachable, retpoline, module;
 
 static const char * const check_usage[] = {
        "objtool check [<options>] file.o",
@@ -39,6 +39,8 @@ static const char * const check_usage[] = {
 const struct option check_options[] = {
        OPT_BOOLEAN('f', "no-fp", &no_fp, "Skip frame pointer validation"),
        OPT_BOOLEAN('u', "no-unreachable", &no_unreachable, "Skip 'unreachable instruction' warnings"),
+       OPT_BOOLEAN('r', "retpoline", &retpoline, "Validate retpoline assumptions"),
+       OPT_BOOLEAN('m', "module", &module, "Indicates the object will be part of a kernel module"),
        OPT_END(),
 };
 
@@ -53,5 +55,5 @@ int cmd_check(int argc, const char **argv)
 
        objname = argv[0];
 
-       return check(objname, no_fp, no_unreachable, false);
+       return check(objname, false);
 }
index 91e8e19..77ea2b9 100644 (file)
@@ -25,7 +25,6 @@
  */
 
 #include <string.h>
-#include <subcmd/parse-options.h>
 #include "builtin.h"
 #include "check.h"
 
@@ -36,9 +35,6 @@ static const char *orc_usage[] = {
        NULL,
 };
 
-extern const struct option check_options[];
-extern bool no_fp, no_unreachable;
-
 int cmd_orc(int argc, const char **argv)
 {
        const char *objname;
@@ -54,7 +50,7 @@ int cmd_orc(int argc, const char **argv)
 
                objname = argv[0];
 
-               return check(objname, no_fp, no_unreachable, true);
+               return check(objname, true);
        }
 
        if (!strcmp(argv[0], "dump")) {
index dd52606..28ff40e 100644 (file)
 #ifndef _BUILTIN_H
 #define _BUILTIN_H
 
+#include <subcmd/parse-options.h>
+
+extern const struct option check_options[];
+extern bool no_fp, no_unreachable, retpoline, module;
+
 extern int cmd_check(int argc, const char **argv);
 extern int cmd_orc(int argc, const char **argv);
 
index a8cb69a..46c1d23 100644 (file)
@@ -18,6 +18,7 @@
 #include <string.h>
 #include <stdlib.h>
 
+#include "builtin.h"
 #include "check.h"
 #include "elf.h"
 #include "special.h"
@@ -33,7 +34,6 @@ struct alternative {
 };
 
 const char *objname;
-static bool no_fp;
 struct cfi_state initial_func_cfi;
 
 struct instruction *find_insn(struct objtool_file *file,
@@ -497,6 +497,7 @@ static int add_jump_destinations(struct objtool_file *file)
                         * disguise, so convert them accordingly.
                         */
                        insn->type = INSN_JUMP_DYNAMIC;
+                       insn->retpoline_safe = true;
                        continue;
                } else {
                        /* sibling call */
@@ -548,7 +549,8 @@ static int add_call_destinations(struct objtool_file *file)
                        if (!insn->call_dest && !insn->ignore) {
                                WARN_FUNC("unsupported intra-function call",
                                          insn->sec, insn->offset);
-                               WARN("If this is a retpoline, please patch it in with alternatives and annotate it with ANNOTATE_NOSPEC_ALTERNATIVE.");
+                               if (retpoline)
+                                       WARN("If this is a retpoline, please patch it in with alternatives and annotate it with ANNOTATE_NOSPEC_ALTERNATIVE.");
                                return -1;
                        }
 
@@ -923,7 +925,11 @@ static struct rela *find_switch_table(struct objtool_file *file,
                if (find_symbol_containing(file->rodata, text_rela->addend))
                        continue;
 
-               return find_rela_by_dest(file->rodata, text_rela->addend);
+               rodata_rela = find_rela_by_dest(file->rodata, text_rela->addend);
+               if (!rodata_rela)
+                       continue;
+
+               return rodata_rela;
        }
 
        return NULL;
@@ -1108,6 +1114,54 @@ static int read_unwind_hints(struct objtool_file *file)
        return 0;
 }
 
+static int read_retpoline_hints(struct objtool_file *file)
+{
+       struct section *sec, *relasec;
+       struct instruction *insn;
+       struct rela *rela;
+       int i;
+
+       sec = find_section_by_name(file->elf, ".discard.retpoline_safe");
+       if (!sec)
+               return 0;
+
+       relasec = sec->rela;
+       if (!relasec) {
+               WARN("missing .rela.discard.retpoline_safe section");
+               return -1;
+       }
+
+       if (sec->len % sizeof(unsigned long)) {
+               WARN("retpoline_safe size mismatch: %d %ld", sec->len, sizeof(unsigned long));
+               return -1;
+       }
+
+       for (i = 0; i < sec->len / sizeof(unsigned long); i++) {
+               rela = find_rela_by_dest(sec, i * sizeof(unsigned long));
+               if (!rela) {
+                       WARN("can't find rela for retpoline_safe[%d]", i);
+                       return -1;
+               }
+
+               insn = find_insn(file, rela->sym->sec, rela->addend);
+               if (!insn) {
+                       WARN("can't find insn for retpoline_safe[%d]", i);
+                       return -1;
+               }
+
+               if (insn->type != INSN_JUMP_DYNAMIC &&
+                   insn->type != INSN_CALL_DYNAMIC) {
+                       WARN_FUNC("retpoline_safe hint not an indirect jump/call",
+                                 insn->sec, insn->offset);
+                       return -1;
+               }
+
+               insn->retpoline_safe = true;
+       }
+
+       return 0;
+}
+
 static int decode_sections(struct objtool_file *file)
 {
        int ret;
@@ -1146,6 +1200,10 @@ static int decode_sections(struct objtool_file *file)
        if (ret)
                return ret;
 
+       ret = read_retpoline_hints(file);
+       if (ret)
+               return ret;
+
        return 0;
 }
 
@@ -1891,6 +1949,38 @@ static int validate_unwind_hints(struct objtool_file *file)
        return warnings;
 }
 
+static int validate_retpoline(struct objtool_file *file)
+{
+       struct instruction *insn;
+       int warnings = 0;
+
+       for_each_insn(file, insn) {
+               if (insn->type != INSN_JUMP_DYNAMIC &&
+                   insn->type != INSN_CALL_DYNAMIC)
+                       continue;
+
+               if (insn->retpoline_safe)
+                       continue;
+
+               /*
+                * .init.text code is run before userspace and thus doesn't
+                * strictly need retpolines, except for modules, which are
+                * loaded late and very much do need retpolines in their
+                * .init.text.
+                */
+               if (!strcmp(insn->sec->name, ".init.text") && !module)
+                       continue;
+
+               WARN_FUNC("indirect %s found in RETPOLINE build",
+                         insn->sec, insn->offset,
+                         insn->type == INSN_JUMP_DYNAMIC ? "jump" : "call");
+
+               warnings++;
+       }
+
+       return warnings;
+}
+
 static bool is_kasan_insn(struct instruction *insn)
 {
        return (insn->type == INSN_CALL &&
@@ -2022,13 +2112,12 @@ static void cleanup(struct objtool_file *file)
        elf_close(file->elf);
 }
 
-int check(const char *_objname, bool _no_fp, bool no_unreachable, bool orc)
+int check(const char *_objname, bool orc)
 {
        struct objtool_file file;
        int ret, warnings = 0;
 
        objname = _objname;
-       no_fp = _no_fp;
 
        file.elf = elf_open(objname, orc ? O_RDWR : O_RDONLY);
        if (!file.elf)
@@ -2052,6 +2141,13 @@ int check(const char *_objname, bool _no_fp, bool no_unreachable, bool orc)
        if (list_empty(&file.insn_list))
                goto out;
 
+       if (retpoline) {
+               ret = validate_retpoline(&file);
+               if (ret < 0)
+                       return ret;
+               warnings += ret;
+       }
+
        ret = validate_functions(&file);
        if (ret < 0)
                goto out;
index 23a1d06..c6b68fc 100644 (file)
@@ -45,6 +45,7 @@ struct instruction {
        unsigned char type;
        unsigned long immediate;
        bool alt_group, visited, dead_end, ignore, hint, save, restore, ignore_alts;
+       bool retpoline_safe;
        struct symbol *call_dest;
        struct instruction *jump_dest;
        struct instruction *first_jump_src;
@@ -63,7 +64,7 @@ struct objtool_file {
        bool ignore_unreachables, c_file, hints;
 };
 
-int check(const char *objname, bool no_fp, bool no_unreachable, bool orc);
+int check(const char *objname, bool orc);
 
 struct instruction *find_insn(struct objtool_file *file,
                              struct section *sec, unsigned long offset);
index 44ef9eb..6c645eb 100644 (file)
@@ -178,6 +178,55 @@ void idr_get_next_test(int base)
        idr_destroy(&idr);
 }
 
+int idr_u32_cb(int id, void *ptr, void *data)
+{
+       BUG_ON(id < 0);
+       BUG_ON(ptr != DUMMY_PTR);
+       return 0;
+}
+
+void idr_u32_test1(struct idr *idr, u32 handle)
+{
+       static bool warned = false;
+       u32 id = handle;
+       int sid = 0;
+       void *ptr;
+
+       BUG_ON(idr_alloc_u32(idr, DUMMY_PTR, &id, id, GFP_KERNEL));
+       BUG_ON(id != handle);
+       BUG_ON(idr_alloc_u32(idr, DUMMY_PTR, &id, id, GFP_KERNEL) != -ENOSPC);
+       BUG_ON(id != handle);
+       if (!warned && id > INT_MAX)
+               printk("vvv Ignore these warnings\n");
+       ptr = idr_get_next(idr, &sid);
+       if (id > INT_MAX) {
+               BUG_ON(ptr != NULL);
+               BUG_ON(sid != 0);
+       } else {
+               BUG_ON(ptr != DUMMY_PTR);
+               BUG_ON(sid != id);
+       }
+       idr_for_each(idr, idr_u32_cb, NULL);
+       if (!warned && id > INT_MAX) {
+               printk("^^^ Warnings over\n");
+               warned = true;
+       }
+       BUG_ON(idr_remove(idr, id) != DUMMY_PTR);
+       BUG_ON(!idr_is_empty(idr));
+}
+
+void idr_u32_test(int base)
+{
+       DEFINE_IDR(idr);
+       idr_init_base(&idr, base);
+       idr_u32_test1(&idr, 10);
+       idr_u32_test1(&idr, 0x7fffffff);
+       idr_u32_test1(&idr, 0x80000000);
+       idr_u32_test1(&idr, 0x80000001);
+       idr_u32_test1(&idr, 0xffe00000);
+       idr_u32_test1(&idr, 0xffffffff);
+}
+
 void idr_checks(void)
 {
        unsigned long i;
@@ -248,6 +297,9 @@ void idr_checks(void)
        idr_get_next_test(0);
        idr_get_next_test(1);
        idr_get_next_test(4);
+       idr_u32_test(4);
+       idr_u32_test(1);
+       idr_u32_test(0);
 }
 
 /*
index 6903ccf..44a0d1a 100644 (file)
@@ -29,7 +29,7 @@ void *kmem_cache_alloc(struct kmem_cache *cachep, int flags)
 {
        struct radix_tree_node *node;
 
-       if (flags & __GFP_NOWARN)
+       if (!(flags & __GFP_DIRECT_RECLAIM))
                return NULL;
 
        pthread_mutex_lock(&cachep->lock);
@@ -73,10 +73,17 @@ void kmem_cache_free(struct kmem_cache *cachep, void *objp)
 
 void *kmalloc(size_t size, gfp_t gfp)
 {
-       void *ret = malloc(size);
+       void *ret;
+
+       if (!(gfp & __GFP_DIRECT_RECLAIM))
+               return NULL;
+
+       ret = malloc(size);
        uatomic_inc(&nr_allocated);
        if (kmalloc_verbose)
                printf("Allocating %p from malloc\n", ret);
+       if (gfp & __GFP_ZERO)
+               memset(ret, 0, size);
        return ret;
 }
 
diff --git a/tools/testing/radix-tree/linux/compiler_types.h b/tools/testing/radix-tree/linux/compiler_types.h
new file mode 100644 (file)
index 0000000..e69de29
index e9fff59..e3201cc 100644 (file)
@@ -11,6 +11,7 @@
 #define __GFP_IO               0x40u
 #define __GFP_FS               0x80u
 #define __GFP_NOWARN           0x200u
+#define __GFP_ZERO             0x8000u
 #define __GFP_ATOMIC           0x80000u
 #define __GFP_ACCOUNT          0x100000u
 #define __GFP_DIRECT_RECLAIM   0x400000u
index 979baee..a037def 100644 (file)
@@ -3,6 +3,7 @@
 #define SLAB_H
 
 #include <linux/types.h>
+#include <linux/gfp.h>
 
 #define SLAB_HWCACHE_ALIGN 1
 #define SLAB_PANIC 2
 void *kmalloc(size_t size, gfp_t);
 void kfree(void *);
 
+static inline void *kzalloc(size_t size, gfp_t gfp)
+{
+        return kmalloc(size, gfp | __GFP_ZERO);
+}
+
 void *kmem_cache_alloc(struct kmem_cache *cachep, int flags);
 void kmem_cache_free(struct kmem_cache *cachep, void *objp);
 
index 1a74922..f6304d2 100644 (file)
@@ -11,11 +11,11 @@ all:
                BUILD_TARGET=$(OUTPUT)/$$DIR;   \
                mkdir $$BUILD_TARGET  -p;       \
                make OUTPUT=$$BUILD_TARGET -C $$DIR $@;\
-               #SUBDIR test prog name should be in the form: SUBDIR_test.sh
+               #SUBDIR test prog name should be in the form: SUBDIR_test.sh \
                TEST=$$DIR"_test.sh"; \
-               if [ -e $$DIR/$$TEST ]; then
-                       rsync -a $$DIR/$$TEST $$BUILD_TARGET/;
-               fi
+               if [ -e $$DIR/$$TEST ]; then \
+                       rsync -a $$DIR/$$TEST $$BUILD_TARGET/; \
+               fi \
        done
 
 override define RUN_TESTS
index 9eb05f3..86d7ff4 100644 (file)
@@ -11311,7 +11311,64 @@ static struct bpf_test tests[] = {
                .result = ACCEPT,
                .retval = 2,
        },
-
+       {
+               "xadd/w check unaligned stack",
+               .insns = {
+                       BPF_MOV64_IMM(BPF_REG_0, 1),
+                       BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
+                       BPF_STX_XADD(BPF_W, BPF_REG_10, BPF_REG_0, -7),
+                       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
+                       BPF_EXIT_INSN(),
+               },
+               .result = REJECT,
+               .errstr = "misaligned stack access off",
+               .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+       },
+       {
+               "xadd/w check unaligned map",
+               .insns = {
+                       BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+                                    BPF_FUNC_map_lookup_elem),
+                       BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+                       BPF_EXIT_INSN(),
+                       BPF_MOV64_IMM(BPF_REG_1, 1),
+                       BPF_STX_XADD(BPF_W, BPF_REG_0, BPF_REG_1, 3),
+                       BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, 3),
+                       BPF_EXIT_INSN(),
+               },
+               .fixup_map1 = { 3 },
+               .result = REJECT,
+               .errstr = "misaligned value access off",
+               .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+       },
+       {
+               "xadd/w check unaligned pkt",
+               .insns = {
+                       BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+                                   offsetof(struct xdp_md, data)),
+                       BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+                                   offsetof(struct xdp_md, data_end)),
+                       BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+                       BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 2),
+                       BPF_MOV64_IMM(BPF_REG_0, 99),
+                       BPF_JMP_IMM(BPF_JA, 0, 0, 6),
+                       BPF_MOV64_IMM(BPF_REG_0, 1),
+                       BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
+                       BPF_ST_MEM(BPF_W, BPF_REG_2, 3, 0),
+                       BPF_STX_XADD(BPF_W, BPF_REG_2, BPF_REG_0, 1),
+                       BPF_STX_XADD(BPF_W, BPF_REG_2, BPF_REG_0, 2),
+                       BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_2, 1),
+                       BPF_EXIT_INSN(),
+               },
+               .result = REJECT,
+               .errstr = "BPF_XADD stores into R2 packet",
+               .prog_type = BPF_PROG_TYPE_XDP,
+       },
 };
 
 static int probe_filter_length(const struct bpf_insn *fp)
index cea4adc..a63e845 100644 (file)
@@ -12,9 +12,9 @@ all:
                BUILD_TARGET=$(OUTPUT)/$$DIR;   \
                mkdir $$BUILD_TARGET  -p;       \
                make OUTPUT=$$BUILD_TARGET -C $$DIR $@;\
-               if [ -e $$DIR/$(TEST_PROGS) ]; then
-                       rsync -a $$DIR/$(TEST_PROGS) $$BUILD_TARGET/;
-               fi
+               if [ -e $$DIR/$(TEST_PROGS) ]; then \
+                       rsync -a $$DIR/$(TEST_PROGS) $$BUILD_TARGET/; \
+               fi \
        done
 
 override define RUN_TESTS
diff --git a/tools/testing/selftests/memfd/config b/tools/testing/selftests/memfd/config
new file mode 100644 (file)
index 0000000..835c7f4
--- /dev/null
@@ -0,0 +1 @@
+CONFIG_FUSE_FS=m
index 86636d2..686da51 100644 (file)
@@ -4,8 +4,9 @@ all:
 include ../lib.mk
 
 TEST_PROGS := mem-on-off-test.sh
-override RUN_TESTS := ./mem-on-off-test.sh -r 2 && echo "selftests: memory-hotplug [PASS]" || echo "selftests: memory-hotplug [FAIL]"
-override EMIT_TESTS := echo "$(RUN_TESTS)"
+override RUN_TESTS := @./mem-on-off-test.sh -r 2 && echo "selftests: memory-hotplug [PASS]" || echo "selftests: memory-hotplug [FAIL]"
+
+override EMIT_TESTS := echo "$(subst @,,$(RUN_TESTS))"
 
 run_full_test:
        @/bin/bash ./mem-on-off-test.sh && echo "memory-hotplug selftests: [PASS]" || echo "memory-hotplug selftests: [FAIL]"
index 6a8e5a9..d148f9f 100644 (file)
@@ -2,3 +2,4 @@ CONFIG_MISC_FILESYSTEMS=y
 CONFIG_PSTORE=y
 CONFIG_PSTORE_PMSG=y
 CONFIG_PSTORE_CONSOLE=y
+CONFIG_PSTORE_RAM=m
index b3c8ba3..d0121a8 100644 (file)
@@ -30,7 +30,7 @@ $(TEST_CUSTOM_PROGS): $(TESTS) $(OBJS)
        $(CC) -o $(TEST_CUSTOM_PROGS) $(OBJS) $(TESTS) $(CFLAGS) $(LDFLAGS)
 
 $(OBJS): $(OUTPUT)/%.o: %.c
-       $(CC) -c $^ -o $@
+       $(CC) -c $^ -o $@ $(CFLAGS)
 
 $(TESTS): $(OUTPUT)/%.o: %.c
        $(CC) -c $^ -o $@
index e340750..90bba48 100644 (file)
         "cmdUnderTest": "$TC actions ls action skbmod",
         "expExitCode": "0",
         "verifyCmd": "$TC actions get action skbmod index 4",
-        "matchPattern": "action order [0-9]*: skbmod pipe set etype 0x0031",
+        "matchPattern": "action order [0-9]*: skbmod pipe set etype 0x31",
         "matchCount": "1",
         "teardown": [
             "$TC actions flush action skbmod"
index 3d5a62f..f5d7a78 100644 (file)
@@ -1,4 +1,6 @@
 # SPDX-License-Identifier: GPL-2.0
+include ../lib.mk
+
 ifndef CROSS_COMPILE
 CFLAGS := -std=gnu99
 CFLAGS_vdso_standalone_test_x86 := -nostdlib -fno-asynchronous-unwind-tables -fno-stack-protector
@@ -6,16 +8,14 @@ ifeq ($(CONFIG_X86_32),y)
 LDLIBS += -lgcc_s
 endif
 
-TEST_PROGS := vdso_test vdso_standalone_test_x86
+TEST_PROGS := $(OUTPUT)/vdso_test $(OUTPUT)/vdso_standalone_test_x86
 
 all: $(TEST_PROGS)
-vdso_test: parse_vdso.c vdso_test.c
-vdso_standalone_test_x86: vdso_standalone_test_x86.c parse_vdso.c
+$(OUTPUT)/vdso_test: parse_vdso.c vdso_test.c
+$(OUTPUT)/vdso_standalone_test_x86: vdso_standalone_test_x86.c parse_vdso.c
        $(CC) $(CFLAGS) $(CFLAGS_vdso_standalone_test_x86) \
                vdso_standalone_test_x86.c parse_vdso.c \
-               -o vdso_standalone_test_x86
+               -o $@
 
-include ../lib.mk
-clean:
-       rm -fr $(TEST_PROGS)
+EXTRA_CLEAN := $(TEST_PROGS)
 endif
index 63c94d7..342c7bc 100644 (file)
@@ -11,3 +11,4 @@ mlock-intersect-test
 mlock-random-test
 virtual_address_range
 gup_benchmark
+va_128TBswitch
index 70268c0..70f4c30 100644 (file)
@@ -36,6 +36,8 @@ static struct timecounter *timecounter;
 static unsigned int host_vtimer_irq;
 static u32 host_vtimer_irq_flags;
 
+static DEFINE_STATIC_KEY_FALSE(has_gic_active_state);
+
 static const struct kvm_irq_level default_ptimer_irq = {
        .irq    = 30,
        .level  = 1,
@@ -56,6 +58,12 @@ u64 kvm_phys_timer_read(void)
        return timecounter->cc->read(timecounter->cc);
 }
 
+static inline bool userspace_irqchip(struct kvm *kvm)
+{
+       return static_branch_unlikely(&userspace_irqchip_in_use) &&
+               unlikely(!irqchip_in_kernel(kvm));
+}
+
 static void soft_timer_start(struct hrtimer *hrt, u64 ns)
 {
        hrtimer_start(hrt, ktime_add_ns(ktime_get(), ns),
@@ -69,25 +77,6 @@ static void soft_timer_cancel(struct hrtimer *hrt, struct work_struct *work)
                cancel_work_sync(work);
 }
 
-static void kvm_vtimer_update_mask_user(struct kvm_vcpu *vcpu)
-{
-       struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
-
-       /*
-        * When using a userspace irqchip with the architected timers, we must
-        * prevent continuously exiting from the guest, and therefore mask the
-        * physical interrupt by disabling it on the host interrupt controller
-        * when the virtual level is high, such that the guest can make
-        * forward progress.  Once we detect the output level being
-        * de-asserted, we unmask the interrupt again so that we exit from the
-        * guest when the timer fires.
-        */
-       if (vtimer->irq.level)
-               disable_percpu_irq(host_vtimer_irq);
-       else
-               enable_percpu_irq(host_vtimer_irq, 0);
-}
-
 static irqreturn_t kvm_arch_timer_handler(int irq, void *dev_id)
 {
        struct kvm_vcpu *vcpu = *(struct kvm_vcpu **)dev_id;
@@ -106,9 +95,9 @@ static irqreturn_t kvm_arch_timer_handler(int irq, void *dev_id)
        if (kvm_timer_should_fire(vtimer))
                kvm_timer_update_irq(vcpu, true, vtimer);
 
-       if (static_branch_unlikely(&userspace_irqchip_in_use) &&
-           unlikely(!irqchip_in_kernel(vcpu->kvm)))
-               kvm_vtimer_update_mask_user(vcpu);
+       if (userspace_irqchip(vcpu->kvm) &&
+           !static_branch_unlikely(&has_gic_active_state))
+               disable_percpu_irq(host_vtimer_irq);
 
        return IRQ_HANDLED;
 }
@@ -290,8 +279,7 @@ static void kvm_timer_update_irq(struct kvm_vcpu *vcpu, bool new_level,
        trace_kvm_timer_update_irq(vcpu->vcpu_id, timer_ctx->irq.irq,
                                   timer_ctx->irq.level);
 
-       if (!static_branch_unlikely(&userspace_irqchip_in_use) ||
-           likely(irqchip_in_kernel(vcpu->kvm))) {
+       if (!userspace_irqchip(vcpu->kvm)) {
                ret = kvm_vgic_inject_irq(vcpu->kvm, vcpu->vcpu_id,
                                          timer_ctx->irq.irq,
                                          timer_ctx->irq.level,
@@ -350,12 +338,6 @@ static void kvm_timer_update_state(struct kvm_vcpu *vcpu)
        phys_timer_emulate(vcpu);
 }
 
-static void __timer_snapshot_state(struct arch_timer_context *timer)
-{
-       timer->cnt_ctl = read_sysreg_el0(cntv_ctl);
-       timer->cnt_cval = read_sysreg_el0(cntv_cval);
-}
-
 static void vtimer_save_state(struct kvm_vcpu *vcpu)
 {
        struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
@@ -367,8 +349,10 @@ static void vtimer_save_state(struct kvm_vcpu *vcpu)
        if (!vtimer->loaded)
                goto out;
 
-       if (timer->enabled)
-               __timer_snapshot_state(vtimer);
+       if (timer->enabled) {
+               vtimer->cnt_ctl = read_sysreg_el0(cntv_ctl);
+               vtimer->cnt_cval = read_sysreg_el0(cntv_cval);
+       }
 
        /* Disable the virtual timer */
        write_sysreg_el0(0, cntv_ctl);
@@ -460,23 +444,43 @@ static void set_cntvoff(u64 cntvoff)
        kvm_call_hyp(__kvm_timer_set_cntvoff, low, high);
 }
 
-static void kvm_timer_vcpu_load_vgic(struct kvm_vcpu *vcpu)
+static inline void set_vtimer_irq_phys_active(struct kvm_vcpu *vcpu, bool active)
+{
+       int r;
+       r = irq_set_irqchip_state(host_vtimer_irq, IRQCHIP_STATE_ACTIVE, active);
+       WARN_ON(r);
+}
+
+static void kvm_timer_vcpu_load_gic(struct kvm_vcpu *vcpu)
 {
        struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
        bool phys_active;
-       int ret;
 
-       phys_active = kvm_vgic_map_is_active(vcpu, vtimer->irq.irq);
-
-       ret = irq_set_irqchip_state(host_vtimer_irq,
-                                   IRQCHIP_STATE_ACTIVE,
-                                   phys_active);
-       WARN_ON(ret);
+       if (irqchip_in_kernel(vcpu->kvm))
+               phys_active = kvm_vgic_map_is_active(vcpu, vtimer->irq.irq);
+       else
+               phys_active = vtimer->irq.level;
+       set_vtimer_irq_phys_active(vcpu, phys_active);
 }
 
-static void kvm_timer_vcpu_load_user(struct kvm_vcpu *vcpu)
+static void kvm_timer_vcpu_load_nogic(struct kvm_vcpu *vcpu)
 {
-       kvm_vtimer_update_mask_user(vcpu);
+       struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
+
+       /*
+        * When using a userspace irqchip with the architected timers and a
+        * host interrupt controller that doesn't support an active state, we
+        * must still prevent continuously exiting from the guest, and
+        * therefore mask the physical interrupt by disabling it on the host
+        * interrupt controller when the virtual level is high, such that the
+        * guest can make forward progress.  Once we detect the output level
+        * being de-asserted, we unmask the interrupt again so that we exit
+        * from the guest when the timer fires.
+        */
+       if (vtimer->irq.level)
+               disable_percpu_irq(host_vtimer_irq);
+       else
+               enable_percpu_irq(host_vtimer_irq, host_vtimer_irq_flags);
 }
 
 void kvm_timer_vcpu_load(struct kvm_vcpu *vcpu)
@@ -487,10 +491,10 @@ void kvm_timer_vcpu_load(struct kvm_vcpu *vcpu)
        if (unlikely(!timer->enabled))
                return;
 
-       if (unlikely(!irqchip_in_kernel(vcpu->kvm)))
-               kvm_timer_vcpu_load_user(vcpu);
+       if (static_branch_likely(&has_gic_active_state))
+               kvm_timer_vcpu_load_gic(vcpu);
        else
-               kvm_timer_vcpu_load_vgic(vcpu);
+               kvm_timer_vcpu_load_nogic(vcpu);
 
        set_cntvoff(vtimer->cntvoff);
 
@@ -555,18 +559,24 @@ static void unmask_vtimer_irq_user(struct kvm_vcpu *vcpu)
 {
        struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
 
-       if (unlikely(!irqchip_in_kernel(vcpu->kvm))) {
-               __timer_snapshot_state(vtimer);
-               if (!kvm_timer_should_fire(vtimer)) {
-                       kvm_timer_update_irq(vcpu, false, vtimer);
-                       kvm_vtimer_update_mask_user(vcpu);
-               }
+       if (!kvm_timer_should_fire(vtimer)) {
+               kvm_timer_update_irq(vcpu, false, vtimer);
+               if (static_branch_likely(&has_gic_active_state))
+                       set_vtimer_irq_phys_active(vcpu, false);
+               else
+                       enable_percpu_irq(host_vtimer_irq, host_vtimer_irq_flags);
        }
 }
 
 void kvm_timer_sync_hwstate(struct kvm_vcpu *vcpu)
 {
-       unmask_vtimer_irq_user(vcpu);
+       struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
+
+       if (unlikely(!timer->enabled))
+               return;
+
+       if (unlikely(!irqchip_in_kernel(vcpu->kvm)))
+               unmask_vtimer_irq_user(vcpu);
 }
 
 int kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu)
@@ -753,6 +763,8 @@ int kvm_timer_hyp_init(bool has_gic)
                        kvm_err("kvm_arch_timer: error setting vcpu affinity\n");
                        goto out_free_irq;
                }
+
+               static_branch_enable(&has_gic_active_state);
        }
 
        kvm_info("virtual timer IRQ%d\n", host_vtimer_irq);
index 4501e65..65dea3f 100644 (file)
@@ -969,8 +969,7 @@ int __kvm_set_memory_region(struct kvm *kvm,
                /* Check for overlaps */
                r = -EEXIST;
                kvm_for_each_memslot(slot, __kvm_memslots(kvm, as_id)) {
-                       if ((slot->id >= KVM_USER_MEM_SLOTS) ||
-                           (slot->id == id))
+                       if (slot->id == id)
                                continue;
                        if (!((base_gfn + npages <= slot->base_gfn) ||
                              (base_gfn >= slot->base_gfn + slot->npages)))