Merge tag 'drm-misc-fixes-2022-06-16' of git://anongit.freedesktop.org/drm/drm-misc...
author Dave Airlie <airlied@redhat.com>
Thu, 16 Jun 2022 23:31:22 +0000 (09:31 +1000)
committer Dave Airlie <airlied@redhat.com>
Thu, 16 Jun 2022 23:31:37 +0000 (09:31 +1000)
Two fixes for TTM, one for a NULL pointer dereference and one to make sure
the buffer is pinned prior to a bulk move, and a fix for a spurious
compiler warning.

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Maxime Ripard <maxime@cerno.tech>
Link: https://patchwork.freedesktop.org/patch/msgid/20220616072519.qwrsefsemejefowu@houat
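
For illustration of the bulk-move fix summarized above (a sketch, not the
patch itself -- the function name and the -EINVAL choice are assumptions,
though the TTM helpers exist as of v5.19): pinned buffers must not take
part in LRU bulk moves, so attaching one warrants a guard of roughly this
shape:

	#include <linux/dma-resv.h>
	#include <drm/ttm/ttm_bo_api.h>

	static int example_attach_bulk_move(struct ttm_buffer_object *bo,
					    struct ttm_lru_bulk_move *bulk)
	{
		dma_resv_assert_held(bo->base.resv);

		/* Pinned buffers must stay off the LRU bulk move. */
		if (bo->pin_count)
			return -EINVAL;

		ttm_bo_set_bulk_move(bo, bulk);
		return 0;
	}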
426 files changed:
Documentation/ABI/testing/sysfs-ata
Documentation/ABI/testing/sysfs-driver-bd9571mwv-regulator
Documentation/arm/tcm.rst
Documentation/arm64/sme.rst
Documentation/devicetree/bindings/clock/idt,versaclock5.yaml
Documentation/devicetree/bindings/cpufreq/brcm,stb-avs-cpu-freq.txt
Documentation/devicetree/bindings/display/arm,malidp.yaml
Documentation/devicetree/bindings/display/msm/dpu-sc7180.yaml
Documentation/devicetree/bindings/display/msm/dpu-sc7280.yaml
Documentation/devicetree/bindings/display/msm/dpu-sdm845.yaml
Documentation/devicetree/bindings/display/msm/dsi-controller-main.yaml
Documentation/devicetree/bindings/display/msm/dsi-phy-10nm.yaml
Documentation/devicetree/bindings/display/msm/dsi-phy-14nm.yaml
Documentation/devicetree/bindings/display/msm/dsi-phy-20nm.yaml
Documentation/devicetree/bindings/display/msm/dsi-phy-28nm.yaml
Documentation/devicetree/bindings/display/msm/dsi-phy-common.yaml
Documentation/devicetree/bindings/hwmon/vexpress.txt
Documentation/devicetree/bindings/iommu/xen,grant-dma.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/memory-controllers/nvidia,tegra186-mc.yaml
Documentation/devicetree/bindings/mfd/maxim,max77714.yaml
Documentation/devicetree/bindings/mmc/brcm,sdhci-brcmstb.yaml
Documentation/devicetree/bindings/mmc/marvell,xenon-sdhci.yaml
Documentation/devicetree/bindings/nvme/apple,nvme-ans.yaml
Documentation/devicetree/bindings/phy/phy-stih407-usb.txt
Documentation/devicetree/bindings/phy/qcom,qmp-usb3-dp-phy.yaml
Documentation/devicetree/bindings/phy/qcom,qusb2-phy.yaml
Documentation/devicetree/bindings/phy/qcom,usb-snps-femto-v2.yaml
Documentation/devicetree/bindings/pinctrl/pinctrl-rk805.txt
Documentation/devicetree/bindings/pinctrl/ralink,mt7620-pinctrl.yaml
Documentation/devicetree/bindings/pinctrl/ralink,rt305x-pinctrl.yaml
Documentation/devicetree/bindings/power/supply/maxim,max77976.yaml
Documentation/devicetree/bindings/regulator/qcom,usb-vbus-regulator.yaml
Documentation/devicetree/bindings/regulator/vexpress.txt
Documentation/devicetree/bindings/usb/dwc3-st.txt
Documentation/devicetree/bindings/usb/ehci-st.txt
Documentation/devicetree/bindings/usb/ohci-st.txt
Documentation/devicetree/bindings/usb/qcom,dwc3.yaml
Documentation/devicetree/bindings/vendor-prefixes.yaml
Documentation/devicetree/bindings/watchdog/allwinner,sun4i-a10-wdt.yaml
Documentation/driver-api/hte/hte.rst [new file with mode: 0644]
Documentation/driver-api/hte/index.rst [new file with mode: 0644]
Documentation/driver-api/hte/tegra194-hte.rst [new file with mode: 0644]
Documentation/driver-api/index.rst
Documentation/features/core/cBPF-JIT/arch-support.txt
Documentation/features/core/eBPF-JIT/arch-support.txt
Documentation/features/core/generic-idle-thread/arch-support.txt
Documentation/features/core/jump-labels/arch-support.txt
Documentation/features/core/thread-info-in-task/arch-support.txt
Documentation/features/core/tracehook/arch-support.txt
Documentation/features/debug/KASAN/arch-support.txt
Documentation/features/debug/debug-vm-pgtable/arch-support.txt
Documentation/features/debug/gcov-profile-all/arch-support.txt
Documentation/features/debug/kcov/arch-support.txt
Documentation/features/debug/kgdb/arch-support.txt
Documentation/features/debug/kmemleak/arch-support.txt
Documentation/features/debug/kprobes-on-ftrace/arch-support.txt
Documentation/features/debug/kprobes/arch-support.txt
Documentation/features/debug/kretprobes/arch-support.txt
Documentation/features/debug/optprobes/arch-support.txt
Documentation/features/debug/stackprotector/arch-support.txt
Documentation/features/debug/uprobes/arch-support.txt
Documentation/features/debug/user-ret-profiler/arch-support.txt
Documentation/features/io/dma-contiguous/arch-support.txt
Documentation/features/locking/cmpxchg-local/arch-support.txt
Documentation/features/locking/lockdep/arch-support.txt
Documentation/features/locking/queued-rwlocks/arch-support.txt
Documentation/features/locking/queued-spinlocks/arch-support.txt
Documentation/features/perf/kprobes-event/arch-support.txt
Documentation/features/perf/perf-regs/arch-support.txt
Documentation/features/perf/perf-stackdump/arch-support.txt
Documentation/features/sched/membarrier-sync-core/arch-support.txt
Documentation/features/sched/numa-balancing/arch-support.txt
Documentation/features/seccomp/seccomp-filter/arch-support.txt
Documentation/features/time/arch-tick-broadcast/arch-support.txt
Documentation/features/time/clockevents/arch-support.txt
Documentation/features/time/context-tracking/arch-support.txt
Documentation/features/time/irq-time-acct/arch-support.txt
Documentation/features/time/virt-cpuacct/arch-support.txt
Documentation/features/vm/ELF-ASLR/arch-support.txt
Documentation/features/vm/PG_uncached/arch-support.txt
Documentation/features/vm/THP/arch-support.txt
Documentation/features/vm/TLB/arch-support.txt
Documentation/features/vm/huge-vmap/arch-support.txt
Documentation/features/vm/ioremap_prot/arch-support.txt
Documentation/features/vm/pte_special/arch-support.txt
Documentation/filesystems/netfs_library.rst
Documentation/hte/hte.rst [deleted file]
Documentation/hte/index.rst [deleted file]
Documentation/hte/tegra194-hte.rst [deleted file]
Documentation/index.rst
Documentation/process/changes.rst
Documentation/usb/usbmon.rst
MAINTAINERS
Makefile
arch/arm/include/asm/xen/xen-ops.h [new file with mode: 0644]
arch/arm/mm/dma-mapping.c
arch/arm/xen/enlighten.c
arch/arm64/include/asm/sysreg.h
arch/arm64/include/asm/xen/xen-ops.h [new file with mode: 0644]
arch/arm64/kernel/fpsimd.c
arch/arm64/kernel/mte.c
arch/arm64/mm/dma-mapping.c
arch/arm64/net/bpf_jit_comp.c
arch/arm64/tools/gen-sysreg.awk
arch/loongarch/Kconfig
arch/loongarch/include/asm/hardirq.h
arch/loongarch/include/asm/percpu.h
arch/loongarch/include/asm/smp.h
arch/loongarch/include/asm/timex.h
arch/loongarch/kernel/acpi.c
arch/loongarch/kernel/cacheinfo.c
arch/loongarch/kernel/irq.c
arch/loongarch/kernel/process.c
arch/loongarch/kernel/setup.c
arch/loongarch/kernel/smp.c
arch/powerpc/Kconfig
arch/powerpc/include/asm/thread_info.h
arch/powerpc/kernel/Makefile
arch/powerpc/kernel/process.c
arch/powerpc/kernel/ptrace/ptrace-fpu.c
arch/powerpc/kernel/ptrace/ptrace.c
arch/powerpc/kernel/rtas.c
arch/powerpc/kexec/crash.c
arch/powerpc/mm/nohash/kaslr_booke.c
arch/powerpc/platforms/powernv/Makefile
arch/powerpc/platforms/pseries/papr_scm.c
arch/s390/Kconfig
arch/s390/Makefile
arch/s390/mm/init.c
arch/um/drivers/virt-pci.c
arch/x86/Kconfig
arch/x86/include/asm/kvm_host.h
arch/x86/include/asm/uaccess.h
arch/x86/kvm/mmu/mmu.c
arch/x86/kvm/mmu/tdp_iter.c
arch/x86/kvm/mmu/tdp_iter.h
arch/x86/kvm/mmu/tdp_mmu.c
arch/x86/kvm/svm/nested.c
arch/x86/kvm/svm/svm.c
arch/x86/kvm/svm/svm.h
arch/x86/kvm/vmx/vmx.c
arch/x86/kvm/x86.c
arch/x86/kvm/xen.h
arch/x86/mm/mem_encrypt.c
arch/x86/mm/mem_encrypt_amd.c
arch/x86/xen/enlighten_hvm.c
arch/x86/xen/enlighten_pv.c
block/bio.c
certs/Makefile
certs/extract-cert.c
drivers/ata/libata-core.c
drivers/ata/libata-scsi.c
drivers/ata/libata-transport.c
drivers/ata/pata_octeon_cf.c
drivers/char/Kconfig
drivers/char/hw_random/virtio-rng.c
drivers/char/random.c
drivers/gpio/gpio-crystalcove.c
drivers/gpio/gpio-dln2.c
drivers/gpio/gpio-dwapb.c
drivers/gpio/gpio-merrifield.c
drivers/gpio/gpio-sch.c
drivers/gpio/gpio-wcove.c
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c
drivers/gpu/drm/amd/amdgpu/imu_v11_0.c
drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c
drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.h
drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
drivers/gpu/drm/amd/amdgpu/nv.c
drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
drivers/gpu/drm/amd/amdkfd/kfd_crat.c
drivers/gpu/drm/amd/amdkfd/kfd_device.c
drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
drivers/gpu/drm/amd/amdkfd/kfd_svm.c
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.h
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c
drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
drivers/gpu/drm/amd/display/dc/dc.h
drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.c
drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c
drivers/gpu/drm/amd/display/dc/dml/dml_wrapper.c
drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h
drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h
drivers/gpu/drm/amd/display/dc/link/link_hwss_hpo_dp.c
drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c
drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.h
drivers/gpu/drm/amd/display/include/ddc_service_types.h
drivers/gpu/drm/amd/pm/swsmu/inc/smu_v11_0_7_pptable.h
drivers/gpu/drm/amd/pm/swsmu/inc/smu_v11_0_pptable.h
drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0_7_pptable.h
drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0_pptable.h
drivers/gpu/drm/imx/ipuv3-crtc.c
drivers/idle/intel_idle.c
drivers/input/joystick/Kconfig
drivers/input/misc/soc_button_array.c
drivers/input/mouse/bcm5974.c
drivers/md/dm-core.h
drivers/md/dm-rq.c
drivers/md/dm-table.c
drivers/md/dm.c
drivers/md/dm.h
drivers/mmc/core/block.c
drivers/mmc/host/sdhci-pci-gli.c
drivers/net/amt.c
drivers/net/dsa/lantiq_gswip.c
drivers/net/dsa/mv88e6xxx/serdes.c
drivers/net/dsa/realtek/rtl8365mb.c
drivers/net/ethernet/altera/altera_tse_main.c
drivers/net/ethernet/amd/au1000_eth.c
drivers/net/ethernet/amd/au1000_eth.h
drivers/net/ethernet/amd/xgbe/xgbe-drv.c
drivers/net/ethernet/broadcom/bgmac-bcma-mdio.c
drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
drivers/net/ethernet/mediatek/mtk_eth_soc.c
drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
drivers/net/ethernet/mellanox/mlx5/core/dev.c
drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c
drivers/net/ethernet/mellanox/mlx5/core/en/params.c
drivers/net/ethernet/mellanox/mlx5/core/en_common.c
drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h
drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
drivers/net/ethernet/netronome/nfp/flower/conntrack.c
drivers/net/ethernet/netronome/nfp/flower/match.c
drivers/net/ethernet/netronome/nfp/nfp_net_sriov.c
drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
drivers/net/phy/dp83867.c
drivers/net/phy/mdio_bus.c
drivers/nfc/nfcmrvl/usb.c
drivers/nfc/st21nfca/se.c
drivers/platform/mellanox/Kconfig
drivers/platform/mellanox/nvsw-sn2201.c
drivers/platform/mips/Kconfig
drivers/platform/x86/barco-p50-gpio.c
drivers/platform/x86/gigabyte-wmi.c
drivers/platform/x86/hp-wmi.c
drivers/platform/x86/intel/hid.c
drivers/platform/x86/intel/pmc/core.c
drivers/platform/x86/intel/pmt/crashlog.c
drivers/scsi/ipr.c
drivers/scsi/lpfc/lpfc_crtn.h
drivers/scsi/lpfc/lpfc_ct.c
drivers/scsi/lpfc/lpfc_els.c
drivers/scsi/lpfc/lpfc_hw4.h
drivers/scsi/lpfc/lpfc_init.c
drivers/scsi/lpfc/lpfc_nportdisc.c
drivers/scsi/lpfc/lpfc_nvme.c
drivers/scsi/lpfc/lpfc_scsi.c
drivers/scsi/lpfc/lpfc_sli.c
drivers/scsi/lpfc/lpfc_version.h
drivers/scsi/mpt3sas/mpt3sas_base.c
drivers/scsi/pmcraid.c
drivers/scsi/sd.c
drivers/scsi/vmw_pvscsi.h
drivers/vdpa/mlx5/net/mlx5_vnet.c
drivers/vdpa/vdpa_user/vduse_dev.c
drivers/vhost/vdpa.c
drivers/vhost/vringh.c
drivers/virtio/Kconfig
drivers/virtio/virtio.c
drivers/virtio/virtio_mmio.c
drivers/virtio/virtio_pci_modern_dev.c
drivers/xen/Kconfig
drivers/xen/Makefile
drivers/xen/grant-dma-iommu.c [new file with mode: 0644]
drivers/xen/grant-dma-ops.c [new file with mode: 0644]
drivers/xen/grant-table.c
drivers/xen/xlate_mmu.c
fs/9p/cache.c
fs/9p/v9fs.c
fs/9p/v9fs.h
fs/9p/vfs_addr.c
fs/9p/vfs_inode.c
fs/afs/callback.c
fs/afs/dir.c
fs/afs/dir_edit.c
fs/afs/dir_silly.c
fs/afs/dynroot.c
fs/afs/file.c
fs/afs/fs_operation.c
fs/afs/inode.c
fs/afs/internal.h
fs/afs/super.c
fs/afs/volume.c
fs/afs/write.c
fs/ceph/addr.c
fs/ceph/cache.c
fs/ceph/cache.h
fs/ceph/caps.c
fs/ceph/file.c
fs/ceph/inode.c
fs/ceph/mds_client.c
fs/ceph/snap.c
fs/ceph/super.c
fs/ceph/super.h
fs/ceph/xattr.c
fs/cifs/cifsfs.c
fs/cifs/cifsfs.h
fs/cifs/cifsglob.h
fs/cifs/connect.c
fs/cifs/file.c
fs/cifs/fscache.c
fs/cifs/fscache.h
fs/cifs/inode.c
fs/cifs/misc.c
fs/cifs/sess.c
fs/cifs/smb2ops.c
fs/cifs/smb2pdu.c
fs/ext2/inode.c
fs/fs-writeback.c
fs/inode.c
fs/netfs/buffered_read.c
fs/netfs/internal.h
fs/netfs/objects.c
fs/nfsd/filecache.c
fs/quota/dquot.c
fs/zonefs/super.c
include/asm-generic/Kbuild
include/asm-generic/platform-feature.h [new file with mode: 0644]
include/linux/bio.h
include/linux/crc-itu-t.h
include/linux/libata.h
include/linux/mm_types.h
include/linux/netfs.h
include/linux/platform-feature.h [new file with mode: 0644]
include/linux/random.h
include/linux/sunrpc/xdr.h
include/linux/vdpa.h
include/linux/virtio_config.h
include/linux/workqueue.h
include/linux/xarray.h
include/net/flow_offload.h
include/net/ipv6.h
include/net/netfilter/nf_tables.h
include/net/netfilter/nf_tables_offload.h
include/trace/events/workqueue.h
include/uapi/linux/tls.h
include/xen/arm/xen-ops.h [new file with mode: 0644]
include/xen/grant_table.h
include/xen/xen-ops.h
include/xen/xen.h
init/Kconfig
kernel/Makefile
kernel/bpf/btf.c
kernel/dma/debug.c
kernel/dma/swiotlb.c
kernel/entry/kvm.c
kernel/platform-feature.c [new file with mode: 0644]
kernel/reboot.c
kernel/trace/bpf_trace.c
kernel/workqueue.c
lib/crc-itu-t.c
lib/iov_iter.c
lib/vsprintf.c
lib/xarray.c
mm/filemap.c
mm/huge_memory.c
mm/readahead.c
net/core/flow_offload.c
net/ipv4/inet_hashtables.c
net/ipv4/ip_gre.c
net/ipv4/xfrm4_protocol.c
net/ipv6/ip6_output.c
net/ipv6/seg6_hmac.c
net/ipv6/seg6_local.c
net/l2tp/l2tp_ip6.c
net/netfilter/nf_tables_api.c
net/netfilter/nf_tables_offload.c
net/netfilter/nft_nat.c
net/openvswitch/actions.c
net/openvswitch/conntrack.c
net/sunrpc/xdr.c
net/sunrpc/xprtrdma/svc_rdma_rw.c
net/tls/tls_main.c
net/unix/af_unix.c
net/xdp/xsk.c
net/xdp/xsk_queue.h
scripts/Makefile.build
scripts/check-local-export
scripts/gdb/linux/config.py
scripts/nsdeps
scripts/sign-file.c
security/keys/trusted-keys/trusted_tpm2.c
sound/hda/hdac_device.c
sound/pci/hda/hda_intel.c
sound/pci/hda/patch_conexant.c
sound/pci/hda/patch_hdmi.c
sound/pci/hda/patch_realtek.c
sound/soc/codecs/cs35l36.c
sound/soc/codecs/cs42l51.c
sound/soc/codecs/cs42l52.c
sound/soc/codecs/cs42l56.c
sound/soc/codecs/cs53l30.c
sound/soc/codecs/es8328.c
sound/soc/codecs/nau8822.c
sound/soc/codecs/nau8822.h
sound/soc/codecs/wm8962.c
sound/soc/codecs/wm_adsp.c
sound/soc/fsl/fsl_sai.c
sound/soc/intel/boards/sof_cirrus_common.c
sound/soc/qcom/lpass-platform.c
sound/soc/sof/sof-audio.c
sound/soc/sof/sof-client-ipc-msg-injector.c
sound/usb/pcm.c
sound/usb/quirks-table.h
tools/testing/selftests/bpf/prog_tests/fexit_bpf2bpf.c
tools/testing/selftests/bpf/progs/freplace_global_func.c [new file with mode: 0644]
tools/testing/selftests/kvm/x86_64/hyperv_clock.c
tools/testing/selftests/net/bpf/Makefile
tools/testing/selftests/netfilter/nft_nat.sh
tools/testing/selftests/wireguard/qemu/Makefile
tools/testing/selftests/wireguard/qemu/init.c
tools/testing/selftests/wireguard/qemu/kernel.config
virt/kvm/kvm_main.c

index 2f726c9..3daecac 100644 (file)
@@ -107,13 +107,14 @@ Description:
                                described in ATA8 7.16 and 7.17. Only valid if
                                the device is not a PM.
 
-               pio_mode:       (RO) Transfer modes supported by the device when
-                               in PIO mode. Mostly used by PATA device.
+               pio_mode:       (RO) PIO transfer mode used by the device.
+                               Mostly used by PATA devices.
 
-               xfer_mode:      (RO) Current transfer mode
+               xfer_mode:      (RO) Current transfer mode. Mostly used by
+                               PATA devices.
 
-               dma_mode:       (RO) Transfer modes supported by the device when
-                               in DMA mode. Mostly used by PATA device.
+               dma_mode:       (RO) DMA transfer mode used by the device.
+                               Mostly used by PATA devices.
 
                class:          (RO) Device class. Can be "ata" for disk,
                                "atapi" for packet device, "pmp" for PM, or
index 42214b4..90596d8 100644 (file)
@@ -26,6 +26,6 @@ Description:  Read/write the current state of DDR Backup Mode, which controls
                     DDR Backup Mode must be explicitly enabled by the user,
                     to invoke step 1.
 
-               See also Documentation/devicetree/bindings/mfd/bd9571mwv.txt.
+               See also Documentation/devicetree/bindings/mfd/rohm,bd9571mwv.yaml.
 Users:         User space applications for embedded boards equipped with a
                BD9571MWV PMIC.
index b256f97..1dc6c39 100644 (file)
@@ -34,7 +34,7 @@ CPU so it is usually wise not to overlap any physical RAM with
 the TCM.
 
 The TCM memory can then be remapped to another address again using
-the MMU, but notice that the TCM if often used in situations where
+the MMU, but notice that the TCM is often used in situations where
 the MMU is turned off. To avoid confusion the current Linux
 implementation will map the TCM 1 to 1 from physical to virtual
 memory in the location specified by the kernel. Currently Linux
index 8ba677b..937147f 100644 (file)
@@ -371,7 +371,7 @@ The regset data starts with struct user_za_header, containing:
 Appendix A.  SME programmer's model (informative)
 =================================================
 
-This section provides a minimal description of the additions made by SVE to the
+This section provides a minimal description of the additions made by SME to the
 ARMv8-A programmer's model that are relevant to this document.
 
 Note: This section is for information only and not intended to be complete or
index be66f1e..7c331bf 100644 (file)
@@ -45,7 +45,7 @@ description: |
   The case where SH and SP are both 1 is likely not very interesting.
 
 maintainers:
-  - Luca Ceresoli <luca@lucaceresoli.net>
+  - Luca Ceresoli <luca.ceresoli@bootlin.com>
 
 properties:
   compatible:
index 73470ec..ce91a91 100644 (file)
@@ -16,7 +16,7 @@ has been processed. See [2] for more information on the brcm,l2-intc node.
 firmware. On some SoCs, this firmware supports DFS and DVFS in addition to
 Adaptive Voltage Scaling.
 
-[2] Documentation/devicetree/bindings/interrupt-controller/brcm,l2-intc.txt
+[2] Documentation/devicetree/bindings/interrupt-controller/brcm,l2-intc.yaml
 
 
 Node brcm,avs-cpu-data-mem
index 795a08a..2a17ec6 100644 (file)
@@ -71,11 +71,6 @@ properties:
       - description: number of output lines for the green channel (G)
       - description: number of output lines for the blue channel (B)
 
-  arm,malidp-arqos-high-level:
-    $ref: /schemas/types.yaml#/definitions/uint32
-    description:
-      integer describing the ARQoS levels of DP500's QoS signaling
-
   arm,malidp-arqos-value:
     $ref: /schemas/types.yaml#/definitions/uint32
     description:
@@ -113,7 +108,7 @@ examples:
         clocks = <&oscclk2>, <&fpgaosc0>, <&fpgaosc1>, <&fpgaosc1>;
         clock-names = "pxlclk", "mclk", "aclk", "pclk";
         arm,malidp-output-port-lines = /bits/ 8 <8 8 8>;
-        arm,malidp-arqos-high-level = <0xd000d000>;
+        arm,malidp-arqos-value = <0xd000d000>;
 
         port {
             dp0_output: endpoint {
index b41991e..d3c3e4b 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Qualcomm Display DPU dt properties for SC7180 target
 
 maintainers:
-  - Krishna Manikandan <mkrishn@codeaurora.org>
+  - Krishna Manikandan <quic_mkrishn@quicinc.com>
 
 description: |
   Device tree bindings for MSM Mobile Display Subsystem(MDSS) that encapsulates
index 6e417d0..f427eec 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Qualcomm Display DPU dt properties for SC7280
 
 maintainers:
-  - Krishna Manikandan <mkrishn@codeaurora.org>
+  - Krishna Manikandan <quic_mkrishn@quicinc.com>
 
 description: |
   Device tree bindings for MSM Mobile Display Subsystem (MDSS) that encapsulates
index 1a42491..2bb8896 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Qualcomm Display DPU dt properties for SDM845 target
 
 maintainers:
-  - Krishna Manikandan <mkrishn@codeaurora.org>
+  - Krishna Manikandan <quic_mkrishn@quicinc.com>
 
 description: |
   Device tree bindings for MSM Mobile Display Subsystem(MDSS) that encapsulates
index 7095ec3..880bfe9 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Qualcomm Display DSI controller
 
 maintainers:
-  - Krishna Manikandan <mkrishn@codeaurora.org>
+  - Krishna Manikandan <quic_mkrishn@quicinc.com>
 
 allOf:
   - $ref: "../dsi-controller.yaml#"
index 2d5a766..716f921 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Qualcomm Display DSI 10nm PHY
 
 maintainers:
-  - Krishna Manikandan <mkrishn@codeaurora.org>
+  - Krishna Manikandan <quic_mkrishn@quicinc.com>
 
 allOf:
   - $ref: dsi-phy-common.yaml#
index 81dbee4..1342d74 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Qualcomm Display DSI 14nm PHY
 
 maintainers:
-  - Krishna Manikandan <mkrishn@codeaurora.org>
+  - Krishna Manikandan <quic_mkrishn@quicinc.com>
 
 allOf:
   - $ref: dsi-phy-common.yaml#
index b8de785..9c1f914 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Qualcomm Display DSI 20nm PHY
 
 maintainers:
-  - Krishna Manikandan <mkrishn@codeaurora.org>
+  - Krishna Manikandan <quic_mkrishn@quicinc.com>
 
 allOf:
   - $ref: dsi-phy-common.yaml#
index 69eecaa..3d8540a 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Qualcomm Display DSI 28nm PHY
 
 maintainers:
-  - Krishna Manikandan <mkrishn@codeaurora.org>
+  - Krishna Manikandan <quic_mkrishn@quicinc.com>
 
 allOf:
   - $ref: dsi-phy-common.yaml#
index 502bdda..76d40f7 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Description of Qualcomm Display DSI PHY common dt properties
 
 maintainers:
-  - Krishna Manikandan <mkrishn@codeaurora.org>
+  - Krishna Manikandan <quic_mkrishn@quicinc.com>
 
 description: |
   This defines the DSI PHY dt properties which are common for all
index 9c27ed6..4a4df4f 100644 (file)
@@ -9,7 +9,7 @@ Requires node properties:
        "arm,vexpress-power"
        "arm,vexpress-energy"
 - "arm,vexpress-sysreg,func" when controlled via vexpress-sysreg
-  (see Documentation/devicetree/bindings/arm/vexpress-sysreg.txt
+  (see Documentation/devicetree/bindings/arm/vexpress-config.yaml
   for more details)
 
 Optional node properties:
diff --git a/Documentation/devicetree/bindings/iommu/xen,grant-dma.yaml b/Documentation/devicetree/bindings/iommu/xen,grant-dma.yaml
new file mode 100644 (file)
index 0000000..be1539d
--- /dev/null
@@ -0,0 +1,39 @@
+# SPDX-License-Identifier: (GPL-2.0-only or BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/iommu/xen,grant-dma.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Xen specific IOMMU for virtualized devices (e.g. virtio)
+
+maintainers:
+  - Stefano Stabellini <sstabellini@kernel.org>
+
+description:
+  The Xen IOMMU represents the Xen grant table interface. Grant mappings
+  are to be used with devices connected to the Xen IOMMU using the "iommus"
+  property, which also specifies the ID of the backend domain.
+  The binding is required to restrict memory access using Xen grant mappings.
+
+properties:
+  compatible:
+    const: xen,grant-dma
+
+  '#iommu-cells':
+    const: 1
+    description:
+      The single cell is the domid (domain ID) of the domain where the backend
+      is running.
+
+required:
+  - compatible
+  - "#iommu-cells"
+
+additionalProperties: false
+
+examples:
+  - |
+    iommu {
+        compatible = "xen,grant-dma";
+        #iommu-cells = <1>;
+    };
index c7cfa6c..935d63d 100644 (file)
@@ -150,7 +150,6 @@ allOf:
           description: 5 memory controller channels and 1 for stream-id registers
 
         reg-names:
-          maxItems: 6
           items:
             - const: sid
             - const: broadcast
@@ -170,7 +169,6 @@ allOf:
           description: 17 memory controller channels and 1 for stream-id registers
 
         reg-names:
-          minItems: 18
           items:
             - const: sid
             - const: broadcast
@@ -202,7 +200,6 @@ allOf:
           description: 17 memory controller channels and 1 for stream-id registers
 
         reg-names:
-          minItems: 18
           items:
             - const: sid
             - const: broadcast
index 74a6867..edac14a 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: MAX77714 PMIC with GPIO, RTC and watchdog from Maxim Integrated.
 
 maintainers:
-  - Luca Ceresoli <luca@lucaceresoli.net>
+  - Luca Ceresoli <luca.ceresoli@bootlin.com>
 
 description: |
   MAX77714 is a Power Management IC with 4 buck regulators, 9
index b672202..5ecdac9 100644 (file)
@@ -75,7 +75,6 @@ examples:
       sd-uhs-sdr104;
       sdhci,auto-cmd12;
       interrupts = <0x0 0x26 0x4>;
-      interrupt-names = "sdio0_0";
       clocks = <&scmi_clk 245>;
       clock-names = "sw_sdio";
     };
@@ -94,7 +93,6 @@ examples:
       non-removable;
       bus-width = <0x8>;
       interrupts = <0x0 0x27 0x4>;
-      interrupt-names = "sdio1_0";
       clocks = <&scmi_clk 245>;
       clock-names = "sw_sdio";
     };
index c79639e..3ee7588 100644 (file)
@@ -56,6 +56,9 @@ properties:
       - const: core
       - const: axi
 
+  interrupts:
+    maxItems: 1
+
   marvell,xenon-sdhc-id:
     $ref: /schemas/types.yaml#/definitions/uint32
     minimum: 0
@@ -145,7 +148,6 @@ allOf:
           items:
             - description: Xenon IP registers
             - description: Armada 3700 SoC PHY PAD Voltage Control register
-          minItems: 2
 
         marvell,pad-type:
           $ref: /schemas/types.yaml#/definitions/string
index ddff923..34dd1cc 100644 (file)
@@ -55,7 +55,6 @@ properties:
     maxItems: 1
 
   apple,sart:
-    maxItems: 1
     $ref: /schemas/types.yaml#/definitions/phandle
     description: |
       Reference to the SART address filter.
index de6a706..35f03df 100644 (file)
@@ -9,7 +9,7 @@ Required properties:
 - resets               : list of phandle and reset specifier pairs. There should be two entries, one
                          for the whole phy and one for the port
 - reset-names          : list of reset signal names. Should be "global" and "port"
-See: Documentation/devicetree/bindings/reset/st,sti-powerdown.txt
+See: Documentation/devicetree/bindings/reset/st,stih407-powerdown.yaml
 See: Documentation/devicetree/bindings/reset/reset.txt
 
 Example:
index 60dc278..b078009 100644 (file)
@@ -8,7 +8,7 @@ $schema: "http://devicetree.org/meta-schemas/core.yaml#"
 title: Qualcomm QMP USB3 DP PHY controller
 
 maintainers:
-  - Manu Gautam <mgautam@codeaurora.org>
+  - Wesley Cheng <quic_wcheng@quicinc.com>
 
 properties:
   compatible:
index 0ab3dad..d68ab49 100644 (file)
@@ -8,7 +8,7 @@ $schema: "http://devicetree.org/meta-schemas/core.yaml#"
 title: Qualcomm QUSB2 phy controller
 
 maintainers:
-  - Manu Gautam <mgautam@codeaurora.org>
+  - Wesley Cheng <quic_wcheng@quicinc.com>
 
 description:
   QUSB2 controller supports LS/FS/HS usb connectivity on Qualcomm chipsets.
index 1ce251d..7a0e6a9 100644 (file)
@@ -7,7 +7,7 @@ $schema: "http://devicetree.org/meta-schemas/core.yaml#"
 title: Qualcomm Synopsys Femto High-Speed USB PHY V2
 
 maintainers:
-  - Wesley Cheng <wcheng@codeaurora.org>
+  - Wesley Cheng <quic_wcheng@quicinc.com>
 
 description: |
   Qualcomm High-Speed USB PHY
index cbcbd31..939cb5b 100644 (file)
@@ -27,7 +27,7 @@ Required properties:
 - pins: List of pins. Valid values of pins properties are: gpio0, gpio1.
 
 First 2 properties must be added in the RK805 PMIC node, documented in
-Documentation/devicetree/bindings/mfd/rk808.txt
+Documentation/devicetree/bindings/mfd/rockchip,rk808.yaml
 
 Optional properties:
 -------------------
index 4d820df..6f17f39 100644 (file)
@@ -32,31 +32,37 @@ patternProperties:
           groups:
             description: The pin group to select.
             enum: [
+              # common
+              i2c, spi, wdt,
+
               # For MT7620 SoC
-              ephy, i2c, mdio, nd_sd, pa, pcie, rgmii1, rgmii2, spi, spi refclk,
-              uartf, uartlite, wdt, wled,
+              ephy, mdio, nd_sd, pa, pcie, rgmii1, rgmii2, spi refclk,
+              uartf, uartlite, wled,
 
               # For MT7628 and MT7688 SoCs
-              gpio, i2c, i2s, p0led_an, p0led_kn, p1led_an, p1led_kn, p2led_an,
+              gpio, i2s, p0led_an, p0led_kn, p1led_an, p1led_kn, p2led_an,
               p2led_kn, p3led_an, p3led_kn, p4led_an, p4led_kn, perst, pwm0,
-              pwm1, refclk, sdmode, spi, spi cs1, spis, uart0, uart1, uart2,
-              wdt, wled_an, wled_kn,
+              pwm1, refclk, sdmode, spi cs1, spis, uart0, uart1, uart2,
+              wled_an, wled_kn,
             ]
 
           function:
             description: The mux function to select.
             enum: [
+              # common
+              gpio, i2c, refclk, spi,
+
               # For MT7620 SoC
-              ephy, gpio, gpio i2s, gpio uartf, i2c, i2s uartf, mdio, nand, pa,
-              pcie refclk, pcie rst, pcm gpio, pcm i2s, pcm uartf, refclk,
-              rgmii1, rgmii2, sd, spi, spi refclk, uartf, uartlite, wdt refclk,
+              ephy, gpio i2s, gpio uartf, i2s uartf, mdio, nand, pa,
+              pcie refclk, pcie rst, pcm gpio, pcm i2s, pcm uartf,
+              rgmii1, rgmii2, sd, spi refclk, uartf, uartlite, wdt refclk,
               wdt rst, wled,
 
               # For MT7628 and MT7688 SoCs
-              antenna, debug, gpio, i2c, i2s, jtag, p0led_an, p0led_kn,
+              antenna, debug, i2s, jtag, p0led_an, p0led_kn,
               p1led_an, p1led_kn, p2led_an, p2led_kn, p3led_an, p3led_kn,
               p4led_an, p4led_kn, pcie, pcm, perst, pwm, pwm0, pwm1, pwm_uart2,
-              refclk, rsvd, sdxc, sdxc d5 d4, sdxc d6, sdxc d7, spi, spi cs1,
+              rsvd, sdxc, sdxc d5 d4, sdxc d6, sdxc d7, spi cs1,
               spis, sw_r, uart0, uart1, uart2, utif, wdt, wled_an, wled_kn, -,
             ]
 
index 425401c..f602a5d 100644 (file)
@@ -33,32 +33,29 @@ patternProperties:
           groups:
             description: The pin group to select.
             enum: [
+              # common
+              i2c, jtag, led, mdio, rgmii, spi, spi_cs1, uartf, uartlite,
+
               # For RT3050, RT3052 and RT3350 SoCs
-              i2c, jtag, mdio, rgmii, sdram, spi, uartf, uartlite,
+              sdram,
 
               # For RT3352 SoC
-              i2c, jtag, led, lna, mdio, pa, rgmii, spi, spi_cs1, uartf,
-              uartlite,
-
-              # For RT5350 SoC
-              i2c, jtag, led, spi, spi_cs1, uartf, uartlite,
+              lna, pa
             ]
 
           function:
             description: The mux function to select.
             enum: [
+              # common
+              gpio, gpio i2s, gpio uartf, i2c, i2s uartf, jtag, led, mdio,
+              pcm gpio, pcm i2s, pcm uartf, rgmii, spi, spi_cs1, uartf,
+              uartlite, wdg_cs1,
+
               # For RT3050, RT3052 and RT3350 SoCs
-              gpio, gpio i2s, gpio uartf, i2c, i2s uartf, jtag, mdio, pcm gpio,
-              pcm i2s, pcm uartf, rgmii, sdram, spi, uartf, uartlite,
+              sdram,
 
               # For RT3352 SoC
-              gpio, gpio i2s, gpio uartf, i2c, i2s uartf, jtag, led, lna, mdio,
-              pa, pcm gpio, pcm i2s, pcm uartf, rgmii, spi, spi_cs1, uartf,
-              uartlite, wdg_cs1,
-
-              # For RT5350 SoC
-              gpio, gpio i2s, gpio uartf, i2c, i2s uartf, jtag, led, pcm gpio,
-              pcm i2s, pcm uartf, spi, spi_cs1, uartf, uartlite, wdg_cs1,
+              lna, pa
             ]
 
         required:
index 675b9b2..f23dcc5 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Maxim Integrated MAX77976 Battery charger
 
 maintainers:
-  - Luca Ceresoli <luca@lucaceresoli.net>
+  - Luca Ceresoli <luca.ceresoli@bootlin.com>
 
 description: |
   The Maxim MAX77976 is a 19Vin / 5.5A, 1-Cell Li+ battery charger
index 12ed98c..dbe78cd 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: The Qualcomm PMIC VBUS output regulator driver
 
 maintainers:
-  - Wesley Cheng <wcheng@codeaurora.org>
+  - Wesley Cheng <quic_wcheng@quicinc.com>
 
 description: |
   This regulator driver controls the VBUS output by the Qualcomm PMIC.  This
index d775f72..1c2e92c 100644 (file)
@@ -4,7 +4,7 @@ Versatile Express voltage regulators
 Requires node properties:
 - "compatible" value: "arm,vexpress-volt"
 - "arm,vexpress-sysreg,func" when controlled via vexpress-sysreg
-  (see Documentation/devicetree/bindings/arm/vexpress-sysreg.txt
+  (see Documentation/devicetree/bindings/arm/vexpress-config.yaml
   for more details)
 
 Required regulator properties:
index bf73de0..4aa3684 100644 (file)
@@ -13,7 +13,7 @@ Required properties:
  - resets      : list of phandle and reset specifier pairs. There should be two entries, one
                  for the powerdown and softreset lines of the usb3 IP
  - reset-names : list of reset signal names. Names should be "powerdown" and "softreset"
-See: Documentation/devicetree/bindings/reset/st,sti-powerdown.txt
+See: Documentation/devicetree/bindings/reset/st,stih407-powerdown.yaml
 See: Documentation/devicetree/bindings/reset/reset.txt
 
  - #address-cells, #size-cells : should be '1' if the device has sub-nodes
index 065c91d..d6f2bde 100644 (file)
@@ -17,7 +17,7 @@ See: Documentation/devicetree/bindings/clock/clock-bindings.txt
  - resets              : phandle + reset specifier pairs to the powerdown and softreset lines
                          of the USB IP
  - reset-names         : should be "power" and "softreset"
-See: Documentation/devicetree/bindings/reset/st,sti-powerdown.txt
+See: Documentation/devicetree/bindings/reset/st,stih407-powerdown.yaml
 See: Documentation/devicetree/bindings/reset/reset.txt
 
 Example:
index 44c998c..1c73557 100644 (file)
@@ -15,7 +15,7 @@ See: Documentation/devicetree/bindings/clock/clock-bindings.txt
 
  - resets              : phandle to the powerdown and reset controller for the USB IP
  - reset-names         : should be "power" and "softreset".
-See: Documentation/devicetree/bindings/reset/st,sti-powerdown.txt
+See: Documentation/devicetree/bindings/reset/st,stih407-powerdown.yaml
 See: Documentation/devicetree/bindings/reset/reset.txt
 
 Example:
index e336fe2..749e196 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Qualcomm SuperSpeed DWC3 USB SoC controller
 
 maintainers:
-  - Manu Gautam <mgautam@codeaurora.org>
+  - Wesley Cheng <quic_wcheng@quicinc.com>
 
 properties:
   compatible:
index 6bb20b4..0496773 100644 (file)
@@ -143,6 +143,9 @@ patternProperties:
     description: ASPEED Technology Inc.
   "^asus,.*":
     description: AsusTek Computer Inc.
+  "^atheros,.*":
+    description: Qualcomm Atheros, Inc. (deprecated, use qca)
+    deprecated: true
   "^atlas,.*":
     description: Atlas Scientific LLC
   "^atmel,.*":
index cbcf19f..ed6c1ca 100644 (file)
@@ -64,7 +64,6 @@ if:
 then:
   properties:
     clocks:
-      minItems: 2
       items:
         - description: High-frequency oscillator input, divided internally
         - description: Low-frequency oscillator input
diff --git a/Documentation/driver-api/hte/hte.rst b/Documentation/driver-api/hte/hte.rst
new file mode 100644 (file)
index 0000000..153f323
--- /dev/null
@@ -0,0 +1,79 @@
+.. SPDX-License-Identifier: GPL-2.0+
+
+============================================
+The Linux Hardware Timestamping Engine (HTE)
+============================================
+
+:Author: Dipen Patel
+
+Introduction
+------------
+
+Certain devices have built in hardware timestamping engines which can
+monitor sets of system signals, lines, buses etc... in realtime for state
+change; upon detecting the change they can automatically store the timestamp at
+the moment of occurrence. Such functionality may help achieve better accuracy
+in obtaining timestamps than using software counterparts i.e. ktime and
+friends.
+
+This document describes the API that can be used by hardware timestamping
+engine provider and consumer drivers that want to use the hardware timestamping
+engine (HTE) framework. Both consumers and providers must include
+``#include <linux/hte.h>``.
+
+The HTE framework APIs for the providers
+----------------------------------------
+
+.. kernel-doc:: drivers/hte/hte.c
+   :functions: devm_hte_register_chip hte_push_ts_ns
+
+The HTE framework APIs for the consumers
+----------------------------------------
+
+.. kernel-doc:: drivers/hte/hte.c
+   :functions: hte_init_line_attr hte_ts_get hte_ts_put devm_hte_request_ts_ns hte_request_ts_ns hte_enable_ts hte_disable_ts of_hte_req_count hte_get_clk_src_info
+
+The HTE framework public structures
+-----------------------------------
+.. kernel-doc:: include/linux/hte.h
+
+More on the HTE timestamp data
+------------------------------
+The ``struct hte_ts_data`` is used to pass timestamp details between the
+consumers and the providers. It expresses timestamp data in nanoseconds in
+u64. An example of the typical timestamp data life cycle, for the GPIO line is
+as follows::
+
+ - Monitors GPIO line change.
+ - Detects the state change on GPIO line.
+ - Converts timestamps in nanoseconds.
+ - Stores GPIO raw level in raw_level variable if the provider has that
+ hardware capability.
+ - Pushes this hte_ts_data object to HTE subsystem.
+ - HTE subsystem increments seq counter and invokes consumer provided callback.
+ Based on callback return value, the HTE core invokes secondary callback in
+ the thread context.
+
+HTE subsystem debugfs attributes
+--------------------------------
+HTE subsystem creates debugfs attributes at ``/sys/kernel/debug/hte/``.
+It also creates line/signal-related debugfs attributes at
+``/sys/kernel/debug/hte/<provider>/<label or line id>/``. Note that these
+attributes are read-only.
+
+`ts_requested`
+               The total number of entities requested from the given provider,
+               where entity is specified by the provider and could represent
+               lines, GPIO, chip signals, buses etc...
+                The attribute will be available at
+               ``/sys/kernel/debug/hte/<provider>/``.
+
+`total_ts`
+               The total number of entities supported by the provider.
+                The attribute will be available at
+               ``/sys/kernel/debug/hte/<provider>/``.
+
+`dropped_timestamps`
+               The dropped timestamps for a given line.
+                The attribute will be available at
+               ``/sys/kernel/debug/hte/<provider>/<label or line id>/``.
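
For illustration, the timestamp life cycle documented above maps to the
provider pushing one hte_ts_data entry to the HTE core; a minimal sketch,
assuming a driver context that is not part of this patch:

	#include <linux/hte.h>

	static void example_line_event(struct hte_chip *chip, u32 xlated_id,
				       u64 hw_ns, int level)
	{
		struct hte_ts_data el = {
			.tsc = hw_ns,		/* already converted to ns */
			.raw_level = level,	/* or -1 if unsupported */
		};

		/* The HTE core bumps seq and runs the consumer callback. */
		hte_push_ts_ns(chip, xlated_id, &el);
	}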
diff --git a/Documentation/driver-api/hte/index.rst b/Documentation/driver-api/hte/index.rst
new file mode 100644 (file)
index 0000000..9f43301
--- /dev/null
@@ -0,0 +1,22 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+============================================
+The Linux Hardware Timestamping Engine (HTE)
+============================================
+
+The HTE Subsystem
+=================
+
+.. toctree::
+   :maxdepth: 1
+
+   hte
+
+HTE Tegra Provider
+==================
+
+.. toctree::
+   :maxdepth: 1
+
+   tegra194-hte
+
diff --git a/Documentation/driver-api/hte/tegra194-hte.rst b/Documentation/driver-api/hte/tegra194-hte.rst
new file mode 100644 (file)
index 0000000..41983e0
--- /dev/null
@@ -0,0 +1,49 @@
+.. SPDX-License-Identifier: GPL-2.0+
+
+HTE Kernel provider driver
+==========================
+
+Description
+-----------
+The Nvidia tegra194 HTE provider driver implements two GTE
+(Generic Timestamping Engine) instances: 1) GPIO GTE and 2) LIC
+(Legacy Interrupt Controller) IRQ GTE. Both GTE instances get the
+timestamp from the system counter TSC which has 31.25MHz clock rate, and the
+driver converts clock tick rate to nanoseconds before storing it as timestamp
+value.
+
+GPIO GTE
+--------
+
+This GTE instance timestamps GPIO in real time. For that to happen GPIO
+needs to be configured as input. The always on (AON) GPIO controller instance
+supports timestamping GPIOs in real time and it has 39 GPIO lines. The GPIO GTE
+and AON GPIO controller are tightly coupled as it requires very specific bits
+to be set in GPIO config register before GPIO GTE can be used, for that GPIOLIB
+adds two optional APIs as below. The GPIO GTE code supports both kernel
+and userspace consumers. The kernel space consumers can directly talk to HTE
+subsystem while userspace consumers timestamp requests go through GPIOLIB CDEV
+framework to HTE subsystem.
+
+.. kernel-doc:: drivers/gpio/gpiolib.c
+   :functions: gpiod_enable_hw_timestamp_ns gpiod_disable_hw_timestamp_ns
+
+For userspace consumers, GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE flag must be
+specified during IOCTL calls. Refer to ``tools/gpio/gpio-event-mon.c``, which
+returns the timestamp in nanoseconds.
+
+LIC (Legacy Interrupt Controller) IRQ GTE
+-----------------------------------------
+
+This GTE instance timestamps LIC IRQ lines in real time. There are 352 IRQ
+lines which this instance can add timestamps to in real time. The hte
+devicetree binding described at ``Documentation/devicetree/bindings/hte/``
+provides an example of how a consumer can request an IRQ line. Since it is a
+one-to-one mapping with IRQ GTE provider, consumers can simply specify the IRQ
+number that they are interested in. There is no userspace consumer support for
+this GTE instance in the HTE framework.
+
+The provider source code of both IRQ and GPIO GTE instances is located at
+``drivers/hte/hte-tegra194.c``. The test driver
+``drivers/hte/hte-tegra194-test.c`` demonstrates HTE API usage for both IRQ
+and GPIO GTE.
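
For illustration, a minimal userspace consumer in the style of
gpio-event-mon, requesting a line with the HTE event clock so edge events
carry hardware timestamps; the chip path and line offset are assumptions:

	#include <fcntl.h>
	#include <stdio.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <unistd.h>
	#include <linux/gpio.h>

	int main(void)
	{
		struct gpio_v2_line_request req;
		struct gpio_v2_line_event event;
		int fd = open("/dev/gpiochip0", O_RDONLY);	/* assumed chip */

		if (fd < 0)
			return 1;

		memset(&req, 0, sizeof(req));
		req.offsets[0] = 0;				/* assumed line */
		req.num_lines = 1;
		req.config.flags = GPIO_V2_LINE_FLAG_INPUT |
				   GPIO_V2_LINE_FLAG_EDGE_RISING |
				   GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE;
		strcpy(req.consumer, "hte-example");

		if (ioctl(fd, GPIO_V2_GET_LINE_IOCTL, &req) < 0)
			return 1;

		/* event.timestamp_ns is now GTE-sourced, in nanoseconds. */
		if (read(req.fd, &event, sizeof(event)) == sizeof(event))
			printf("edge @ %llu ns\n",
			       (unsigned long long)event.timestamp_ns);

		close(req.fd);
		close(fd);
		return 0;
	}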
index d76a60d..a6d525c 100644 (file)
@@ -108,6 +108,7 @@ available subsections can be seen below.
    xilinx/index
    xillybus
    zorro
+   hte/index
 
 .. only::  subproject and html
 
index 10482de..a053667 100644 (file)
@@ -13,6 +13,7 @@
     |        csky: | TODO |
     |     hexagon: | TODO |
     |        ia64: | TODO |
+    |       loong: | TODO |
     |        m68k: | TODO |
     |  microblaze: | TODO |
     |        mips: |  ok  |
index bcefb5a..c0bb9c9 100644 (file)
@@ -13,6 +13,7 @@
     |        csky: | TODO |
     |     hexagon: | TODO |
     |        ia64: | TODO |
+    |       loong: | TODO |
     |        m68k: | TODO |
     |  microblaze: | TODO |
     |        mips: |  ok  |
index d80d994..c9bfff2 100644 (file)
@@ -13,6 +13,7 @@
     |        csky: |  ok  |
     |     hexagon: |  ok  |
     |        ia64: |  ok  |
+    |       loong: |  ok  |
     |        m68k: | TODO |
     |  microblaze: | TODO |
     |        mips: |  ok  |
index 53eab15..35e2a44 100644 (file)
@@ -13,6 +13,7 @@
     |        csky: | TODO |
     |     hexagon: | TODO |
     |        ia64: | TODO |
+    |       loong: | TODO |
     |        m68k: | TODO |
     |  microblaze: | TODO |
     |        mips: |  ok  |
index 9492645..9b3e2ce 100644 (file)
@@ -13,6 +13,7 @@
     |        csky: | TODO |
     |     hexagon: | TODO |
     |        ia64: | TODO |
+    |       loong: | TODO |
     |        m68k: | TODO |
     |  microblaze: | TODO |
     |        mips: | TODO |
index b4274b8..9c7ffec 100644 (file)
@@ -13,6 +13,7 @@
     |        csky: |  ok  |
     |     hexagon: |  ok  |
     |        ia64: |  ok  |
+    |       loong: |  ok  |
     |        m68k: | TODO |
     |  microblaze: | TODO |
     |        mips: |  ok  |
index c15bb4b..2fd5fb6 100644 (file)
@@ -13,6 +13,7 @@
     |        csky: | TODO |
     |     hexagon: | TODO |
     |        ia64: | TODO |
+    |       loong: | TODO |
     |        m68k: | TODO |
     |  microblaze: | TODO |
     |        mips: | TODO |
index 4c31fc9..c45711e 100644 (file)
     |        csky: | TODO |
     |     hexagon: | TODO |
     |        ia64: | TODO |
+    |       loong: | TODO |
     |        m68k: | TODO |
     |  microblaze: | TODO |
     |        mips: | TODO |
     |       nios2: | TODO |
     |    openrisc: | TODO |
-    |      parisc: | TODO |
+    |      parisc: |  ok  |
     |     powerpc: |  ok  |
     |       riscv: |  ok  |
     |        s390: |  ok  |
index d7a5ac4..502c1d4 100644 (file)
@@ -13,6 +13,7 @@
     |        csky: |  ok  |
     |     hexagon: | TODO |
     |        ia64: | TODO |
+    |       loong: | TODO |
     |        m68k: | TODO |
     |  microblaze: |  ok  |
     |        mips: |  ok  |
@@ -24,7 +25,7 @@
     |        s390: |  ok  |
     |          sh: |  ok  |
     |       sparc: | TODO |
-    |          um: | TODO |
+    |          um: |  ok  |
     |         x86: |  ok  |
     |      xtensa: | TODO |
     -----------------------
index 136e14c..afb90be 100644 (file)
@@ -13,6 +13,7 @@
     |        csky: | TODO |
     |     hexagon: | TODO |
     |        ia64: | TODO |
+    |       loong: | TODO |
     |        m68k: | TODO |
     |  microblaze: | TODO |
     |        mips: |  ok  |
index 5b3f3d8..04120d2 100644 (file)
@@ -13,6 +13,7 @@
     |        csky: | TODO |
     |     hexagon: |  ok  |
     |        ia64: | TODO |
+    |       loong: | TODO |
     |        m68k: | TODO |
     |  microblaze: |  ok  |
     |        mips: |  ok  |
index 7a2eab4..e487c35 100644 (file)
@@ -13,6 +13,7 @@
     |        csky: |  ok  |
     |     hexagon: | TODO |
     |        ia64: | TODO |
+    |       loong: | TODO |
     |        m68k: | TODO |
     |  microblaze: |  ok  |
     |        mips: |  ok  |
index db02ab1..b3697f4 100644 (file)
@@ -13,6 +13,7 @@
     |        csky: |  ok  |
     |     hexagon: | TODO |
     |        ia64: | TODO |
+    |       loong: | TODO |
     |        m68k: | TODO |
     |  microblaze: | TODO |
     |        mips: | TODO |
index ec186e7..452385a 100644 (file)
@@ -13,6 +13,7 @@
     |        csky: |  ok  |
     |     hexagon: | TODO |
     |        ia64: |  ok  |
+    |       loong: | TODO |
     |        m68k: | TODO |
     |  microblaze: | TODO |
     |        mips: |  ok  |
index 4b7865e..daecf04 100644 (file)
@@ -13,6 +13,7 @@
     |        csky: |  ok  |
     |     hexagon: | TODO |
     |        ia64: |  ok  |
+    |       loong: | TODO |
     |        m68k: | TODO |
     |  microblaze: | TODO |
     |        mips: |  ok  |
index 5d9befa..adb1bd0 100644 (file)
@@ -13,6 +13,7 @@
     |        csky: | TODO |
     |     hexagon: | TODO |
     |        ia64: | TODO |
+    |       loong: | TODO |
     |        m68k: | TODO |
     |  microblaze: | TODO |
     |        mips: | TODO |
index d97fd38..ddcd716 100644 (file)
@@ -13,6 +13,7 @@
     |        csky: |  ok  |
     |     hexagon: | TODO |
     |        ia64: | TODO |
+    |       loong: | TODO |
     |        m68k: | TODO |
     |  microblaze: | TODO |
     |        mips: |  ok  |
index d30e347..2512120 100644 (file)
@@ -13,6 +13,7 @@
     |        csky: |  ok  |
     |     hexagon: | TODO |
     |        ia64: | TODO |
+    |       loong: | TODO |
     |        m68k: | TODO |
     |  microblaze: | TODO |
     |        mips: |  ok  |
index 9ae1fa2..f2fcff8 100644 (file)
@@ -13,6 +13,7 @@
     |        csky: | TODO |
     |     hexagon: | TODO |
     |        ia64: | TODO |
+    |       loong: | TODO |
     |        m68k: | TODO |
     |  microblaze: | TODO |
     |        mips: | TODO |
index 9e09988..95e485c 100644 (file)
@@ -13,6 +13,7 @@
     |        csky: |  ok  |
     |     hexagon: | TODO |
     |        ia64: | TODO |
+    |       loong: |  ok  |
     |        m68k: | TODO |
     |  microblaze: |  ok  |
     |        mips: |  ok  |
index 5c4ec31..8b1a8d9 100644 (file)
@@ -13,6 +13,7 @@
     |        csky: | TODO |
     |     hexagon: | TODO |
     |        ia64: | TODO |
+    |       loong: | TODO |
     |        m68k: | TODO |
     |  microblaze: | TODO |
     |        mips: | TODO |
index 65007c1..ab69e8f 100644 (file)
@@ -13,6 +13,7 @@
     |        csky: |  ok  |
     |     hexagon: |  ok  |
     |        ia64: | TODO |
+    |       loong: |  ok  |
     |        m68k: | TODO |
     |  microblaze: |  ok  |
     |        mips: |  ok  |
index 2005667..0bfb72a 100644 (file)
@@ -13,6 +13,7 @@
     |        csky: |  ok  |
     |     hexagon: | TODO |
     |        ia64: | TODO |
+    |       loong: |  ok  |
     |        m68k: | TODO |
     |  microblaze: | TODO |
     |        mips: |  ok  |
@@ -20,7 +21,7 @@
     |    openrisc: |  ok  |
     |      parisc: | TODO |
     |     powerpc: |  ok  |
-    |       riscv: | TODO |
+    |       riscv: |  ok  |
     |        s390: | TODO |
     |          sh: | TODO |
     |       sparc: |  ok  |
index 707514f..d2f2201 100644 (file)
@@ -13,6 +13,7 @@
     |        csky: | TODO |
     |     hexagon: | TODO |
     |        ia64: | TODO |
+    |       loong: | TODO |
     |        m68k: | TODO |
     |  microblaze: | TODO |
     |        mips: |  ok  |
index 9f31ce9..0d0647b 100644 (file)
@@ -7,12 +7,13 @@
     |         arch |status|
     -----------------------
     |       alpha: | TODO |
-    |         arc: | TODO |
+    |         arc: |  ok  |
     |         arm: |  ok  |
     |       arm64: |  ok  |
     |        csky: |  ok  |
     |     hexagon: |  ok  |
     |        ia64: | TODO |
+    |       loong: |  ok  |
     |        m68k: | TODO |
     |  microblaze: | TODO |
     |        mips: |  ok  |
index f148c43..13c297b 100644 (file)
@@ -13,6 +13,7 @@
     |        csky: |  ok  |
     |     hexagon: | TODO |
     |        ia64: | TODO |
+    |       loong: | TODO |
     |        m68k: | TODO |
     |  microblaze: | TODO |
     |        mips: |  ok  |
index 32c88b6..931687e 100644 (file)
@@ -13,6 +13,7 @@
     |        csky: |  ok  |
     |     hexagon: | TODO |
     |        ia64: | TODO |
+    |       loong: | TODO |
     |        m68k: | TODO |
     |  microblaze: | TODO |
     |        mips: |  ok  |
index d82a1f0..336d728 100644 (file)
@@ -36,6 +36,7 @@
     |        csky: | TODO |
     |     hexagon: | TODO |
     |        ia64: | TODO |
+    |       loong: | TODO |
     |        m68k: | TODO |
     |  microblaze: | TODO |
     |        mips: | TODO |
index 2687564..76d0121 100644 (file)
@@ -13,6 +13,7 @@
     |        csky: |  ..  |
     |     hexagon: |  ..  |
     |        ia64: | TODO |
+    |       loong: |  ok  |
     |        m68k: |  ..  |
     |  microblaze: |  ..  |
     |        mips: | TODO |
index 1b41091..a86b8b1 100644 (file)
@@ -13,6 +13,7 @@
     |        csky: |  ok  |
     |     hexagon: | TODO |
     |        ia64: | TODO |
+    |       loong: |  ok  |
     |        m68k: | TODO |
     |  microblaze: | TODO |
     |        mips: |  ok  |
index 2732725..364169f 100644 (file)
@@ -13,6 +13,7 @@
     |        csky: | TODO |
     |     hexagon: | TODO |
     |        ia64: | TODO |
+    |       loong: |  ok  |
     |        m68k: | TODO |
     |  microblaze: | TODO |
     |        mips: |  ok  |
index b9a4bda..6ea2747 100644 (file)
@@ -13,6 +13,7 @@
     |        csky: |  ok  |
     |     hexagon: |  ok  |
     |        ia64: | TODO |
+    |       loong: |  ok  |
     |        m68k: | TODO |
     |  microblaze: |  ok  |
     |        mips: |  ok  |
index 4aa51c9..c9e0a16 100644 (file)
@@ -13,6 +13,7 @@
     |        csky: |  ok  |
     |     hexagon: | TODO |
     |        ia64: | TODO |
+    |       loong: |  ok  |
     |        m68k: | TODO |
     |  microblaze: | TODO |
     |        mips: |  ok  |
index 0306ece..fd17d8d 100644 (file)
@@ -13,6 +13,7 @@
     |        csky: | TODO |
     |     hexagon: | TODO |
     |        ia64: |  ..  |
+    |       loong: |  ok  |
     |        m68k: | TODO |
     |  microblaze: | TODO |
     |        mips: |  ok  |
index 5d64e40..1a859ac 100644 (file)
@@ -13,6 +13,7 @@
     |        csky: |  ok  |
     |     hexagon: | TODO |
     |        ia64: |  ok  |
+    |       loong: |  ok  |
     |        m68k: | TODO |
     |  microblaze: | TODO |
     |        mips: |  ok  |
index 92c9db2..b122995 100644 (file)
@@ -13,6 +13,7 @@
     |        csky: | TODO |
     |     hexagon: | TODO |
     |        ia64: | TODO |
+    |       loong: | TODO |
     |        m68k: | TODO |
     |  microblaze: | TODO |
     |        mips: |  ok  |
index 7424fea..02f325f 100644 (file)
@@ -13,6 +13,7 @@
     |        csky: | TODO |
     |     hexagon: | TODO |
     |        ia64: |  ok  |
+    |       loong: | TODO |
     |        m68k: | TODO |
     |  microblaze: | TODO |
     |        mips: | TODO |
index 6098506..9bfff97 100644 (file)
@@ -13,6 +13,7 @@
     |        csky: |  ..  |
     |     hexagon: |  ..  |
     |        ia64: | TODO |
+    |       loong: |  ok  |
     |        m68k: |  ..  |
     |  microblaze: |  ..  |
     |        mips: |  ok  |
index f2dcbec..039e4e9 100644 (file)
@@ -13,6 +13,7 @@
     |        csky: | TODO |
     |     hexagon: | TODO |
     |        ia64: | TODO |
+    |       loong: | TODO |
     |        m68k: |  ..  |
     |  microblaze: |  ..  |
     |        mips: | TODO |
index 680090d..13b4940 100644 (file)
@@ -13,6 +13,7 @@
     |        csky: | TODO |
     |     hexagon: | TODO |
     |        ia64: | TODO |
+    |       loong: | TODO |
     |        m68k: | TODO |
     |  microblaze: | TODO |
     |        mips: | TODO |
index 205a90e..b01bf7b 100644 (file)
@@ -13,6 +13,7 @@
     |        csky: | TODO |
     |     hexagon: | TODO |
     |        ia64: | TODO |
+    |       loong: |  ok  |
     |        m68k: | TODO |
     |  microblaze: | TODO |
     |        mips: |  ok  |
index 9f16d6e..fc3687b 100644 (file)
     |        csky: | TODO |
     |     hexagon: | TODO |
     |        ia64: | TODO |
+    |       loong: |  ok  |
     |        m68k: | TODO |
     |  microblaze: | TODO |
     |        mips: |  ok  |
     |       nios2: | TODO |
     |    openrisc: | TODO |
-    |      parisc: | TODO |
+    |      parisc: |  ok  |
     |     powerpc: |  ok  |
     |       riscv: |  ok  |
     |        s390: |  ok  |
index a80a599..4d19b19 100644 (file)
@@ -37,30 +37,31 @@ The network filesystem helper library needs a place to store a bit of state for
 its use on each netfs inode it is helping to manage.  To this end, a context
 structure is defined::
 
-       struct netfs_i_context {
+       struct netfs_inode {
+               struct inode inode;
                const struct netfs_request_ops *ops;
-               struct fscache_cookie   *cache;
+               struct fscache_cookie *cache;
        };
 
-A network filesystem that wants to use netfs lib must place one of these
-directly after the VFS ``struct inode`` it allocates, usually as part of its
-own struct.  This can be done in a way similar to the following::
+A network filesystem that wants to use netfs lib must place one of these in its
+inode wrapper struct instead of the VFS ``struct inode``.  This can be done in
+a way similar to the following::
 
        struct my_inode {
-               struct {
-                       /* These must be contiguous */
-                       struct inode            vfs_inode;
-                       struct netfs_i_context  netfs_ctx;
-               };
+               struct netfs_inode netfs; /* Netfslib context and vfs inode */
                ...
        };
 
-This allows netfslib to find its state by simple offset from the inode pointer,
-thereby allowing the netfslib helper functions to be pointed to directly by the
-VFS/VM operation tables.
+This allows netfslib to find its state by using ``container_of()`` from the
+inode pointer, thereby allowing the netfslib helper functions to be pointed to
+directly by the VFS/VM operation tables.
 
 The structure contains the following fields:
 
+ * ``inode``
+
+   The VFS inode structure.
+
  * ``ops``
 
    The set of operations provided by the network filesystem to netfslib.
@@ -78,19 +79,17 @@ To help deal with the per-inode context, a number of helper functions are
 provided.  Firstly, a function to perform basic initialisation on a context and
 set the operations table pointer::
 
-       void netfs_i_context_init(struct inode *inode,
-                                 const struct netfs_request_ops *ops);
+       void netfs_inode_init(struct netfs_inode *ctx,
+                             const struct netfs_request_ops *ops);
 
-then two functions to cast between the VFS inode structure and the netfs
-context::
+then a function to cast from the VFS inode structure to the netfs context::
 
-       struct netfs_i_context *netfs_i_context(struct inode *inode);
-       struct inode *netfs_inode(struct netfs_i_context *ctx);
+       struct netfs_inode *netfs_inode(struct inode *inode);
 
 and finally, a function to get the cache cookie pointer from the context
 attached to an inode (or NULL if fscache is disabled)::
 
-       struct fscache_cookie *netfs_i_cookie(struct inode *inode);
+       struct fscache_cookie *netfs_i_cookie(struct netfs_inode *ctx);
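+
+Taken together, a minimal sketch of an inode-setup path (``inode``, ``cookie``
+and ``myfs_req_ops`` are hypothetical locals and a hypothetical ops table; see
+the example table further below)::
+
+       struct netfs_inode *ctx = netfs_inode(inode);
+
+       netfs_inode_init(ctx, &myfs_req_ops);
+       cookie = netfs_i_cookie(ctx);   /* NULL if fscache is disabled */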
 
 
 Buffered Read Helpers
@@ -137,8 +136,9 @@ Three read helpers are provided::
 
        void netfs_readahead(struct readahead_control *ractl);
        int netfs_read_folio(struct file *file,
-                          struct folio *folio);
-       int netfs_write_begin(struct file *file,
+                            struct folio *folio);
+       int netfs_write_begin(struct netfs_inode *ctx,
+                             struct file *file,
                              struct address_space *mapping,
                              loff_t pos,
                              unsigned int len,
@@ -158,9 +158,10 @@ The helpers manage the read request, calling back into the network filesystem
 through the supplied table of operations.  Waits will be performed as
 necessary before returning for helpers that are meant to be synchronous.
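 
 In practice the read helpers are wired straight into the VFS tables; a
 minimal sketch, assuming the helper signatures above (``myfs`` is
 hypothetical and the write-side ops are omitted)::
 
        const struct address_space_operations myfs_aops = {
                .readahead      = netfs_readahead,
                .read_folio     = netfs_read_folio,
                /* writeback ops supplied by the filesystem */
        };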
 
-If an error occurs and netfs_priv is non-NULL, ops->cleanup() will be called to
-deal with it.  If some parts of the request are in progress when an error
-occurs, the request will get partially completed if sufficient data is read.
+If an error occurs, ->free_request() will be called to clean up the allocated
+netfs_io_request struct.  If some parts of the request are in progress when an
+error occurs, the request will get partially completed if sufficient data is
+read.
 
 Additionally, there is::
 
@@ -208,8 +209,7 @@ The above fields are the ones the netfs can use.  They are:
  * ``netfs_priv``
 
    The network filesystem's private data.  The value for this can be passed in
-   to the helper functions or set during the request.  The ->cleanup() op will
-   be called if this is non-NULL at the end.
+   to the helper functions or set during the request.
 
  * ``start``
  * ``len``
@@ -294,6 +294,7 @@ through which it can issue requests and negotiate::
 
        struct netfs_request_ops {
                void (*init_request)(struct netfs_io_request *rreq, struct file *file);
+               void (*free_request)(struct netfs_io_request *rreq);
                int (*begin_cache_operation)(struct netfs_io_request *rreq);
                void (*expand_readahead)(struct netfs_io_request *rreq);
                bool (*clamp_length)(struct netfs_io_subrequest *subreq);
@@ -302,7 +303,6 @@ through which it can issue requests and negotiate::
                int (*check_write_begin)(struct file *file, loff_t pos, unsigned len,
                                         struct folio *folio, void **_fsdata);
                void (*done)(struct netfs_io_request *rreq);
-               void (*cleanup)(struct address_space *mapping, void *netfs_priv);
        };
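 
 For instance, a filesystem might fill in only a subset of these (a sketch;
 the ``myfs_*`` handlers are hypothetical)::
 
        static const struct netfs_request_ops myfs_req_ops = {
                .init_request   = myfs_init_request,
                .free_request   = myfs_free_request,
                .done           = myfs_read_done,
        };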
 
 The operations are as follows:
@@ -310,7 +310,12 @@ The operations are as follows:
  * ``init_request()``
 
    [Optional] This is called to initialise the request structure.  It is given
-   the file for reference and can modify the ->netfs_priv value.
+   the file for reference.
+
+ * ``free_request()``
+
+   [Optional] This is called as the request is being deallocated so that the
+   filesystem can clean up any state it has attached there.
 
  * ``begin_cache_operation()``
 
@@ -384,11 +389,6 @@ The operations are as follows:
    [Optional] This is called after the folios in the request have all been
    unlocked (and marked uptodate if applicable).
 
- * ``cleanup``
-
-   [Optional] This is called as the request is being deallocated so that the
-   filesystem can clean up ->netfs_priv.
-
 
 
 Read Helper Procedure
diff --git a/Documentation/hte/hte.rst b/Documentation/hte/hte.rst
deleted file mode 100644 (file)
index 153f323..0000000
+++ /dev/null
@@ -1,79 +0,0 @@
-.. SPDX-License-Identifier: GPL-2.0+
-
-============================================
-The Linux Hardware Timestamping Engine (HTE)
-============================================
-
-:Author: Dipen Patel
-
-Introduction
-------------
-
-Certain devices have built in hardware timestamping engines which can
-monitor sets of system signals, lines, buses etc... in realtime for state
-change; upon detecting the change they can automatically store the timestamp at
-the moment of occurrence. Such functionality may help achieve better accuracy
-in obtaining timestamps than using software counterparts i.e. ktime and
-friends.
-
-This document describes the API that can be used by hardware timestamping
-engine provider and consumer drivers that want to use the hardware timestamping
-engine (HTE) framework. Both consumers and providers must include
-``#include <linux/hte.h>``.
-
-The HTE framework APIs for the providers
-----------------------------------------
-
-.. kernel-doc:: drivers/hte/hte.c
-   :functions: devm_hte_register_chip hte_push_ts_ns
-
-The HTE framework APIs for the consumers
-----------------------------------------
-
-.. kernel-doc:: drivers/hte/hte.c
-   :functions: hte_init_line_attr hte_ts_get hte_ts_put devm_hte_request_ts_ns hte_request_ts_ns hte_enable_ts hte_disable_ts of_hte_req_count hte_get_clk_src_info
-
-The HTE framework public structures
------------------------------------
-.. kernel-doc:: include/linux/hte.h
-
-More on the HTE timestamp data
-------------------------------
-The ``struct hte_ts_data`` is used to pass timestamp details between the
-consumers and the providers. It expresses timestamp data in nanoseconds in
-u64. An example of the typical timestamp data life cycle, for the GPIO line is
-as follows::
-
- - Monitors GPIO line change.
- - Detects the state change on GPIO line.
- - Converts timestamps in nanoseconds.
- - Stores GPIO raw level in raw_level variable if the provider has that
- hardware capability.
- - Pushes this hte_ts_data object to HTE subsystem.
- - HTE subsystem increments seq counter and invokes consumer provided callback.
- Based on callback return value, the HTE core invokes secondary callback in
- the thread context.
-
-HTE subsystem debugfs attributes
---------------------------------
-HTE subsystem creates debugfs attributes at ``/sys/kernel/debug/hte/``.
-It also creates line/signal-related debugfs attributes at
-``/sys/kernel/debug/hte/<provider>/<label or line id>/``. Note that these
-attributes are read-only.
-
-`ts_requested`
-               The total number of entities requested from the given provider,
-               where entity is specified by the provider and could represent
-               lines, GPIO, chip signals, buses etc...
-                The attribute will be available at
-               ``/sys/kernel/debug/hte/<provider>/``.
-
-`total_ts`
-               The total number of entities supported by the provider.
-                The attribute will be available at
-               ``/sys/kernel/debug/hte/<provider>/``.
-
-`dropped_timestamps`
-               The dropped timestamps for a given line.
-                The attribute will be available at
-               ``/sys/kernel/debug/hte/<provider>/<label or line id>/``.
diff --git a/Documentation/hte/index.rst b/Documentation/hte/index.rst
deleted file mode 100644 (file)
index 9f43301..0000000
+++ /dev/null
@@ -1,22 +0,0 @@
-.. SPDX-License-Identifier: GPL-2.0
-
-============================================
-The Linux Hardware Timestamping Engine (HTE)
-============================================
-
-The HTE Subsystem
-=================
-
-.. toctree::
-   :maxdepth: 1
-
-   hte
-
-HTE Tegra Provider
-==================
-
-.. toctree::
-   :maxdepth: 1
-
-   tegra194-hte
-
diff --git a/Documentation/hte/tegra194-hte.rst b/Documentation/hte/tegra194-hte.rst
deleted file mode 100644 (file)
index 41983e0..0000000
+++ /dev/null
@@ -1,49 +0,0 @@
-.. SPDX-License-Identifier: GPL-2.0+
-
-HTE Kernel provider driver
-==========================
-
-Description
------------
-The Nvidia tegra194 HTE provider driver implements two GTE
-(Generic Timestamping Engine) instances: 1) GPIO GTE and 2) LIC
-(Legacy Interrupt Controller) IRQ GTE. Both GTE instances get the
-timestamp from the system counter TSC which has 31.25MHz clock rate, and the
-driver converts clock tick rate to nanoseconds before storing it as timestamp
-value.
-
-GPIO GTE
---------
-
-This GTE instance timestamps GPIO in real time. For that to happen GPIO
-needs to be configured as input. The always on (AON) GPIO controller instance
-supports timestamping GPIOs in real time and it has 39 GPIO lines. The GPIO GTE
-and AON GPIO controller are tightly coupled as it requires very specific bits
-to be set in GPIO config register before GPIO GTE can be used, for that GPIOLIB
-adds two optional APIs as below. The GPIO GTE code supports both kernel
-and userspace consumers. The kernel space consumers can directly talk to HTE
-subsystem while userspace consumers timestamp requests go through GPIOLIB CDEV
-framework to HTE subsystem.
-
-.. kernel-doc:: drivers/gpio/gpiolib.c
-   :functions: gpiod_enable_hw_timestamp_ns gpiod_disable_hw_timestamp_ns
-
-For userspace consumers, GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE flag must be
-specified during IOCTL calls. Refer to ``tools/gpio/gpio-event-mon.c``, which
-returns the timestamp in nanoseconds.
-
-LIC (Legacy Interrupt Controller) IRQ GTE
------------------------------------------
-
-This GTE instance timestamps LIC IRQ lines in real time. There are 352 IRQ
-lines which this instance can add timestamps to in real time. The hte
-devicetree binding described at ``Documentation/devicetree/bindings/hte/``
-provides an example of how a consumer can request an IRQ line. Since it is a
-one-to-one mapping with IRQ GTE provider, consumers can simply specify the IRQ
-number that they are interested in. There is no userspace consumer support for
-this GTE instance in the HTE framework.
-
-The provider source code of both IRQ and GPIO GTE instances is located at
-``drivers/hte/hte-tegra194.c``. The test driver
-``drivers/hte/hte-tegra194-test.c`` demonstrates HTE API usage for both IRQ
-and GPIO GTE.
index 8f9be0e..67036a0 100644 (file)
@@ -137,7 +137,6 @@ needed).
    scheduler/index
    mhi/index
    peci/index
-   hte/index
 
 Architecture-agnostic documentation
 -----------------------------------
index 34415ae..19c286c 100644 (file)
@@ -32,6 +32,7 @@ you probably needn't concern yourself with pcmciautils.
 GNU C                  5.1              gcc --version
 Clang/LLVM (optional)  11.0.0           clang --version
 GNU make               3.81             make --version
+bash                   4.2              bash --version
 binutils               2.23             ld -v
 flex                   2.5.35           flex --version
 bison                  2.0              bison --version
@@ -84,6 +85,12 @@ Make
 
 You will need GNU make 3.81 or later to build the kernel.
 
+Bash
+----
+
+Some bash scripts are used for the kernel build.
+Bash 4.2 or newer is needed.
+
 Binutils
 --------
 
@@ -362,6 +369,11 @@ Make
 
 - <ftp://ftp.gnu.org/gnu/make/>
 
+Bash
+----
+
+- <ftp://ftp.gnu.org/gnu/bash/>
+
 Binutils
 --------
 
index b0bd510..6d5ec1e 100644 (file)
@@ -42,7 +42,7 @@ if usbmon is built into the kernel::
        # modprobe usbmon
        #
 
-Verify that bus sockets are present:
+Verify that bus sockets are present::
 
        # ls /sys/kernel/debug/usb/usbmon
        0s  0u  1s  1t  1u  2s  2t  2u  3s  3t  3u  4s  4t  4u
index a6d3bd9..1fc9ead 100644 (file)
@@ -1507,7 +1507,7 @@ F:        drivers/clocksource/arm_arch_timer.c
 ARM HDLCD DRM DRIVER
 M:     Liviu Dudau <liviu.dudau@arm.com>
 S:     Supported
-F:     Documentation/devicetree/bindings/display/arm,hdlcd.txt
+F:     Documentation/devicetree/bindings/display/arm,hdlcd.yaml
 F:     drivers/gpu/drm/arm/hdlcd_*
 
 ARM INTEGRATOR, VERSATILE AND REALVIEW SUPPORT
@@ -1542,7 +1542,7 @@ M:        Mihail Atanassov <mihail.atanassov@arm.com>
 L:     Mali DP Maintainers <malidp@foss.arm.com>
 S:     Supported
 T:     git git://anongit.freedesktop.org/drm/drm-misc
-F:     Documentation/devicetree/bindings/display/arm,komeda.txt
+F:     Documentation/devicetree/bindings/display/arm,komeda.yaml
 F:     Documentation/gpu/komeda-kms.rst
 F:     drivers/gpu/drm/arm/display/include/
 F:     drivers/gpu/drm/arm/display/komeda/
@@ -1564,7 +1564,7 @@ M:        Brian Starkey <brian.starkey@arm.com>
 L:     Mali DP Maintainers <malidp@foss.arm.com>
 S:     Supported
 T:     git git://anongit.freedesktop.org/drm/drm-misc
-F:     Documentation/devicetree/bindings/display/arm,malidp.txt
+F:     Documentation/devicetree/bindings/display/arm,malidp.yaml
 F:     Documentation/gpu/afbc.rst
 F:     drivers/gpu/drm/arm/
 
@@ -2009,7 +2009,7 @@ L:        linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:     Maintained
 T:     git git://github.com/ulli-kroll/linux.git
 F:     Documentation/devicetree/bindings/arm/gemini.yaml
-F:     Documentation/devicetree/bindings/net/cortina,gemini-ethernet.txt
+F:     Documentation/devicetree/bindings/net/cortina,gemini-ethernet.yaml
 F:     Documentation/devicetree/bindings/pinctrl/cortina,gemini-pinctrl.txt
 F:     Documentation/devicetree/bindings/rtc/faraday,ftrtc010.yaml
 F:     arch/arm/boot/dts/gemini*
@@ -3757,6 +3757,13 @@ F:       include/linux/bpf_lsm.h
 F:     kernel/bpf/bpf_lsm.c
 F:     security/bpf/
 
+BPFTOOL
+M:     Quentin Monnet <quentin@isovalent.com>
+L:     bpf@vger.kernel.org
+S:     Maintained
+F:     kernel/bpf/disasm.*
+F:     tools/bpf/bpftool/
+
 BROADCOM B44 10/100 ETHERNET DRIVER
 M:     Michael Chan <michael.chan@broadcom.com>
 L:     netdev@vger.kernel.org
@@ -6078,7 +6085,7 @@ M:        Sakari Ailus <sakari.ailus@linux.intel.com>
 L:     linux-media@vger.kernel.org
 S:     Maintained
 T:     git git://linuxtv.org/media_tree.git
-F:     Documentation/devicetree/bindings/media/i2c/dongwoon,dw9807-vcm.txt
+F:     Documentation/devicetree/bindings/media/i2c/dongwoon,dw9807-vcm.yaml
 F:     drivers/media/i2c/dw9807-vcm.c
 
 DOUBLETALK DRIVER
@@ -7647,6 +7654,7 @@ F:        include/uapi/scsi/fc/
 
 FILE LOCKING (flock() and fcntl()/lockf())
 M:     Jeff Layton <jlayton@kernel.org>
+M:     Chuck Lever <chuck.lever@oracle.com>
 L:     linux-fsdevel@vger.kernel.org
 S:     Maintained
 F:     fs/fcntl.c
@@ -9081,7 +9089,7 @@ HTE SUBSYSTEM
 M:     Dipen Patel <dipenp@nvidia.com>
 S:     Maintained
 F:     Documentation/devicetree/bindings/timestamp/
-F:     Documentation/hte/
+F:     Documentation/driver-api/hte/
 F:     drivers/hte/
 F:     include/linux/hte.h
 
@@ -10739,6 +10747,7 @@ W:      http://kernelnewbies.org/KernelJanitors
 
 KERNEL NFSD, SUNRPC, AND LOCKD SERVERS
 M:     Chuck Lever <chuck.lever@oracle.com>
+M:     Jeff Layton <jlayton@kernel.org>
 L:     linux-nfs@vger.kernel.org
 S:     Supported
 W:     http://nfs.sourceforge.net/
@@ -11257,6 +11266,7 @@ M:      Damien Le Moal <damien.lemoal@opensource.wdc.com>
 L:     linux-ide@vger.kernel.org
 S:     Maintained
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/dlemoal/libata.git
+F:     Documentation/ABI/testing/sysfs-ata
 F:     Documentation/devicetree/bindings/ata/
 F:     drivers/ata/
 F:     include/linux/ata.h
@@ -12696,7 +12706,6 @@ L:      netdev@vger.kernel.org
 S:     Supported
 W:     http://www.mellanox.com
 Q:     https://patchwork.kernel.org/project/netdevbpf/list/
-F:     drivers/net/ethernet/mellanox/mlx5/core/accel/*
 F:     drivers/net/ethernet/mellanox/mlx5/core/en_accel/*
 F:     drivers/net/ethernet/mellanox/mlx5/core/fpga/*
 F:     include/linux/mlx5/mlx5_ifc_fpga.h
@@ -15824,6 +15833,14 @@ S:     Maintained
 F:     Documentation/devicetree/bindings/iio/chemical/plantower,pms7003.yaml
 F:     drivers/iio/chemical/pms7003.c
 
+PLATFORM FEATURE INFRASTRUCTURE
+M:     Juergen Gross <jgross@suse.com>
+S:     Maintained
+F:     arch/*/include/asm/platform-feature.h
+F:     include/asm-generic/platform-feature.h
+F:     include/linux/platform-feature.h
+F:     kernel/platform-feature.c
+
 PLDMFW LIBRARY
 M:     Jacob Keller <jacob.e.keller@intel.com>
 S:     Maintained
@@ -19220,7 +19237,7 @@ F:      arch/arc/plat-axs10x
 SYNOPSYS AXS10x RESET CONTROLLER DRIVER
 M:     Eugeniy Paltsev <Eugeniy.Paltsev@synopsys.com>
 S:     Supported
-F:     Documentation/devicetree/bindings/reset/snps,axs10x-reset.txt
+F:     Documentation/devicetree/bindings/reset/snps,axs10x-reset.yaml
 F:     drivers/reset/reset-axs10x.c
 
 SYNOPSYS CREG GPIO DRIVER
index c43d825..1a6678d 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
 VERSION = 5
 PATCHLEVEL = 19
 SUBLEVEL = 0
-EXTRAVERSION = -rc1
+EXTRAVERSION = -rc2
 NAME = Superb Owl
 
 # *DOCUMENTATION*
@@ -788,6 +788,7 @@ stackp-flags-$(CONFIG_STACKPROTECTOR_STRONG)      := -fstack-protector-strong
 KBUILD_CFLAGS += $(stackp-flags-y)
 
 KBUILD_CFLAGS-$(CONFIG_WERROR) += -Werror
+KBUILD_CFLAGS-$(CONFIG_CC_NO_ARRAY_BOUNDS) += -Wno-array-bounds
 KBUILD_CFLAGS += $(KBUILD_CFLAGS-y) $(CONFIG_CC_IMPLICIT_FALLTHROUGH)
 
 ifdef CONFIG_CC_IS_CLANG
@@ -805,6 +806,9 @@ endif
 KBUILD_CFLAGS += $(call cc-disable-warning, unused-but-set-variable)
 KBUILD_CFLAGS += $(call cc-disable-warning, unused-const-variable)
 
+# These result in bogus false positives
+KBUILD_CFLAGS += $(call cc-disable-warning, dangling-pointer)
+
 ifdef CONFIG_FRAME_POINTER
 KBUILD_CFLAGS  += -fno-omit-frame-pointer -fno-optimize-sibling-calls
 else
diff --git a/arch/arm/include/asm/xen/xen-ops.h b/arch/arm/include/asm/xen/xen-ops.h
new file mode 100644 (file)
index 0000000..7ebb7eb
--- /dev/null
@@ -0,0 +1,2 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#include <xen/arm/xen-ops.h>
index 82ffac6..059cce0 100644 (file)
@@ -33,7 +33,7 @@
 #include <asm/dma-iommu.h>
 #include <asm/mach/map.h>
 #include <asm/system_info.h>
-#include <xen/swiotlb-xen.h>
+#include <asm/xen/xen-ops.h>
 
 #include "dma.h"
 #include "mm.h"
@@ -2287,10 +2287,7 @@ void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
 
        set_dma_ops(dev, dma_ops);
 
-#ifdef CONFIG_XEN
-       if (xen_initial_domain())
-               dev->dma_ops = &xen_swiotlb_dma_ops;
-#endif
+       xen_setup_dma_ops(dev);
        dev->archdata.dma_ops_setup = true;
 }
 
index 07eb69f..1f9c3ba 100644 (file)
@@ -443,6 +443,8 @@ static int __init xen_guest_init(void)
        if (!xen_domain())
                return 0;
 
+       xen_set_restricted_virtio_memory_access();
+
        if (!acpi_disabled)
                xen_acpi_guest_init();
        else
index 55f998c..42ff95d 100644 (file)
 #define ID_AA64SMFR0_F32F32_SHIFT      32
 
 #define ID_AA64SMFR0_FA64              0x1
-#define ID_AA64SMFR0_I16I64            0x4
+#define ID_AA64SMFR0_I16I64            0xf
 #define ID_AA64SMFR0_F64F64            0x1
-#define ID_AA64SMFR0_I8I32             0x4
+#define ID_AA64SMFR0_I8I32             0xf
 #define ID_AA64SMFR0_F16F32            0x1
 #define ID_AA64SMFR0_B16F32            0x1
 #define ID_AA64SMFR0_F32F32            0x1
diff --git a/arch/arm64/include/asm/xen/xen-ops.h b/arch/arm64/include/asm/xen/xen-ops.h
new file mode 100644 (file)
index 0000000..7ebb7eb
--- /dev/null
@@ -0,0 +1,2 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#include <xen/arm/xen-ops.h>
index 8199793..aecf307 100644 (file)
@@ -331,7 +331,7 @@ void task_set_vl_onexec(struct task_struct *task, enum vec_type type,
  *    trapping to the kernel.
  *
  *    When stored, Z0-Z31 (incorporating Vn in bits[127:0] or the
- *    corresponding Zn), P0-P15 and FFR are encoded in in
+ *    corresponding Zn), P0-P15 and FFR are encoded in
  *    task->thread.sve_state, formatted appropriately for vector
  *    length task->thread.sve_vl or, if SVCR.SM is set,
  *    task->thread.sme_vl.
@@ -1916,10 +1916,15 @@ void __efi_fpsimd_begin(void)
                        if (system_supports_sme()) {
                                svcr = read_sysreg_s(SYS_SVCR);
 
-                               if (!system_supports_fa64())
-                                       ffr = svcr & SVCR_SM_MASK;
+                               __this_cpu_write(efi_sm_state,
+                                                svcr & SVCR_SM_MASK);
 
-                               __this_cpu_write(efi_sm_state, ffr);
+                               /*
+                                * Unless we have FA64, FFR does not
+                                * exist in streaming mode.
+                                */
+                               if (!system_supports_fa64())
+                                       ffr = !(svcr & SVCR_SM_MASK);
                        }
 
                        sve_save_state(sve_state + sve_ffr_offset(sve_max_vl()),
@@ -1964,8 +1969,13 @@ void __efi_fpsimd_end(void)
                                        sysreg_clear_set_s(SYS_SVCR,
                                                           0,
                                                           SVCR_SM_MASK);
+
+                                       /*
+                                        * Unless we have FA64, FFR does not
+                                        * exist in streaming mode.
+                                        */
                                        if (!system_supports_fa64())
-                                               ffr = efi_sm_state;
+                                               ffr = false;
                                }
                        }
 
index 57b30bc..f6b0074 100644 (file)
@@ -244,6 +244,11 @@ static void mte_update_gcr_excl(struct task_struct *task)
                SYS_GCR_EL1);
 }
 
+#ifdef CONFIG_KASAN_HW_TAGS
+/* Only called from assembly, silence sparse */
+void __init kasan_hw_tags_enable(struct alt_instr *alt, __le32 *origptr,
+                                __le32 *updptr, int nr_inst);
+
 void __init kasan_hw_tags_enable(struct alt_instr *alt, __le32 *origptr,
                                 __le32 *updptr, int nr_inst)
 {
@@ -252,6 +257,7 @@ void __init kasan_hw_tags_enable(struct alt_instr *alt, __le32 *origptr,
        if (kasan_hw_tags_enabled())
                *updptr = cpu_to_le32(aarch64_insn_gen_nop());
 }
+#endif
 
 void mte_thread_init_user(void)
 {
index 6719f9e..6099c81 100644 (file)
@@ -9,9 +9,9 @@
 #include <linux/dma-map-ops.h>
 #include <linux/dma-iommu.h>
 #include <xen/xen.h>
-#include <xen/swiotlb-xen.h>
 
 #include <asm/cacheflush.h>
+#include <asm/xen/xen-ops.h>
 
 void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
                enum dma_data_direction dir)
@@ -52,8 +52,5 @@ void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
        if (iommu)
                iommu_setup_dma_ops(dev, dma_base, dma_base + size - 1);
 
-#ifdef CONFIG_XEN
-       if (xen_swiotlb_detect())
-               dev->dma_ops = &xen_swiotlb_dma_ops;
-#endif
+       xen_setup_dma_ops(dev);
 }
index 8ab4035..42f2e9a 100644 (file)
@@ -1478,6 +1478,7 @@ skip_init_ctx:
                        bpf_jit_binary_free(header);
                        prog->bpf_func = NULL;
                        prog->jited = 0;
+                       prog->jited_len = 0;
                        goto out_off;
                }
                bpf_jit_binary_lock_ro(header);
index 89bfb74..5c55509 100755 (executable)
@@ -253,7 +253,7 @@ END {
        next
 }
 
-/0b[01]+/ && block = "Enum" {
+/0b[01]+/ && block == "Enum" {
        expect_fields(2)
        val = $1
        name = $2
index 80657bf..1920d52 100644 (file)
@@ -343,6 +343,7 @@ config NR_CPUS
 
 config NUMA
        bool "NUMA Support"
+       select SMP
        select ACPI_NUMA if ACPI
        help
          Say Y to compile the kernel with NUMA (Non-Uniform Memory Access)
index befe818..0ef3b18 100644 (file)
@@ -19,7 +19,7 @@ typedef struct {
        unsigned int __softirq_pending;
 } ____cacheline_aligned irq_cpustat_t;
 
-DECLARE_PER_CPU_ALIGNED(irq_cpustat_t, irq_stat);
+DECLARE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
 
 #define __ARCH_IRQ_STAT
 
index 34f15a6..e6569f1 100644 (file)
@@ -6,6 +6,7 @@
 #define __ASM_PERCPU_H
 
 #include <asm/cmpxchg.h>
+#include <asm/loongarch.h>
 
 /* Use r21 for fast access */
 register unsigned long __my_cpu_offset __asm__("$r21");
index 551e1f3..71189b2 100644 (file)
@@ -9,10 +9,16 @@
 #include <linux/atomic.h>
 #include <linux/bitops.h>
 #include <linux/linkage.h>
-#include <linux/smp.h>
 #include <linux/threads.h>
 #include <linux/cpumask.h>
 
+extern int smp_num_siblings;
+extern int num_processors;
+extern int disabled_cpus;
+extern cpumask_t cpu_sibling_map[];
+extern cpumask_t cpu_core_map[];
+extern cpumask_t cpu_foreign_map[];
+
 void loongson3_smp_setup(void);
 void loongson3_prepare_cpus(unsigned int max_cpus);
 void loongson3_boot_secondary(int cpu, struct task_struct *idle);
@@ -25,26 +31,11 @@ int loongson3_cpu_disable(void);
 void loongson3_cpu_die(unsigned int cpu);
 #endif
 
-#ifdef CONFIG_SMP
-
 static inline void plat_smp_setup(void)
 {
        loongson3_smp_setup();
 }
 
-#else /* !CONFIG_SMP */
-
-static inline void plat_smp_setup(void) { }
-
-#endif /* !CONFIG_SMP */
-
-extern int smp_num_siblings;
-extern int num_processors;
-extern int disabled_cpus;
-extern cpumask_t cpu_sibling_map[];
-extern cpumask_t cpu_core_map[];
-extern cpumask_t cpu_foreign_map[];
-
 static inline int raw_smp_processor_id(void)
 {
 #if defined(__VDSO__)
index d3ed99a..fb41e9e 100644 (file)
 #include <asm/cpu.h>
 #include <asm/cpu-features.h>
 
-/*
- * Standard way to access the cycle counter.
- * Currently only used on SMP for scheduling.
- *
- * We know that all SMP capable CPUs have cycle counters.
- */
-
 typedef unsigned long cycles_t;
 
 #define get_cycles get_cycles
index b16c3de..bb729ee 100644 (file)
@@ -138,6 +138,7 @@ void __init acpi_boot_table_init(void)
        }
 }
 
+#ifdef CONFIG_SMP
 static int set_processor_mask(u32 id, u32 flags)
 {
 
@@ -166,15 +167,18 @@ static int set_processor_mask(u32 id, u32 flags)
 
        return cpu;
 }
+#endif
 
 static void __init acpi_process_madt(void)
 {
+#ifdef CONFIG_SMP
        int i;
 
        for (i = 0; i < NR_CPUS; i++) {
                __cpu_number_map[i] = -1;
                __cpu_logical_map[i] = -1;
        }
+#endif
 
        loongson_sysconf.nr_cpus = num_processors;
 }
index 8c9fe29..b38f548 100644 (file)
@@ -4,6 +4,7 @@
  *
  * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
  */
+#include <asm/cpu-info.h>
 #include <linux/cacheinfo.h>
 
 /* Populates leaf and increments to next leaf */
index 4b671d3..b34b8d7 100644 (file)
@@ -22,6 +22,8 @@
 #include <asm/setup.h>
 
 DEFINE_PER_CPU(unsigned long, irq_stack);
+DEFINE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
+EXPORT_PER_CPU_SYMBOL(irq_stat);
 
 struct irq_domain *cpu_domain;
 struct irq_domain *liointc_domain;
@@ -56,8 +58,11 @@ int arch_show_interrupts(struct seq_file *p, int prec)
 
 void __init init_IRQ(void)
 {
-       int i, r, ipi_irq;
+       int i;
+#ifdef CONFIG_SMP
+       int r, ipi_irq;
        static int ipi_dummy_dev;
+#endif
        unsigned int order = get_order(IRQ_STACK_SIZE);
        struct page *page;
 
index 6d944d6..bfa0dfe 100644 (file)
@@ -120,10 +120,12 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
 /*
  * Copy architecture-specific thread state
  */
-int copy_thread(unsigned long clone_flags, unsigned long usp,
-       unsigned long kthread_arg, struct task_struct *p, unsigned long tls)
+int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
 {
        unsigned long childksp;
+       unsigned long tls = args->tls;
+       unsigned long usp = args->stack;
+       unsigned long clone_flags = args->flags;
        struct pt_regs *childregs, *regs = current_pt_regs();
 
        childksp = (unsigned long)task_stack_page(p) + THREAD_SIZE - 32;
@@ -136,12 +138,12 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
        p->thread.csr_crmd = csr_read32(LOONGARCH_CSR_CRMD);
        p->thread.csr_prmd = csr_read32(LOONGARCH_CSR_PRMD);
        p->thread.csr_ecfg = csr_read32(LOONGARCH_CSR_ECFG);
-       if (unlikely(p->flags & (PF_KTHREAD | PF_IO_WORKER))) {
+       if (unlikely(args->fn)) {
                /* kernel thread */
-               p->thread.reg23 = usp; /* fn */
-               p->thread.reg24 = kthread_arg;
                p->thread.reg03 = childksp;
-               p->thread.reg01 = (unsigned long) ret_from_kernel_thread;
+               p->thread.reg23 = (unsigned long)args->fn;
+               p->thread.reg24 = (unsigned long)args->fn_arg;
+               p->thread.reg01 = (unsigned long)ret_from_kernel_thread;
                memset(childregs, 0, sizeof(struct pt_regs));
                childregs->csr_euen = p->thread.csr_euen;
                childregs->csr_crmd = p->thread.csr_crmd;
index 185e403..c74860b 100644 (file)
@@ -39,7 +39,6 @@
 #include <asm/pgalloc.h>
 #include <asm/sections.h>
 #include <asm/setup.h>
-#include <asm/smp.h>
 #include <asm/time.h>
 
 #define SMBIOS_BIOSSIZE_OFFSET         0x09
@@ -349,8 +348,6 @@ static void __init prefill_possible_map(void)
 
        nr_cpu_ids = possible;
 }
-#else
-static inline void prefill_possible_map(void) {}
 #endif
 
 void __init setup_arch(char **cmdline_p)
@@ -367,8 +364,10 @@ void __init setup_arch(char **cmdline_p)
        arch_mem_init(cmdline_p);
 
        resource_init();
+#ifdef CONFIG_SMP
        plat_smp_setup();
        prefill_possible_map();
+#endif
 
        paging_init();
 }
index b8c53b7..73cec62 100644 (file)
@@ -66,8 +66,6 @@ static cpumask_t cpu_core_setup_map;
 
 struct secondary_data cpuboot_data;
 static DEFINE_PER_CPU(int, cpu_state);
-DEFINE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
-EXPORT_PER_CPU_SYMBOL(irq_stat);
 
 enum ipi_msg_type {
        IPI_RESCHEDULE,
index be68c1f..c2ce2e6 100644 (file)
@@ -223,7 +223,6 @@ config PPC
        select HAVE_HARDLOCKUP_DETECTOR_PERF    if PERF_EVENTS && HAVE_PERF_EVENTS_NMI && !HAVE_HARDLOCKUP_DETECTOR_ARCH
        select HAVE_HW_BREAKPOINT               if PERF_EVENTS && (PPC_BOOK3S || PPC_8xx)
        select HAVE_IOREMAP_PROT
-       select HAVE_IRQ_EXIT_ON_IRQ_STACK
        select HAVE_IRQ_TIME_ACCOUNTING
        select HAVE_KERNEL_GZIP
        select HAVE_KERNEL_LZMA                 if DEFAULT_UIMAGE
@@ -786,7 +785,6 @@ config THREAD_SHIFT
        range 13 15
        default "15" if PPC_256K_PAGES
        default "14" if PPC64
-       default "14" if KASAN
        default "13"
        help
          Used to define the stack size. The default is almost always what you
index 125328d..af58f1e 100644 (file)
 
 #ifdef __KERNEL__
 
-#if defined(CONFIG_VMAP_STACK) && CONFIG_THREAD_SHIFT < PAGE_SHIFT
+#ifdef CONFIG_KASAN
+#define MIN_THREAD_SHIFT       (CONFIG_THREAD_SHIFT + 1)
+#else
+#define MIN_THREAD_SHIFT       CONFIG_THREAD_SHIFT
+#endif
+
+#if defined(CONFIG_VMAP_STACK) && MIN_THREAD_SHIFT < PAGE_SHIFT
 #define THREAD_SHIFT           PAGE_SHIFT
 #else
-#define THREAD_SHIFT           CONFIG_THREAD_SHIFT
+#define THREAD_SHIFT           MIN_THREAD_SHIFT
 #endif
 
 #define THREAD_SIZE            (1 << THREAD_SHIFT)
index 2e2a2a9..f91f0f2 100644 (file)
@@ -37,6 +37,8 @@ KASAN_SANITIZE_paca.o := n
 KASAN_SANITIZE_setup_64.o := n
 KASAN_SANITIZE_mce.o := n
 KASAN_SANITIZE_mce_power.o := n
+KASAN_SANITIZE_udbg.o := n
+KASAN_SANITIZE_udbg_16550.o := n
 
 # we have to be particularly careful in ppc64 to exclude code that
 # runs with translations off, as we cannot access the shadow with
index b62046b..ee04338 100644 (file)
@@ -2158,12 +2158,12 @@ static unsigned long ___get_wchan(struct task_struct *p)
                return 0;
 
        do {
-               sp = *(unsigned long *)sp;
+               sp = READ_ONCE_NOCHECK(*(unsigned long *)sp);
                if (!validate_sp(sp, p, STACK_FRAME_OVERHEAD) ||
                    task_is_running(p))
                        return 0;
                if (count > 0) {
-                       ip = ((unsigned long *)sp)[STACK_FRAME_LR_SAVE];
+                       ip = READ_ONCE_NOCHECK(((unsigned long *)sp)[STACK_FRAME_LR_SAVE]);
                        if (!in_sched_functions(ip))
                                return ip;
                }
index 5dca193..09c4963 100644 (file)
@@ -17,9 +17,13 @@ int ptrace_get_fpr(struct task_struct *child, int index, unsigned long *data)
 
 #ifdef CONFIG_PPC_FPU_REGS
        flush_fp_to_thread(child);
-       if (fpidx < (PT_FPSCR - PT_FPR0))
-               memcpy(data, &child->thread.TS_FPR(fpidx), sizeof(long));
-       else
+       if (fpidx < (PT_FPSCR - PT_FPR0)) {
+               if (IS_ENABLED(CONFIG_PPC32))
+                       // On 32-bit the index we are passed refers to 32-bit words
+                       *data = ((u32 *)child->thread.fp_state.fpr)[fpidx];
+               else
+                       memcpy(data, &child->thread.TS_FPR(fpidx), sizeof(long));
+       } else
                *data = child->thread.fp_state.fpscr;
 #else
        *data = 0;
@@ -39,9 +43,13 @@ int ptrace_put_fpr(struct task_struct *child, int index, unsigned long data)
 
 #ifdef CONFIG_PPC_FPU_REGS
        flush_fp_to_thread(child);
-       if (fpidx < (PT_FPSCR - PT_FPR0))
-               memcpy(&child->thread.TS_FPR(fpidx), &data, sizeof(long));
-       else
+       if (fpidx < (PT_FPSCR - PT_FPR0)) {
+               if (IS_ENABLED(CONFIG_PPC32))
+                       // On 32-bit the index we are passed refers to 32-bit words
+                       ((u32 *)child->thread.fp_state.fpr)[fpidx] = data;
+               else
+                       memcpy(&child->thread.TS_FPR(fpidx), &data, sizeof(long));
+       } else
                child->thread.fp_state.fpscr = data;
 #endif
 
index 4d2dc22..5d7a72b 100644 (file)
@@ -444,4 +444,7 @@ void __init pt_regs_check(void)
         * real registers.
         */
        BUILD_BUG_ON(PT_DSCR < sizeof(struct user_pt_regs) / sizeof(unsigned long));
+
+       // ptrace_get/put_fpr() rely on PPC32 and VSX being incompatible
+       BUILD_BUG_ON(IS_ENABLED(CONFIG_PPC32) && IS_ENABLED(CONFIG_VSX));
 }
index 9bb43aa..a6fce31 100644 (file)
@@ -993,8 +993,8 @@ int rtas_call_reentrant(int token, int nargs, int nret, int *outputs, ...)
  *
  * Return: A pointer to the specified errorlog or NULL if not found.
  */
-struct pseries_errorlog *get_pseries_errorlog(struct rtas_error_log *log,
-                                             uint16_t section_id)
+noinstr struct pseries_errorlog *get_pseries_errorlog(struct rtas_error_log *log,
+                                                     uint16_t section_id)
 {
        struct rtas_ext_event_log_v6 *ext_log =
                (struct rtas_ext_event_log_v6 *)log->buffer;
index d85fa9f..80f5472 100644 (file)
@@ -224,7 +224,7 @@ void crash_kexec_secondary(struct pt_regs *regs)
 
 /* wait for all the CPUs to hit real mode but timeout if they don't come in */
 #if defined(CONFIG_SMP) && defined(CONFIG_PPC64)
-static void __maybe_unused crash_kexec_wait_realmode(int cpu)
+noinstr static void __maybe_unused crash_kexec_wait_realmode(int cpu)
 {
        unsigned int msecs;
        int i;
index 1f3f9fe..0d04f9d 100644 (file)
@@ -19,7 +19,6 @@
 #include <asm/cacheflush.h>
 #include <asm/kdump.h>
 #include <mm/mmu_decl.h>
-#include <generated/compile.h>
 #include <generated/utsrelease.h>
 
 struct regions {
@@ -37,10 +36,6 @@ struct regions {
        int reserved_mem_size_cells;
 };
 
-/* Simplified build-specific string for starting entropy. */
-static const char build_str[] = UTS_RELEASE " (" LINUX_COMPILE_BY "@"
-               LINUX_COMPILE_HOST ") (" LINUX_COMPILER ") " UTS_VERSION;
-
 struct regions __initdata regions;
 
 static __init void kaslr_get_cmdline(void *fdt)
@@ -71,7 +66,8 @@ static unsigned long __init get_boot_seed(void *fdt)
 {
        unsigned long hash = 0;
 
-       hash = rotate_xor(hash, build_str, sizeof(build_str));
+       /* build-specific string for starting entropy. */
+       hash = rotate_xor(hash, linux_banner, strlen(linux_banner));
        hash = rotate_xor(hash, fdt, fdt_totalsize(fdt));
 
        return hash;
index 6488b38..19f0fc5 100644 (file)
@@ -4,6 +4,7 @@
 # in particular, idle code runs a bunch of things in real mode
 KASAN_SANITIZE_idle.o := n
 KASAN_SANITIZE_pci-ioda.o := n
+KASAN_SANITIZE_pci-ioda-tce.o := n
 # pnv_machine_check_early
 KASAN_SANITIZE_setup.o := n
 
index 181b855..82cae08 100644 (file)
@@ -465,6 +465,9 @@ static int papr_scm_pmu_check_events(struct papr_scm_priv *p, struct nvdimm_pmu
        u32 available_events;
        int index, rc = 0;
 
+       if (!p->stat_buffer_len)
+               return -ENOENT;
+
        available_events = (p->stat_buffer_len  - sizeof(struct papr_scm_perf_stats))
                        / sizeof(struct papr_scm_perf_stat);
        if (available_events == 0)
index b1a88f6..91c0b80 100644 (file)
@@ -125,6 +125,7 @@ config S390
        select CLONE_BACKWARDS2
        select DMA_OPS if PCI
        select DYNAMIC_FTRACE if FUNCTION_TRACER
+       select GCC12_NO_ARRAY_BOUNDS
        select GENERIC_ALLOCATOR
        select GENERIC_CPU_AUTOPROBE
        select GENERIC_CPU_VULNERABILITIES
@@ -768,7 +769,6 @@ menu "Virtualization"
 config PROTECTED_VIRTUALIZATION_GUEST
        def_bool n
        prompt "Protected virtualization guest support"
-       select ARCH_HAS_RESTRICTED_VIRTIO_MEMORY_ACCESS
        help
          Select this option, if you want to be able to run this
          kernel as a protected virtualization KVM guest.
index d73611b..495c68a 100644 (file)
@@ -32,15 +32,7 @@ KBUILD_CFLAGS_DECOMPRESSOR += -fno-stack-protector
 KBUILD_CFLAGS_DECOMPRESSOR += $(call cc-disable-warning, address-of-packed-member)
 KBUILD_CFLAGS_DECOMPRESSOR += $(if $(CONFIG_DEBUG_INFO),-g)
 KBUILD_CFLAGS_DECOMPRESSOR += $(if $(CONFIG_DEBUG_INFO_DWARF4), $(call cc-option, -gdwarf-4,))
-
-ifdef CONFIG_CC_IS_GCC
-       ifeq ($(call cc-ifversion, -ge, 1200, y), y)
-               ifeq ($(call cc-ifversion, -lt, 1300, y), y)
-                       KBUILD_CFLAGS += $(call cc-disable-warning, array-bounds)
-                       KBUILD_CFLAGS_DECOMPRESSOR += $(call cc-disable-warning, array-bounds)
-               endif
-       endif
-endif
+KBUILD_CFLAGS_DECOMPRESSOR += $(if $(CONFIG_CC_NO_ARRAY_BOUNDS),-Wno-array-bounds)
 
 UTS_MACHINE    := s390x
 STACK_SIZE     := $(if $(CONFIG_KASAN),65536,16384)
index 6fb6bf6..6a0ac00 100644 (file)
@@ -31,6 +31,7 @@
 #include <linux/cma.h>
 #include <linux/gfp.h>
 #include <linux/dma-direct.h>
+#include <linux/platform-feature.h>
 #include <asm/processor.h>
 #include <linux/uaccess.h>
 #include <asm/pgalloc.h>
@@ -168,22 +169,14 @@ bool force_dma_unencrypted(struct device *dev)
        return is_prot_virt_guest();
 }
 
-#ifdef CONFIG_ARCH_HAS_RESTRICTED_VIRTIO_MEMORY_ACCESS
-
-int arch_has_restricted_virtio_memory_access(void)
-{
-       return is_prot_virt_guest();
-}
-EXPORT_SYMBOL(arch_has_restricted_virtio_memory_access);
-
-#endif
-
 /* protected virtualization */
 static void pv_init(void)
 {
        if (!is_prot_virt_guest())
                return;
 
+       platform_set(PLATFORM_VIRTIO_RESTRICTED_MEM_ACCESS);
+
        /* make sure bounce buffers are shared */
        swiotlb_init(true, SWIOTLB_FORCE | SWIOTLB_VERBOSE);
        swiotlb_update_mem_attributes();
index 5c092a9..0278470 100644 (file)
@@ -544,6 +544,8 @@ static int um_pci_init_vqs(struct um_pci_device *dev)
        dev->cmd_vq = vqs[0];
        dev->irq_vq = vqs[1];
 
+       virtio_device_ready(dev->vdev);
+
        for (i = 0; i < NUM_IRQ_MSGS; i++) {
                void *msg = kzalloc(MAX_IRQ_MSG_SIZE, GFP_KERNEL);
 
@@ -587,7 +589,7 @@ static int um_pci_virtio_probe(struct virtio_device *vdev)
        dev->irq = irq_alloc_desc(numa_node_id());
        if (dev->irq < 0) {
                err = dev->irq;
-               goto error;
+               goto err_reset;
        }
        um_pci_devices[free].dev = dev;
        vdev->priv = dev;
@@ -604,6 +606,9 @@ static int um_pci_virtio_probe(struct virtio_device *vdev)
 
        um_pci_rescan();
        return 0;
+err_reset:
+       virtio_reset_device(vdev);
+       vdev->config->del_vqs(vdev);
 error:
        mutex_unlock(&um_pci_mtx);
        kfree(dev);
index 9783ebc..be0b95e 100644 (file)
@@ -1542,7 +1542,6 @@ config X86_CPA_STATISTICS
 config X86_MEM_ENCRYPT
        select ARCH_HAS_FORCE_DMA_UNENCRYPTED
        select DYNAMIC_PHYSICAL_MASK
-       select ARCH_HAS_RESTRICTED_VIRTIO_MEMORY_ACCESS
        def_bool n
 
 config AMD_MEM_ENCRYPT
index 959d66b..3a240a6 100644 (file)
@@ -653,6 +653,7 @@ struct kvm_vcpu_arch {
        u64 ia32_misc_enable_msr;
        u64 smbase;
        u64 smi_count;
+       bool at_instruction_boundary;
        bool tpr_access_reporting;
        bool xsaves_enabled;
        bool xfd_no_write_intercept;
@@ -1300,6 +1301,8 @@ struct kvm_vcpu_stat {
        u64 nested_run;
        u64 directed_yield_attempted;
        u64 directed_yield_successful;
+       u64 preemption_reported;
+       u64 preemption_other;
        u64 guest_mode;
 };
 
index 35f222a..913e593 100644 (file)
@@ -439,7 +439,7 @@ do {                                                                        \
                       [ptr] "+m" (*_ptr),                              \
                       [old] "+a" (__old)                               \
                     : [new] ltype (__new)                              \
-                    : "memory", "cc");                                 \
+                    : "memory");                                       \
        if (unlikely(__err))                                            \
                goto label;                                             \
        if (unlikely(!success))                                         \
index f465368..e826ee9 100644 (file)
@@ -5179,7 +5179,7 @@ static void __kvm_mmu_free_obsolete_roots(struct kvm *kvm, struct kvm_mmu *mmu)
                roots_to_free |= KVM_MMU_ROOT_CURRENT;
 
        for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) {
-               if (is_obsolete_root(kvm, mmu->root.hpa))
+               if (is_obsolete_root(kvm, mmu->prev_roots[i].hpa))
                        roots_to_free |= KVM_MMU_ROOT_PREVIOUS(i);
        }
 
index 6d3b3e5..ee4802d 100644 (file)
@@ -145,6 +145,15 @@ static bool try_step_up(struct tdp_iter *iter)
        return true;
 }
 
+/*
+ * Step the iterator back up a level in the paging structure. Should only be
+ * used when the iterator is below the root level.
+ */
+void tdp_iter_step_up(struct tdp_iter *iter)
+{
+       WARN_ON(!try_step_up(iter));
+}
+
 /*
  * Step to the next SPTE in a pre-order traversal of the paging structure.
  * To get to the next SPTE, the iterator either steps down towards the goal
index f0af385..adfca0c 100644 (file)
@@ -114,5 +114,6 @@ void tdp_iter_start(struct tdp_iter *iter, struct kvm_mmu_page *root,
                    int min_level, gfn_t next_last_level_gfn);
 void tdp_iter_next(struct tdp_iter *iter);
 void tdp_iter_restart(struct tdp_iter *iter);
+void tdp_iter_step_up(struct tdp_iter *iter);
 
 #endif /* __KVM_X86_MMU_TDP_ITER_H */
index 841feaa..7b9265d 100644 (file)
@@ -1742,12 +1742,12 @@ static void zap_collapsible_spte_range(struct kvm *kvm,
        gfn_t start = slot->base_gfn;
        gfn_t end = start + slot->npages;
        struct tdp_iter iter;
+       int max_mapping_level;
        kvm_pfn_t pfn;
 
        rcu_read_lock();
 
        tdp_root_for_each_pte(iter, root, start, end) {
-retry:
                if (tdp_mmu_iter_cond_resched(kvm, &iter, false, true))
                        continue;
 
@@ -1755,15 +1755,41 @@ retry:
                    !is_last_spte(iter.old_spte, iter.level))
                        continue;
 
+               /*
+                * This is a leaf SPTE. Check if the PFN it maps can
+                * be mapped at a higher level.
+                */
                pfn = spte_to_pfn(iter.old_spte);
-               if (kvm_is_reserved_pfn(pfn) ||
-                   iter.level >= kvm_mmu_max_mapping_level(kvm, slot, iter.gfn,
-                                                           pfn, PG_LEVEL_NUM))
+
+               if (kvm_is_reserved_pfn(pfn))
                        continue;
 
+               max_mapping_level = kvm_mmu_max_mapping_level(kvm, slot,
+                               iter.gfn, pfn, PG_LEVEL_NUM);
+
+               WARN_ON(max_mapping_level < iter.level);
+
+               /*
+                * If this page is already mapped at the highest
+                * viable level, there's nothing more to do.
+                */
+               if (max_mapping_level == iter.level)
+                       continue;
+
+               /*
+                * The page can be remapped at a higher level, so step
+                * up to zap the parent SPTE.
+                */
+               while (max_mapping_level > iter.level)
+                       tdp_iter_step_up(&iter);
+
                /* Note, a successful atomic zap also does a remote TLB flush. */
-               if (tdp_mmu_zap_spte_atomic(kvm, &iter))
-                       goto retry;
+               tdp_mmu_zap_spte_atomic(kvm, &iter);
+
+               /*
+                * If the atomic zap fails, the iter will recurse back into
+                * the same subtree to retry.
+                */
        }
 
        rcu_read_unlock();
index bed5e16..3361258 100644 (file)
@@ -982,7 +982,7 @@ int nested_svm_vmexit(struct vcpu_svm *svm)
        if (svm->tsc_ratio_msr != kvm_default_tsc_scaling_ratio) {
                WARN_ON(!svm->tsc_scaling_enabled);
                vcpu->arch.tsc_scaling_ratio = vcpu->arch.l1_tsc_scaling_ratio;
-               svm_write_tsc_multiplier(vcpu, vcpu->arch.tsc_scaling_ratio);
+               __svm_write_tsc_multiplier(vcpu->arch.tsc_scaling_ratio);
        }
 
        svm->nested.ctl.nested_cr3 = 0;
@@ -1387,7 +1387,7 @@ void nested_svm_update_tsc_ratio_msr(struct kvm_vcpu *vcpu)
        vcpu->arch.tsc_scaling_ratio =
                kvm_calc_nested_tsc_multiplier(vcpu->arch.l1_tsc_scaling_ratio,
                                               svm->tsc_ratio_msr);
-       svm_write_tsc_multiplier(vcpu, vcpu->arch.tsc_scaling_ratio);
+       __svm_write_tsc_multiplier(vcpu->arch.tsc_scaling_ratio);
 }
 
 /* Inverse operation of nested_copy_vmcb_control_to_cache(). asid is copied too. */
index 200045f..1dc02cd 100644 (file)
@@ -465,11 +465,24 @@ static int has_svm(void)
        return 1;
 }
 
+void __svm_write_tsc_multiplier(u64 multiplier)
+{
+       preempt_disable();
+
+       if (multiplier == __this_cpu_read(current_tsc_ratio))
+               goto out;
+
+       wrmsrl(MSR_AMD64_TSC_RATIO, multiplier);
+       __this_cpu_write(current_tsc_ratio, multiplier);
+out:
+       preempt_enable();
+}
+
 static void svm_hardware_disable(void)
 {
        /* Make sure we clean up behind us */
        if (tsc_scaling)
-               wrmsrl(MSR_AMD64_TSC_RATIO, SVM_TSC_RATIO_DEFAULT);
+               __svm_write_tsc_multiplier(SVM_TSC_RATIO_DEFAULT);
 
        cpu_svm_disable();
 
@@ -515,8 +528,7 @@ static int svm_hardware_enable(void)
                 * Set the default value, even if we don't use TSC scaling
                 * to avoid having stale value in the msr
                 */
-               wrmsrl(MSR_AMD64_TSC_RATIO, SVM_TSC_RATIO_DEFAULT);
-               __this_cpu_write(current_tsc_ratio, SVM_TSC_RATIO_DEFAULT);
+               __svm_write_tsc_multiplier(SVM_TSC_RATIO_DEFAULT);
        }
 
 
@@ -999,11 +1011,12 @@ static void svm_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
        vmcb_mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
 }
 
-void svm_write_tsc_multiplier(struct kvm_vcpu *vcpu, u64 multiplier)
+static void svm_write_tsc_multiplier(struct kvm_vcpu *vcpu, u64 multiplier)
 {
-       wrmsrl(MSR_AMD64_TSC_RATIO, multiplier);
+       __svm_write_tsc_multiplier(multiplier);
 }
 
+
 /* Evaluate instruction intercepts that depend on guest CPUID features. */
 static void svm_recalc_instruction_intercepts(struct kvm_vcpu *vcpu,
                                              struct vcpu_svm *svm)
@@ -1363,13 +1376,8 @@ static void svm_prepare_switch_to_guest(struct kvm_vcpu *vcpu)
                sev_es_prepare_switch_to_guest(hostsa);
        }
 
-       if (tsc_scaling) {
-               u64 tsc_ratio = vcpu->arch.tsc_scaling_ratio;
-               if (tsc_ratio != __this_cpu_read(current_tsc_ratio)) {
-                       __this_cpu_write(current_tsc_ratio, tsc_ratio);
-                       wrmsrl(MSR_AMD64_TSC_RATIO, tsc_ratio);
-               }
-       }
+       if (tsc_scaling)
+               __svm_write_tsc_multiplier(vcpu->arch.tsc_scaling_ratio);
 
        if (likely(tsc_aux_uret_slot >= 0))
                kvm_set_user_return_msr(tsc_aux_uret_slot, svm->tsc_aux, -1ull);
@@ -4255,6 +4263,8 @@ out:
 
 static void svm_handle_exit_irqoff(struct kvm_vcpu *vcpu)
 {
+       if (to_svm(vcpu)->vmcb->control.exit_code == SVM_EXIT_INTR)
+               vcpu->arch.at_instruction_boundary = true;
 }
 
 static void svm_sched_in(struct kvm_vcpu *vcpu, int cpu)
index 21c5460..500348c 100644 (file)
@@ -590,7 +590,7 @@ int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr,
                               bool has_error_code, u32 error_code);
 int nested_svm_exit_special(struct vcpu_svm *svm);
 void nested_svm_update_tsc_ratio_msr(struct kvm_vcpu *vcpu);
-void svm_write_tsc_multiplier(struct kvm_vcpu *vcpu, u64 multiplier);
+void __svm_write_tsc_multiplier(u64 multiplier);
 void nested_copy_vmcb_control_to_cache(struct vcpu_svm *svm,
                                       struct vmcb_control_area *control);
 void nested_copy_vmcb_save_to_cache(struct vcpu_svm *svm,
index a07e8cd..9bd86ec 100644 (file)
@@ -6547,6 +6547,7 @@ static void handle_external_interrupt_irqoff(struct kvm_vcpu *vcpu)
                return;
 
        handle_interrupt_nmi_irqoff(vcpu, gate_offset(desc));
+       vcpu->arch.at_instruction_boundary = true;
 }
 
 static void vmx_handle_exit_irqoff(struct kvm_vcpu *vcpu)
index e9473c7..03fbfbb 100644 (file)
@@ -296,6 +296,8 @@ const struct _kvm_stats_desc kvm_vcpu_stats_desc[] = {
        STATS_DESC_COUNTER(VCPU, nested_run),
        STATS_DESC_COUNTER(VCPU, directed_yield_attempted),
        STATS_DESC_COUNTER(VCPU, directed_yield_successful),
+       STATS_DESC_COUNTER(VCPU, preemption_reported),
+       STATS_DESC_COUNTER(VCPU, preemption_other),
        STATS_DESC_ICOUNTER(VCPU, guest_mode)
 };
 
@@ -4625,6 +4627,19 @@ static void kvm_steal_time_set_preempted(struct kvm_vcpu *vcpu)
        struct kvm_memslots *slots;
        static const u8 preempted = KVM_VCPU_PREEMPTED;
 
+       /*
+        * The vCPU can be marked preempted if and only if the VM-Exit was on
+        * an instruction boundary and will not trigger guest emulation of any
+        * kind (see vcpu_run).  Vendor specific code controls (conservatively)
+        * when this is true, for example allowing the vCPU to be marked
+        * preempted if and only if the VM-Exit was due to a host interrupt.
+        */
+       if (!vcpu->arch.at_instruction_boundary) {
+               vcpu->stat.preemption_other++;
+               return;
+       }
+
+       vcpu->stat.preemption_reported++;
        if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED))
                return;
 
@@ -4654,19 +4669,21 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
 {
        int idx;
 
-       if (vcpu->preempted && !vcpu->arch.guest_state_protected)
-               vcpu->arch.preempted_in_kernel = !static_call(kvm_x86_get_cpl)(vcpu);
+       if (vcpu->preempted) {
+               if (!vcpu->arch.guest_state_protected)
+                       vcpu->arch.preempted_in_kernel = !static_call(kvm_x86_get_cpl)(vcpu);
 
-       /*
-        * Take the srcu lock as memslots will be accessed to check the gfn
-        * cache generation against the memslots generation.
-        */
-       idx = srcu_read_lock(&vcpu->kvm->srcu);
-       if (kvm_xen_msr_enabled(vcpu->kvm))
-               kvm_xen_runstate_set_preempted(vcpu);
-       else
-               kvm_steal_time_set_preempted(vcpu);
-       srcu_read_unlock(&vcpu->kvm->srcu, idx);
+               /*
+                * Take the srcu lock as memslots will be accessed to check the gfn
+                * cache generation against the memslots generation.
+                */
+               idx = srcu_read_lock(&vcpu->kvm->srcu);
+               if (kvm_xen_msr_enabled(vcpu->kvm))
+                       kvm_xen_runstate_set_preempted(vcpu);
+               else
+                       kvm_steal_time_set_preempted(vcpu);
+               srcu_read_unlock(&vcpu->kvm->srcu, idx);
+       }
 
        static_call(kvm_x86_vcpu_put)(vcpu);
        vcpu->arch.last_host_tsc = rdtsc();
@@ -10422,6 +10439,13 @@ static int vcpu_run(struct kvm_vcpu *vcpu)
        vcpu->arch.l1tf_flush_l1d = true;
 
        for (;;) {
+               /*
+                * If another guest vCPU requests a PV TLB flush in the middle
+                * of instruction emulation, the rest of the emulation could
+                * use a stale page translation. Assume that any code after
+                * this point can start executing an instruction.
+                */
+               vcpu->arch.at_instruction_boundary = false;
                if (kvm_vcpu_running(vcpu)) {
                        r = vcpu_enter_guest(vcpu);
                } else {
index ee5c4ae..532a535 100644 (file)
@@ -159,8 +159,10 @@ static inline void kvm_xen_runstate_set_preempted(struct kvm_vcpu *vcpu)
         * behalf of the vCPU. Only if the VMM does actually block
         * does it need to enter RUNSTATE_blocked.
         */
-       if (vcpu->preempted)
-               kvm_xen_update_runstate_guest(vcpu, RUNSTATE_runnable);
+       if (WARN_ON_ONCE(!vcpu->preempted))
+               return;
+
+       kvm_xen_update_runstate_guest(vcpu, RUNSTATE_runnable);
 }
 
 /* 32-bit compatibility definitions, also used natively in 32-bit build */
index 11350e2..9f27e14 100644 (file)
@@ -12,7 +12,6 @@
 #include <linux/swiotlb.h>
 #include <linux/cc_platform.h>
 #include <linux/mem_encrypt.h>
-#include <linux/virtio_config.h>
 
 /* Override for DMA direct allocation check - ARCH_HAS_FORCE_DMA_UNENCRYPTED */
 bool force_dma_unencrypted(struct device *dev)
@@ -87,9 +86,3 @@ void __init mem_encrypt_init(void)
 
        print_mem_encrypt_feature_info();
 }
-
-int arch_has_restricted_virtio_memory_access(void)
-{
-       return cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT);
-}
-EXPORT_SYMBOL_GPL(arch_has_restricted_virtio_memory_access);
index e8f7953..f6d038e 100644 (file)
@@ -21,6 +21,7 @@
 #include <linux/dma-mapping.h>
 #include <linux/virtio_config.h>
 #include <linux/cc_platform.h>
+#include <linux/platform-feature.h>
 
 #include <asm/tlbflush.h>
 #include <asm/fixmap.h>
@@ -242,6 +243,9 @@ void __init sev_setup_arch(void)
        size = total_mem * 6 / 100;
        size = clamp_val(size, IO_TLB_DEFAULT_SIZE, SZ_1G);
        swiotlb_adjust_size(size);
+
+       /* Set restricted memory access for virtio. */
+       platform_set(PLATFORM_VIRTIO_RESTRICTED_MEM_ACCESS);
 }
 
 static unsigned long pg_level_to_pfn(int level, pte_t *kpte, pgprot_t *ret_prot)
index 517a9d8..8b71b1d 100644 (file)
@@ -195,6 +195,8 @@ static void __init xen_hvm_guest_init(void)
        if (xen_pv_domain())
                return;
 
+       xen_set_restricted_virtio_memory_access();
+
        init_hvm_pv_info();
 
        reserve_shared_info();
index f33a442..e3297b1 100644 (file)
@@ -109,6 +109,8 @@ static DEFINE_PER_CPU(struct tls_descs, shadow_tls_desc);
 
 static void __init xen_pv_init_platform(void)
 {
+       xen_set_restricted_virtio_memory_access();
+
        populate_extra_pte(fix_to_virt(FIX_PARAVIRT_BOOTMAP));
 
        set_fixmap(FIX_PARAVIRT_BOOTMAP, xen_start_info->shared_info);
index f92d022..51c99f2 100644 (file)
@@ -1747,26 +1747,6 @@ bad:
 }
 EXPORT_SYMBOL(bioset_init);
 
-/*
- * Initialize and setup a new bio_set, based on the settings from
- * another bio_set.
- */
-int bioset_init_from_src(struct bio_set *bs, struct bio_set *src)
-{
-       int flags;
-
-       flags = 0;
-       if (src->bvec_pool.min_nr)
-               flags |= BIOSET_NEED_BVECS;
-       if (src->rescue_workqueue)
-               flags |= BIOSET_NEED_RESCUER;
-       if (src->cache)
-               flags |= BIOSET_PERCPU_CACHE;
-
-       return bioset_init(bs, src->bio_pool.min_nr, src->front_pad, flags);
-}
-EXPORT_SYMBOL(bioset_init_from_src);
-
 static int __init init_bio(void)
 {
        int i;
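
With bioset_init_from_src() removed, a caller that used to clone settings from an existing bio_set is expected to call bioset_init() with explicit sizing and flags instead. A sketch of the replacement call, with illustrative values not taken from this diff:

static int example_setup(void)                             /* hypothetical */
{
        static struct bio_set bs;

        /* min_nr, front_pad and flags are now spelled out by the caller. */
        return bioset_init(&bs, BIO_POOL_SIZE, 0,
                           BIOSET_NEED_BVECS | BIOSET_NEED_RESCUER);
}
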
index bb904f9..cb1a9da 100644 (file)
@@ -18,7 +18,7 @@ CFLAGS_blacklist_hashes.o += -I$(srctree)
 
 targets += blacklist_hashes_checked
 $(obj)/blacklist_hashes_checked: $(SYSTEM_BLACKLIST_HASH_LIST_SRCPREFIX)$(SYSTEM_BLACKLIST_HASH_LIST_FILENAME) scripts/check-blacklist-hashes.awk FORCE
-       $(call if_changed,check_blacklist_hashes,$(SYSTEM_BLACKLIST_HASH_LIST_SRCPREFIX)$(CONFIG_SYSTEM_BLACKLIST_HASH_LIST))
+       $(call if_changed,check_blacklist_hashes,$(SYSTEM_BLACKLIST_HASH_LIST_SRCPREFIX)$(CONFIG_SYSTEM_BLACKLIST_HASH_LIST))
 obj-$(CONFIG_SYSTEM_BLACKLIST_KEYRING) += blacklist_hashes.o
 else
 obj-$(CONFIG_SYSTEM_BLACKLIST_KEYRING) += blacklist_nohashes.o
index f7ef786..8c1fb9a 100644 (file)
 #include <openssl/err.h>
 #include <openssl/engine.h>
 
+/*
+ * OpenSSL 3.0 deprecates OpenSSL's ENGINE API.
+ *
+ * Remove this if/when that API is no longer used.
+ */
+#pragma GCC diagnostic ignored "-Wdeprecated-declarations"
+
 #define PKEY_ID_PKCS7 2
 
 static __attribute__((noreturn))
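
The pragma above disables -Wdeprecated-declarations for the entire file. For reference, a narrower (hypothetical) variant would scope the suppression to just the ENGINE calls with push/pop:

#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wdeprecated-declarations"
        ENGINE_load_builtin_engines();   /* deprecated in OpenSSL 3.0 */
#pragma GCC diagnostic pop
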
index 40e8164..9601fa9 100644 (file)
@@ -2010,16 +2010,16 @@ retry:
        return err_mask;
 }
 
-static bool ata_log_supported(struct ata_device *dev, u8 log)
+static int ata_log_supported(struct ata_device *dev, u8 log)
 {
        struct ata_port *ap = dev->link->ap;
 
        if (dev->horkage & ATA_HORKAGE_NO_LOG_DIR)
-               return false;
+               return 0;
 
        if (ata_read_log_page(dev, ATA_LOG_DIRECTORY, 0, ap->sector_buf, 1))
-               return false;
-       return get_unaligned_le16(&ap->sector_buf[log * 2]) ? true : false;
+               return 0;
+       return get_unaligned_le16(&ap->sector_buf[log * 2]);
 }
 
 static bool ata_identify_page_supported(struct ata_device *dev, u8 page)
@@ -2455,15 +2455,20 @@ static void ata_dev_config_cpr(struct ata_device *dev)
        struct ata_cpr_log *cpr_log = NULL;
        u8 *desc, *buf = NULL;
 
-       if (ata_id_major_version(dev->id) < 11 ||
-           !ata_log_supported(dev, ATA_LOG_CONCURRENT_POSITIONING_RANGES))
+       if (ata_id_major_version(dev->id) < 11)
+               goto out;
+
+       buf_len = ata_log_supported(dev, ATA_LOG_CONCURRENT_POSITIONING_RANGES);
+       if (buf_len == 0)
                goto out;
 
        /*
         * Read the concurrent positioning ranges log (0x47). We can have at
-        * most 255 32B range descriptors plus a 64B header.
+        * most 255 32B range descriptors plus a 64B header. This log varies in
+        * size, so use the size reported in the GPL directory. Reading beyond
+        * the supported length will result in an error.
         */
-       buf_len = (64 + 255 * 32 + 511) & ~511;
+       buf_len <<= 9;
        buf = kzalloc(buf_len, GFP_KERNEL);
        if (!buf)
                goto out;
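
For reference, the arithmetic behind the sizing change: the old code always allocated the worst case, (64 + 255 * 32 + 511) & ~511 = 8704 bytes (17 sectors), while the new code reads exactly the number of 512-byte pages the GPL log directory reports, hence buf_len <<= 9. A worked example with an illustrative page count:

unsigned int n_pages = 2;                 /* illustrative: from ata_log_supported() */
size_t buf_len;

buf_len = (64 + 255 * 32 + 511) & ~511;   /* old: round_up(8224, 512) = 8704 */
buf_len = n_pages << 9;                   /* new: 2 * 512 = 1024 bytes read */
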
@@ -5462,7 +5467,7 @@ struct ata_host *ata_host_alloc_pinfo(struct device *dev,
                                      const struct ata_port_info * const * ppi,
                                      int n_ports)
 {
-       const struct ata_port_info *pi;
+       const struct ata_port_info *pi = &ata_dummy_port_info;
        struct ata_host *host;
        int i, j;
 
@@ -5470,7 +5475,7 @@ struct ata_host *ata_host_alloc_pinfo(struct device *dev,
        if (!host)
                return NULL;
 
-       for (i = 0, j = 0, pi = NULL; i < host->n_ports; i++) {
+       for (i = 0, j = 0; i < host->n_ports; i++) {
                struct ata_port *ap = host->ports[i];
 
                if (ppi[j])
index 42cecf9..86dbb1c 100644 (file)
@@ -2125,7 +2125,7 @@ static unsigned int ata_scsiop_inq_b9(struct ata_scsi_args *args, u8 *rbuf)
 
        /* SCSI Concurrent Positioning Ranges VPD page: SBC-5 rev 1 or later */
        rbuf[1] = 0xb9;
-       put_unaligned_be16(64 + (int)cpr_log->nr_cpr * 32 - 4, &rbuf[3]);
+       put_unaligned_be16(64 + (int)cpr_log->nr_cpr * 32 - 4, &rbuf[2]);
 
        for (i = 0; i < cpr_log->nr_cpr; i++, desc += 32) {
                desc[0] = cpr_log->cpr[i].num;
index ca12985..c380278 100644 (file)
@@ -196,7 +196,7 @@ static struct {
        { XFER_PIO_0,                   "XFER_PIO_0" },
        { XFER_PIO_SLOW,                "XFER_PIO_SLOW" }
 };
-ata_bitfield_name_match(xfer,ata_xfer_names)
+ata_bitfield_name_search(xfer, ata_xfer_names)
 
 /*
  * ATA Port attributes
index 6b5ed30..35608a0 100644 (file)
@@ -856,12 +856,14 @@ static int octeon_cf_probe(struct platform_device *pdev)
                                int i;
                                res_dma = platform_get_resource(dma_dev, IORESOURCE_MEM, 0);
                                if (!res_dma) {
+                                       put_device(&dma_dev->dev);
                                        of_node_put(dma_node);
                                        return -EINVAL;
                                }
                                cf_port->dma_base = (u64)devm_ioremap(&pdev->dev, res_dma->start,
                                                                         resource_size(res_dma));
                                if (!cf_port->dma_base) {
+                                       put_device(&dma_dev->dev);
                                        of_node_put(dma_node);
                                        return -EINVAL;
                                }
@@ -871,6 +873,7 @@ static int octeon_cf_probe(struct platform_device *pdev)
                                        irq = i;
                                        irq_handler = octeon_cf_interrupt;
                                }
+                               put_device(&dma_dev->dev);
                        }
                        of_node_put(dma_node);
                }
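
The three added put_device() calls all balance the reference taken by the platform-device lookup earlier in this function. The general shape of the pattern, shown schematically:

struct platform_device *dma_dev = of_find_device_by_node(dma_node); /* +1 ref */

if (dma_dev) {
        /* ... use dma_dev resources ... */
        put_device(&dma_dev->dev);      /* drop the ref on *every* exit path */
}
of_node_put(dma_node);                  /* and balance the OF node ref */
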
index 69fd31f..0b6c036 100644 (file)
@@ -429,28 +429,40 @@ config ADI
          driver include crash and makedumpfile.
 
 config RANDOM_TRUST_CPU
-       bool "Trust the CPU manufacturer to initialize Linux's CRNG"
+       bool "Initialize RNG using CPU RNG instructions"
+       default y
        depends on ARCH_RANDOM
-       default n
        help
-       Assume that CPU manufacturer (e.g., Intel or AMD for RDSEED or
-       RDRAND, IBM for the S390 and Power PC architectures) is trustworthy
-       for the purposes of initializing Linux's CRNG.  Since this is not
-       something that can be independently audited, this amounts to trusting
-       that CPU manufacturer (perhaps with the insistence or mandate
-       of a Nation State's intelligence or law enforcement agencies)
-       has not installed a hidden back door to compromise the CPU's
-       random number generation facilities. This can also be configured
-       at boot with "random.trust_cpu=on/off".
+         Initialize the RNG using random numbers supplied by the CPU's
+         RNG instructions (e.g. RDRAND), if supported and available. These
+         random numbers are never used directly, but are rather hashed into
+         the main input pool, and this happens regardless of whether or not
+         this option is enabled. Instead, this option controls whether
+         they are credited and hence can initialize the RNG. Additionally,
+         other sources of randomness are always used, regardless of this
+         setting.  Enabling this implies trusting that the CPU can supply high
+         quality and non-backdoored random numbers.
+
+         Say Y here unless you have reason to mistrust your CPU or believe
+         its RNG facilities may be faulty. This may also be configured at
+         boot time with "random.trust_cpu=on/off".
 
 config RANDOM_TRUST_BOOTLOADER
-       bool "Trust the bootloader to initialize Linux's CRNG"
-       help
-       Some bootloaders can provide entropy to increase the kernel's initial
-       device randomness. Say Y here to assume the entropy provided by the
-       booloader is trustworthy so it will be added to the kernel's entropy
-       pool. Otherwise, say N here so it will be regarded as device input that
-       only mixes the entropy pool. This can also be configured at boot with
-       "random.trust_bootloader=on/off".
+       bool "Initialize RNG using bootloader-supplied seed"
+       default y
+       help
+         Initialize the RNG using a seed supplied by the bootloader or boot
+         environment (e.g. EFI or a bootloader-generated device tree). This
+         seed is not used directly, but is rather hashed into the main input
+         pool, and this happens regardless of whether or not this option is
+         enabled. Instead, this option controls whether the seed is credited
+         and hence can initialize the RNG. Additionally, other sources of
+         randomness are always used, regardless of this setting. Enabling
+         this implies trusting that the bootloader can supply high quality and
+         non-backdoored seeds.
+
+         Say Y here unless you have reason to mistrust your bootloader or
+         believe its RNG facilities may be faulty. This may also be configured
+         at boot time with "random.trust_bootloader=on/off".
 
 endmenu
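
Both help texts describe the same split between mixing and crediting. A simplified model of that behaviour; mix_pool_bytes() and credit_init_bits() are the names used in the random.c hunks later in this pull, while the wrapper itself is invented:

static void example_take_seed(const void *seed, size_t len, bool trusted)
{
        mix_pool_bytes(seed, len);              /* always hashed in */
        if (trusted)                            /* CONFIG_RANDOM_TRUST_* */
                credit_init_bits(len * 8);      /* only this advances init */
}
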
index e856df7..a6f3a8a 100644 (file)
@@ -159,6 +159,8 @@ static int probe_common(struct virtio_device *vdev)
                goto err_find;
        }
 
+       virtio_device_ready(vdev);
+
        /* we always have a pending entropy request */
        request_entropy(vi);
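
The added virtio_device_ready() matters because this driver posts a buffer during probe: the device must see DRIVER_OK before the first kick. Schematically, with hypothetical names around the one real call:

static int example_probe(struct virtio_device *vdev)       /* hypothetical */
{
        /* ... allocate state, find the virtqueue ... */
        virtio_device_ready(vdev);       /* DRIVER_OK first (this diff) */
        post_initial_buffer(vdev);       /* hypothetical: the pending request */
        return 0;
}
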
 
index b691b9d..655e327 100644 (file)
@@ -650,7 +650,8 @@ static void __cold _credit_init_bits(size_t bits)
 
        if (orig < POOL_READY_BITS && new >= POOL_READY_BITS) {
                crng_reseed(); /* Sets crng_init to CRNG_READY under base_crng.lock. */
-               execute_in_process_context(crng_set_ready, &set_ready);
+               if (static_key_initialized)
+                       execute_in_process_context(crng_set_ready, &set_ready);
                wake_up_interruptible(&crng_init_wait);
                kill_fasync(&fasync, SIGIO, POLL_IN);
                pr_notice("crng init done\n");
@@ -724,9 +725,8 @@ static void __cold _credit_init_bits(size_t bits)
  *
  **********************************************************************/
 
-static bool used_arch_random;
-static bool trust_cpu __ro_after_init = IS_ENABLED(CONFIG_RANDOM_TRUST_CPU);
-static bool trust_bootloader __ro_after_init = IS_ENABLED(CONFIG_RANDOM_TRUST_BOOTLOADER);
+static bool trust_cpu __initdata = IS_ENABLED(CONFIG_RANDOM_TRUST_CPU);
+static bool trust_bootloader __initdata = IS_ENABLED(CONFIG_RANDOM_TRUST_BOOTLOADER);
 static int __init parse_trust_cpu(char *arg)
 {
        return kstrtobool(arg, &trust_cpu);
@@ -776,7 +776,7 @@ static struct notifier_block pm_notifier = { .notifier_call = random_pm_notifica
 int __init random_init(const char *command_line)
 {
        ktime_t now = ktime_get_real();
-       unsigned int i, arch_bytes;
+       unsigned int i, arch_bits;
        unsigned long entropy;
 
 #if defined(LATENT_ENTROPY_PLUGIN)
@@ -784,12 +784,12 @@ int __init random_init(const char *command_line)
        _mix_pool_bytes(compiletime_seed, sizeof(compiletime_seed));
 #endif
 
-       for (i = 0, arch_bytes = BLAKE2S_BLOCK_SIZE;
+       for (i = 0, arch_bits = BLAKE2S_BLOCK_SIZE * 8;
             i < BLAKE2S_BLOCK_SIZE; i += sizeof(entropy)) {
                if (!arch_get_random_seed_long_early(&entropy) &&
                    !arch_get_random_long_early(&entropy)) {
                        entropy = random_get_entropy();
-                       arch_bytes -= sizeof(entropy);
+                       arch_bits -= sizeof(entropy) * 8;
                }
                _mix_pool_bytes(&entropy, sizeof(entropy));
        }
@@ -798,11 +798,18 @@ int __init random_init(const char *command_line)
        _mix_pool_bytes(command_line, strlen(command_line));
        add_latent_entropy();
 
+       /*
+        * If we were initialized by the bootloader before jump labels are
+        * initialized, then we should enable the static branch here, where
+        * it's guaranteed that jump labels have been initialized.
+        */
+       if (!static_branch_likely(&crng_is_ready) && crng_init >= CRNG_READY)
+               crng_set_ready(NULL);
+
        if (crng_ready())
                crng_reseed();
        else if (trust_cpu)
-               credit_init_bits(arch_bytes * 8);
-       used_arch_random = arch_bytes * 8 >= POOL_READY_BITS;
+               _credit_init_bits(arch_bits);
 
        WARN_ON(register_pm_notifier(&pm_notifier));
 
@@ -811,17 +818,6 @@ int __init random_init(const char *command_line)
        return 0;
 }
 
-/*
- * Returns whether arch randomness has been mixed into the initial
- * state of the RNG, regardless of whether or not that randomness
- * was credited. Knowing this is only good for a very limited set
- * of uses, such as early init printk pointer obfuscation.
- */
-bool rng_has_arch_random(void)
-{
-       return used_arch_random;
-}
-
 /*
  * Add device- or boot-specific data to the input pool to help
  * initialize it.
@@ -865,13 +861,12 @@ EXPORT_SYMBOL_GPL(add_hwgenerator_randomness);
  * Handle random seed passed by bootloader, and credit it if
  * CONFIG_RANDOM_TRUST_BOOTLOADER is set.
  */
-void __cold add_bootloader_randomness(const void *buf, size_t len)
+void __init add_bootloader_randomness(const void *buf, size_t len)
 {
        mix_pool_bytes(buf, len);
        if (trust_bootloader)
                credit_init_bits(len * 8);
 }
-EXPORT_SYMBOL_GPL(add_bootloader_randomness);
 
 #if IS_ENABLED(CONFIG_VMGENID)
 static BLOCKING_NOTIFIER_HEAD(vmfork_chain);
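
The rename from arch_bytes to arch_bits keeps the accounting in one unit throughout. A worked example of the loop above on x86-64, where longs are 8 bytes and BLAKE2S_BLOCK_SIZE is 64:

/* 64-byte block -> 8 fetches of sizeof(long) == 8 bytes each.
 * Start:                                 arch_bits = 64 * 8          = 512
 * 2 fetches fall back to random_get_entropy():
 *                                        arch_bits = 512 - 2 * 8 * 8 = 384
 * With CONFIG_RANDOM_TRUST_CPU set:      _credit_init_bits(384)
 */
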
index b55c74a..1ee62cd 100644 (file)
@@ -15,6 +15,7 @@
 #include <linux/platform_device.h>
 #include <linux/regmap.h>
 #include <linux/seq_file.h>
+#include <linux/types.h>
 
 #define CRYSTALCOVE_GPIO_NUM   16
 #define CRYSTALCOVE_VGPIO_NUM  95
@@ -110,8 +111,7 @@ static inline int to_reg(int gpio, enum ctrl_register reg_type)
        return reg + gpio % 8;
 }
 
-static void crystalcove_update_irq_mask(struct crystalcove_gpio *cg,
-                                       int gpio)
+static void crystalcove_update_irq_mask(struct crystalcove_gpio *cg, int gpio)
 {
        u8 mirqs0 = gpio < 8 ? MGPIO0IRQS0 : MGPIO1IRQS0;
        int mask = BIT(gpio % 8);
@@ -140,8 +140,7 @@ static int crystalcove_gpio_dir_in(struct gpio_chip *chip, unsigned int gpio)
        return regmap_write(cg->regmap, reg, CTLO_INPUT_SET);
 }
 
-static int crystalcove_gpio_dir_out(struct gpio_chip *chip, unsigned int gpio,
-                                   int value)
+static int crystalcove_gpio_dir_out(struct gpio_chip *chip, unsigned int gpio, int value)
 {
        struct crystalcove_gpio *cg = gpiochip_get_data(chip);
        int reg = to_reg(gpio, CTRL_OUT);
@@ -168,8 +167,7 @@ static int crystalcove_gpio_get(struct gpio_chip *chip, unsigned int gpio)
        return val & 0x1;
 }
 
-static void crystalcove_gpio_set(struct gpio_chip *chip,
-                                unsigned int gpio, int value)
+static void crystalcove_gpio_set(struct gpio_chip *chip, unsigned int gpio, int value)
 {
        struct crystalcove_gpio *cg = gpiochip_get_data(chip);
        int reg = to_reg(gpio, CTRL_OUT);
@@ -185,10 +183,10 @@ static void crystalcove_gpio_set(struct gpio_chip *chip,
 
 static int crystalcove_irq_type(struct irq_data *data, unsigned int type)
 {
-       struct crystalcove_gpio *cg =
-               gpiochip_get_data(irq_data_get_irq_chip_data(data));
+       struct crystalcove_gpio *cg = gpiochip_get_data(irq_data_get_irq_chip_data(data));
+       irq_hw_number_t hwirq = irqd_to_hwirq(data);
 
-       if (data->hwirq >= CRYSTALCOVE_GPIO_NUM)
+       if (hwirq >= CRYSTALCOVE_GPIO_NUM)
                return 0;
 
        switch (type) {
@@ -215,22 +213,20 @@ static int crystalcove_irq_type(struct irq_data *data, unsigned int type)
 
 static void crystalcove_bus_lock(struct irq_data *data)
 {
-       struct crystalcove_gpio *cg =
-               gpiochip_get_data(irq_data_get_irq_chip_data(data));
+       struct crystalcove_gpio *cg = gpiochip_get_data(irq_data_get_irq_chip_data(data));
 
        mutex_lock(&cg->buslock);
 }
 
 static void crystalcove_bus_sync_unlock(struct irq_data *data)
 {
-       struct crystalcove_gpio *cg =
-               gpiochip_get_data(irq_data_get_irq_chip_data(data));
-       int gpio = data->hwirq;
+       struct crystalcove_gpio *cg = gpiochip_get_data(irq_data_get_irq_chip_data(data));
+       irq_hw_number_t hwirq = irqd_to_hwirq(data);
 
        if (cg->update & UPDATE_IRQ_TYPE)
-               crystalcove_update_irq_ctrl(cg, gpio);
+               crystalcove_update_irq_ctrl(cg, hwirq);
        if (cg->update & UPDATE_IRQ_MASK)
-               crystalcove_update_irq_mask(cg, gpio);
+               crystalcove_update_irq_mask(cg, hwirq);
        cg->update = 0;
 
        mutex_unlock(&cg->buslock);
@@ -238,34 +234,43 @@ static void crystalcove_bus_sync_unlock(struct irq_data *data)
 
 static void crystalcove_irq_unmask(struct irq_data *data)
 {
-       struct crystalcove_gpio *cg =
-               gpiochip_get_data(irq_data_get_irq_chip_data(data));
+       struct gpio_chip *gc = irq_data_get_irq_chip_data(data);
+       struct crystalcove_gpio *cg = gpiochip_get_data(gc);
+       irq_hw_number_t hwirq = irqd_to_hwirq(data);
 
-       if (data->hwirq < CRYSTALCOVE_GPIO_NUM) {
-               cg->set_irq_mask = false;
-               cg->update |= UPDATE_IRQ_MASK;
-       }
+       if (hwirq >= CRYSTALCOVE_GPIO_NUM)
+               return;
+
+       gpiochip_enable_irq(gc, hwirq);
+
+       cg->set_irq_mask = false;
+       cg->update |= UPDATE_IRQ_MASK;
 }
 
 static void crystalcove_irq_mask(struct irq_data *data)
 {
-       struct crystalcove_gpio *cg =
-               gpiochip_get_data(irq_data_get_irq_chip_data(data));
+       struct gpio_chip *gc = irq_data_get_irq_chip_data(data);
+       struct crystalcove_gpio *cg = gpiochip_get_data(gc);
+       irq_hw_number_t hwirq = irqd_to_hwirq(data);
 
-       if (data->hwirq < CRYSTALCOVE_GPIO_NUM) {
-               cg->set_irq_mask = true;
-               cg->update |= UPDATE_IRQ_MASK;
-       }
+       if (hwirq >= CRYSTALCOVE_GPIO_NUM)
+               return;
+
+       cg->set_irq_mask = true;
+       cg->update |= UPDATE_IRQ_MASK;
+
+       gpiochip_disable_irq(gc, hwirq);
 }
 
-static struct irq_chip crystalcove_irqchip = {
+static const struct irq_chip crystalcove_irqchip = {
        .name                   = "Crystal Cove",
        .irq_mask               = crystalcove_irq_mask,
        .irq_unmask             = crystalcove_irq_unmask,
        .irq_set_type           = crystalcove_irq_type,
        .irq_bus_lock           = crystalcove_bus_lock,
        .irq_bus_sync_unlock    = crystalcove_bus_sync_unlock,
-       .flags                  = IRQCHIP_SKIP_SET_WAKE,
+       .flags                  = IRQCHIP_SKIP_SET_WAKE | IRQCHIP_IMMUTABLE,
+       GPIOCHIP_IRQ_RESOURCE_HELPERS,
 };
 
 static irqreturn_t crystalcove_gpio_irq_handler(int irq, void *data)
@@ -293,8 +298,7 @@ static irqreturn_t crystalcove_gpio_irq_handler(int irq, void *data)
        return IRQ_HANDLED;
 }
 
-static void crystalcove_gpio_dbg_show(struct seq_file *s,
-                                     struct gpio_chip *chip)
+static void crystalcove_gpio_dbg_show(struct seq_file *s, struct gpio_chip *chip)
 {
        struct crystalcove_gpio *cg = gpiochip_get_data(chip);
        int gpio, offset;
@@ -353,7 +357,7 @@ static int crystalcove_gpio_probe(struct platform_device *pdev)
        cg->regmap = pmic->regmap;
 
        girq = &cg->chip.irq;
-       girq->chip = &crystalcove_irqchip;
+       gpio_irq_chip_set_chip(girq, &crystalcove_irqchip);
        /* This will let us handle the parent IRQ in the driver */
        girq->parent_handler = NULL;
        girq->num_parents = 0;
index 08b9e2c..71fa437 100644 (file)
@@ -46,7 +46,6 @@
 struct dln2_gpio {
        struct platform_device *pdev;
        struct gpio_chip gpio;
-       struct irq_chip irqchip;
 
        /*
         * Cache pin direction to save us one transfer, since the hardware has
@@ -306,6 +305,7 @@ static void dln2_irq_unmask(struct irq_data *irqd)
        struct dln2_gpio *dln2 = gpiochip_get_data(gc);
        int pin = irqd_to_hwirq(irqd);
 
+       gpiochip_enable_irq(gc, pin);
        set_bit(pin, dln2->unmasked_irqs);
 }
 
@@ -316,6 +316,7 @@ static void dln2_irq_mask(struct irq_data *irqd)
        int pin = irqd_to_hwirq(irqd);
 
        clear_bit(pin, dln2->unmasked_irqs);
+       gpiochip_disable_irq(gc, pin);
 }
 
 static int dln2_irq_set_type(struct irq_data *irqd, unsigned type)
@@ -384,6 +385,17 @@ static void dln2_irq_bus_unlock(struct irq_data *irqd)
        mutex_unlock(&dln2->irq_lock);
 }
 
+static const struct irq_chip dln2_irqchip = {
+       .name = "dln2-irq",
+       .irq_mask = dln2_irq_mask,
+       .irq_unmask = dln2_irq_unmask,
+       .irq_set_type = dln2_irq_set_type,
+       .irq_bus_lock = dln2_irq_bus_lock,
+       .irq_bus_sync_unlock = dln2_irq_bus_unlock,
+       .flags = IRQCHIP_IMMUTABLE,
+       GPIOCHIP_IRQ_RESOURCE_HELPERS,
+};
+
 static void dln2_gpio_event(struct platform_device *pdev, u16 echo,
                            const void *data, int len)
 {
@@ -465,15 +477,8 @@ static int dln2_gpio_probe(struct platform_device *pdev)
        dln2->gpio.direction_output = dln2_gpio_direction_output;
        dln2->gpio.set_config = dln2_gpio_set_config;
 
-       dln2->irqchip.name = "dln2-irq",
-       dln2->irqchip.irq_mask = dln2_irq_mask,
-       dln2->irqchip.irq_unmask = dln2_irq_unmask,
-       dln2->irqchip.irq_set_type = dln2_irq_set_type,
-       dln2->irqchip.irq_bus_lock = dln2_irq_bus_lock,
-       dln2->irqchip.irq_bus_sync_unlock = dln2_irq_bus_unlock,
-
        girq = &dln2->gpio.irq;
-       girq->chip = &dln2->irqchip;
+       gpio_irq_chip_set_chip(girq, &dln2_irqchip);
        /* The event comes from the outside so no parent handler */
        girq->parent_handler = NULL;
        girq->num_parents = 0;
index 04afe72..c22fcaa 100644 (file)
@@ -662,10 +662,9 @@ static int dwapb_get_clks(struct dwapb_gpio *gpio)
        gpio->clks[1].id = "db";
        err = devm_clk_bulk_get_optional(gpio->dev, DWAPB_NR_CLOCKS,
                                         gpio->clks);
-       if (err) {
-               dev_err(gpio->dev, "Cannot get APB/Debounce clocks\n");
-               return err;
-       }
+       if (err)
+               return dev_err_probe(gpio->dev, err,
+                                    "Cannot get APB/Debounce clocks\n");
 
        err = clk_bulk_prepare_enable(DWAPB_NR_CLOCKS, gpio->clks);
        if (err) {
index f3d1bae..72ac09a 100644 (file)
@@ -220,10 +220,8 @@ static void mrfld_irq_ack(struct irq_data *d)
        raw_spin_unlock_irqrestore(&priv->lock, flags);
 }
 
-static void mrfld_irq_unmask_mask(struct irq_data *d, bool unmask)
+static void mrfld_irq_unmask_mask(struct mrfld_gpio *priv, u32 gpio, bool unmask)
 {
-       struct mrfld_gpio *priv = irq_data_get_irq_chip_data(d);
-       u32 gpio = irqd_to_hwirq(d);
        void __iomem *gimr = gpio_reg(&priv->chip, gpio, GIMR);
        unsigned long flags;
        u32 value;
@@ -241,12 +239,20 @@ static void mrfld_irq_unmask_mask(struct irq_data *d, bool unmask)
 
 static void mrfld_irq_mask(struct irq_data *d)
 {
-       mrfld_irq_unmask_mask(d, false);
+       struct mrfld_gpio *priv = irq_data_get_irq_chip_data(d);
+       u32 gpio = irqd_to_hwirq(d);
+
+       mrfld_irq_unmask_mask(priv, gpio, false);
+       gpiochip_disable_irq(&priv->chip, gpio);
 }
 
 static void mrfld_irq_unmask(struct irq_data *d)
 {
-       mrfld_irq_unmask_mask(d, true);
+       struct mrfld_gpio *priv = irq_data_get_irq_chip_data(d);
+       u32 gpio = irqd_to_hwirq(d);
+
+       gpiochip_enable_irq(&priv->chip, gpio);
+       mrfld_irq_unmask_mask(priv, gpio, true);
 }
 
 static int mrfld_irq_set_type(struct irq_data *d, unsigned int type)
@@ -329,13 +335,15 @@ static int mrfld_irq_set_wake(struct irq_data *d, unsigned int on)
        return 0;
 }
 
-static struct irq_chip mrfld_irqchip = {
+static const struct irq_chip mrfld_irqchip = {
        .name           = "gpio-merrifield",
        .irq_ack        = mrfld_irq_ack,
        .irq_mask       = mrfld_irq_mask,
        .irq_unmask     = mrfld_irq_unmask,
        .irq_set_type   = mrfld_irq_set_type,
        .irq_set_wake   = mrfld_irq_set_wake,
+       .flags          = IRQCHIP_IMMUTABLE,
+       GPIOCHIP_IRQ_RESOURCE_HELPERS,
 };
 
 static void mrfld_irq_handler(struct irq_desc *desc)
@@ -482,7 +490,7 @@ static int mrfld_gpio_probe(struct pci_dev *pdev, const struct pci_device_id *id
                return retval;
 
        girq = &priv->chip.irq;
-       girq->chip = &mrfld_irqchip;
+       gpio_irq_chip_set_chip(girq, &mrfld_irqchip);
        girq->init_hw = mrfld_irq_init_hw;
        girq->parent_handler = mrfld_irq_handler;
        girq->num_parents = 1;
index acda4c5..8a83f7b 100644 (file)
@@ -38,7 +38,6 @@
 
 struct sch_gpio {
        struct gpio_chip chip;
-       struct irq_chip irqchip;
        spinlock_t lock;
        unsigned short iobase;
        unsigned short resume_base;
@@ -218,11 +217,9 @@ static void sch_irq_ack(struct irq_data *d)
        spin_unlock_irqrestore(&sch->lock, flags);
 }
 
-static void sch_irq_mask_unmask(struct irq_data *d, int val)
+static void sch_irq_mask_unmask(struct gpio_chip *gc, irq_hw_number_t gpio_num, int val)
 {
-       struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
        struct sch_gpio *sch = gpiochip_get_data(gc);
-       irq_hw_number_t gpio_num = irqd_to_hwirq(d);
        unsigned long flags;
 
        spin_lock_irqsave(&sch->lock, flags);
@@ -232,14 +229,32 @@ static void sch_irq_mask_unmask(struct irq_data *d, int val)
 
 static void sch_irq_mask(struct irq_data *d)
 {
-       sch_irq_mask_unmask(d, 0);
+       struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+       irq_hw_number_t gpio_num = irqd_to_hwirq(d);
+
+       sch_irq_mask_unmask(gc, gpio_num, 0);
+       gpiochip_disable_irq(gc, gpio_num);
 }
 
 static void sch_irq_unmask(struct irq_data *d)
 {
-       sch_irq_mask_unmask(d, 1);
+       struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+       irq_hw_number_t gpio_num = irqd_to_hwirq(d);
+
+       gpiochip_enable_irq(gc, gpio_num);
+       sch_irq_mask_unmask(gc, gpio_num, 1);
 }
 
+static const struct irq_chip sch_irqchip = {
+       .name = "sch_gpio",
+       .irq_ack = sch_irq_ack,
+       .irq_mask = sch_irq_mask,
+       .irq_unmask = sch_irq_unmask,
+       .irq_set_type = sch_irq_type,
+       .flags = IRQCHIP_IMMUTABLE,
+       GPIOCHIP_IRQ_RESOURCE_HELPERS,
+};
+
 static u32 sch_gpio_gpe_handler(acpi_handle gpe_device, u32 gpe, void *context)
 {
        struct sch_gpio *sch = context;
@@ -367,14 +382,8 @@ static int sch_gpio_probe(struct platform_device *pdev)
 
        platform_set_drvdata(pdev, sch);
 
-       sch->irqchip.name = "sch_gpio";
-       sch->irqchip.irq_ack = sch_irq_ack;
-       sch->irqchip.irq_mask = sch_irq_mask;
-       sch->irqchip.irq_unmask = sch_irq_unmask;
-       sch->irqchip.irq_set_type = sch_irq_type;
-
        girq = &sch->chip.irq;
-       girq->chip = &sch->irqchip;
+       gpio_irq_chip_set_chip(girq, &sch_irqchip);
        girq->num_parents = 0;
        girq->parents = NULL;
        girq->parent_handler = NULL;
index 16a0fae..c18b6b4 100644 (file)
@@ -299,6 +299,8 @@ static void wcove_irq_unmask(struct irq_data *data)
        if (gpio >= WCOVE_GPIO_NUM)
                return;
 
+       gpiochip_enable_irq(chip, gpio);
+
        wg->set_irq_mask = false;
        wg->update |= UPDATE_IRQ_MASK;
 }
@@ -314,15 +316,19 @@ static void wcove_irq_mask(struct irq_data *data)
 
        wg->set_irq_mask = true;
        wg->update |= UPDATE_IRQ_MASK;
+
+       gpiochip_disable_irq(chip, gpio);
 }
 
-static struct irq_chip wcove_irqchip = {
+static const struct irq_chip wcove_irqchip = {
        .name                   = "Whiskey Cove",
        .irq_mask               = wcove_irq_mask,
        .irq_unmask             = wcove_irq_unmask,
        .irq_set_type           = wcove_irq_type,
        .irq_bus_lock           = wcove_bus_lock,
        .irq_bus_sync_unlock    = wcove_bus_sync_unlock,
+       .flags                  = IRQCHIP_IMMUTABLE,
+       GPIOCHIP_IRQ_RESOURCE_HELPERS,
 };
 
 static irqreturn_t wcove_gpio_irq_handler(int irq, void *data)
@@ -452,7 +458,7 @@ static int wcove_gpio_probe(struct platform_device *pdev)
        }
 
        girq = &wg->chip.irq;
-       girq->chip = &wcove_irqchip;
+       gpio_irq_chip_set_chip(girq, &wcove_irqchip);
        /* This will let us handle the parent IRQ in the driver */
        girq->parent_handler = NULL;
        girq->num_parents = 0;
index 67abf8d..6b6d46e 100644 (file)
@@ -1918,9 +1918,6 @@ int amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(struct amdgpu_device *adev,
                return -EINVAL;
        }
 
-       /* delete kgd_mem from kfd_bo_list to avoid re-validating
-        * this BO in BO's restoring after eviction.
-        */
        mutex_lock(&mem->process_info->lock);
 
        ret = amdgpu_bo_reserve(bo, true);
@@ -1943,7 +1940,6 @@ int amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(struct amdgpu_device *adev,
 
        amdgpu_amdkfd_remove_eviction_fence(
                bo, mem->process_info->eviction_fence);
-       list_del_init(&mem->validate_list.head);
 
        if (size)
                *size = amdgpu_bo_size(bo);
@@ -2512,12 +2508,15 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef)
        process_info->eviction_fence = new_fence;
        *ef = dma_fence_get(&new_fence->base);
 
-       /* Attach new eviction fence to all BOs */
+       /* Attach new eviction fence to all BOs except pinned ones */
        list_for_each_entry(mem, &process_info->kfd_bo_list,
-               validate_list.head)
+               validate_list.head) {
+               if (mem->bo->tbo.pin_count)
+                       continue;
+
                amdgpu_bo_fence(mem->bo,
                        &process_info->eviction_fence->base, true);
-
+       }
        /* Attach eviction fence to PD / PT BOs */
        list_for_each_entry(peer_vm, &process_info->vm_list_head,
                            vm_list_node) {
index ede2fa5..1669915 100644 (file)
@@ -594,17 +594,20 @@ int amdgpu_get_gfx_off_status(struct amdgpu_device *adev, uint32_t *value)
 int amdgpu_gfx_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block)
 {
        int r;
-       r = amdgpu_ras_block_late_init(adev, ras_block);
-       if (r)
-               return r;
 
        if (amdgpu_ras_is_supported(adev, ras_block->block)) {
                if (!amdgpu_persistent_edc_harvesting_supported(adev))
                        amdgpu_ras_reset_error_status(adev, AMDGPU_RAS_BLOCK__GFX);
 
+               r = amdgpu_ras_block_late_init(adev, ras_block);
+               if (r)
+                       return r;
+
                r = amdgpu_irq_get(adev, &adev->gfx.cp_ecc_error_irq, 0);
                if (r)
                        goto late_fini;
+       } else {
+               amdgpu_ras_feature_enable_on_boot(adev, ras_block, 0);
        }
 
        return 0;
index 798c562..aebc384 100644 (file)
@@ -518,6 +518,8 @@ void amdgpu_gmc_tmz_set(struct amdgpu_device *adev)
        case IP_VERSION(9, 1, 0):
        /* RENOIR looks like RAVEN */
        case IP_VERSION(9, 3, 0):
+       /* GC 10.3.7 */
+       case IP_VERSION(10, 3, 7):
                if (amdgpu_tmz == 0) {
                        adev->gmc.tmz_enabled = false;
                        dev_info(adev->dev,
@@ -540,8 +542,6 @@ void amdgpu_gmc_tmz_set(struct amdgpu_device *adev)
        case IP_VERSION(10, 3, 1):
        /* YELLOW_CARP*/
        case IP_VERSION(10, 3, 3):
-       /* GC 10.3.7 */
-       case IP_VERSION(10, 3, 7):
                /* Don't enable it by default yet.
                 */
                if (amdgpu_tmz < 1) {
index 2de9309..dac202a 100644 (file)
@@ -197,6 +197,13 @@ static ssize_t amdgpu_ras_debugfs_read(struct file *f, char __user *buf,
        if (amdgpu_ras_query_error_status(obj->adev, &info))
                return -EINVAL;
 
+       /* Hardware counters are reset automatically after the query on Vega20 and Arcturus */
+       if (obj->adev->ip_versions[MP0_HWIP][0] != IP_VERSION(11, 0, 2) &&
+           obj->adev->ip_versions[MP0_HWIP][0] != IP_VERSION(11, 0, 4)) {
+               if (amdgpu_ras_reset_error_status(obj->adev, info.head.block))
+                       dev_warn(obj->adev->dev, "Failed to reset error counter and error status");
+       }
+
        s = snprintf(val, sizeof(val), "%s: %lu\n%s: %lu\n",
                        "ue", info.ue_count,
                        "ce", info.ce_count);
@@ -550,9 +557,10 @@ static ssize_t amdgpu_ras_sysfs_read(struct device *dev,
        if (amdgpu_ras_query_error_status(obj->adev, &info))
                return -EINVAL;
 
-       if (obj->adev->asic_type == CHIP_ALDEBARAN) {
+       if (obj->adev->ip_versions[MP0_HWIP][0] != IP_VERSION(11, 0, 2) &&
+           obj->adev->ip_versions[MP0_HWIP][0] != IP_VERSION(11, 0, 4)) {
                if (amdgpu_ras_reset_error_status(obj->adev, info.head.block))
-                       DRM_WARN("Failed to reset error counter and error status");
+                       dev_warn(obj->adev->dev, "Failed to reset error counter and error status");
        }
 
        return sysfs_emit(buf, "%s: %lu\n%s: %lu\n", "ue", info.ue_count,
@@ -1027,9 +1035,6 @@ int amdgpu_ras_query_error_status(struct amdgpu_device *adev,
                }
        }
 
-       if (!amdgpu_persistent_edc_harvesting_supported(adev))
-               amdgpu_ras_reset_error_status(adev, info->head.block);
-
        return 0;
 }
 
@@ -1149,6 +1154,12 @@ int amdgpu_ras_query_error_count(struct amdgpu_device *adev,
                if (res)
                        return res;
 
+               if (adev->ip_versions[MP0_HWIP][0] != IP_VERSION(11, 0, 2) &&
+                   adev->ip_versions[MP0_HWIP][0] != IP_VERSION(11, 0, 4)) {
+                       if (amdgpu_ras_reset_error_status(adev, info.head.block))
+                               dev_warn(adev->dev, "Failed to reset error counter and error status");
+               }
+
                ce += info.ce_count;
                ue += info.ue_count;
        }
@@ -1792,6 +1803,12 @@ static void amdgpu_ras_log_on_err_counter(struct amdgpu_device *adev)
                        continue;
 
                amdgpu_ras_query_error_status(adev, &info);
+
+               if (adev->ip_versions[MP0_HWIP][0] != IP_VERSION(11, 0, 2) &&
+                   adev->ip_versions[MP0_HWIP][0] != IP_VERSION(11, 0, 4)) {
+                       if (amdgpu_ras_reset_error_status(adev, info.head.block))
+                               dev_warn(adev->dev, "Failed to reset error counter and error status");
+               }
        }
 }
 
@@ -2278,8 +2295,9 @@ static void amdgpu_ras_check_supported(struct amdgpu_device *adev)
            !amdgpu_ras_asic_supported(adev))
                return;
 
-       if (!(amdgpu_sriov_vf(adev) &&
-               (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 2))))
+       /* If the driver runs on the SR-IOV guest side, only enable RAS for Aldebaran */
+       if (amdgpu_sriov_vf(adev) &&
+               adev->ip_versions[MP1_HWIP][0] != IP_VERSION(13, 0, 2))
                return;
 
        if (!adev->gmc.xgmi.connected_to_cpu) {
index 2ceeaa4..dc76d2b 100644 (file)
@@ -679,6 +679,7 @@ int amdgpu_vm_update_pdes(struct amdgpu_device *adev,
 {
        struct amdgpu_vm_update_params params;
        struct amdgpu_vm_bo_base *entry;
+       bool flush_tlb_needed = false;
        int r, idx;
 
        if (list_empty(&vm->relocated))
@@ -697,6 +698,9 @@ int amdgpu_vm_update_pdes(struct amdgpu_device *adev,
                goto error;
 
        list_for_each_entry(entry, &vm->relocated, vm_status) {
+               /* vm_flush_needed after updating moved PDEs */
+               flush_tlb_needed |= entry->moved;
+
                r = amdgpu_vm_pde_update(&params, entry);
                if (r)
                        goto error;
@@ -706,8 +710,8 @@ int amdgpu_vm_update_pdes(struct amdgpu_device *adev,
        if (r)
                goto error;
 
-       /* vm_flush_needed after updating PDEs */
-       atomic64_inc(&vm->tlb_seq);
+       if (flush_tlb_needed)
+               atomic64_inc(&vm->tlb_seq);
 
        while (!list_empty(&vm->relocated)) {
                entry = list_first_entry(&vm->relocated,
@@ -789,6 +793,11 @@ int amdgpu_vm_update_range(struct amdgpu_device *adev, struct amdgpu_vm *vm,
        flush_tlb |= adev->gmc.xgmi.num_physical_nodes &&
                     adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 0);
 
+       /*
+        * On GFX8 and older any 8 PTE block with a valid bit set enters the TLB
+        */
+       flush_tlb |= adev->ip_versions[GC_HWIP][0] < IP_VERSION(9, 0, 0);
+
        memset(&params, 0, sizeof(params));
        params.adev = adev;
        params.vm = vm;
index 8c0a3fc..a4a6751 100644 (file)
@@ -1096,6 +1096,7 @@ static void gfx_v11_0_read_wave_data(struct amdgpu_device *adev, uint32_t simd,
        dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_IB_STS2);
        dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_IB_DBG1);
        dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_M0);
+       dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_MODE);
 }
 
 static void gfx_v11_0_read_wave_sgprs(struct amdgpu_device *adev, uint32_t simd,
@@ -1316,7 +1317,7 @@ static void gfx_v11_0_rlc_backdoor_autoload_copy_ucode(struct amdgpu_device *ade
                memset(ptr + toc_offset + fw_size, 0, toc_fw_size - fw_size);
 
        if ((id != SOC21_FIRMWARE_ID_RS64_PFP) && (id != SOC21_FIRMWARE_ID_RS64_ME))
-               *(uint64_t *)fw_autoload_mask |= 1 << id;
+               *(uint64_t *)fw_autoload_mask |= 1ULL << id;
 }
 
 static void gfx_v11_0_rlc_backdoor_autoload_copy_toc_ucode(struct amdgpu_device *adev,
@@ -1983,7 +1984,7 @@ static int gfx_v11_0_init_csb(struct amdgpu_device *adev)
        return 0;
 }
 
-void gfx_v11_0_rlc_stop(struct amdgpu_device *adev)
+static void gfx_v11_0_rlc_stop(struct amdgpu_device *adev)
 {
        u32 tmp = RREG32_SOC15(GC, 0, regRLC_CNTL);
 
@@ -6028,6 +6029,7 @@ static void gfx_v11_0_handle_priv_fault(struct amdgpu_device *adev,
                break;
        default:
                BUG();
+               break;
        }
 }
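
The 1ULL fix above is the classic 32-bit shift pitfall: the autoload mask is a 64-bit word, so any firmware id above 31 needs a 64-bit shift; shifting a plain int by 32 or more is undefined behaviour. For illustration:

uint64_t bad  = 1 << 40;        /* int shift: undefined, high bits lost */
uint64_t good = 1ULL << 40;     /* 0x0000010000000000, as intended */
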
 
index a0c0b7d..7f4b480 100644 (file)
@@ -638,6 +638,12 @@ static int gmc_v11_0_mc_init(struct amdgpu_device *adev)
        adev->gmc.aper_base = pci_resource_start(adev->pdev, 0);
        adev->gmc.aper_size = pci_resource_len(adev->pdev, 0);
 
+#ifdef CONFIG_X86_64
+       if ((adev->flags & AMD_IS_APU) && !amdgpu_passthrough(adev)) {
+               adev->gmc.aper_base = adev->mmhub.funcs->get_mc_fb_offset(adev);
+               adev->gmc.aper_size = adev->gmc.real_vram_size;
+       }
+#endif
        /* In case the PCI BAR is larger than the actual amount of vram */
        adev->gmc.visible_vram_size = adev->gmc.aper_size;
        if (adev->gmc.visible_vram_size > adev->gmc.real_vram_size)
index 5d2dfef..d63d3f2 100644 (file)
@@ -299,7 +299,7 @@ static const struct imu_rlc_ram_golden imu_rlc_ram_golden_11_0_2[] =
        IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regCPG_PSP_DEBUG, CPG_PSP_DEBUG__GPA_OVERRIDE_MASK, 0)
 };
 
-void program_imu_rlc_ram(struct amdgpu_device *adev,
+static void program_imu_rlc_ram(struct amdgpu_device *adev,
                                const struct imu_rlc_ram_golden *regs,
                                const u32 array_size)
 {
index d2722ad..f3c1af5 100644 (file)
@@ -535,6 +535,10 @@ void jpeg_v2_0_dec_ring_emit_ib(struct amdgpu_ring *ring,
 {
        unsigned vmid = AMDGPU_JOB_GET_VMID(job);
 
+       amdgpu_ring_write(ring, PACKETJ(mmUVD_JPEG_IH_CTRL_INTERNAL_OFFSET,
+               0, 0, PACKETJ_TYPE0));
+       amdgpu_ring_write(ring, (vmid << JPEG_IH_CTRL__IH_VMID__SHIFT));
+
        amdgpu_ring_write(ring, PACKETJ(mmUVD_LMI_JRBC_IB_VMID_INTERNAL_OFFSET,
                0, 0, PACKETJ_TYPE0));
        amdgpu_ring_write(ring, (vmid | (vmid << 4)));
@@ -768,7 +772,7 @@ static const struct amdgpu_ring_funcs jpeg_v2_0_dec_ring_vm_funcs = {
                8 + /* jpeg_v2_0_dec_ring_emit_vm_flush */
                18 + 18 + /* jpeg_v2_0_dec_ring_emit_fence x2 vm fence */
                8 + 16,
-       .emit_ib_size = 22, /* jpeg_v2_0_dec_ring_emit_ib */
+       .emit_ib_size = 24, /* jpeg_v2_0_dec_ring_emit_ib */
        .emit_ib = jpeg_v2_0_dec_ring_emit_ib,
        .emit_fence = jpeg_v2_0_dec_ring_emit_fence,
        .emit_vm_flush = jpeg_v2_0_dec_ring_emit_vm_flush,
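
The emit_ib_size bump from 22 to 24 is the bookkeeping for the new packet: each PACKETJ register write costs two dwords (header plus payload), and the hunk above adds exactly one such write, for UVD_JPEG_IH_CTRL:

/* 22 dwords (old emit_ib) + 2 dwords (PACKETJ header + IH_CTRL payload) = 24 */
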
index 1a03baa..654e43e 100644 (file)
@@ -41,6 +41,7 @@
 #define mmUVD_JRBC_RB_REF_DATA_INTERNAL_OFFSET                         0x4084
 #define mmUVD_JRBC_STATUS_INTERNAL_OFFSET                              0x4089
 #define mmUVD_JPEG_PITCH_INTERNAL_OFFSET                               0x401f
+#define mmUVD_JPEG_IH_CTRL_INTERNAL_OFFSET                             0x4149
 
 #define JRBC_DEC_EXTERNAL_REG_WRITE_ADDR                               0x18000
 
index fcf5194..7eee004 100644 (file)
@@ -541,7 +541,7 @@ static void mes_v11_0_enable(struct amdgpu_device *adev, bool enable)
 
 /* This function is for backdoor MES firmware */
 static int mes_v11_0_load_microcode(struct amdgpu_device *adev,
-                                   enum admgpu_mes_pipe pipe)
+                                   enum admgpu_mes_pipe pipe, bool prime_icache)
 {
        int r;
        uint32_t data;
@@ -593,16 +593,18 @@ static int mes_v11_0_load_microcode(struct amdgpu_device *adev,
        /* Set 0x3FFFF (256K-1) to CP_MES_MDBOUND_LO */
        WREG32_SOC15(GC, 0, regCP_MES_MDBOUND_LO, 0x3FFFF);
 
-       /* invalidate ICACHE */
-       data = RREG32_SOC15(GC, 0, regCP_MES_IC_OP_CNTL);
-       data = REG_SET_FIELD(data, CP_MES_IC_OP_CNTL, PRIME_ICACHE, 0);
-       data = REG_SET_FIELD(data, CP_MES_IC_OP_CNTL, INVALIDATE_CACHE, 1);
-       WREG32_SOC15(GC, 0, regCP_MES_IC_OP_CNTL, data);
-
-       /* prime the ICACHE. */
-       data = RREG32_SOC15(GC, 0, regCP_MES_IC_OP_CNTL);
-       data = REG_SET_FIELD(data, CP_MES_IC_OP_CNTL, PRIME_ICACHE, 1);
-       WREG32_SOC15(GC, 0, regCP_MES_IC_OP_CNTL, data);
+       if (prime_icache) {
+               /* invalidate ICACHE */
+               data = RREG32_SOC15(GC, 0, regCP_MES_IC_OP_CNTL);
+               data = REG_SET_FIELD(data, CP_MES_IC_OP_CNTL, PRIME_ICACHE, 0);
+               data = REG_SET_FIELD(data, CP_MES_IC_OP_CNTL, INVALIDATE_CACHE, 1);
+               WREG32_SOC15(GC, 0, regCP_MES_IC_OP_CNTL, data);
+
+               /* prime the ICACHE. */
+               data = RREG32_SOC15(GC, 0, regCP_MES_IC_OP_CNTL);
+               data = REG_SET_FIELD(data, CP_MES_IC_OP_CNTL, PRIME_ICACHE, 1);
+               WREG32_SOC15(GC, 0, regCP_MES_IC_OP_CNTL, data);
+       }
 
        soc21_grbm_select(adev, 0, 0, 0, 0);
        mutex_unlock(&adev->srbm_mutex);
@@ -1044,17 +1046,19 @@ static int mes_v11_0_kiq_hw_init(struct amdgpu_device *adev)
        int r = 0;
 
        if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {
-               r = mes_v11_0_load_microcode(adev, AMDGPU_MES_KIQ_PIPE);
+
+               r = mes_v11_0_load_microcode(adev, AMDGPU_MES_SCHED_PIPE, false);
                if (r) {
-                       DRM_ERROR("failed to load MES kiq fw, r=%d\n", r);
+                       DRM_ERROR("failed to load MES fw, r=%d\n", r);
                        return r;
                }
 
-               r = mes_v11_0_load_microcode(adev, AMDGPU_MES_SCHED_PIPE);
+               r = mes_v11_0_load_microcode(adev, AMDGPU_MES_KIQ_PIPE, true);
                if (r) {
-                       DRM_ERROR("failed to load MES fw, r=%d\n", r);
+                       DRM_ERROR("failed to load MES kiq fw, r=%d\n", r);
                        return r;
                }
+
        }
 
        mes_v11_0_enable(adev, true);
@@ -1086,7 +1090,7 @@ static int mes_v11_0_hw_init(void *handle)
        if (!adev->enable_mes_kiq) {
                if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {
                        r = mes_v11_0_load_microcode(adev,
-                                            AMDGPU_MES_SCHED_PIPE);
+                                            AMDGPU_MES_SCHED_PIPE, true);
                        if (r) {
                                DRM_ERROR("failed to MES fw, r=%d\n", r);
                                return r;
index d016e3c..b3fba8d 100644 (file)
@@ -170,6 +170,7 @@ static const struct amdgpu_video_codec_info yc_video_codecs_decode_array[] = {
        {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)},
        {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)},
        {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)},
+       {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_AV1, 8192, 4352, 0)},
 };
 
 static const struct amdgpu_video_codecs yc_video_codecs_decode = {
index 06b2635..83c6cca 100644 (file)
@@ -469,6 +469,7 @@ static void sdma_v5_2_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 se
        }
 }
 
+
 /**
  * sdma_v5_2_gfx_stop - stop the gfx async dma engines
  *
@@ -514,21 +515,17 @@ static void sdma_v5_2_rlc_stop(struct amdgpu_device *adev)
 }
 
 /**
- * sdma_v5_2_ctx_switch_enable_for_instance - start the async dma engines
- * context switch for an instance
+ * sdma_v5_2_ctx_switch_enable - halt or unhalt the async dma engines context switch
  *
  * @adev: amdgpu_device pointer
- * @instance_idx: the index of the SDMA instance
+ * @enable: enable/disable the DMA MEs context switch.
  *
- * Unhalt the async dma engines context switch.
+ * Halt or unhalt the async dma engines context switch.
  */
-static void sdma_v5_2_ctx_switch_enable_for_instance(struct amdgpu_device *adev, int instance_idx)
+static void sdma_v5_2_ctx_switch_enable(struct amdgpu_device *adev, bool enable)
 {
        u32 f32_cntl, phase_quantum = 0;
-
-       if (WARN_ON(instance_idx >= adev->sdma.num_instances)) {
-               return;
-       }
+       int i;
 
        if (amdgpu_sdma_phase_quantum) {
                unsigned value = amdgpu_sdma_phase_quantum;
@@ -552,68 +549,50 @@ static void sdma_v5_2_ctx_switch_enable_for_instance(struct amdgpu_device *adev,
                phase_quantum =
                        value << SDMA0_PHASE0_QUANTUM__VALUE__SHIFT |
                        unit  << SDMA0_PHASE0_QUANTUM__UNIT__SHIFT;
-
-               WREG32_SOC15_IP(GC,
-                       sdma_v5_2_get_reg_offset(adev, instance_idx, mmSDMA0_PHASE0_QUANTUM),
-                       phase_quantum);
-               WREG32_SOC15_IP(GC,
-                       sdma_v5_2_get_reg_offset(adev, instance_idx, mmSDMA0_PHASE1_QUANTUM),
-                   phase_quantum);
-               WREG32_SOC15_IP(GC,
-                       sdma_v5_2_get_reg_offset(adev, instance_idx, mmSDMA0_PHASE2_QUANTUM),
-                   phase_quantum);
        }
 
-       if (!amdgpu_sriov_vf(adev)) {
-               f32_cntl = RREG32(sdma_v5_2_get_reg_offset(adev, instance_idx, mmSDMA0_CNTL));
-               f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_CNTL,
-                               AUTO_CTXSW_ENABLE, 1);
-               WREG32(sdma_v5_2_get_reg_offset(adev, instance_idx, mmSDMA0_CNTL), f32_cntl);
+       for (i = 0; i < adev->sdma.num_instances; i++) {
+               if (enable && amdgpu_sdma_phase_quantum) {
+                       WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_PHASE0_QUANTUM),
+                              phase_quantum);
+                       WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_PHASE1_QUANTUM),
+                              phase_quantum);
+                       WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_PHASE2_QUANTUM),
+                              phase_quantum);
+               }
+
+               if (!amdgpu_sriov_vf(adev)) {
+                       f32_cntl = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_CNTL));
+                       f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_CNTL,
+                                       AUTO_CTXSW_ENABLE, enable ? 1 : 0);
+                       WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_CNTL), f32_cntl);
+               }
        }
+
 }
 
 /**
- * sdma_v5_2_ctx_switch_disable_all - stop the async dma engines context switch
+ * sdma_v5_2_enable - halt or unhalt the async dma engines
  *
  * @adev: amdgpu_device pointer
+ * @enable: enable/disable the DMA MEs.
  *
- * Halt the async dma engines context switch.
+ * Halt or unhalt the async dma engines.
  */
-static void sdma_v5_2_ctx_switch_disable_all(struct amdgpu_device *adev)
+static void sdma_v5_2_enable(struct amdgpu_device *adev, bool enable)
 {
        u32 f32_cntl;
        int i;
 
-       if (amdgpu_sriov_vf(adev))
-               return;
-
-       for (i = 0; i < adev->sdma.num_instances; i++) {
-               f32_cntl = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_CNTL));
-               f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_CNTL,
-                               AUTO_CTXSW_ENABLE, 0);
-               WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_CNTL), f32_cntl);
+       if (!enable) {
+               sdma_v5_2_gfx_stop(adev);
+               sdma_v5_2_rlc_stop(adev);
        }
-}
-
-/**
- * sdma_v5_2_halt - stop the async dma engines
- *
- * @adev: amdgpu_device pointer
- *
- * Halt the async dma engines.
- */
-static void sdma_v5_2_halt(struct amdgpu_device *adev)
-{
-       int i;
-       u32 f32_cntl;
-
-       sdma_v5_2_gfx_stop(adev);
-       sdma_v5_2_rlc_stop(adev);
 
        if (!amdgpu_sriov_vf(adev)) {
                for (i = 0; i < adev->sdma.num_instances; i++) {
                        f32_cntl = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_F32_CNTL));
-                       f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_F32_CNTL, HALT, 1);
+                       f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_F32_CNTL, HALT, enable ? 0 : 1);
                        WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_F32_CNTL), f32_cntl);
                }
        }
@@ -625,9 +604,6 @@ static void sdma_v5_2_halt(struct amdgpu_device *adev)
  * @adev: amdgpu_device pointer
  *
  * Set up the gfx DMA ring buffers and enable them.
- * It assumes that the dma engine is stopped for each instance.
- * The function enables the engine and preemptions sequentially for each instance.
- *
  * Returns 0 for success, error for failure.
  */
 static int sdma_v5_2_gfx_resume(struct amdgpu_device *adev)
@@ -769,7 +745,10 @@ static int sdma_v5_2_gfx_resume(struct amdgpu_device *adev)
 
                ring->sched.ready = true;
 
-               sdma_v5_2_ctx_switch_enable_for_instance(adev, i);
+               if (amdgpu_sriov_vf(adev)) { /* the bare-metal sequence doesn't need the two lines below */
+                       sdma_v5_2_ctx_switch_enable(adev, true);
+                       sdma_v5_2_enable(adev, true);
+               }
 
                r = amdgpu_ring_test_ring(ring);
                if (r) {
@@ -813,7 +792,7 @@ static int sdma_v5_2_load_microcode(struct amdgpu_device *adev)
        int i, j;
 
        /* halt the MEs */
-       sdma_v5_2_halt(adev);
+       sdma_v5_2_enable(adev, false);
 
        for (i = 0; i < adev->sdma.num_instances; i++) {
                if (!adev->sdma.instance[i].fw)
@@ -885,8 +864,8 @@ static int sdma_v5_2_start(struct amdgpu_device *adev)
        int r = 0;
 
        if (amdgpu_sriov_vf(adev)) {
-               sdma_v5_2_ctx_switch_disable_all(adev);
-               sdma_v5_2_halt(adev);
+               sdma_v5_2_ctx_switch_enable(adev, false);
+               sdma_v5_2_enable(adev, false);
 
                /* set RB registers */
                r = sdma_v5_2_gfx_resume(adev);
@@ -910,10 +889,12 @@ static int sdma_v5_2_start(struct amdgpu_device *adev)
                amdgpu_gfx_off_ctrl(adev, false);
 
        sdma_v5_2_soft_reset(adev);
+       /* unhalt the MEs */
+       sdma_v5_2_enable(adev, true);
+       /* enable sdma ring preemption */
+       sdma_v5_2_ctx_switch_enable(adev, true);
 
-       /* Soft reset supposes to disable the dma engine and preemption.
-        * Now start the gfx rings and rlc compute queues.
-        */
+       /* start the gfx rings and rlc compute queues */
        r = sdma_v5_2_gfx_resume(adev);
        if (adev->in_s0ix)
                amdgpu_gfx_off_ctrl(adev, true);
@@ -1447,8 +1428,8 @@ static int sdma_v5_2_hw_fini(void *handle)
        if (amdgpu_sriov_vf(adev))
                return 0;
 
-       sdma_v5_2_ctx_switch_disable_all(adev);
-       sdma_v5_2_halt(adev);
+       sdma_v5_2_ctx_switch_enable(adev, false);
+       sdma_v5_2_enable(adev, false);
 
        return 0;
 }
index 3cabcee..39405f0 100644 (file)
@@ -1761,23 +1761,21 @@ static const struct amdgpu_ring_funcs vcn_v3_0_dec_sw_ring_vm_funcs = {
        .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
 };
 
-static int vcn_v3_0_limit_sched(struct amdgpu_cs_parser *p,
-                               struct amdgpu_job *job)
+static int vcn_v3_0_limit_sched(struct amdgpu_cs_parser *p)
 {
        struct drm_gpu_scheduler **scheds;
 
        /* The create msg must be in the first IB submitted */
-       if (atomic_read(&job->base.entity->fence_seq))
+       if (atomic_read(&p->entity->fence_seq))
                return -EINVAL;
 
        scheds = p->adev->gpu_sched[AMDGPU_HW_IP_VCN_DEC]
                [AMDGPU_RING_PRIO_DEFAULT].sched;
-       drm_sched_entity_modify_sched(job->base.entity, scheds, 1);
+       drm_sched_entity_modify_sched(p->entity, scheds, 1);
        return 0;
 }
 
-static int vcn_v3_0_dec_msg(struct amdgpu_cs_parser *p, struct amdgpu_job *job,
-                           uint64_t addr)
+static int vcn_v3_0_dec_msg(struct amdgpu_cs_parser *p, uint64_t addr)
 {
        struct ttm_operation_ctx ctx = { false, false };
        struct amdgpu_bo_va_mapping *map;
@@ -1848,7 +1846,7 @@ static int vcn_v3_0_dec_msg(struct amdgpu_cs_parser *p, struct amdgpu_job *job,
                if (create[0] == 0x7 || create[0] == 0x10 || create[0] == 0x11)
                        continue;
 
-               r = vcn_v3_0_limit_sched(p, job);
+               r = vcn_v3_0_limit_sched(p);
                if (r)
                        goto out;
        }
@@ -1862,7 +1860,7 @@ static int vcn_v3_0_ring_patch_cs_in_place(struct amdgpu_cs_parser *p,
                                           struct amdgpu_job *job,
                                           struct amdgpu_ib *ib)
 {
-       struct amdgpu_ring *ring = to_amdgpu_ring(job->base.sched);
+       struct amdgpu_ring *ring = to_amdgpu_ring(p->entity->rq->sched);
        uint32_t msg_lo = 0, msg_hi = 0;
        unsigned i;
        int r;
@@ -1881,8 +1879,7 @@ static int vcn_v3_0_ring_patch_cs_in_place(struct amdgpu_cs_parser *p,
                        msg_hi = val;
                } else if (reg == PACKET0(p->adev->vcn.internal.cmd, 0) &&
                           val == 0) {
-                       r = vcn_v3_0_dec_msg(p, job,
-                                            ((u64)msg_hi) << 32 | msg_lo);
+                       r = vcn_v3_0_dec_msg(p, ((u64)msg_hi) << 32 | msg_lo);
                        if (r)
                                return r;
                }
index 5e9adbc..cbfb32b 100644 (file)
@@ -1516,6 +1516,8 @@ static int kfd_fill_gpu_cache_info(struct kfd_dev *kdev,
                        num_of_cache_types = ARRAY_SIZE(beige_goby_cache_info);
                        break;
                case IP_VERSION(10, 3, 3):
+               case IP_VERSION(10, 3, 6): /* TODO: Double check these on production silicon */
+               case IP_VERSION(10, 3, 7): /* TODO: Double check these on production silicon */
                        pcache_info = yellow_carp_cache_info;
                        num_of_cache_types = ARRAY_SIZE(yellow_carp_cache_info);
                        break;
index 8667e3d..bf42004 100644 (file)
@@ -73,6 +73,8 @@ static void kfd_device_info_set_sdma_info(struct kfd_dev *kfd)
        case IP_VERSION(4, 1, 2):/* RENOIR */
        case IP_VERSION(5, 2, 1):/* VANGOGH */
        case IP_VERSION(5, 2, 3):/* YELLOW_CARP */
+       case IP_VERSION(5, 2, 6):/* GC 10.3.6 */
+       case IP_VERSION(5, 2, 7):/* GC 10.3.7 */
        case IP_VERSION(6, 0, 1):
                kfd->device_info.num_sdma_queues_per_engine = 2;
                break;
@@ -127,6 +129,8 @@ static void kfd_device_info_set_event_interrupt_class(struct kfd_dev *kfd)
        case IP_VERSION(9, 4, 2): /* ALDEBARAN */
        case IP_VERSION(10, 3, 1): /* VANGOGH */
        case IP_VERSION(10, 3, 3): /* YELLOW_CARP */
+       case IP_VERSION(10, 3, 6): /* GC 10.3.6 */
+       case IP_VERSION(10, 3, 7): /* GC 10.3.7 */
        case IP_VERSION(10, 1, 3): /* CYAN_SKILLFISH */
        case IP_VERSION(10, 1, 4):
        case IP_VERSION(10, 1, 10): /* NAVI10 */
@@ -178,7 +182,9 @@ static void kfd_device_info_init(struct kfd_dev *kfd,
 
                if (gc_version < IP_VERSION(11, 0, 0)) {
                        /* Navi2x+, Navi1x+ */
-                       if (gc_version >= IP_VERSION(10, 3, 0))
+                       if (gc_version == IP_VERSION(10, 3, 6))
+                               kfd->device_info.no_atomic_fw_version = 14;
+                       else if (gc_version >= IP_VERSION(10, 3, 0))
                                kfd->device_info.no_atomic_fw_version = 92;
                        else if (gc_version >= IP_VERSION(10, 1, 1))
                                kfd->device_info.no_atomic_fw_version = 145;
@@ -368,6 +374,16 @@ struct kfd_dev *kgd2kfd_probe(struct amdgpu_device *adev, bool vf)
                        if (!vf)
                                f2g = &gfx_v10_3_kfd2kgd;
                        break;
+               case IP_VERSION(10, 3, 6):
+                       gfx_target_version = 100306;
+                       if (!vf)
+                               f2g = &gfx_v10_3_kfd2kgd;
+                       break;
+               case IP_VERSION(10, 3, 7):
+                       gfx_target_version = 100307;
+                       if (!vf)
+                               f2g = &gfx_v10_3_kfd2kgd;
+                       break;
                case IP_VERSION(11, 0, 0):
                        gfx_target_version = 110000;
                        f2g = &gfx_v11_kfd2kgd;
index 997650d..e44376c 100644 (file)
@@ -296,7 +296,7 @@ svm_migrate_copy_to_vram(struct amdgpu_device *adev, struct svm_range *prange,
                         struct migrate_vma *migrate, struct dma_fence **mfence,
                         dma_addr_t *scratch)
 {
-       uint64_t npages = migrate->cpages;
+       uint64_t npages = migrate->npages;
        struct device *dev = adev->dev;
        struct amdgpu_res_cursor cursor;
        dma_addr_t *src;
@@ -344,7 +344,7 @@ svm_migrate_copy_to_vram(struct amdgpu_device *adev, struct svm_range *prange,
                                                mfence);
                                if (r)
                                        goto out_free_vram_pages;
-                               amdgpu_res_next(&cursor, j << PAGE_SHIFT);
+                               amdgpu_res_next(&cursor, (j + 1) << PAGE_SHIFT);
                                j = 0;
                        } else {
                                amdgpu_res_next(&cursor, PAGE_SIZE);
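
The migration fix above is an off-by-one in the resource cursor: when a batch of j contiguous pages is flushed because the VRAM run ended, the page that triggered the flush has also been consumed, so the cursor must advance by j + 1 pages, not j. The arithmetic in isolation (PAGE_SHIFT of 12, i.e. 4 KiB pages, assumed):

#include <stdio.h>

#define PAGE_SHIFT 12   /* 4 KiB pages, as on x86 */

int main(void)
{
        unsigned long cursor = 0;
        unsigned int j = 3;     /* pages batched before the flush */

        /* old code: cursor += j << PAGE_SHIFT;  -- one page short */
        cursor += (unsigned long)(j + 1) << PAGE_SHIFT;
        printf("cursor advanced %#lx bytes (%u pages)\n", cursor, j + 1);
        return 0;
}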
@@ -590,7 +590,7 @@ svm_migrate_copy_to_ram(struct amdgpu_device *adev, struct svm_range *prange,
                        continue;
                }
                src[i] = svm_migrate_addr(adev, spage);
-               if (i > 0 && src[i] != src[i - 1] + PAGE_SIZE) {
+               if (j > 0 && src[i] != src[i - 1] + PAGE_SIZE) {
                        r = svm_migrate_copy_memory_gart(adev, dst + i - j,
                                                         src + i - j, j,
                                                         FROM_VRAM_TO_RAM,
index 2ebf013..7b33224 100644 (file)
@@ -1295,7 +1295,7 @@ svm_range_map_to_gpu(struct kfd_process_device *pdd, struct svm_range *prange,
                r = amdgpu_vm_update_range(adev, vm, false, false, flush_tlb, NULL,
                                           last_start, prange->start + i,
                                           pte_flags,
-                                          last_start - prange->start,
+                                          (last_start - prange->start) << PAGE_SHIFT,
                                           bo_adev ? bo_adev->vm_manager.vram_base_offset : 0,
                                           NULL, dma_addr, &vm->last_update);
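
The mapping fix above is a unit mismatch: the offset argument is expected in bytes, while last_start - prange->start is a page count, hence the new << PAGE_SHIFT. The conversion on its own (4 KiB pages assumed, values illustrative):

#include <stdio.h>

#define PAGE_SHIFT 12

int main(void)
{
        unsigned long last_start = 16, range_start = 4;   /* page numbers */
        unsigned long offset = (last_start - range_start) << PAGE_SHIFT;

        printf("12 pages -> byte offset %#lx\n", offset); /* 0xc000 */
        return 0;
}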
 
@@ -2307,6 +2307,8 @@ svm_range_cpu_invalidate_pagetables(struct mmu_interval_notifier *mni,
 
        if (range->event == MMU_NOTIFY_RELEASE)
                return true;
+       if (!mmget_not_zero(mni->mm))
+               return true;
 
        start = mni->interval_tree.start;
        last = mni->interval_tree.last;
@@ -2333,6 +2335,7 @@ svm_range_cpu_invalidate_pagetables(struct mmu_interval_notifier *mni,
        }
 
        svm_range_unlock(prange);
+       mmput(mni->mm);
 
        return true;
 }
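
The notifier fix above wraps the invalidation work in mmget_not_zero()/mmput() so a racing process exit cannot free the mm underneath it. The primitive is an increment-only-if-nonzero on a reference count; a minimal userspace model with C11 atomics (get_not_zero is an illustrative name, not the kernel implementation):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Take a reference only if the count is still nonzero, like mmget_not_zero(). */
static bool get_not_zero(atomic_int *count)
{
        int old = atomic_load(count);

        while (old != 0) {
                if (atomic_compare_exchange_weak(count, &old, old + 1))
                        return true;    /* reference taken */
        }
        return false;   /* object already dying, don't touch it */
}

int main(void)
{
        atomic_int users = 1;

        if (get_not_zero(&users)) {
                /* ... safe to use the object here ... */
                atomic_fetch_sub(&users, 1);    /* mmput() equivalent */
        }
        printf("users=%d\n", atomic_load(&users));
        return 0;
}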
index ceb3437..bca5f01 100644 (file)
@@ -287,8 +287,11 @@ static void dcn31_enable_pme_wa(struct clk_mgr *clk_mgr_base)
 
 void dcn31_init_clocks(struct clk_mgr *clk_mgr)
 {
+       uint32_t ref_dtbclk = clk_mgr->clks.ref_dtbclk_khz;
+
        memset(&(clk_mgr->clks), 0, sizeof(struct dc_clocks));
        // Assumption is that boot state always supports pstate
+       clk_mgr->clks.ref_dtbclk_khz = ref_dtbclk;      // restore ref_dtbclk
        clk_mgr->clks.p_state_change_support = true;
        clk_mgr->clks.prev_p_state_change_support = true;
        clk_mgr->clks.pwr_state = DCN_PWR_STATE_UNKNOWN;
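
dcn31_init_clocks() zeroes the entire clock state, so the DTBCLK reference frequency read at construction must be saved before the memset and written back after it, as the hunk above does. The same save/wipe/restore pattern in a standalone sketch (field names are stand-ins):

#include <stdio.h>
#include <string.h>

struct clocks { int ref_dtbclk_khz; int dispclk_khz; /* ... */ };

int main(void)
{
        struct clocks clks = { .ref_dtbclk_khz = 600000, .dispclk_khz = 400000 };
        int ref = clks.ref_dtbclk_khz;          /* save before wiping */

        memset(&clks, 0, sizeof(clks));         /* reset everything else */
        clks.ref_dtbclk_khz = ref;              /* restore the reference clock */

        printf("ref=%d disp=%d\n", clks.ref_dtbclk_khz, clks.dispclk_khz);
        return 0;
}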
@@ -638,8 +641,14 @@ static void dcn31_set_low_power_state(struct clk_mgr *clk_mgr_base)
        }
 }
 
+int dcn31_get_dtb_ref_freq_khz(struct clk_mgr *clk_mgr_base)
+{
+       return clk_mgr_base->clks.ref_dtbclk_khz;
+}
+
 static struct clk_mgr_funcs dcn31_funcs = {
        .get_dp_ref_clk_frequency = dce12_get_dp_ref_freq_khz,
+       .get_dtb_ref_clk_frequency = dcn31_get_dtb_ref_freq_khz,
        .update_clocks = dcn31_update_clocks,
        .init_clocks = dcn31_init_clocks,
        .enable_pme_wa = dcn31_enable_pme_wa,
@@ -719,7 +728,7 @@ void dcn31_clk_mgr_construct(
        }
 
        clk_mgr->base.base.dprefclk_khz = 600000;
-       clk_mgr->base.dccg->ref_dtbclk_khz = 600000;
+       clk_mgr->base.base.clks.ref_dtbclk_khz = 600000;
        dce_clock_read_ss_info(&clk_mgr->base);
        /*if bios enabled SS, driver needs to adjust dtb clock, only enable with correct bios*/
        //clk_mgr->base.dccg->ref_dtbclk_khz = dce_adjust_dp_ref_freq_for_ss(clk_mgr_internal, clk_mgr->base.base.dprefclk_khz);
index 961b10a..be06fdb 100644 (file)
@@ -51,6 +51,8 @@ void dcn31_clk_mgr_construct(struct dc_context *ctx,
                struct pp_smu_funcs *pp_smu,
                struct dccg *dccg);
 
+int dcn31_get_dtb_ref_freq_khz(struct clk_mgr *clk_mgr_base);
+
 void dcn31_clk_mgr_destroy(struct clk_mgr_internal *clk_mgr_int);
 
 #endif //__DCN31_CLK_MGR_H__
index a2ade6e..fb4ae80 100644 (file)
@@ -41,9 +41,7 @@
 
 #include "dc_dmub_srv.h"
 
-#if defined (CONFIG_DRM_AMD_DC_DP2_0)
 #include "dc_link_dp.h"
-#endif
 
 #define TO_CLK_MGR_DCN315(clk_mgr)\
        container_of(clk_mgr, struct clk_mgr_dcn315, base)
@@ -580,6 +578,7 @@ static void dcn315_enable_pme_wa(struct clk_mgr *clk_mgr_base)
 
 static struct clk_mgr_funcs dcn315_funcs = {
        .get_dp_ref_clk_frequency = dce12_get_dp_ref_freq_khz,
+       .get_dtb_ref_clk_frequency = dcn31_get_dtb_ref_freq_khz,
        .update_clocks = dcn315_update_clocks,
        .init_clocks = dcn31_init_clocks,
        .enable_pme_wa = dcn315_enable_pme_wa,
@@ -656,9 +655,9 @@ void dcn315_clk_mgr_construct(
 
        clk_mgr->base.base.dprefclk_khz = 600000;
        clk_mgr->base.base.dprefclk_khz = dcn315_smu_get_dpref_clk(&clk_mgr->base);
-       clk_mgr->base.dccg->ref_dtbclk_khz = clk_mgr->base.base.dprefclk_khz;
+       clk_mgr->base.base.clks.ref_dtbclk_khz = clk_mgr->base.base.dprefclk_khz;
        dce_clock_read_ss_info(&clk_mgr->base);
-       clk_mgr->base.dccg->ref_dtbclk_khz = dce_adjust_dp_ref_freq_for_ss(&clk_mgr->base, clk_mgr->base.base.dprefclk_khz);
+       clk_mgr->base.base.clks.ref_dtbclk_khz = dce_adjust_dp_ref_freq_for_ss(&clk_mgr->base, clk_mgr->base.base.dprefclk_khz);
 
        clk_mgr->base.base.bw_params = &dcn315_bw_params;
 
index fc3af81..e4bb9c6 100644 (file)
@@ -571,6 +571,7 @@ static void dcn316_clk_mgr_helper_populate_bw_params(
 static struct clk_mgr_funcs dcn316_funcs = {
        .enable_pme_wa = dcn316_enable_pme_wa,
        .get_dp_ref_clk_frequency = dce12_get_dp_ref_freq_khz,
+       .get_dtb_ref_clk_frequency = dcn31_get_dtb_ref_freq_khz,
        .update_clocks = dcn316_update_clocks,
        .init_clocks = dcn31_init_clocks,
        .are_clock_states_equal = dcn31_are_clock_states_equal,
@@ -685,7 +686,7 @@ void dcn316_clk_mgr_construct(
 
        clk_mgr->base.base.dprefclk_khz = 600000;
        clk_mgr->base.base.dprefclk_khz = dcn316_smu_get_dpref_clk(&clk_mgr->base);
-       clk_mgr->base.dccg->ref_dtbclk_khz = clk_mgr->base.base.dprefclk_khz;
+       clk_mgr->base.base.clks.ref_dtbclk_khz = clk_mgr->base.base.dprefclk_khz;
        dce_clock_read_ss_info(&clk_mgr->base);
        /*clk_mgr->base.dccg->ref_dtbclk_khz =
        dce_adjust_dp_ref_freq_for_ss(&clk_mgr->base, clk_mgr->base.base.dprefclk_khz);*/
index dc30ac3..cbc47ae 100644 (file)
@@ -114,8 +114,8 @@ static const struct dc_link_settings fail_safe_link_settings = {
 
 static bool decide_fallback_link_setting(
                struct dc_link *link,
-               struct dc_link_settings initial_link_settings,
-               struct dc_link_settings *current_link_setting,
+               struct dc_link_settings *max,
+               struct dc_link_settings *cur,
                enum link_training_result training_result);
 static void maximize_lane_settings(const struct link_training_settings *lt_settings,
                struct dc_lane_settings lane_settings[LANE_COUNT_DP_MAX]);
@@ -2784,6 +2784,7 @@ bool perform_link_training_with_retries(
        enum dp_panel_mode panel_mode = dp_get_panel_mode(link);
        enum link_training_result status = LINK_TRAINING_CR_FAIL_LANE0;
        struct dc_link_settings cur_link_settings = *link_setting;
+       struct dc_link_settings max_link_settings = *link_setting;
        const struct link_hwss *link_hwss = get_link_hwss(link, &pipe_ctx->link_res);
        int fail_count = 0;
        bool is_link_bw_low = false; /* link bandwidth < stream bandwidth */
@@ -2793,7 +2794,6 @@ bool perform_link_training_with_retries(
 
        dp_trace_commit_lt_init(link);
 
-
        if (dp_get_link_encoding_format(&cur_link_settings) == DP_8b_10b_ENCODING)
                /* We need to do this before the link training to ensure the idle
                 * pattern in SST mode will be sent right after the link training
@@ -2909,19 +2909,15 @@ bool perform_link_training_with_retries(
                        uint32_t req_bw;
                        uint32_t link_bw;
 
-                       decide_fallback_link_setting(link, *link_setting, &cur_link_settings, status);
-                       /* Flag if reduced link bandwidth no longer meets stream requirements or fallen back to
-                        * minimum link bandwidth.
+                       decide_fallback_link_setting(link, &max_link_settings,
+                                       &cur_link_settings, status);
+                       /* Fail link training if reduced link bandwidth no longer meets
+                        * stream requirements.
                         */
                        req_bw = dc_bandwidth_in_kbps_from_timing(&stream->timing);
                        link_bw = dc_link_bandwidth_kbps(link, &cur_link_settings);
-                       is_link_bw_low = (req_bw > link_bw);
-                       is_link_bw_min = ((cur_link_settings.link_rate <= LINK_RATE_LOW) &&
-                               (cur_link_settings.lane_count <= LANE_COUNT_ONE));
-
-                       if (is_link_bw_low)
-                               DC_LOG_WARNING("%s: Link bandwidth too low after fallback req_bw(%d) > link_bw(%d)\n",
-                                       __func__, req_bw, link_bw);
+                       if (req_bw > link_bw)
+                               break;
                }
 
                msleep(delay_between_attempts);
@@ -3309,7 +3305,7 @@ static bool dp_verify_link_cap(
        int *fail_count)
 {
        struct dc_link_settings cur_link_settings = {0};
-       struct dc_link_settings initial_link_settings = *known_limit_link_setting;
+       struct dc_link_settings max_link_settings = *known_limit_link_setting;
        bool success = false;
        bool skip_video_pattern;
        enum clock_source_id dp_cs_id = get_clock_source_id(link);
@@ -3318,7 +3314,7 @@ static bool dp_verify_link_cap(
        struct link_resource link_res;
 
        memset(&irq_data, 0, sizeof(irq_data));
-       cur_link_settings = initial_link_settings;
+       cur_link_settings = max_link_settings;
 
        /* Grant extended timeout request */
        if ((link->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT) && (link->dpcd_caps.lttpr_caps.max_ext_timeout > 0)) {
@@ -3361,7 +3357,7 @@ static bool dp_verify_link_cap(
                dp_trace_lt_result_update(link, status, true);
                dp_disable_link_phy(link, &link_res, link->connector_signal);
        } while (!success && decide_fallback_link_setting(link,
-                       initial_link_settings, &cur_link_settings, status));
+                       &max_link_settings, &cur_link_settings, status));
 
        link->verified_link_cap = success ?
                        cur_link_settings : fail_safe_link_settings;
@@ -3596,16 +3592,19 @@ static bool decide_fallback_link_setting_max_bw_policy(
  */
 static bool decide_fallback_link_setting(
                struct dc_link *link,
-               struct dc_link_settings initial_link_settings,
-               struct dc_link_settings *current_link_setting,
+               struct dc_link_settings *max,
+               struct dc_link_settings *cur,
                enum link_training_result training_result)
 {
-       if (!current_link_setting)
+       if (!cur)
                return false;
-       if (dp_get_link_encoding_format(&initial_link_settings) == DP_128b_132b_ENCODING ||
+       if (!max)
+               return false;
+
+       if (dp_get_link_encoding_format(max) == DP_128b_132b_ENCODING ||
                        link->dc->debug.force_dp2_lt_fallback_method)
-               return decide_fallback_link_setting_max_bw_policy(link, &initial_link_settings,
-                               current_link_setting, training_result);
+               return decide_fallback_link_setting_max_bw_policy(link, max, cur,
+                               training_result);
 
        switch (training_result) {
        case LINK_TRAINING_CR_FAIL_LANE0:
@@ -3613,28 +3612,18 @@ static bool decide_fallback_link_setting(
        case LINK_TRAINING_CR_FAIL_LANE23:
        case LINK_TRAINING_LQA_FAIL:
        {
-               if (!reached_minimum_link_rate
-                               (current_link_setting->link_rate)) {
-                       current_link_setting->link_rate =
-                               reduce_link_rate(
-                                       current_link_setting->link_rate);
-               } else if (!reached_minimum_lane_count
-                               (current_link_setting->lane_count)) {
-                       current_link_setting->link_rate =
-                               initial_link_settings.link_rate;
+               if (!reached_minimum_link_rate(cur->link_rate)) {
+                       cur->link_rate = reduce_link_rate(cur->link_rate);
+               } else if (!reached_minimum_lane_count(cur->lane_count)) {
+                       cur->link_rate = max->link_rate;
                        if (training_result == LINK_TRAINING_CR_FAIL_LANE0)
                                return false;
                        else if (training_result == LINK_TRAINING_CR_FAIL_LANE1)
-                               current_link_setting->lane_count =
-                                               LANE_COUNT_ONE;
-                       else if (training_result ==
-                                       LINK_TRAINING_CR_FAIL_LANE23)
-                               current_link_setting->lane_count =
-                                               LANE_COUNT_TWO;
+                               cur->lane_count = LANE_COUNT_ONE;
+                       else if (training_result == LINK_TRAINING_CR_FAIL_LANE23)
+                               cur->lane_count = LANE_COUNT_TWO;
                        else
-                               current_link_setting->lane_count =
-                                       reduce_lane_count(
-                                       current_link_setting->lane_count);
+                               cur->lane_count = reduce_lane_count(cur->lane_count);
                } else {
                        return false;
                }
@@ -3642,17 +3631,17 @@ static bool decide_fallback_link_setting(
        }
        case LINK_TRAINING_EQ_FAIL_EQ:
        {
-               if (!reached_minimum_lane_count
-                               (current_link_setting->lane_count)) {
-                       current_link_setting->lane_count =
-                               reduce_lane_count(
-                                       current_link_setting->lane_count);
-               } else if (!reached_minimum_link_rate
-                               (current_link_setting->link_rate)) {
-                       current_link_setting->link_rate =
-                               reduce_link_rate(
-                                       current_link_setting->link_rate);
-                       current_link_setting->lane_count = initial_link_settings.lane_count;
+               if (!reached_minimum_lane_count(cur->lane_count)) {
+                       cur->lane_count = reduce_lane_count(cur->lane_count);
+               } else if (!reached_minimum_link_rate(cur->link_rate)) {
+                       cur->link_rate = reduce_link_rate(cur->link_rate);
+                       /* Reduce max link rate to avoid potential infinite loop.
+                        * Needed so that any subsequent CR_FAIL fallback can't
+                        * re-set the link rate higher than the link rate from
+                        * the latest EQ_FAIL fallback.
+                        */
+                       max->link_rate = cur->link_rate;
+                       cur->lane_count = max->lane_count;
                } else {
                        return false;
                }
@@ -3660,12 +3649,15 @@ static bool decide_fallback_link_setting(
        }
        case LINK_TRAINING_EQ_FAIL_CR:
        {
-               if (!reached_minimum_link_rate
-                               (current_link_setting->link_rate)) {
-                       current_link_setting->link_rate =
-                               reduce_link_rate(
-                                       current_link_setting->link_rate);
-                       current_link_setting->lane_count = initial_link_settings.lane_count;
+               if (!reached_minimum_link_rate(cur->link_rate)) {
+                       cur->link_rate = reduce_link_rate(cur->link_rate);
+                       /* Reduce max link rate to avoid potential infinite loop.
+                        * Needed so that any subsequent CR_FAIL fallback can't
+                        * re-set the link rate higher than the link rate from
+                        * the latest EQ_FAIL fallback.
+                        */
+                       max->link_rate = cur->link_rate;
+                       cur->lane_count = max->lane_count;
                } else {
                        return false;
                }
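
The repeated comment above is the heart of this refactor: decide_fallback_link_setting() now takes max by pointer so the EQ_FAIL branches can lower the ceiling as well as the current setting, and a later CR_FAIL, which resets the link rate to the maximum, can no longer climb back up and cycle forever. A toy simulation of the clamped walk (rate values are arbitrary, not DP link-rate codes):

#include <stdio.h>

/* An EQ failure lowers both the current and the maximum link rate, so a
 * CR failure that restarts from the maximum can never exceed the last
 * EQ fallback, and the walk must terminate at the minimum rate. */
int main(void)
{
        int max_rate = 4, cur_rate = 4; /* 1 is the minimum rate */
        int fallbacks = 0;

        while (cur_rate > 1) {
                cur_rate--;             /* EQ_FAIL: reduce link rate */
                max_rate = cur_rate;    /* clamp the ceiling too */
                cur_rate = max_rate;    /* CR_FAIL: retry from the new max */
                fallbacks++;
        }
        printf("settled at rate %d after %d fallbacks\n", cur_rate, fallbacks);
        return 0;
}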
index 3960c74..817028d 100644 (file)
@@ -47,7 +47,7 @@ struct aux_payload;
 struct set_config_cmd_payload;
 struct dmub_notification;
 
-#define DC_VER "3.2.186"
+#define DC_VER "3.2.187"
 
 #define MAX_SURFACES 3
 #define MAX_PLANES 6
@@ -416,6 +416,7 @@ struct dc_clocks {
        bool p_state_change_support;
        enum dcn_zstate_support_state zstate_support;
        bool dtbclk_en;
+       int ref_dtbclk_khz;
        enum dcn_pwr_state pwr_state;
        /*
         * Elements below are not compared for the purposes of
@@ -719,6 +720,8 @@ struct dc_debug_options {
        bool apply_vendor_specific_lttpr_wa;
        bool extended_blank_optimization;
        union aux_wake_wa_options aux_wake_wa;
+       /* use the boot-time value and disable DTB ref clock switching */
+       bool disable_dtb_ref_clk_switch;
        uint8_t psr_power_use_phy_fsm;
        enum dml_hostvm_override_opts dml_hostvm_override;
 };
index 287a106..bbc58d1 100644 (file)
@@ -513,12 +513,10 @@ void dccg31_set_physymclk(
 /* Controls the generation of pixel valid for OTG in (OTG -> HPO case) */
 static void dccg31_set_dtbclk_dto(
                struct dccg *dccg,
-               int dtbclk_inst,
-               int req_dtbclk_khz,
-               int num_odm_segments,
-               const struct dc_crtc_timing *timing)
+               struct dtbclk_dto_params *params)
 {
        struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
+       int req_dtbclk_khz = params->pixclk_khz;
        uint32_t dtbdto_div;
 
        /* Mode                 DTBDTO Rate       DTBCLK_DTO<x>_DIV Register
@@ -529,57 +527,53 @@ static void dccg31_set_dtbclk_dto(
         * DSC native 4:2:2     pixel rate/2      4
         * Other modes          pixel rate        8
         */
-       if (num_odm_segments == 4) {
+       if (params->num_odm_segments == 4) {
                dtbdto_div = 2;
-               req_dtbclk_khz = req_dtbclk_khz / 4;
-       } else if ((num_odm_segments == 2) ||
-                       (timing->pixel_encoding == PIXEL_ENCODING_YCBCR420) ||
-                       (timing->flags.DSC && timing->pixel_encoding == PIXEL_ENCODING_YCBCR422
-                                       && !timing->dsc_cfg.ycbcr422_simple)) {
+               req_dtbclk_khz = params->pixclk_khz / 4;
+       } else if ((params->num_odm_segments == 2) ||
+                       (params->timing->pixel_encoding == PIXEL_ENCODING_YCBCR420) ||
+                       (params->timing->flags.DSC && params->timing->pixel_encoding == PIXEL_ENCODING_YCBCR422
+                                       && !params->timing->dsc_cfg.ycbcr422_simple)) {
                dtbdto_div = 4;
-               req_dtbclk_khz = req_dtbclk_khz / 2;
+               req_dtbclk_khz = params->pixclk_khz / 2;
        } else
                dtbdto_div = 8;
 
-       if (dccg->ref_dtbclk_khz && req_dtbclk_khz) {
+       if (params->ref_dtbclk_khz && req_dtbclk_khz) {
                uint32_t modulo, phase;
 
                // phase / modulo = dtbclk / dtbclk ref
-               modulo = dccg->ref_dtbclk_khz * 1000;
-               phase = div_u64((((unsigned long long)modulo * req_dtbclk_khz) + dccg->ref_dtbclk_khz - 1),
-                       dccg->ref_dtbclk_khz);
+               modulo = params->ref_dtbclk_khz * 1000;
+               phase = div_u64((((unsigned long long)modulo * req_dtbclk_khz) + params->ref_dtbclk_khz - 1),
+                               params->ref_dtbclk_khz);
 
-               REG_UPDATE(OTG_PIXEL_RATE_CNTL[dtbclk_inst],
-                               DTBCLK_DTO_DIV[dtbclk_inst], dtbdto_div);
+               REG_UPDATE(OTG_PIXEL_RATE_CNTL[params->otg_inst],
+                               DTBCLK_DTO_DIV[params->otg_inst], dtbdto_div);
 
-               REG_WRITE(DTBCLK_DTO_MODULO[dtbclk_inst], modulo);
-               REG_WRITE(DTBCLK_DTO_PHASE[dtbclk_inst], phase);
+               REG_WRITE(DTBCLK_DTO_MODULO[params->otg_inst], modulo);
+               REG_WRITE(DTBCLK_DTO_PHASE[params->otg_inst], phase);
 
-               REG_UPDATE(OTG_PIXEL_RATE_CNTL[dtbclk_inst],
-                               DTBCLK_DTO_ENABLE[dtbclk_inst], 1);
+               REG_UPDATE(OTG_PIXEL_RATE_CNTL[params->otg_inst],
+                               DTBCLK_DTO_ENABLE[params->otg_inst], 1);
 
-               REG_WAIT(OTG_PIXEL_RATE_CNTL[dtbclk_inst],
-                               DTBCLKDTO_ENABLE_STATUS[dtbclk_inst], 1,
+               REG_WAIT(OTG_PIXEL_RATE_CNTL[params->otg_inst],
+                               DTBCLKDTO_ENABLE_STATUS[params->otg_inst], 1,
                                1, 100);
 
                /* The recommended programming sequence to enable the DTBCLK DTO to
                 * generate a valid pixel rate for the HPO DP stream encoder specifies
                 * that DTO source select be set only after the DTO is enabled.
                 */
-               REG_UPDATE(OTG_PIXEL_RATE_CNTL[dtbclk_inst],
-                               PIPE_DTO_SRC_SEL[dtbclk_inst], 1);
-
-               dccg->dtbclk_khz[dtbclk_inst] = req_dtbclk_khz;
+               REG_UPDATE(OTG_PIXEL_RATE_CNTL[params->otg_inst],
+                               PIPE_DTO_SRC_SEL[params->otg_inst], 1);
        } else {
-               REG_UPDATE_3(OTG_PIXEL_RATE_CNTL[dtbclk_inst],
-                               DTBCLK_DTO_ENABLE[dtbclk_inst], 0,
-                               PIPE_DTO_SRC_SEL[dtbclk_inst], 0,
-                               DTBCLK_DTO_DIV[dtbclk_inst], dtbdto_div);
+               REG_UPDATE_3(OTG_PIXEL_RATE_CNTL[params->otg_inst],
+                               DTBCLK_DTO_ENABLE[params->otg_inst], 0,
+                               PIPE_DTO_SRC_SEL[params->otg_inst], 0,
+                               DTBCLK_DTO_DIV[params->otg_inst], dtbdto_div);
 
-               REG_WRITE(DTBCLK_DTO_MODULO[dtbclk_inst], 0);
-               REG_WRITE(DTBCLK_DTO_PHASE[dtbclk_inst], 0);
-
-               dccg->dtbclk_khz[dtbclk_inst] = 0;
+               REG_WRITE(DTBCLK_DTO_MODULO[params->otg_inst], 0);
+               REG_WRITE(DTBCLK_DTO_PHASE[params->otg_inst], 0);
        }
 }
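
The DTO programming above reduces to fixed-point ratio math: the modulo is the reference DTBCLK in Hz, and the phase is the requested rate scaled against the reference with round-up division, so phase/modulo = dtbclk/ref. A standalone sketch of the arithmetic (compute_dto is an illustrative name; the 600 MHz reference matches the constructors earlier in this series):

#include <stdint.h>
#include <stdio.h>

static void compute_dto(uint32_t ref_dtbclk_khz, uint32_t req_dtbclk_khz,
                        uint32_t *phase, uint32_t *modulo)
{
        *modulo = ref_dtbclk_khz * 1000;        /* reference in Hz */
        /* round up so the generated clock never undershoots the request */
        *phase = (uint32_t)(((uint64_t)*modulo * req_dtbclk_khz +
                             ref_dtbclk_khz - 1) / ref_dtbclk_khz);
}

int main(void)
{
        uint32_t phase, modulo;

        compute_dto(600000, 297000, &phase, &modulo); /* 600 MHz ref, 297 MHz req */
        printf("modulo=%u phase=%u\n", modulo, phase);
        return 0;
}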
 
@@ -606,16 +600,12 @@ void dccg31_set_audio_dtbclk_dto(
 
                REG_UPDATE(DCCG_AUDIO_DTO_SOURCE,
                                DCCG_AUDIO_DTO_SEL, 4);  //  04 - DCCG_AUDIO_DTO_SEL_AUDIO_DTO_DTBCLK
-
-               dccg->audio_dtbclk_khz = req_audio_dtbclk_khz;
        } else {
                REG_WRITE(DCCG_AUDIO_DTBCLK_DTO_PHASE, 0);
                REG_WRITE(DCCG_AUDIO_DTBCLK_DTO_MODULO, 0);
 
                REG_UPDATE(DCCG_AUDIO_DTO_SOURCE,
                                DCCG_AUDIO_DTO_SEL, 3);  //  03 - DCCG_AUDIO_DTO_SEL_NO_AUDIO_DTO
-
-               dccg->audio_dtbclk_khz = 0;
        }
 }
 
index d94fd10..8b12b41 100644 (file)
@@ -230,9 +230,7 @@ static void enc31_hw_init(struct link_encoder *enc)
        AUX_RX_PHASE_DETECT_LEN,  [21,20] = 0x3 default is 3
        AUX_RX_DETECTION_THRESHOLD [30:28] = 1
 */
-       AUX_REG_WRITE(AUX_DPHY_RX_CONTROL0, 0x103d1110);
-
-       AUX_REG_WRITE(AUX_DPHY_TX_CONTROL, 0x21c7a);
+       // dmub will read AUX_DPHY_RX_CONTROL0/AUX_DPHY_TX_CONTROL from vbios table in dp_aux_init
 
        //AUX_DPHY_TX_REF_CONTROL'AUX_TX_REF_DIV HW default is 0x32;
        // Set AUX_TX_REF_DIV Divider to generate 2 MHz reference from refclk
index 789f756..d227367 100644 (file)
@@ -1284,10 +1284,8 @@ static bool is_dtbclk_required(struct dc *dc, struct dc_state *context)
        for (i = 0; i < dc->res_pool->pipe_count; i++) {
                if (!context->res_ctx.pipe_ctx[i].stream)
                        continue;
-#if defined (CONFIG_DRM_AMD_DC_DP2_0)
                if (is_dp_128b_132b_signal(&context->res_ctx.pipe_ctx[i]))
                        return true;
-#endif
        }
        return false;
 }
index 46ce5a0..b5570aa 100644 (file)
@@ -237,6 +237,7 @@ struct clk_mgr_funcs {
                        bool safe_to_lower);
 
        int (*get_dp_ref_clk_frequency)(struct clk_mgr *clk_mgr);
+       int (*get_dtb_ref_clk_frequency)(struct clk_mgr *clk_mgr);
 
        void (*set_low_power_state)(struct clk_mgr *clk_mgr);
 
index b2fa4de..c702191 100644 (file)
@@ -60,8 +60,17 @@ struct dccg {
        const struct dccg_funcs *funcs;
        int pipe_dppclk_khz[MAX_PIPES];
        int ref_dppclk;
-       int dtbclk_khz[MAX_PIPES];
-       int audio_dtbclk_khz;
+       //int dtbclk_khz[MAX_PIPES];/* TODO needs to be removed */
+       //int audio_dtbclk_khz;/* TODO needs to be removed */
+       int ref_dtbclk_khz;/* TODO needs to be removed */
+};
+
+struct dtbclk_dto_params {
+       const struct dc_crtc_timing *timing;
+       int otg_inst;
+       int pixclk_khz;
+       int req_audio_dtbclk_khz;
+       int num_odm_segments;
        int ref_dtbclk_khz;
 };
 
@@ -111,10 +120,7 @@ struct dccg_funcs {
 
        void (*set_dtbclk_dto)(
                        struct dccg *dccg,
-                       int dtbclk_inst,
-                       int req_dtbclk_khz,
-                       int num_odm_segments,
-                       const struct dc_crtc_timing *timing);
+                       struct dtbclk_dto_params *dto_params);
 
        void (*set_audio_dtbclk_dto)(
                        struct dccg *dccg,
index 87972dc..ea6cf8b 100644 (file)
@@ -27,6 +27,7 @@
 #include "core_types.h"
 #include "dccg.h"
 #include "dc_link_dp.h"
+#include "clk_mgr.h"
 
 static enum phyd32clk_clock_source get_phyd32clk_src(struct dc_link *link)
 {
@@ -106,14 +107,18 @@ static void setup_hpo_dp_stream_encoder(struct pipe_ctx *pipe_ctx)
        struct hpo_dp_link_encoder *link_enc = pipe_ctx->link_res.hpo_dp_link_enc;
        struct dccg *dccg = dc->res_pool->dccg;
        struct timing_generator *tg = pipe_ctx->stream_res.tg;
-       int odm_segment_count = get_odm_segment_count(pipe_ctx);
+       struct dtbclk_dto_params dto_params = {0};
        enum phyd32clk_clock_source phyd32clk = get_phyd32clk_src(pipe_ctx->stream->link);
 
+       dto_params.otg_inst = tg->inst;
+       dto_params.pixclk_khz = pipe_ctx->stream->phy_pix_clk;
+       dto_params.num_odm_segments = get_odm_segment_count(pipe_ctx);
+       dto_params.timing = &pipe_ctx->stream->timing;
+       dto_params.ref_dtbclk_khz = dc->clk_mgr->funcs->get_dtb_ref_clk_frequency(dc->clk_mgr);
+
        dccg->funcs->set_dpstreamclk(dccg, DTBCLK0, tg->inst);
        dccg->funcs->enable_symclk32_se(dccg, stream_enc->inst, phyd32clk);
-       dccg->funcs->set_dtbclk_dto(dccg, tg->inst, pipe_ctx->stream->phy_pix_clk,
-                       odm_segment_count,
-                       &pipe_ctx->stream->timing);
+       dccg->funcs->set_dtbclk_dto(dccg, &dto_params);
        stream_enc->funcs->enable_stream(stream_enc);
        stream_enc->funcs->map_stream_to_link(stream_enc, stream_enc->inst, link_enc->inst);
 }
@@ -124,9 +129,13 @@ static void reset_hpo_dp_stream_encoder(struct pipe_ctx *pipe_ctx)
        struct hpo_dp_stream_encoder *stream_enc = pipe_ctx->stream_res.hpo_dp_stream_enc;
        struct dccg *dccg = dc->res_pool->dccg;
        struct timing_generator *tg = pipe_ctx->stream_res.tg;
+       struct dtbclk_dto_params dto_params = {0};
+
+       dto_params.otg_inst = tg->inst;
+       dto_params.timing = &pipe_ctx->stream->timing;
 
        stream_enc->funcs->disable(stream_enc);
-       dccg->funcs->set_dtbclk_dto(dccg, tg->inst, 0, 0, &pipe_ctx->stream->timing);
+       dccg->funcs->set_dtbclk_dto(dccg, &dto_params);
        dccg->funcs->disable_symclk32_se(dccg, stream_enc->inst);
        dccg->funcs->set_dpstreamclk(dccg, REFCLK, tg->inst);
 }
index 7c9330a..c7bd7e2 100644 (file)
@@ -84,7 +84,7 @@ void dmub_dcn31_reset(struct dmub_srv *dmub)
 {
        union dmub_gpint_data_register cmd;
        const uint32_t timeout = 100;
-       uint32_t in_reset, scratch, i;
+       uint32_t in_reset, scratch, i, pwait_mode;
 
        REG_GET(DMCUB_CNTL2, DMCUB_SOFT_RESET, &in_reset);
 
@@ -115,6 +115,13 @@ void dmub_dcn31_reset(struct dmub_srv *dmub)
                        udelay(1);
                }
 
+               for (i = 0; i < timeout; ++i) {
+                       REG_GET(DMCUB_CNTL, DMCUB_PWAIT_MODE_STATUS, &pwait_mode);
+                       if (pwait_mode & (1 << 0))
+                               break;
+
+                       udelay(1);
+               }
                /* Force reset in case we timed out, DMCUB is likely hung. */
        }
 
@@ -125,6 +132,8 @@ void dmub_dcn31_reset(struct dmub_srv *dmub)
        REG_WRITE(DMCUB_INBOX1_WPTR, 0);
        REG_WRITE(DMCUB_OUTBOX1_RPTR, 0);
        REG_WRITE(DMCUB_OUTBOX1_WPTR, 0);
+       REG_WRITE(DMCUB_OUTBOX0_RPTR, 0);
+       REG_WRITE(DMCUB_OUTBOX0_WPTR, 0);
        REG_WRITE(DMCUB_SCRATCH0, 0);
 
        /* Clear the GPINT command manually so we don't send anything during boot. */
index 59ddc81..f6db6f8 100644 (file)
@@ -151,7 +151,8 @@ struct dmub_srv;
        DMUB_SF(DCN_VM_FB_OFFSET, FB_OFFSET) \
        DMUB_SF(DMCUB_INBOX0_WPTR, DMCUB_INBOX0_WPTR) \
        DMUB_SF(DMCUB_INTERRUPT_ENABLE, DMCUB_GPINT_IH_INT_EN) \
-       DMUB_SF(DMCUB_INTERRUPT_ACK, DMCUB_GPINT_IH_INT_ACK)
+       DMUB_SF(DMCUB_INTERRUPT_ACK, DMCUB_GPINT_IH_INT_ACK) \
+       DMUB_SF(DMCUB_CNTL, DMCUB_PWAIT_MODE_STATUS)
 
 struct dmub_srv_dcn31_reg_offset {
 #define DMUB_SR(reg) uint32_t reg;
index 73b9e0a..20a3d4e 100644 (file)
@@ -127,6 +127,8 @@ struct av_sync_data {
 static const uint8_t DP_SINK_DEVICE_STR_ID_1[] = {7, 1, 8, 7, 3, 0};
 static const uint8_t DP_SINK_DEVICE_STR_ID_2[] = {7, 1, 8, 7, 5, 0};
 
+static const u8 DP_SINK_BRANCH_DEV_NAME_7580[] = "7580\x80u";
+
 /*MST Dock*/
 static const uint8_t SYNAPTICS_DEVICE_ID[] = "SYNA";
 
index 247c6e9..1cb399d 100644 (file)
@@ -22,6 +22,7 @@
 #ifndef SMU_11_0_7_PPTABLE_H
 #define SMU_11_0_7_PPTABLE_H
 
+#pragma pack(push, 1)
 
 #define SMU_11_0_7_TABLE_FORMAT_REVISION                  15
 
@@ -139,7 +140,7 @@ struct smu_11_0_7_overdrive_table
     uint32_t max[SMU_11_0_7_MAX_ODSETTING];                   //default maximum settings
     uint32_t min[SMU_11_0_7_MAX_ODSETTING];                   //default minimum settings
     int16_t  pm_setting[SMU_11_0_7_MAX_PMSETTING];            //Optimized power mode feature settings
-} __attribute__((packed));
+};
 
 enum SMU_11_0_7_PPCLOCK_ID {
     SMU_11_0_7_PPCLOCK_GFXCLK = 0,
@@ -166,7 +167,7 @@ struct smu_11_0_7_power_saving_clock_table
     uint32_t count;                                           //power_saving_clock_count = SMU_11_0_7_PPCLOCK_COUNT
     uint32_t max[SMU_11_0_7_MAX_PPCLOCK];                       //PowerSavingClock Mode Clock Maximum array In MHz
     uint32_t min[SMU_11_0_7_MAX_PPCLOCK];                       //PowerSavingClock Mode Clock Minimum array In MHz
-} __attribute__((packed));
+};
 
 struct smu_11_0_7_powerplay_table
 {
@@ -191,6 +192,8 @@ struct smu_11_0_7_powerplay_table
       struct smu_11_0_7_overdrive_table               overdrive_table;
 
       PPTable_t smc_pptable;                        //PPTable_t in smu11_driver_if.h
-} __attribute__((packed));
+};
+
+#pragma pack(pop)
 
 #endif
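
The pattern in this and the following pptable headers replaces per-struct __attribute__((packed)) with a single #pragma pack(push, 1) / #pragma pack(pop) around the whole file, so nested structs such as the embedded PPTable_t are packed consistently, and taking pointers to their members no longer provokes unaligned-pointer warnings. A minimal demonstration of what the pragma changes (struct names invented):

#include <stdio.h>

#pragma pack(push, 1)
struct inner { char tag; int value; };          /* packed: sizeof == 5 */
struct outer { char pad; struct inner i; };     /* packed recursively: sizeof == 6 */
#pragma pack(pop)

struct natural { char tag; int value; };        /* default alignment: sizeof == 8 */

int main(void)
{
        printf("packed inner=%zu outer=%zu natural=%zu\n",
               sizeof(struct inner), sizeof(struct outer),
               sizeof(struct natural));
        return 0;
}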
index 7a63cf8..0116e3d 100644 (file)
@@ -22,6 +22,7 @@
 #ifndef SMU_11_0_PPTABLE_H
 #define SMU_11_0_PPTABLE_H
 
+#pragma pack(push, 1)
 
 #define SMU_11_0_TABLE_FORMAT_REVISION                  12
 
@@ -109,7 +110,7 @@ struct smu_11_0_overdrive_table
     uint8_t  cap[SMU_11_0_MAX_ODFEATURE];                     //OD feature support flags
     uint32_t max[SMU_11_0_MAX_ODSETTING];                     //default maximum settings
     uint32_t min[SMU_11_0_MAX_ODSETTING];                     //default minimum settings
-} __attribute__((packed));
+};
 
 enum SMU_11_0_PPCLOCK_ID {
     SMU_11_0_PPCLOCK_GFXCLK = 0,
@@ -133,7 +134,7 @@ struct smu_11_0_power_saving_clock_table
     uint32_t count;                                           //power_saving_clock_count = SMU_11_0_PPCLOCK_COUNT
     uint32_t max[SMU_11_0_MAX_PPCLOCK];                       //PowerSavingClock Mode Clock Maximum array In MHz
     uint32_t min[SMU_11_0_MAX_PPCLOCK];                       //PowerSavingClock Mode Clock Minimum array In MHz
-} __attribute__((packed));
+};
 
 struct smu_11_0_powerplay_table
 {
@@ -162,6 +163,8 @@ struct smu_11_0_powerplay_table
 #ifndef SMU_11_0_PARTIAL_PPTABLE
       PPTable_t smc_pptable;                        //PPTable_t in smu11_driver_if.h
 #endif
-} __attribute__((packed));
+};
+
+#pragma pack(pop)
 
 #endif
index 3f29f43..478862d 100644 (file)
@@ -22,6 +22,8 @@
 #ifndef SMU_13_0_7_PPTABLE_H
 #define SMU_13_0_7_PPTABLE_H
 
+#pragma pack(push, 1)
+
 #define SMU_13_0_7_TABLE_FORMAT_REVISION 15
 
 //// POWERPLAYTABLE::ulPlatformCaps
@@ -194,7 +196,8 @@ struct smu_13_0_7_powerplay_table
     struct smu_13_0_7_overdrive_table overdrive_table;
     uint8_t padding1;
     PPTable_t smc_pptable; //PPTable_t in driver_if.h
-} __attribute__((packed));
+};
 
+#pragma pack(pop)
 
 #endif
index 1f31139..0433074 100644 (file)
@@ -22,6 +22,8 @@
 #ifndef SMU_13_0_PPTABLE_H
 #define SMU_13_0_PPTABLE_H
 
+#pragma pack(push, 1)
+
 #define SMU_13_0_TABLE_FORMAT_REVISION                  1
 
 //// POWERPLAYTABLE::ulPlatformCaps
@@ -109,7 +111,7 @@ struct smu_13_0_overdrive_table {
        uint8_t  cap[SMU_13_0_MAX_ODFEATURE];                     //OD feature support flags
        uint32_t max[SMU_13_0_MAX_ODSETTING];                     //default maximum settings
        uint32_t min[SMU_13_0_MAX_ODSETTING];                     //default minimum settings
-} __attribute__((packed));
+};
 
 enum SMU_13_0_PPCLOCK_ID {
        SMU_13_0_PPCLOCK_GFXCLK = 0,
@@ -132,7 +134,7 @@ struct smu_13_0_power_saving_clock_table {
        uint32_t count;                                           //power_saving_clock_count = SMU_11_0_PPCLOCK_COUNT
        uint32_t max[SMU_13_0_MAX_PPCLOCK];                       //PowerSavingClock Mode Clock Maximum array In MHz
        uint32_t min[SMU_13_0_MAX_PPCLOCK];                       //PowerSavingClock Mode Clock Minimum array In MHz
-} __attribute__((packed));
+};
 
 struct smu_13_0_powerplay_table {
        struct atom_common_table_header header;
@@ -160,6 +162,8 @@ struct smu_13_0_powerplay_table {
 #ifndef SMU_13_0_PARTIAL_PPTABLE
        PPTable_t smc_pptable;                        //PPTable_t in driver_if.h
 #endif
-} __attribute__((packed));
+};
+
+#pragma pack(pop)
 
 #endif
index 9c8829f..f7863d6 100644 (file)
@@ -69,7 +69,7 @@ static void ipu_crtc_disable_planes(struct ipu_crtc *ipu_crtc,
        drm_atomic_crtc_state_for_each_plane(plane, old_crtc_state) {
                if (plane == &ipu_crtc->plane[0]->base)
                        disable_full = true;
-               if (&ipu_crtc->plane[1] && plane == &ipu_crtc->plane[1]->base)
+               if (ipu_crtc->plane[1] && plane == &ipu_crtc->plane[1]->base)
                        disable_partial = true;
        }
 
index b9bb94b..424ef47 100644 (file)
@@ -115,6 +115,18 @@ static unsigned int mwait_substates __initdata;
 #define flg2MWAIT(flags) (((flags) >> 24) & 0xFF)
 #define MWAIT2flg(eax) ((eax & 0xFF) << 24)
 
+static __always_inline int __intel_idle(struct cpuidle_device *dev,
+                                       struct cpuidle_driver *drv, int index)
+{
+       struct cpuidle_state *state = &drv->states[index];
+       unsigned long eax = flg2MWAIT(state->flags);
+       unsigned long ecx = 1; /* break on interrupt flag */
+
+       mwait_idle_with_hints(eax, ecx);
+
+       return index;
+}
+
 /**
  * intel_idle - Ask the processor to enter the given idle state.
  * @dev: cpuidle device of the target CPU.
@@ -132,16 +144,19 @@ static unsigned int mwait_substates __initdata;
 static __cpuidle int intel_idle(struct cpuidle_device *dev,
                                struct cpuidle_driver *drv, int index)
 {
-       struct cpuidle_state *state = &drv->states[index];
-       unsigned long eax = flg2MWAIT(state->flags);
-       unsigned long ecx = 1; /* break on interrupt flag */
+       return __intel_idle(dev, drv, index);
+}
 
-       if (state->flags & CPUIDLE_FLAG_IRQ_ENABLE)
-               local_irq_enable();
+static __cpuidle int intel_idle_irq(struct cpuidle_device *dev,
+                                   struct cpuidle_driver *drv, int index)
+{
+       int ret;
 
-       mwait_idle_with_hints(eax, ecx);
+       raw_local_irq_enable();
+       ret = __intel_idle(dev, drv, index);
+       raw_local_irq_disable();
 
-       return index;
+       return ret;
 }
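
The refactor above splits MWAIT entry into an __always_inline body shared by intel_idle() and a new intel_idle_irq() that brackets it with raw_local_irq_enable()/disable(); a later hunk selects the irq variant at driver init for states flagged CPUIDLE_FLAG_IRQ_ENABLE, instead of branching on every idle entry. The MWAIT hint itself lives in bits 24-31 of the state flags, as the two macros near the top of the file encode; they round-trip like this (standalone model):

#include <stdint.h>
#include <stdio.h>

#define MWAIT2flg(eax)   (((eax) & 0xFF) << 24)   /* hint -> state flags */
#define flg2MWAIT(flags) (((flags) >> 24) & 0xFF) /* state flags -> hint */

int main(void)
{
        uint32_t hint = 0x33;                   /* example C-state hint */
        uint32_t flags = MWAIT2flg(hint);

        printf("flags=%#x, hint back=%#x\n", flags, flg2MWAIT(flags));
        return 0;
}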
 
 /**
@@ -1801,6 +1816,9 @@ static void __init intel_idle_init_cstates_icpu(struct cpuidle_driver *drv)
                /* Structure copy. */
                drv->states[drv->state_count] = cpuidle_state_table[cstate];
 
+               if (cpuidle_state_table[cstate].flags & CPUIDLE_FLAG_IRQ_ENABLE)
+                       drv->states[drv->state_count].enter = intel_idle_irq;
+
                if ((disabled_states_mask & BIT(drv->state_count)) ||
                    ((icpu->use_acpi || force_use_acpi) &&
                     intel_idle_off_by_default(mwait_hint) &&
index 505a032..9dcf3f5 100644 (file)
@@ -402,6 +402,7 @@ config JOYSTICK_N64
 config JOYSTICK_SENSEHAT
        tristate "Raspberry Pi Sense HAT joystick"
        depends on INPUT && I2C
+       depends on HAS_IOMEM
        select MFD_SIMPLE_MFD_I2C
        help
          Say Y here if you want to enable the driver for the
index cbb1599..4804761 100644 (file)
@@ -85,13 +85,13 @@ static const struct dmi_system_id dmi_use_low_level_irq[] = {
        },
        {
                /*
-                * Lenovo Yoga Tab2 1051L, something messes with the home-button
+                * Lenovo Yoga Tab2 1051F/1051L, something messes with the home-button
                 * IRQ settings, leading to a non-working home-button.
                 */
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
                        DMI_MATCH(DMI_PRODUCT_NAME, "60073"),
-                       DMI_MATCH(DMI_PRODUCT_VERSION, "1051L"),
+                       DMI_MATCH(DMI_PRODUCT_VERSION, "1051"),
                },
        },
        {} /* Terminating entry */
index 59a1450..ca15061 100644 (file)
@@ -942,17 +942,22 @@ static int bcm5974_probe(struct usb_interface *iface,
        if (!dev->tp_data)
                goto err_free_bt_buffer;
 
-       if (dev->bt_urb)
+       if (dev->bt_urb) {
                usb_fill_int_urb(dev->bt_urb, udev,
                                 usb_rcvintpipe(udev, cfg->bt_ep),
                                 dev->bt_data, dev->cfg.bt_datalen,
                                 bcm5974_irq_button, dev, 1);
 
+               dev->bt_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
+       }
+
        usb_fill_int_urb(dev->tp_urb, udev,
                         usb_rcvintpipe(udev, cfg->tp_ep),
                         dev->tp_data, dev->cfg.tp_datalen,
                         bcm5974_irq_trackpad, dev, 1);
 
+       dev->tp_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
+
        /* create bcm5974 device */
        usb_make_path(udev, dev->phys, sizeof(dev->phys));
        strlcat(dev->phys, "/input0", sizeof(dev->phys));
index d21648a..54c0473 100644 (file)
@@ -33,6 +33,14 @@ struct dm_kobject_holder {
  * access their members!
  */
 
+/*
+ * For mempool pre-allocation at table load time.
+ */
+struct dm_md_mempools {
+       struct bio_set bs;
+       struct bio_set io_bs;
+};
+
 struct mapped_device {
        struct mutex suspend_lock;
 
@@ -110,8 +118,7 @@ struct mapped_device {
        /*
         * io objects are allocated from here.
         */
-       struct bio_set io_bs;
-       struct bio_set bs;
+       struct dm_md_mempools *mempools;
 
        /* kobject and completion */
        struct dm_kobject_holder kobj_holder;
index 6087cdc..a83b98a 100644 (file)
@@ -319,7 +319,7 @@ static int setup_clone(struct request *clone, struct request *rq,
 {
        int r;
 
-       r = blk_rq_prep_clone(clone, rq, &tio->md->bs, gfp_mask,
+       r = blk_rq_prep_clone(clone, rq, &tio->md->mempools->bs, gfp_mask,
                              dm_rq_bio_constructor, tio);
        if (r)
                return r;
index 0e833a1..bd539af 100644 (file)
@@ -1038,17 +1038,6 @@ static int dm_table_alloc_md_mempools(struct dm_table *t, struct mapped_device *
        return 0;
 }
 
-void dm_table_free_md_mempools(struct dm_table *t)
-{
-       dm_free_md_mempools(t->mempools);
-       t->mempools = NULL;
-}
-
-struct dm_md_mempools *dm_table_get_md_mempools(struct dm_table *t)
-{
-       return t->mempools;
-}
-
 static int setup_indexes(struct dm_table *t)
 {
        int i;
index dfb0a55..d8f1618 100644 (file)
@@ -136,14 +136,6 @@ static int get_swap_bios(void)
        return latch;
 }
 
-/*
- * For mempools pre-allocation at the table loading time.
- */
-struct dm_md_mempools {
-       struct bio_set bs;
-       struct bio_set io_bs;
-};
-
 struct table_device {
        struct list_head list;
        refcount_t count;
@@ -581,7 +573,7 @@ static struct dm_io *alloc_io(struct mapped_device *md, struct bio *bio)
        struct dm_target_io *tio;
        struct bio *clone;
 
-       clone = bio_alloc_clone(NULL, bio, GFP_NOIO, &md->io_bs);
+       clone = bio_alloc_clone(NULL, bio, GFP_NOIO, &md->mempools->io_bs);
        /* Set default bdev, but target must bio_set_dev() before issuing IO */
        clone->bi_bdev = md->disk->part0;
 
@@ -628,7 +620,8 @@ static struct bio *alloc_tio(struct clone_info *ci, struct dm_target *ti,
        } else {
                struct mapped_device *md = ci->io->md;
 
-               clone = bio_alloc_clone(NULL, ci->bio, gfp_mask, &md->bs);
+               clone = bio_alloc_clone(NULL, ci->bio, gfp_mask,
+                                       &md->mempools->bs);
                if (!clone)
                        return NULL;
                /* Set default bdev, but target must bio_set_dev() before issuing IO */
@@ -1023,23 +1016,19 @@ static void clone_endio(struct bio *bio)
        struct dm_io *io = tio->io;
        struct mapped_device *md = io->md;
 
-       if (likely(bio->bi_bdev != md->disk->part0)) {
-               struct request_queue *q = bdev_get_queue(bio->bi_bdev);
-
-               if (unlikely(error == BLK_STS_TARGET)) {
-                       if (bio_op(bio) == REQ_OP_DISCARD &&
-                           !bdev_max_discard_sectors(bio->bi_bdev))
-                               disable_discard(md);
-                       else if (bio_op(bio) == REQ_OP_WRITE_ZEROES &&
-                                !q->limits.max_write_zeroes_sectors)
-                               disable_write_zeroes(md);
-               }
-
-               if (static_branch_unlikely(&zoned_enabled) &&
-                   unlikely(blk_queue_is_zoned(q)))
-                       dm_zone_endio(io, bio);
+       if (unlikely(error == BLK_STS_TARGET)) {
+               if (bio_op(bio) == REQ_OP_DISCARD &&
+                   !bdev_max_discard_sectors(bio->bi_bdev))
+                       disable_discard(md);
+               else if (bio_op(bio) == REQ_OP_WRITE_ZEROES &&
+                        !bdev_write_zeroes_sectors(bio->bi_bdev))
+                       disable_write_zeroes(md);
        }
 
+       if (static_branch_unlikely(&zoned_enabled) &&
+           unlikely(blk_queue_is_zoned(bdev_get_queue(bio->bi_bdev))))
+               dm_zone_endio(io, bio);
+
        if (endio) {
                int r = endio(ti, bio, &error);
                switch (r) {
@@ -1876,8 +1865,7 @@ static void cleanup_mapped_device(struct mapped_device *md)
 {
        if (md->wq)
                destroy_workqueue(md->wq);
-       bioset_exit(&md->bs);
-       bioset_exit(&md->io_bs);
+       dm_free_md_mempools(md->mempools);
 
        if (md->dax_dev) {
                dax_remove_host(md->disk);
@@ -2049,48 +2037,6 @@ static void free_dev(struct mapped_device *md)
        kvfree(md);
 }
 
-static int __bind_mempools(struct mapped_device *md, struct dm_table *t)
-{
-       struct dm_md_mempools *p = dm_table_get_md_mempools(t);
-       int ret = 0;
-
-       if (dm_table_bio_based(t)) {
-               /*
-                * The md may already have mempools that need changing.
-                * If so, reload bioset because front_pad may have changed
-                * because a different table was loaded.
-                */
-               bioset_exit(&md->bs);
-               bioset_exit(&md->io_bs);
-
-       } else if (bioset_initialized(&md->bs)) {
-               /*
-                * There's no need to reload with request-based dm
-                * because the size of front_pad doesn't change.
-                * Note for future: If you are to reload bioset,
-                * prep-ed requests in the queue may refer
-                * to bio from the old bioset, so you must walk
-                * through the queue to unprep.
-                */
-               goto out;
-       }
-
-       BUG_ON(!p ||
-              bioset_initialized(&md->bs) ||
-              bioset_initialized(&md->io_bs));
-
-       ret = bioset_init_from_src(&md->bs, &p->bs);
-       if (ret)
-               goto out;
-       ret = bioset_init_from_src(&md->io_bs, &p->io_bs);
-       if (ret)
-               bioset_exit(&md->bs);
-out:
-       /* mempool bind completed, no longer need any mempools in the table */
-       dm_table_free_md_mempools(t);
-       return ret;
-}
-
 /*
  * Bind a table to the device.
  */
@@ -2144,12 +2090,28 @@ static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t,
                 * immutable singletons - used to optimize dm_mq_queue_rq.
                 */
                md->immutable_target = dm_table_get_immutable_target(t);
-       }
 
-       ret = __bind_mempools(md, t);
-       if (ret) {
-               old_map = ERR_PTR(ret);
-               goto out;
+               /*
+                * There is no need to reload with request-based dm because the
+                * size of front_pad doesn't change.
+                *
+                * Note for future: If you are to reload bioset, prep-ed
+                * requests in the queue may refer to bio from the old bioset,
+                * so you must walk through the queue to unprep.
+                */
+               if (!md->mempools) {
+                       md->mempools = t->mempools;
+                       t->mempools = NULL;
+               }
+       } else {
+               /*
+                * The md may already have mempools that need changing.
+                * If so, reload bioset because front_pad may have changed
+                * because a different table was loaded.
+                */
+               dm_free_md_mempools(md->mempools);
+               md->mempools = t->mempools;
+               t->mempools = NULL;
        }
 
        ret = dm_table_set_restrictions(t, md->queue, limits);
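
__bind() above replaces the old bioset-cloning helper with a plain ownership transfer: request-based tables let the device adopt the table's pools only if it has none yet, bio-based tables free the device's old pools first, and in both cases the table's pointer is cleared so teardown cannot free them twice. The transfer in isolation (the types are minimal stand-ins):

#include <stdio.h>
#include <stdlib.h>

struct pools  { int dummy; };
struct device { struct pools *mempools; };
struct table  { struct pools *mempools; };

/* Steal the table's pools: drop any old ones, take the pointer, and
 * NULL it in the table so later teardown can't double-free. */
static void bind_pools(struct device *md, struct table *t)
{
        free(md->mempools);
        md->mempools = t->mempools;
        t->mempools = NULL;
}

int main(void)
{
        struct device md = { 0 };
        struct table t = { .mempools = malloc(sizeof(struct pools)) };

        bind_pools(&md, &t);
        printf("device owns pools: %s\n", md.mempools ? "yes" : "no");
        free(md.mempools);
        return 0;
}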
index 3f89664..a8405ce 100644 (file)
@@ -71,8 +71,6 @@ struct dm_target *dm_table_get_immutable_target(struct dm_table *t);
 struct dm_target *dm_table_get_wildcard_target(struct dm_table *t);
 bool dm_table_bio_based(struct dm_table *t);
 bool dm_table_request_based(struct dm_table *t);
-void dm_table_free_md_mempools(struct dm_table *t);
-struct dm_md_mempools *dm_table_get_md_mempools(struct dm_table *t);
 
 void dm_lock_md_type(struct mapped_device *md);
 void dm_unlock_md_type(struct mapped_device *md);
index 1259ca2..f4a1281 100644 (file)
@@ -1499,8 +1499,7 @@ void mmc_blk_cqe_recovery(struct mmc_queue *mq)
        err = mmc_cqe_recovery(host);
        if (err)
                mmc_blk_reset(mq->blkdata, host, MMC_BLK_CQE_RECOVERY);
-       else
-               mmc_blk_reset_success(mq->blkdata, MMC_BLK_CQE_RECOVERY);
+       mmc_blk_reset_success(mq->blkdata, MMC_BLK_CQE_RECOVERY);
 
        pr_debug("%s: CQE recovery done\n", mmc_hostname(host));
 }
index 1499a64..f13c08d 100644 (file)
@@ -982,6 +982,9 @@ static int gl9763e_runtime_resume(struct sdhci_pci_chip *chip)
        struct sdhci_host *host = slot->host;
        u16 clock;
 
+       if (host->mmc->ios.power_mode != MMC_POWER_ON)
+               return 0;
+
        clock = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
 
        clock |= SDHCI_CLOCK_PLL_EN;
index ebee5f0..be2719a 100644 (file)
@@ -51,6 +51,7 @@ static char *status_str[] = {
 };
 
 static char *type_str[] = {
+       "", /* Type 0 is not defined */
        "AMT_MSG_DISCOVERY",
        "AMT_MSG_ADVERTISEMENT",
        "AMT_MSG_REQUEST",
@@ -2220,8 +2221,7 @@ static bool amt_advertisement_handler(struct amt_dev *amt, struct sk_buff *skb)
        struct amt_header_advertisement *amta;
        int hdr_size;
 
-       hdr_size = sizeof(*amta) - sizeof(struct amt_header);
-
+       hdr_size = sizeof(*amta) + sizeof(struct udphdr);
        if (!pskb_may_pull(skb, hdr_size))
                return true;
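
All of the amt fixes in this file follow one rule: size the pull to cover the full AMT header plus the UDP header, and call pskb_may_pull() before dereferencing any header field, so a truncated packet is dropped instead of read out of bounds. The guard reduces to a bounds check before parsing; a minimal userspace model (pull_hdr and the header layout are invented):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct hdr { uint8_t version; uint8_t type; uint16_t len; };

/* Copy a header out only if the buffer really contains one, mirroring
 * the pskb_may_pull()-before-access rule. */
static bool pull_hdr(const uint8_t *buf, size_t buflen, struct hdr *out)
{
        if (buflen < sizeof(*out))
                return false;   /* truncated packet: drop, don't parse */
        memcpy(out, buf, sizeof(*out));
        return true;
}

int main(void)
{
        uint8_t pkt[2] = { 0 }; /* deliberately shorter than the header */
        struct hdr h;

        printf("%s\n", pull_hdr(pkt, sizeof(pkt), &h)
               ? "parsed" : "rejected truncated packet");
        return 0;
}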
 
@@ -2251,19 +2251,27 @@ static bool amt_multicast_data_handler(struct amt_dev *amt, struct sk_buff *skb)
        struct ethhdr *eth;
        struct iphdr *iph;
 
+       hdr_size = sizeof(*amtmd) + sizeof(struct udphdr);
+       if (!pskb_may_pull(skb, hdr_size))
+               return true;
+
        amtmd = (struct amt_header_mcast_data *)(udp_hdr(skb) + 1);
        if (amtmd->reserved || amtmd->version)
                return true;
 
-       hdr_size = sizeof(*amtmd) + sizeof(struct udphdr);
        if (iptunnel_pull_header(skb, hdr_size, htons(ETH_P_IP), false))
                return true;
+
        skb_reset_network_header(skb);
        skb_push(skb, sizeof(*eth));
        skb_reset_mac_header(skb);
        skb_pull(skb, sizeof(*eth));
        eth = eth_hdr(skb);
+
+       if (!pskb_may_pull(skb, sizeof(*iph)))
+               return true;
        iph = ip_hdr(skb);
+
        if (iph->version == 4) {
                if (!ipv4_is_multicast(iph->daddr))
                        return true;
@@ -2274,6 +2282,9 @@ static bool amt_multicast_data_handler(struct amt_dev *amt, struct sk_buff *skb)
        } else if (iph->version == 6) {
                struct ipv6hdr *ip6h;
 
+               if (!pskb_may_pull(skb, sizeof(*ip6h)))
+                       return true;
+
                ip6h = ipv6_hdr(skb);
                if (!ipv6_addr_is_multicast(&ip6h->daddr))
                        return true;
@@ -2306,8 +2317,7 @@ static bool amt_membership_query_handler(struct amt_dev *amt,
        struct iphdr *iph;
        int hdr_size, len;
 
-       hdr_size = sizeof(*amtmq) - sizeof(struct amt_header);
-
+       hdr_size = sizeof(*amtmq) + sizeof(struct udphdr);
        if (!pskb_may_pull(skb, hdr_size))
                return true;
 
@@ -2315,22 +2325,27 @@ static bool amt_membership_query_handler(struct amt_dev *amt,
        if (amtmq->reserved || amtmq->version)
                return true;
 
-       hdr_size = sizeof(*amtmq) + sizeof(struct udphdr) - sizeof(*eth);
+       hdr_size -= sizeof(*eth);
        if (iptunnel_pull_header(skb, hdr_size, htons(ETH_P_TEB), false))
                return true;
+
        oeth = eth_hdr(skb);
        skb_reset_mac_header(skb);
        skb_pull(skb, sizeof(*eth));
        skb_reset_network_header(skb);
        eth = eth_hdr(skb);
+       if (!pskb_may_pull(skb, sizeof(*iph)))
+               return true;
+
        iph = ip_hdr(skb);
        if (iph->version == 4) {
-               if (!ipv4_is_multicast(iph->daddr))
-                       return true;
                if (!pskb_may_pull(skb, sizeof(*iph) + AMT_IPHDR_OPTS +
                                   sizeof(*ihv3)))
                        return true;
 
+               if (!ipv4_is_multicast(iph->daddr))
+                       return true;
+
                ihv3 = skb_pull(skb, sizeof(*iph) + AMT_IPHDR_OPTS);
                skb_reset_transport_header(skb);
                skb_push(skb, sizeof(*iph) + AMT_IPHDR_OPTS);
@@ -2345,15 +2360,17 @@ static bool amt_membership_query_handler(struct amt_dev *amt,
                ip_eth_mc_map(iph->daddr, eth->h_dest);
 #if IS_ENABLED(CONFIG_IPV6)
        } else if (iph->version == 6) {
-               struct ipv6hdr *ip6h = ipv6_hdr(skb);
                struct mld2_query *mld2q;
+               struct ipv6hdr *ip6h;
 
-               if (!ipv6_addr_is_multicast(&ip6h->daddr))
-                       return true;
                if (!pskb_may_pull(skb, sizeof(*ip6h) + AMT_IP6HDR_OPTS +
                                   sizeof(*mld2q)))
                        return true;
 
+               ip6h = ipv6_hdr(skb);
+               if (!ipv6_addr_is_multicast(&ip6h->daddr))
+                       return true;
+
                mld2q = skb_pull(skb, sizeof(*ip6h) + AMT_IP6HDR_OPTS);
                skb_reset_transport_header(skb);
                skb_push(skb, sizeof(*ip6h) + AMT_IP6HDR_OPTS);
@@ -2389,23 +2406,23 @@ static bool amt_update_handler(struct amt_dev *amt, struct sk_buff *skb)
 {
        struct amt_header_membership_update *amtmu;
        struct amt_tunnel_list *tunnel;
-       struct udphdr *udph;
        struct ethhdr *eth;
        struct iphdr *iph;
-       int len;
+       int len, hdr_size;
 
        iph = ip_hdr(skb);
-       udph = udp_hdr(skb);
 
-       if (__iptunnel_pull_header(skb, sizeof(*udph), skb->protocol,
-                                  false, false))
+       hdr_size = sizeof(*amtmu) + sizeof(struct udphdr);
+       if (!pskb_may_pull(skb, hdr_size))
                return true;
 
-       amtmu = (struct amt_header_membership_update *)skb->data;
+       amtmu = (struct amt_header_membership_update *)(udp_hdr(skb) + 1);
        if (amtmu->reserved || amtmu->version)
                return true;
 
-       skb_pull(skb, sizeof(*amtmu));
+       if (iptunnel_pull_header(skb, hdr_size, skb->protocol, false))
+               return true;
+
        skb_reset_network_header(skb);
 
        list_for_each_entry_rcu(tunnel, &amt->tunnel_list, list) {
@@ -2426,6 +2443,9 @@ static bool amt_update_handler(struct amt_dev *amt, struct sk_buff *skb)
        return true;
 
 report:
+       if (!pskb_may_pull(skb, sizeof(*iph)))
+               return true;
+
        iph = ip_hdr(skb);
        if (iph->version == 4) {
                if (ip_mc_check_igmp(skb)) {
@@ -2679,7 +2699,8 @@ static int amt_rcv(struct sock *sk, struct sk_buff *skb)
        amt = rcu_dereference_sk_user_data(sk);
        if (!amt) {
                err = true;
-               goto drop;
+               kfree_skb(skb);
+               goto out;
        }
 
        skb->dev = amt->dev;
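
Every hunk in this amt series applies the same rule: never dereference a network or tunnel header until pskb_may_pull() has confirmed that many bytes are linear, and take the header pointer only after the pull, since pskb_may_pull() may reallocate the skb head. A minimal sketch of the pattern, using the same drop-on-true convention as the handlers above (the function name is illustrative):

#include <linux/in.h>
#include <linux/ip.h>
#include <linux/skbuff.h>

/* Illustrative sketch: returns true to drop, as the amt handlers do. */
static bool example_check_v4_multicast(struct sk_buff *skb)
{
	/* Validate before touching the header ... */
	if (!pskb_may_pull(skb, sizeof(struct iphdr)))
		return true;

	/* ... and only then take the pointer, since the pull may have
	 * moved the packet data.
	 */
	return !ipv4_is_multicast(ip_hdr(skb)->daddr);
}
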
index 8af4def..e531b93 100644 (file)
@@ -2070,8 +2070,10 @@ static int gswip_gphy_fw_list(struct gswip_priv *priv,
        for_each_available_child_of_node(gphy_fw_list_np, gphy_fw_np) {
                err = gswip_gphy_fw_probe(priv, &priv->gphy_fw[i],
                                          gphy_fw_np, i);
-               if (err)
+               if (err) {
+                       of_node_put(gphy_fw_np);
                        goto remove_gphy;
+               }
                i++;
        }
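
The gswip fix is the standard OF iterator rule: for_each_available_child_of_node() holds a reference on the node it passes to the loop body and drops it only when advancing, so any early exit must drop the current child's reference itself. A sketch (example_probe_child() is hypothetical):

#include <linux/of.h>

static int example_probe_child(struct device_node *child); /* hypothetical */

static int example_probe_children(struct device_node *parent)
{
	struct device_node *child;
	int err;

	for_each_available_child_of_node(parent, child) {
		err = example_probe_child(child);
		if (err) {
			/* The iterator will not run again to drop this
			 * reference, so drop it before bailing out.
			 */
			of_node_put(child);
			return err;
		}
	}

	return 0;
}
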
 
index 7b37d45..d94150d 100644 (file)
@@ -50,22 +50,25 @@ static int mv88e6390_serdes_write(struct mv88e6xxx_chip *chip,
 }
 
 static int mv88e6xxx_serdes_pcs_get_state(struct mv88e6xxx_chip *chip,
-                                         u16 ctrl, u16 status, u16 lpa,
+                                         u16 bmsr, u16 lpa, u16 status,
                                          struct phylink_link_state *state)
 {
+       state->link = false;
+
+       /* If the BMSR reports that the link had failed, report this to
+        * phylink.
+        */
+       if (!(bmsr & BMSR_LSTATUS))
+               return 0;
+
        state->link = !!(status & MV88E6390_SGMII_PHY_STATUS_LINK);
+       state->an_complete = !!(bmsr & BMSR_ANEGCOMPLETE);
 
        if (status & MV88E6390_SGMII_PHY_STATUS_SPD_DPL_VALID) {
                /* The Speed and Duplex Resolved register is 1 if AN is enabled
                 * and complete, or if AN is disabled. So with disabled AN we
-                * still get here on link up. But we want to set an_complete
-                * only if AN was enabled, thus we look at BMCR_ANENABLE.
-                * (According to 802.3-2008 section 22.2.4.2.10, we should be
-                *  able to get this same value from BMSR_ANEGCAPABLE, but tests
-                *  show that these Marvell PHYs don't conform to this part of
-                *  the specificaion - BMSR_ANEGCAPABLE is simply always 1.)
+                * still get here on link up.
                 */
-               state->an_complete = !!(ctrl & BMCR_ANENABLE);
                state->duplex = status &
                                MV88E6390_SGMII_PHY_STATUS_DUPLEX_FULL ?
                                                 DUPLEX_FULL : DUPLEX_HALF;
@@ -191,12 +194,12 @@ int mv88e6352_serdes_pcs_config(struct mv88e6xxx_chip *chip, int port,
 int mv88e6352_serdes_pcs_get_state(struct mv88e6xxx_chip *chip, int port,
                                   int lane, struct phylink_link_state *state)
 {
-       u16 lpa, status, ctrl;
+       u16 bmsr, lpa, status;
        int err;
 
-       err = mv88e6352_serdes_read(chip, MII_BMCR, &ctrl);
+       err = mv88e6352_serdes_read(chip, MII_BMSR, &bmsr);
        if (err) {
-               dev_err(chip->dev, "can't read Serdes PHY control: %d\n", err);
+               dev_err(chip->dev, "can't read Serdes PHY BMSR: %d\n", err);
                return err;
        }
 
@@ -212,7 +215,7 @@ int mv88e6352_serdes_pcs_get_state(struct mv88e6xxx_chip *chip, int port,
                return err;
        }
 
-       return mv88e6xxx_serdes_pcs_get_state(chip, ctrl, status, lpa, state);
+       return mv88e6xxx_serdes_pcs_get_state(chip, bmsr, lpa, status, state);
 }
 
 int mv88e6352_serdes_pcs_an_restart(struct mv88e6xxx_chip *chip, int port,
@@ -918,13 +921,13 @@ int mv88e6390_serdes_pcs_config(struct mv88e6xxx_chip *chip, int port,
 static int mv88e6390_serdes_pcs_get_state_sgmii(struct mv88e6xxx_chip *chip,
        int port, int lane, struct phylink_link_state *state)
 {
-       u16 lpa, status, ctrl;
+       u16 bmsr, lpa, status;
        int err;
 
        err = mv88e6390_serdes_read(chip, lane, MDIO_MMD_PHYXS,
-                                   MV88E6390_SGMII_BMCR, &ctrl);
+                                   MV88E6390_SGMII_BMSR, &bmsr);
        if (err) {
-               dev_err(chip->dev, "can't read Serdes PHY control: %d\n", err);
+               dev_err(chip->dev, "can't read Serdes PHY BMSR: %d\n", err);
                return err;
        }
 
@@ -942,7 +945,7 @@ static int mv88e6390_serdes_pcs_get_state_sgmii(struct mv88e6xxx_chip *chip,
                return err;
        }
 
-       return mv88e6xxx_serdes_pcs_get_state(chip, ctrl, status, lpa, state);
+       return mv88e6xxx_serdes_pcs_get_state(chip, bmsr, lpa, status, state);
 }
 
 static int mv88e6390_serdes_pcs_get_state_10g(struct mv88e6xxx_chip *chip,
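
Both serdes hunks make the same substitution: read BMSR instead of BMCR. BMSR_LSTATUS is latched low, so a link failure since the previous read cannot be missed, and BMSR_ANEGCOMPLETE reports autoneg completion directly instead of inferring it from BMCR_ANENABLE. A sketch of the decode step, assuming the register value has already been read over MDIO:

#include <linux/mii.h>
#include <linux/phylink.h>

/* Sketch: derive link/AN state from an already-read BMSR value. */
static void example_decode_bmsr(u16 bmsr, struct phylink_link_state *state)
{
	state->link = false;

	/* Latched low: any link drop since the last read clears it. */
	if (!(bmsr & BMSR_LSTATUS))
		return;

	state->link = true;
	state->an_complete = !!(bmsr & BMSR_ANEGCOMPLETE);
}
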
index 3bb42a9..769f672 100644 (file)
@@ -955,35 +955,21 @@ static int rtl8365mb_ext_config_forcemode(struct realtek_priv *priv, int port,
        return 0;
 }
 
-static bool rtl8365mb_phy_mode_supported(struct dsa_switch *ds, int port,
-                                        phy_interface_t interface)
-{
-       int ext_int;
-
-       ext_int = rtl8365mb_extint_port_map[port];
-
-       if (ext_int < 0 &&
-           (interface == PHY_INTERFACE_MODE_NA ||
-            interface == PHY_INTERFACE_MODE_INTERNAL ||
-            interface == PHY_INTERFACE_MODE_GMII))
-               /* Internal PHY */
-               return true;
-       else if ((ext_int >= 1) &&
-                phy_interface_mode_is_rgmii(interface))
-               /* Extension MAC */
-               return true;
-
-       return false;
-}
-
 static void rtl8365mb_phylink_get_caps(struct dsa_switch *ds, int port,
                                       struct phylink_config *config)
 {
-       if (dsa_is_user_port(ds, port))
+       if (dsa_is_user_port(ds, port)) {
                __set_bit(PHY_INTERFACE_MODE_INTERNAL,
                          config->supported_interfaces);
-       else if (dsa_is_cpu_port(ds, port))
+
+               /* GMII is the default interface mode for phylib, so
+                * we have to support it for ports with integrated PHY.
+                */
+               __set_bit(PHY_INTERFACE_MODE_GMII,
+                         config->supported_interfaces);
+       } else if (dsa_is_cpu_port(ds, port)) {
                phy_interface_set_rgmii(config->supported_interfaces);
+       }
 
        config->mac_capabilities = MAC_SYM_PAUSE | MAC_ASYM_PAUSE |
                                   MAC_10 | MAC_100 | MAC_1000FD;
@@ -996,12 +982,6 @@ static void rtl8365mb_phylink_mac_config(struct dsa_switch *ds, int port,
        struct realtek_priv *priv = ds->priv;
        int ret;
 
-       if (!rtl8365mb_phy_mode_supported(ds, port, state->interface)) {
-               dev_err(priv->dev, "phy mode %s is unsupported on port %d\n",
-                       phy_modes(state->interface), port);
-               return;
-       }
-
        if (mode != MLO_AN_PHY && mode != MLO_AN_FIXED) {
                dev_err(priv->dev,
                        "port %d supports only conventional PHY or fixed-link\n",
index a381626..8c58285 100644 (file)
@@ -163,7 +163,8 @@ static int altera_tse_mdio_create(struct net_device *dev, unsigned int id)
        mdio = mdiobus_alloc();
        if (mdio == NULL) {
                netdev_err(dev, "Error allocating MDIO bus\n");
-               return -ENOMEM;
+               ret = -ENOMEM;
+               goto put_node;
        }
 
        mdio->name = ALTERA_TSE_RESOURCE_NAME;
@@ -180,6 +181,7 @@ static int altera_tse_mdio_create(struct net_device *dev, unsigned int id)
                           mdio->id);
                goto out_free_mdio;
        }
+       of_node_put(mdio_node);
 
        if (netif_msg_drv(priv))
                netdev_info(dev, "MDIO bus %s: created\n", mdio->id);
@@ -189,6 +191,8 @@ static int altera_tse_mdio_create(struct net_device *dev, unsigned int id)
 out_free_mdio:
        mdiobus_free(mdio);
        mdio = NULL;
+put_node:
+       of_node_put(mdio_node);
        return ret;
 }
 
index c6f0039..d5f2c69 100644 (file)
@@ -820,7 +820,7 @@ static int au1000_rx(struct net_device *dev)
                                pr_cont("\n");
                        }
                }
-               prxd->buff_stat = (u32)(pDB->dma_addr | RX_DMA_ENABLE);
+               prxd->buff_stat = lower_32_bits(pDB->dma_addr) | RX_DMA_ENABLE;
                aup->rx_head = (aup->rx_head + 1) & (NUM_RX_DMA - 1);
                wmb(); /* drain writebuffer */
 
@@ -996,7 +996,7 @@ static netdev_tx_t au1000_tx(struct sk_buff *skb, struct net_device *dev)
        ps->tx_packets++;
        ps->tx_bytes += ptxd->len;
 
-       ptxd->buff_stat = pDB->dma_addr | TX_DMA_ENABLE;
+       ptxd->buff_stat = lower_32_bits(pDB->dma_addr) | TX_DMA_ENABLE;
        wmb(); /* drain writebuffer */
        dev_kfree_skb(skb);
        aup->tx_head = (aup->tx_head + 1) & (NUM_TX_DMA - 1);
@@ -1131,9 +1131,9 @@ static int au1000_probe(struct platform_device *pdev)
        /* Allocate the data buffers
         * Snooping works fine with eth on all au1xxx
         */
-       aup->vaddr = (u32)dma_alloc_coherent(&pdev->dev, MAX_BUF_SIZE *
-                                         (NUM_TX_BUFFS + NUM_RX_BUFFS),
-                                         &aup->dma_addr, 0);
+       aup->vaddr = dma_alloc_coherent(&pdev->dev, MAX_BUF_SIZE *
+                                       (NUM_TX_BUFFS + NUM_RX_BUFFS),
+                                       &aup->dma_addr, 0);
        if (!aup->vaddr) {
                dev_err(&pdev->dev, "failed to allocate data buffers\n");
                err = -ENOMEM;
@@ -1234,8 +1234,8 @@ static int au1000_probe(struct platform_device *pdev)
        for (i = 0; i < (NUM_TX_BUFFS+NUM_RX_BUFFS); i++) {
                pDB->pnext = pDBfree;
                pDBfree = pDB;
-               pDB->vaddr = (u32 *)((unsigned)aup->vaddr + MAX_BUF_SIZE*i);
-               pDB->dma_addr = (dma_addr_t)virt_to_bus(pDB->vaddr);
+               pDB->vaddr = aup->vaddr + MAX_BUF_SIZE * i;
+               pDB->dma_addr = aup->dma_addr + MAX_BUF_SIZE * i;
                pDB++;
        }
        aup->pDBfree = pDBfree;
@@ -1246,7 +1246,7 @@ static int au1000_probe(struct platform_device *pdev)
                if (!pDB)
                        goto err_out;
 
-               aup->rx_dma_ring[i]->buff_stat = (unsigned)pDB->dma_addr;
+               aup->rx_dma_ring[i]->buff_stat = lower_32_bits(pDB->dma_addr);
                aup->rx_db_inuse[i] = pDB;
        }
 
@@ -1255,7 +1255,7 @@ static int au1000_probe(struct platform_device *pdev)
                if (!pDB)
                        goto err_out;
 
-               aup->tx_dma_ring[i]->buff_stat = (unsigned)pDB->dma_addr;
+               aup->tx_dma_ring[i]->buff_stat = lower_32_bits(pDB->dma_addr);
                aup->tx_dma_ring[i]->len = 0;
                aup->tx_db_inuse[i] = pDB;
        }
@@ -1310,7 +1310,7 @@ err_remap2:
        iounmap(aup->mac);
 err_remap1:
        dma_free_coherent(&pdev->dev, MAX_BUF_SIZE * (NUM_TX_BUFFS + NUM_RX_BUFFS),
-                       (void *)aup->vaddr, aup->dma_addr);
+                         aup->vaddr, aup->dma_addr);
 err_vaddr:
        free_netdev(dev);
 err_alloc:
@@ -1343,7 +1343,7 @@ static int au1000_remove(struct platform_device *pdev)
                        au1000_ReleaseDB(aup, aup->tx_db_inuse[i]);
 
        dma_free_coherent(&pdev->dev, MAX_BUF_SIZE * (NUM_TX_BUFFS + NUM_RX_BUFFS),
-                       (void *)aup->vaddr, aup->dma_addr);
+                         aup->vaddr, aup->dma_addr);
 
        iounmap(aup->macdma);
        iounmap(aup->mac);
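
The au1000 conversion stops casting dma_addr_t down to u32 (and stops using virt_to_bus() on a coherent mapping). dma_addr_t can be 64 bits wide even when the descriptor fields are 32, so the truncation must be explicit, and per-buffer handles must be computed as offsets from the dma_alloc_coherent() base rather than translated from CPU pointers. A sketch, assuming the device's DMA mask already guarantees 32-bit mappings:

#include <linux/kernel.h>
#include <linux/dma-mapping.h>

/* Sketch: fill a 32-bit descriptor word from a DMA handle. */
static inline u32 example_desc_word(dma_addr_t addr, u32 flags)
{
	return lower_32_bits(addr) | flags;
}

/* Sketch: the i-th buffer's DMA handle comes from the same coherent
 * allocation as its CPU pointer, offset in lockstep.
 */
static inline dma_addr_t example_buf_handle(dma_addr_t base,
					    size_t buf_size, unsigned int i)
{
	return base + buf_size * i;
}
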
index e3a3ed2..2489c2f 100644 (file)
@@ -106,8 +106,8 @@ struct au1000_private {
        struct mac_reg *mac;  /* mac registers                      */
        u32 *enable;     /* address of MAC Enable Register     */
        void __iomem *macdma;   /* base of MAC DMA port */
-       u32 vaddr;                /* virtual address of rx/tx buffers   */
-       dma_addr_t dma_addr;      /* dma address of rx/tx buffers       */
+       void *vaddr;            /* virtual address of rx/tx buffers   */
+       dma_addr_t dma_addr;    /* dma address of rx/tx buffers       */
 
        spinlock_t lock;       /* Serialise access to device */
 
index a359329..4d46780 100644 (file)
@@ -2784,7 +2784,7 @@ void xgbe_print_pkt(struct net_device *netdev, struct sk_buff *skb, bool tx_rx)
 
        netdev_dbg(netdev, "Dst MAC addr: %pM\n", eth->h_dest);
        netdev_dbg(netdev, "Src MAC addr: %pM\n", eth->h_source);
-       netdev_dbg(netdev, "Protocol: %#06hx\n", ntohs(eth->h_proto));
+       netdev_dbg(netdev, "Protocol: %#06x\n", ntohs(eth->h_proto));
 
        for (i = 0; i < skb->len; i += 32) {
                unsigned int len = min(skb->len - i, 32U);
index 086739e..9b83d53 100644 (file)
@@ -234,6 +234,7 @@ struct mii_bus *bcma_mdio_mii_register(struct bgmac *bgmac)
        np = of_get_child_by_name(core->dev.of_node, "mdio");
 
        err = of_mdiobus_register(mii_bus, np);
+       of_node_put(np);
        if (err) {
                dev_err(&core->dev, "Registration of mii bus failed\n");
                goto err_free_bus;
index 7f11c0a..d4e63f0 100644 (file)
@@ -1184,9 +1184,9 @@ static int ixgbe_update_vf_xcast_mode(struct ixgbe_adapter *adapter,
 
        switch (xcast_mode) {
        case IXGBEVF_XCAST_MODE_NONE:
-               disable = IXGBE_VMOLR_BAM | IXGBE_VMOLR_ROMPE |
+               disable = IXGBE_VMOLR_ROMPE |
                          IXGBE_VMOLR_MPE | IXGBE_VMOLR_UPE | IXGBE_VMOLR_VPE;
-               enable = 0;
+               enable = IXGBE_VMOLR_BAM;
                break;
        case IXGBEVF_XCAST_MODE_MULTI:
                disable = IXGBE_VMOLR_MPE | IXGBE_VMOLR_UPE | IXGBE_VMOLR_VPE;
@@ -1208,9 +1208,9 @@ static int ixgbe_update_vf_xcast_mode(struct ixgbe_adapter *adapter,
                        return -EPERM;
                }
 
-               disable = 0;
+               disable = IXGBE_VMOLR_VPE;
                enable = IXGBE_VMOLR_BAM | IXGBE_VMOLR_ROMPE |
-                        IXGBE_VMOLR_MPE | IXGBE_VMOLR_UPE | IXGBE_VMOLR_VPE;
+                        IXGBE_VMOLR_MPE | IXGBE_VMOLR_UPE;
                break;
        default:
                return -EOPNOTSUPP;
index b3b3c07..59c9a10 100644 (file)
@@ -899,6 +899,17 @@ static bool mtk_rx_get_desc(struct mtk_eth *eth, struct mtk_rx_dma_v2 *rxd,
        return true;
 }
 
+static void *mtk_max_lro_buf_alloc(gfp_t gfp_mask)
+{
+       unsigned int size = mtk_max_frag_size(MTK_MAX_LRO_RX_LENGTH);
+       unsigned long data;
+
+       data = __get_free_pages(gfp_mask | __GFP_COMP | __GFP_NOWARN,
+                               get_order(size));
+
+       return (void *)data;
+}
+
 /* the qdma core needs scratch memory to be setup */
 static int mtk_init_fq_dma(struct mtk_eth *eth)
 {
@@ -1467,7 +1478,10 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
                        goto release_desc;
 
                /* alloc new buffer */
-               new_data = napi_alloc_frag(ring->frag_size);
+               if (ring->frag_size <= PAGE_SIZE)
+                       new_data = napi_alloc_frag(ring->frag_size);
+               else
+                       new_data = mtk_max_lro_buf_alloc(GFP_ATOMIC);
                if (unlikely(!new_data)) {
                        netdev->stats.rx_dropped++;
                        goto release_desc;
@@ -1914,7 +1928,10 @@ static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag)
                return -ENOMEM;
 
        for (i = 0; i < rx_dma_size; i++) {
-               ring->data[i] = netdev_alloc_frag(ring->frag_size);
+               if (ring->frag_size <= PAGE_SIZE)
+                       ring->data[i] = netdev_alloc_frag(ring->frag_size);
+               else
+                       ring->data[i] = mtk_max_lro_buf_alloc(GFP_KERNEL);
                if (!ring->data[i])
                        return -ENOMEM;
        }
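
napi_alloc_frag() draws from a per-CPU page-fragment cache and cannot serve requests larger than a page, so the LRO path above adds a page-allocator fallback for jumbo receive buffers. A sketch of the split:

#include <linux/gfp.h>
#include <linux/skbuff.h>

/* Sketch: pick an allocator based on fragment size. The fallback
 * mirrors mtk_max_lro_buf_alloc(): a compound, no-warn, order-sized
 * allocation. napi_alloc_frag() itself must run in softirq context.
 */
static void *example_rx_buf_alloc(unsigned int frag_size, gfp_t gfp)
{
	if (frag_size <= PAGE_SIZE)
		return napi_alloc_frag(frag_size);

	return (void *)__get_free_pages(gfp | __GFP_COMP | __GFP_NOWARN,
					get_order(frag_size));
}
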
index ed5038d..6400a82 100644 (file)
@@ -2110,7 +2110,7 @@ static int mlx4_en_get_module_eeprom(struct net_device *dev,
                        en_err(priv,
                               "mlx4_get_module_info i(%d) offset(%d) bytes_to_read(%d) - FAILED (0x%x)\n",
                               i, offset, ee->len - i, ret);
-                       return 0;
+                       return ret;
                }
 
                i += ret;
index 0eb9d74..50422b5 100644 (file)
@@ -579,17 +579,6 @@ static void *pci_get_other_drvdata(struct device *this, struct device *other)
        return pci_get_drvdata(to_pci_dev(other));
 }
 
-static int next_phys_dev(struct device *dev, const void *data)
-{
-       struct mlx5_core_dev *mdev, *this = (struct mlx5_core_dev *)data;
-
-       mdev = pci_get_other_drvdata(this->device, dev);
-       if (!mdev)
-               return 0;
-
-       return _next_phys_dev(mdev, data);
-}
-
 static int next_phys_dev_lag(struct device *dev, const void *data)
 {
        struct mlx5_core_dev *mdev, *this = (struct mlx5_core_dev *)data;
@@ -623,13 +612,6 @@ static struct mlx5_core_dev *mlx5_get_next_dev(struct mlx5_core_dev *dev,
        return pci_get_drvdata(to_pci_dev(next));
 }
 
-/* Must be called with intf_mutex held */
-struct mlx5_core_dev *mlx5_get_next_phys_dev(struct mlx5_core_dev *dev)
-{
-       lockdep_assert_held(&mlx5_intf_mutex);
-       return mlx5_get_next_dev(dev, &next_phys_dev);
-}
-
 /* Must be called with intf_mutex held */
 struct mlx5_core_dev *mlx5_get_next_phys_dev_lag(struct mlx5_core_dev *dev)
 {
index eae9aa9..978a2bb 100644 (file)
@@ -675,6 +675,9 @@ static void mlx5_fw_tracer_handle_traces(struct work_struct *work)
        if (!tracer->owner)
                return;
 
+       if (unlikely(!tracer->str_db.loaded))
+               goto arm;
+
        block_count = tracer->buff.size / TRACER_BLOCK_SIZE_BYTE;
        start_offset = tracer->buff.consumer_index * TRACER_BLOCK_SIZE_BYTE;
 
@@ -732,6 +735,7 @@ static void mlx5_fw_tracer_handle_traces(struct work_struct *work)
                                                      &tmp_trace_block[TRACES_PER_BLOCK - 1]);
        }
 
+arm:
        mlx5_fw_tracer_arm(dev);
 }
 
@@ -1136,8 +1140,7 @@ static int fw_tracer_event(struct notifier_block *nb, unsigned long action, void
                queue_work(tracer->work_queue, &tracer->ownership_change_work);
                break;
        case MLX5_TRACER_SUBTYPE_TRACES_AVAILABLE:
-               if (likely(tracer->str_db.loaded))
-                       queue_work(tracer->work_queue, &tracer->handle_traces_work);
+               queue_work(tracer->work_queue, &tracer->handle_traces_work);
                break;
        default:
                mlx5_core_dbg(dev, "FWTracer: Event with unrecognized subtype: sub_type %d\n",
index 6836448..3c1edfa 100644 (file)
@@ -565,7 +565,8 @@ static void mlx5e_build_rx_cq_param(struct mlx5_core_dev *mdev,
 static u8 rq_end_pad_mode(struct mlx5_core_dev *mdev, struct mlx5e_params *params)
 {
        bool lro_en = params->packet_merge.type == MLX5E_PACKET_MERGE_LRO;
-       bool ro = MLX5_CAP_GEN(mdev, relaxed_ordering_write);
+       bool ro = pcie_relaxed_ordering_enabled(mdev->pdev) &&
+               MLX5_CAP_GEN(mdev, relaxed_ordering_write);
 
        return ro && lro_en ?
                MLX5_WQ_END_PAD_MODE_NONE : MLX5_WQ_END_PAD_MODE_ALIGN;
index 43a536c..c0f409c 100644 (file)
 
 void mlx5e_mkey_set_relaxed_ordering(struct mlx5_core_dev *mdev, void *mkc)
 {
+       bool ro_pci_enable = pcie_relaxed_ordering_enabled(mdev->pdev);
        bool ro_write = MLX5_CAP_GEN(mdev, relaxed_ordering_write);
        bool ro_read = MLX5_CAP_GEN(mdev, relaxed_ordering_read);
 
-       MLX5_SET(mkc, mkc, relaxed_ordering_read, ro_read);
-       MLX5_SET(mkc, mkc, relaxed_ordering_write, ro_write);
+       MLX5_SET(mkc, mkc, relaxed_ordering_read, ro_pci_enable && ro_read);
+       MLX5_SET(mkc, mkc, relaxed_ordering_write, ro_pci_enable && ro_write);
 }
 
 static int mlx5e_create_mkey(struct mlx5_core_dev *mdev, u32 pdn,
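
Both mlx5 relaxed-ordering hunks encode the same rule: the firmware capability bit is only half the story, because relaxed ordering must also be enabled on the PCIe link itself, so every consumer ANDs the cap with pcie_relaxed_ordering_enabled(). A sketch (the boolean parameter stands in for the MLX5_CAP_GEN() query):

#include <linux/pci.h>

/* Sketch: use relaxed ordering only when both the PCIe fabric and
 * the device capability allow it.
 */
static bool example_use_relaxed_ordering(struct pci_dev *pdev,
					 bool cap_ro_write)
{
	return pcie_relaxed_ordering_enabled(pdev) && cap_ro_write;
}
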
index eb90e79..f797fd9 100644 (file)
@@ -950,6 +950,13 @@ err_event_reg:
        return err;
 }
 
+static void mlx5e_cleanup_uplink_rep_tx(struct mlx5e_rep_priv *rpriv)
+{
+       mlx5e_rep_tc_netdevice_event_unregister(rpriv);
+       mlx5e_rep_bond_cleanup(rpriv);
+       mlx5e_rep_tc_cleanup(rpriv);
+}
+
 static int mlx5e_init_rep_tx(struct mlx5e_priv *priv)
 {
        struct mlx5e_rep_priv *rpriv = priv->ppriv;
@@ -961,42 +968,36 @@ static int mlx5e_init_rep_tx(struct mlx5e_priv *priv)
                return err;
        }
 
-       err = mlx5e_tc_ht_init(&rpriv->tc_ht);
-       if (err)
-               goto err_ht_init;
-
        if (rpriv->rep->vport == MLX5_VPORT_UPLINK) {
                err = mlx5e_init_uplink_rep_tx(rpriv);
                if (err)
                        goto err_init_tx;
        }
 
+       err = mlx5e_tc_ht_init(&rpriv->tc_ht);
+       if (err)
+               goto err_ht_init;
+
        return 0;
 
-err_init_tx:
-       mlx5e_tc_ht_cleanup(&rpriv->tc_ht);
 err_ht_init:
+       if (rpriv->rep->vport == MLX5_VPORT_UPLINK)
+               mlx5e_cleanup_uplink_rep_tx(rpriv);
+err_init_tx:
        mlx5e_destroy_tises(priv);
        return err;
 }
 
-static void mlx5e_cleanup_uplink_rep_tx(struct mlx5e_rep_priv *rpriv)
-{
-       mlx5e_rep_tc_netdevice_event_unregister(rpriv);
-       mlx5e_rep_bond_cleanup(rpriv);
-       mlx5e_rep_tc_cleanup(rpriv);
-}
-
 static void mlx5e_cleanup_rep_tx(struct mlx5e_priv *priv)
 {
        struct mlx5e_rep_priv *rpriv = priv->ppriv;
 
-       mlx5e_destroy_tises(priv);
+       mlx5e_tc_ht_cleanup(&rpriv->tc_ht);
 
        if (rpriv->rep->vport == MLX5_VPORT_UPLINK)
                mlx5e_cleanup_uplink_rep_tx(rpriv);
 
-       mlx5e_tc_ht_cleanup(&rpriv->tc_ht);
+       mlx5e_destroy_tises(priv);
 }
 
 static void mlx5e_rep_enable(struct mlx5e_priv *priv)
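
The rep-TX reordering restores the invariant that teardown is the exact mirror of setup: the TC hash table is now initialized last, so it is destroyed first, and the error labels unwind only what has actually succeeded, in reverse order. The general shape, sketched with hypothetical step functions:

static int step_a_init(void);		/* hypothetical */
static int step_b_init(void);
static int step_c_init(void);
static void step_a_cleanup(void);
static void step_b_cleanup(void);

static int example_init(void)
{
	int err;

	err = step_a_init();
	if (err)
		return err;

	err = step_b_init();
	if (err)
		goto err_b;

	err = step_c_init();
	if (err)
		goto err_c;

	return 0;

err_c:
	step_b_cleanup();
err_b:
	step_a_cleanup();
	return err;
}

The matching example_cleanup() on the success path would call the step cleanups in c, b, a order, which is precisely the relationship the hunk restores between mlx5e_init_rep_tx() and mlx5e_cleanup_rep_tx().
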
index 217cac2..2ce3728 100644 (file)
@@ -2690,9 +2690,6 @@ static int mlx5_esw_offloads_devcom_event(int event,
 
        switch (event) {
        case ESW_OFFLOADS_DEVCOM_PAIR:
-               if (mlx5_get_next_phys_dev(esw->dev) != peer_esw->dev)
-                       break;
-
                if (mlx5_eswitch_vport_match_metadata_enabled(esw) !=
                    mlx5_eswitch_vport_match_metadata_enabled(peer_esw))
                        break;
@@ -2744,6 +2741,9 @@ static void esw_offloads_devcom_init(struct mlx5_eswitch *esw)
        if (!MLX5_CAP_ESW(esw->dev, merged_eswitch))
                return;
 
+       if (!mlx5_is_lag_supported(esw->dev))
+               return;
+
        mlx5_devcom_register_component(devcom,
                                       MLX5_DEVCOM_ESW_OFFLOADS,
                                       mlx5_esw_offloads_devcom_event,
@@ -2761,6 +2761,9 @@ static void esw_offloads_devcom_cleanup(struct mlx5_eswitch *esw)
        if (!MLX5_CAP_ESW(esw->dev, merged_eswitch))
                return;
 
+       if (!mlx5_is_lag_supported(esw->dev))
+               return;
+
        mlx5_devcom_send_event(devcom, MLX5_DEVCOM_ESW_OFFLOADS,
                               ESW_OFFLOADS_DEVCOM_UNPAIR, esw);
 
index fdcf7f5..21e5c70 100644 (file)
@@ -1574,9 +1574,22 @@ static struct mlx5_flow_rule *find_flow_rule(struct fs_fte *fte,
        return NULL;
 }
 
-static bool check_conflicting_actions(u32 action1, u32 action2)
+static bool check_conflicting_actions_vlan(const struct mlx5_fs_vlan *vlan0,
+                                          const struct mlx5_fs_vlan *vlan1)
 {
-       u32 xored_actions = action1 ^ action2;
+       return vlan0->ethtype != vlan1->ethtype ||
+              vlan0->vid != vlan1->vid ||
+              vlan0->prio != vlan1->prio;
+}
+
+static bool check_conflicting_actions(const struct mlx5_flow_act *act1,
+                                     const struct mlx5_flow_act *act2)
+{
+       u32 action1 = act1->action;
+       u32 action2 = act2->action;
+       u32 xored_actions;
+
+       xored_actions = action1 ^ action2;
 
        /* if one rule only wants to count, it's ok */
        if (action1 == MLX5_FLOW_CONTEXT_ACTION_COUNT ||
@@ -1593,6 +1606,22 @@ static bool check_conflicting_actions(u32 action1, u32 action2)
                             MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2))
                return true;
 
+       if (action1 & MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT &&
+           act1->pkt_reformat != act2->pkt_reformat)
+               return true;
+
+       if (action1 & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR &&
+           act1->modify_hdr != act2->modify_hdr)
+               return true;
+
+       if (action1 & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH &&
+           check_conflicting_actions_vlan(&act1->vlan[0], &act2->vlan[0]))
+               return true;
+
+       if (action1 & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2 &&
+           check_conflicting_actions_vlan(&act1->vlan[1], &act2->vlan[1]))
+               return true;
+
        return false;
 }
 
@@ -1600,7 +1629,7 @@ static int check_conflicting_ftes(struct fs_fte *fte,
                                  const struct mlx5_flow_context *flow_context,
                                  const struct mlx5_flow_act *flow_act)
 {
-       if (check_conflicting_actions(flow_act->action, fte->action.action)) {
+       if (check_conflicting_actions(flow_act, &fte->action)) {
                mlx5_core_warn(get_dev(&fte->node),
                               "Found two FTEs with conflicting actions\n");
                return -EEXIST;
index 552b6e2..2a8fc54 100644 (file)
@@ -783,7 +783,7 @@ static void mlx5_do_bond(struct mlx5_lag *ldev)
 {
        struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
        struct mlx5_core_dev *dev1 = ldev->pf[MLX5_LAG_P2].dev;
-       struct lag_tracker tracker;
+       struct lag_tracker tracker = { };
        bool do_bond, roce_lag;
        int err;
        int i;
index 72f70fa..c81b173 100644 (file)
@@ -74,6 +74,16 @@ struct mlx5_lag {
        struct lag_mpesw          lag_mpesw;
 };
 
+static inline bool mlx5_is_lag_supported(struct mlx5_core_dev *dev)
+{
+       if (!MLX5_CAP_GEN(dev, vport_group_manager) ||
+           !MLX5_CAP_GEN(dev, lag_master) ||
+           MLX5_CAP_GEN(dev, num_lag_ports) < 2 ||
+           MLX5_CAP_GEN(dev, num_lag_ports) > MLX5_MAX_PORTS)
+               return false;
+       return true;
+}
+
 static inline struct mlx5_lag *
 mlx5_lag_dev(struct mlx5_core_dev *dev)
 {
index 484cb1e..9cc7afe 100644 (file)
@@ -209,7 +209,6 @@ int mlx5_attach_device(struct mlx5_core_dev *dev);
 void mlx5_detach_device(struct mlx5_core_dev *dev);
 int mlx5_register_device(struct mlx5_core_dev *dev);
 void mlx5_unregister_device(struct mlx5_core_dev *dev);
-struct mlx5_core_dev *mlx5_get_next_phys_dev(struct mlx5_core_dev *dev);
 struct mlx5_core_dev *mlx5_get_next_phys_dev_lag(struct mlx5_core_dev *dev);
 void mlx5_dev_list_lock(void);
 void mlx5_dev_list_unlock(void);
index 443a5d6..7c31a46 100644 (file)
@@ -507,6 +507,11 @@ nfp_fl_calc_key_layers_sz(struct nfp_fl_key_ls in_key_ls, uint16_t *map)
                key_size += sizeof(struct nfp_flower_ipv6);
        }
 
+       if (in_key_ls.key_layer_two & NFP_FLOWER_LAYER2_QINQ) {
+               map[FLOW_PAY_QINQ] = key_size;
+               key_size += sizeof(struct nfp_flower_vlan);
+       }
+
        if (in_key_ls.key_layer_two & NFP_FLOWER_LAYER2_GRE) {
                map[FLOW_PAY_GRE] = key_size;
                if (in_key_ls.key_layer_two & NFP_FLOWER_LAYER2_TUN_IPV6)
@@ -515,11 +520,6 @@ nfp_fl_calc_key_layers_sz(struct nfp_fl_key_ls in_key_ls, uint16_t *map)
                        key_size += sizeof(struct nfp_flower_ipv4_gre_tun);
        }
 
-       if (in_key_ls.key_layer_two & NFP_FLOWER_LAYER2_QINQ) {
-               map[FLOW_PAY_QINQ] = key_size;
-               key_size += sizeof(struct nfp_flower_vlan);
-       }
-
        if ((in_key_ls.key_layer & NFP_FLOWER_LAYER_VXLAN) ||
            (in_key_ls.key_layer_two & NFP_FLOWER_LAYER2_GENEVE)) {
                map[FLOW_PAY_UDP_TUN] = key_size;
@@ -758,6 +758,17 @@ static int nfp_fl_ct_add_offload(struct nfp_fl_nft_tc_merge *m_entry)
                }
        }
 
+       if (NFP_FLOWER_LAYER2_QINQ & key_layer.key_layer_two) {
+               offset = key_map[FLOW_PAY_QINQ];
+               key = kdata + offset;
+               msk = mdata + offset;
+               for (i = 0; i < _CT_TYPE_MAX; i++) {
+                       nfp_flower_compile_vlan((struct nfp_flower_vlan *)key,
+                                               (struct nfp_flower_vlan *)msk,
+                                               rules[i]);
+               }
+       }
+
        if (key_layer.key_layer_two & NFP_FLOWER_LAYER2_GRE) {
                offset = key_map[FLOW_PAY_GRE];
                key = kdata + offset;
@@ -798,17 +809,6 @@ static int nfp_fl_ct_add_offload(struct nfp_fl_nft_tc_merge *m_entry)
                }
        }
 
-       if (NFP_FLOWER_LAYER2_QINQ & key_layer.key_layer_two) {
-               offset = key_map[FLOW_PAY_QINQ];
-               key = kdata + offset;
-               msk = mdata + offset;
-               for (i = 0; i < _CT_TYPE_MAX; i++) {
-                       nfp_flower_compile_vlan((struct nfp_flower_vlan *)key,
-                                               (struct nfp_flower_vlan *)msk,
-                                               rules[i]);
-               }
-       }
-
        if (key_layer.key_layer & NFP_FLOWER_LAYER_VXLAN ||
            key_layer.key_layer_two & NFP_FLOWER_LAYER2_GENEVE) {
                offset = key_map[FLOW_PAY_UDP_TUN];
index 193a167..e014301 100644 (file)
@@ -625,6 +625,14 @@ int nfp_flower_compile_flow_match(struct nfp_app *app,
                msk += sizeof(struct nfp_flower_ipv6);
        }
 
+       if (NFP_FLOWER_LAYER2_QINQ & key_ls->key_layer_two) {
+               nfp_flower_compile_vlan((struct nfp_flower_vlan *)ext,
+                                       (struct nfp_flower_vlan *)msk,
+                                       rule);
+               ext += sizeof(struct nfp_flower_vlan);
+               msk += sizeof(struct nfp_flower_vlan);
+       }
+
        if (key_ls->key_layer_two & NFP_FLOWER_LAYER2_GRE) {
                if (key_ls->key_layer_two & NFP_FLOWER_LAYER2_TUN_IPV6) {
                        struct nfp_flower_ipv6_gre_tun *gre_match;
@@ -660,14 +668,6 @@ int nfp_flower_compile_flow_match(struct nfp_app *app,
                }
        }
 
-       if (NFP_FLOWER_LAYER2_QINQ & key_ls->key_layer_two) {
-               nfp_flower_compile_vlan((struct nfp_flower_vlan *)ext,
-                                       (struct nfp_flower_vlan *)msk,
-                                       rule);
-               ext += sizeof(struct nfp_flower_vlan);
-               msk += sizeof(struct nfp_flower_vlan);
-       }
-
        if (key_ls->key_layer & NFP_FLOWER_LAYER_VXLAN ||
            key_ls->key_layer_two & NFP_FLOWER_LAYER2_GENEVE) {
                if (key_ls->key_layer_two & NFP_FLOWER_LAYER2_TUN_IPV6) {
index 54af309..6eeeb0f 100644 (file)
@@ -15,7 +15,7 @@
 #include "nfp_net_sriov.h"
 
 static int
-nfp_net_sriov_check(struct nfp_app *app, int vf, u16 cap, const char *msg)
+nfp_net_sriov_check(struct nfp_app *app, int vf, u16 cap, const char *msg, bool warn)
 {
        u16 cap_vf;
 
@@ -24,12 +24,14 @@ nfp_net_sriov_check(struct nfp_app *app, int vf, u16 cap, const char *msg)
 
        cap_vf = readw(app->pf->vfcfg_tbl2 + NFP_NET_VF_CFG_MB_CAP);
        if ((cap_vf & cap) != cap) {
-               nfp_warn(app->pf->cpp, "ndo_set_vf_%s not supported\n", msg);
+               if (warn)
+                       nfp_warn(app->pf->cpp, "ndo_set_vf_%s not supported\n", msg);
                return -EOPNOTSUPP;
        }
 
        if (vf < 0 || vf >= app->pf->num_vfs) {
-               nfp_warn(app->pf->cpp, "invalid VF id %d\n", vf);
+               if (warn)
+                       nfp_warn(app->pf->cpp, "invalid VF id %d\n", vf);
                return -EINVAL;
        }
 
@@ -65,7 +67,7 @@ int nfp_app_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
        unsigned int vf_offset;
        int err;
 
-       err = nfp_net_sriov_check(app, vf, NFP_NET_VF_CFG_MB_CAP_MAC, "mac");
+       err = nfp_net_sriov_check(app, vf, NFP_NET_VF_CFG_MB_CAP_MAC, "mac", true);
        if (err)
                return err;
 
@@ -101,7 +103,7 @@ int nfp_app_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos,
        u32 vlan_tag;
        int err;
 
-       err = nfp_net_sriov_check(app, vf, NFP_NET_VF_CFG_MB_CAP_VLAN, "vlan");
+       err = nfp_net_sriov_check(app, vf, NFP_NET_VF_CFG_MB_CAP_VLAN, "vlan", true);
        if (err)
                return err;
 
@@ -115,7 +117,7 @@ int nfp_app_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos,
        }
 
        /* Check if fw supports or not */
-       err = nfp_net_sriov_check(app, vf, NFP_NET_VF_CFG_MB_CAP_VLAN_PROTO, "vlan_proto");
+       err = nfp_net_sriov_check(app, vf, NFP_NET_VF_CFG_MB_CAP_VLAN_PROTO, "vlan_proto", true);
        if (err)
                is_proto_sup = false;
 
@@ -149,7 +151,7 @@ int nfp_app_set_vf_rate(struct net_device *netdev, int vf,
        u32 vf_offset, ratevalue;
        int err;
 
-       err = nfp_net_sriov_check(app, vf, NFP_NET_VF_CFG_MB_CAP_RATE, "rate");
+       err = nfp_net_sriov_check(app, vf, NFP_NET_VF_CFG_MB_CAP_RATE, "rate", true);
        if (err)
                return err;
 
@@ -181,7 +183,7 @@ int nfp_app_set_vf_spoofchk(struct net_device *netdev, int vf, bool enable)
        int err;
 
        err = nfp_net_sriov_check(app, vf, NFP_NET_VF_CFG_MB_CAP_SPOOF,
-                                 "spoofchk");
+                                 "spoofchk", true);
        if (err)
                return err;
 
@@ -205,7 +207,7 @@ int nfp_app_set_vf_trust(struct net_device *netdev, int vf, bool enable)
        int err;
 
        err = nfp_net_sriov_check(app, vf, NFP_NET_VF_CFG_MB_CAP_TRUST,
-                                 "trust");
+                                 "trust", true);
        if (err)
                return err;
 
@@ -230,7 +232,7 @@ int nfp_app_set_vf_link_state(struct net_device *netdev, int vf,
        int err;
 
        err = nfp_net_sriov_check(app, vf, NFP_NET_VF_CFG_MB_CAP_LINK_STATE,
-                                 "link_state");
+                                 "link_state", true);
        if (err)
                return err;
 
@@ -265,7 +267,7 @@ int nfp_app_get_vf_config(struct net_device *netdev, int vf,
        u8 flags;
        int err;
 
-       err = nfp_net_sriov_check(app, vf, 0, "");
+       err = nfp_net_sriov_check(app, vf, 0, "", true);
        if (err)
                return err;
 
@@ -285,13 +287,13 @@ int nfp_app_get_vf_config(struct net_device *netdev, int vf,
 
        ivi->vlan = FIELD_GET(NFP_NET_VF_CFG_VLAN_VID, vlan_tag);
        ivi->qos = FIELD_GET(NFP_NET_VF_CFG_VLAN_QOS, vlan_tag);
-       if (!nfp_net_sriov_check(app, vf, NFP_NET_VF_CFG_MB_CAP_VLAN_PROTO, "vlan_proto"))
+       if (!nfp_net_sriov_check(app, vf, NFP_NET_VF_CFG_MB_CAP_VLAN_PROTO, "vlan_proto", false))
                ivi->vlan_proto = htons(FIELD_GET(NFP_NET_VF_CFG_VLAN_PROT, vlan_tag));
        ivi->spoofchk = FIELD_GET(NFP_NET_VF_CFG_CTRL_SPOOF, flags);
        ivi->trusted = FIELD_GET(NFP_NET_VF_CFG_CTRL_TRUST, flags);
        ivi->linkstate = FIELD_GET(NFP_NET_VF_CFG_CTRL_LINK_STATE, flags);
 
-       err = nfp_net_sriov_check(app, vf, NFP_NET_VF_CFG_MB_CAP_RATE, "rate");
+       err = nfp_net_sriov_check(app, vf, NFP_NET_VF_CFG_MB_CAP_RATE, "rate", false);
        if (!err) {
                rate = readl(app->pf->vfcfg_tbl2 + vf_offset +
                             NFP_NET_VF_CFG_RATE);
index f9f8093..38fe77d 100644 (file)
@@ -1072,13 +1072,11 @@ static int intel_eth_pci_probe(struct pci_dev *pdev,
 
        ret = stmmac_dvr_probe(&pdev->dev, plat, &res);
        if (ret) {
-               goto err_dvr_probe;
+               goto err_alloc_irq;
        }
 
        return 0;
 
-err_dvr_probe:
-       pci_free_irq_vectors(pdev);
 err_alloc_irq:
        clk_disable_unprepare(plat->stmmac_clk);
        clk_unregister_fixed_rate(plat->stmmac_clk);
index 8561f2d..13dafe7 100644 (file)
 #define DP83867_DOWNSHIFT_2_COUNT      2
 #define DP83867_DOWNSHIFT_4_COUNT      4
 #define DP83867_DOWNSHIFT_8_COUNT      8
+#define DP83867_SGMII_AUTONEG_EN       BIT(7)
 
 /* CFG3 bits */
 #define DP83867_CFG3_INT_OE                    BIT(7)
@@ -855,6 +856,32 @@ static int dp83867_phy_reset(struct phy_device *phydev)
                         DP83867_PHYCR_FORCE_LINK_GOOD, 0);
 }
 
+static void dp83867_link_change_notify(struct phy_device *phydev)
+{
+       /* There is a limitation in DP83867 PHY device where SGMII AN is
+        * only triggered once after the device is booted up. Even after the
+        * PHY TPI is down and up again, SGMII AN is not triggered and
+        * hence no new in-band message from PHY to MAC side SGMII.
+        * This could cause an issue during power up, when PHY is up prior
+        * to MAC. At this condition, once MAC side SGMII is up, MAC side
+        * SGMII wouldn't receive a new in-band message from the TI PHY
+        * with correct link status, speed and duplex info.
+        * Thus, a SW workaround is implemented here to retrigger SGMII
+        * Auto-Neg whenever there is a link change.
+        */
+       if (phydev->interface == PHY_INTERFACE_MODE_SGMII) {
+               int val = 0;
+
+               val = phy_clear_bits(phydev, DP83867_CFG2,
+                                    DP83867_SGMII_AUTONEG_EN);
+               if (val < 0)
+                       return;
+
+               phy_set_bits(phydev, DP83867_CFG2,
+                            DP83867_SGMII_AUTONEG_EN);
+       }
+}
+
 static struct phy_driver dp83867_driver[] = {
        {
                .phy_id         = DP83867_PHY_ID,
@@ -879,6 +906,8 @@ static struct phy_driver dp83867_driver[] = {
 
                .suspend        = genphy_suspend,
                .resume         = genphy_resume,
+
+               .link_change_notify = dp83867_link_change_notify,
        },
 };
 module_phy_driver(dp83867_driver);
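
The workaround hangs off phylib's link_change_notify hook, which fires on every link state transition; toggling the SGMII autoneg enable bit there forces the PHY to restart in-band signalling so the MAC-side SGMII sees fresh status. A sketch of the hook wiring (EX_CFG_REG and EX_AN_EN are hypothetical stand-ins for the driver's CFG2 register and autoneg bit):

#include <linux/bits.h>
#include <linux/phy.h>

#define EX_CFG_REG	0x14		/* hypothetical */
#define EX_AN_EN	BIT(7)		/* hypothetical */

static void example_link_change_notify(struct phy_device *phydev)
{
	if (phydev->interface != PHY_INTERFACE_MODE_SGMII)
		return;

	/* Clear, then set, the AN enable bit to retrigger in-band
	 * autonegotiation, as the dp83867 hook above does.
	 */
	if (phy_clear_bits(phydev, EX_CFG_REG, EX_AN_EN) < 0)
		return;
	phy_set_bits(phydev, EX_CFG_REG, EX_AN_EN);
}
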
index 58d6029..8a2dbe8 100644 (file)
@@ -1046,7 +1046,6 @@ int __init mdio_bus_init(void)
 
        return ret;
 }
-EXPORT_SYMBOL_GPL(mdio_bus_init);
 
 #if IS_ENABLED(CONFIG_PHYLIB)
 void mdio_bus_exit(void)
index a99aedf..ea73094 100644 (file)
@@ -388,13 +388,25 @@ static void nfcmrvl_play_deferred(struct nfcmrvl_usb_drv_data *drv_data)
        int err;
 
        while ((urb = usb_get_from_anchor(&drv_data->deferred))) {
+               usb_anchor_urb(urb, &drv_data->tx_anchor);
+
                err = usb_submit_urb(urb, GFP_ATOMIC);
-               if (err)
+               if (err) {
+                       kfree(urb->setup_packet);
+                       usb_unanchor_urb(urb);
+                       usb_free_urb(urb);
                        break;
+               }
 
                drv_data->tx_in_flight++;
+               usb_free_urb(urb);
+       }
+
+       /* Clean up the remaining deferred urbs. */
+       while ((urb = usb_get_from_anchor(&drv_data->deferred))) {
+               kfree(urb->setup_packet);
+               usb_free_urb(urb);
        }
-       usb_scuttle_anchored_urbs(&drv_data->deferred);
 }
 
 static int nfcmrvl_resume(struct usb_interface *intf)
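
The nfcmrvl leak fix comes down to URB reference counting: usb_get_from_anchor() returns the urb with a reference held and removes it from the anchor, so every path out of the loop must either re-anchor the urb and drop the local reference, or free the urb and its setup packet outright. A sketch of the corrected flow:

#include <linux/slab.h>
#include <linux/usb.h>

/* Sketch: resubmit deferred URBs, balancing the reference that
 * usb_get_from_anchor() took on every outcome.
 */
static void example_play_deferred(struct usb_anchor *deferred,
				  struct usb_anchor *tx_anchor)
{
	struct urb *urb;

	while ((urb = usb_get_from_anchor(deferred))) {
		usb_anchor_urb(urb, tx_anchor);
		if (usb_submit_urb(urb, GFP_ATOMIC)) {
			kfree(urb->setup_packet);
			usb_unanchor_urb(urb);
			usb_free_urb(urb);
			break;
		}
		usb_free_urb(urb);	/* the anchor holds its own ref */
	}

	/* Anything still deferred after a failure must be freed too. */
	while ((urb = usb_get_from_anchor(deferred))) {
		kfree(urb->setup_packet);
		usb_free_urb(urb);
	}
}
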
index 7e213f8..df8d27c 100644 (file)
@@ -300,6 +300,8 @@ int st21nfca_connectivity_event_received(struct nfc_hci_dev *hdev, u8 host,
        int r = 0;
        struct device *dev = &hdev->ndev->dev;
        struct nfc_evt_transaction *transaction;
+       u32 aid_len;
+       u8 params_len;
 
        pr_debug("connectivity gate event: %x\n", event);
 
@@ -308,43 +310,48 @@ int st21nfca_connectivity_event_received(struct nfc_hci_dev *hdev, u8 host,
                r = nfc_se_connectivity(hdev->ndev, host);
        break;
        case ST21NFCA_EVT_TRANSACTION:
-               /*
-                * According to specification etsi 102 622
+               /* According to specification etsi 102 622
                 * 11.2.2.4 EVT_TRANSACTION Table 52
                 * Description  Tag     Length
                 * AID          81      5 to 16
                 * PARAMETERS   82      0 to 255
+                *
+                * The key differences: the aid storage length is variably
+                * sized in the packet but fixed in nfc_evt_transaction;
+                * aid_len is u8 in the packet but u32 in the structure; and
+                * the packet's tags are not included in nfc_evt_transaction.
+                *
+                * size in bytes: 1          1       5-16 1             1           0-255
+                * offset:        0          1       2    aid_len + 2   aid_len + 3 aid_len + 4
+                * member name:   aid_tag(M) aid_len aid  params_tag(M) params_len  params
+                * example:       0x81       5-16    X    0x82          0-255       X
                 */
-               if (skb->len < NFC_MIN_AID_LENGTH + 2 &&
-                   skb->data[0] != NFC_EVT_TRANSACTION_AID_TAG)
+               if (skb->len < 2 || skb->data[0] != NFC_EVT_TRANSACTION_AID_TAG)
                        return -EPROTO;
 
-               transaction = devm_kzalloc(dev, skb->len - 2, GFP_KERNEL);
-               if (!transaction)
-                       return -ENOMEM;
-
-               transaction->aid_len = skb->data[1];
+               aid_len = skb->data[1];
 
-               /* Checking if the length of the AID is valid */
-               if (transaction->aid_len > sizeof(transaction->aid))
-                       return -EINVAL;
+               if (skb->len < aid_len + 4 || aid_len > sizeof(transaction->aid))
+                       return -EPROTO;
 
-               memcpy(transaction->aid, &skb->data[2],
-                      transaction->aid_len);
+               params_len = skb->data[aid_len + 3];
 
-               /* Check next byte is PARAMETERS tag (82) */
-               if (skb->data[transaction->aid_len + 2] !=
-                   NFC_EVT_TRANSACTION_PARAMS_TAG)
+               /* Verify the PARAMETERS tag is (82), and finally check that
+                * there is enough space in the packet to read everything.
+                */
+               if ((skb->data[aid_len + 2] != NFC_EVT_TRANSACTION_PARAMS_TAG) ||
+                   (skb->len < aid_len + 4 + params_len))
                        return -EPROTO;
 
-               transaction->params_len = skb->data[transaction->aid_len + 3];
+               transaction = devm_kzalloc(dev, sizeof(*transaction) + params_len, GFP_KERNEL);
+               if (!transaction)
+                       return -ENOMEM;
 
-               /* Total size is allocated (skb->len - 2) minus fixed array members */
-               if (transaction->params_len > ((skb->len - 2) - sizeof(struct nfc_evt_transaction)))
-                       return -EINVAL;
+               transaction->aid_len = aid_len;
+               transaction->params_len = params_len;
 
-               memcpy(transaction->params, skb->data +
-                      transaction->aid_len + 4, transaction->params_len);
+               memcpy(transaction->aid, &skb->data[2], aid_len);
+               memcpy(transaction->params, &skb->data[aid_len + 4], params_len);
 
                r = nfc_se_transaction(hdev->ndev, host, transaction);
        break;
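
The rewritten handler is a textbook bounds-checked TLV parse: every tag, length byte and value is verified to lie inside skb->len before it is read, and the allocation is sized from the parsed lengths instead of from attacker-influenced arithmetic on skb->len. The checks, condensed (field layout as in the comment above):

#include <linux/errno.h>
#include <linux/skbuff.h>

#define EX_AID_TAG	0x81	/* tag values per ETSI TS 102 622 */
#define EX_PARAMS_TAG	0x82
#define EX_MAX_AID	16

/* Sketch: returns 0 only when both TLVs fit inside the packet. */
static int example_check_transaction(const struct sk_buff *skb)
{
	u32 aid_len;
	u8 params_len;

	if (skb->len < 2 || skb->data[0] != EX_AID_TAG)
		return -EPROTO;

	aid_len = skb->data[1];
	/* aid tag + len + aid + params tag + params len must all fit */
	if (skb->len < aid_len + 4 || aid_len > EX_MAX_AID)
		return -EPROTO;

	if (skb->data[aid_len + 2] != EX_PARAMS_TAG)
		return -EPROTO;

	params_len = skb->data[aid_len + 3];
	if (skb->len < aid_len + 4 + params_len)
		return -EPROTO;

	return 0;
}
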
index 72df4b8..09c7829 100644 (file)
@@ -85,7 +85,7 @@ config NVSW_SN2201
        depends on I2C
        depends on REGMAP_I2C
        help
-         This driver provides support for the Nvidia SN2201 platfom.
+         This driver provides support for the Nvidia SN2201 platform.
          The SN2201 is a highly integrated one-rack-unit system with
          L3 management switches. It has 48 x 1Gbps RJ45 + 4 x 100G QSFP28
          ports in a compact 1RU form factor. The system also includes a
index 0bcdc7c..2923daf 100644 (file)
@@ -326,7 +326,7 @@ static struct resource nvsw_sn2201_lpc_res[] = {
 };
 
 /* SN2201 I2C platform data. */
-struct mlxreg_core_hotplug_platform_data nvsw_sn2201_i2c_data = {
+static struct mlxreg_core_hotplug_platform_data nvsw_sn2201_i2c_data = {
        .irq = NVSW_SN2201_CPLD_SYSIRQ,
 };
 
index d421e14..6b51ad0 100644 (file)
@@ -17,7 +17,7 @@ menuconfig MIPS_PLATFORM_DEVICES
 if MIPS_PLATFORM_DEVICES
 
 config CPU_HWMON
-       tristate "Loongson-3 CPU HWMon Driver"
+       bool "Loongson-3 CPU HWMon Driver"
        depends on MACH_LOONGSON64
        select HWMON
        default y
index 0553428..8dd6723 100644 (file)
@@ -405,11 +405,14 @@ MODULE_DEVICE_TABLE(dmi, dmi_ids);
 static int __init p50_module_init(void)
 {
        struct resource res = DEFINE_RES_IO(P50_GPIO_IO_PORT_BASE, P50_PORT_CMD + 1);
+       int ret;
 
        if (!dmi_first_match(dmi_ids))
                return -ENODEV;
 
-       platform_driver_register(&p50_gpio_driver);
+       ret = platform_driver_register(&p50_gpio_driver);
+       if (ret)
+               return ret;
 
        gpio_pdev = platform_device_register_simple(DRIVER_NAME, PLATFORM_DEVID_NONE, &res, 1);
        if (IS_ERR(gpio_pdev)) {
index 1ef606e..497ad2f 100644 (file)
@@ -140,6 +140,7 @@ static u8 gigabyte_wmi_detect_sensor_usability(struct wmi_device *wdev)
        }}
 
 static const struct dmi_system_id gigabyte_wmi_known_working_platforms[] = {
+       DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B450M DS3H-CF"),
        DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B450M S2H V2"),
        DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B550 AORUS ELITE AX V2"),
        DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B550 AORUS ELITE"),
@@ -156,6 +157,7 @@ static const struct dmi_system_id gigabyte_wmi_known_working_platforms[] = {
        DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("X570 GAMING X"),
        DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("X570 I AORUS PRO WIFI"),
        DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("X570 UD"),
+       DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("Z690M AORUS ELITE AX DDR4"),
        { }
 };
 
index 667f94b..0d8cb22 100644 (file)
@@ -38,6 +38,7 @@ MODULE_ALIAS("wmi:5FB7F034-2C63-45e9-BE91-3D44E2C707E4");
 #define HPWMI_EVENT_GUID "95F24279-4D7B-4334-9387-ACCDC67EF61C"
 #define HPWMI_BIOS_GUID "5FB7F034-2C63-45e9-BE91-3D44E2C707E4"
 #define HP_OMEN_EC_THERMAL_PROFILE_OFFSET 0x95
+#define zero_if_sup(tmp) (zero_insize_support ? 0 : sizeof(tmp)) /* use when zero insize is required */
 
 /* DMI board names of devices that should use the omen specific path for
  * thermal profiles.
@@ -220,6 +221,7 @@ static struct input_dev *hp_wmi_input_dev;
 static struct platform_device *hp_wmi_platform_dev;
 static struct platform_profile_handler platform_profile_handler;
 static bool platform_profile_support;
+static bool zero_insize_support;
 
 static struct rfkill *wifi_rfkill;
 static struct rfkill *bluetooth_rfkill;
@@ -290,14 +292,16 @@ static int hp_wmi_perform_query(int query, enum hp_wmi_command command,
        struct bios_return *bios_return;
        union acpi_object *obj = NULL;
        struct bios_args *args = NULL;
-       int mid, actual_outsize, ret;
+       int mid, actual_insize, actual_outsize;
        size_t bios_args_size;
+       int ret;
 
        mid = encode_outsize_for_pvsz(outsize);
        if (WARN_ON(mid < 0))
                return mid;
 
-       bios_args_size = struct_size(args, data, insize);
+       actual_insize = max(insize, 128);
+       bios_args_size = struct_size(args, data, actual_insize);
        args = kmalloc(bios_args_size, GFP_KERNEL);
        if (!args)
                return -ENOMEM;
@@ -374,7 +378,7 @@ static int hp_wmi_read_int(int query)
        int val = 0, ret;
 
        ret = hp_wmi_perform_query(query, HPWMI_READ, &val,
-                                  0, sizeof(val));
+                                  zero_if_sup(val), sizeof(val));
 
        if (ret)
                return ret < 0 ? ret : -EINVAL;
@@ -410,7 +414,8 @@ static int hp_wmi_get_tablet_mode(void)
                return -ENODEV;
 
        ret = hp_wmi_perform_query(HPWMI_SYSTEM_DEVICE_MODE, HPWMI_READ,
-                                  system_device_mode, 0, sizeof(system_device_mode));
+                                  system_device_mode, zero_if_sup(system_device_mode),
+                                  sizeof(system_device_mode));
        if (ret < 0)
                return ret;
 
@@ -497,7 +502,7 @@ static int hp_wmi_fan_speed_max_get(void)
        int val = 0, ret;
 
        ret = hp_wmi_perform_query(HPWMI_FAN_SPEED_MAX_GET_QUERY, HPWMI_GM,
-                                  &val, 0, sizeof(val));
+                                  &val, zero_if_sup(val), sizeof(val));
 
        if (ret)
                return ret < 0 ? ret : -EINVAL;
@@ -509,7 +514,7 @@ static int __init hp_wmi_bios_2008_later(void)
 {
        int state = 0;
        int ret = hp_wmi_perform_query(HPWMI_FEATURE_QUERY, HPWMI_READ, &state,
-                                      0, sizeof(state));
+                                      zero_if_sup(state), sizeof(state));
        if (!ret)
                return 1;
 
@@ -520,7 +525,7 @@ static int __init hp_wmi_bios_2009_later(void)
 {
        u8 state[128];
        int ret = hp_wmi_perform_query(HPWMI_FEATURE2_QUERY, HPWMI_READ, &state,
-                                      0, sizeof(state));
+                                      zero_if_sup(state), sizeof(state));
        if (!ret)
                return 1;
 
@@ -598,7 +603,7 @@ static int hp_wmi_rfkill2_refresh(void)
        int err, i;
 
        err = hp_wmi_perform_query(HPWMI_WIRELESS2_QUERY, HPWMI_READ, &state,
-                                  0, sizeof(state));
+                                  zero_if_sup(state), sizeof(state));
        if (err)
                return err;
 
@@ -1007,7 +1012,7 @@ static int __init hp_wmi_rfkill2_setup(struct platform_device *device)
        int err, i;
 
        err = hp_wmi_perform_query(HPWMI_WIRELESS2_QUERY, HPWMI_READ, &state,
-                                  0, sizeof(state));
+                                  zero_if_sup(state), sizeof(state));
        if (err)
                return err < 0 ? err : -EINVAL;
 
@@ -1483,11 +1488,15 @@ static int __init hp_wmi_init(void)
 {
        int event_capable = wmi_has_guid(HPWMI_EVENT_GUID);
        int bios_capable = wmi_has_guid(HPWMI_BIOS_GUID);
-       int err;
+       int err, tmp = 0;
 
        if (!bios_capable && !event_capable)
                return -ENODEV;
 
+       if (hp_wmi_perform_query(HPWMI_HARDWARE_QUERY, HPWMI_READ, &tmp,
+                                sizeof(tmp), sizeof(tmp)) == HPWMI_RET_INVALID_PARAMETERS)
+               zero_insize_support = true;
+
        if (event_capable) {
                err = hp_wmi_input_setup();
                if (err)
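
The hp-wmi change probes the firmware once at init: a read issued with a non-zero insize that fails with HPWMI_RET_INVALID_PARAMETERS means this BIOS expects zero-sized input buffers, so zero_if_sup() collapses to 0; otherwise callers keep passing the real buffer size. The macro, restated as a helper for clarity:

/* Sketch: the logic behind zero_if_sup(). zero_insize_support is set
 * at init when the probe returns HPWMI_RET_INVALID_PARAMETERS.
 */
static bool zero_insize_support;

static inline size_t example_insize(size_t bufsize)
{
	return zero_insize_support ? 0 : bufsize;
}
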
index 216d31e..79cff1f 100644 (file)
@@ -122,6 +122,12 @@ static const struct dmi_system_id dmi_vgbs_allow_list[] = {
                        DMI_MATCH(DMI_PRODUCT_NAME, "HP Spectre x360 Convertible 15-df0xxx"),
                },
        },
+       {
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "Surface Go"),
+               },
+       },
        { }
 };
 
index edaf22e..40183bd 100644 (file)
@@ -1912,6 +1912,7 @@ static const struct x86_cpu_id intel_pmc_core_ids[] = {
        X86_MATCH_INTEL_FAM6_MODEL(ROCKETLAKE,          &tgl_reg_map),
        X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_L,         &tgl_reg_map),
        X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE,           &adl_reg_map),
+       X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE_P,        &tgl_reg_map),
        {}
 };
 
index 34daf9d..ace1239 100644 (file)
@@ -282,7 +282,7 @@ static int pmt_crashlog_probe(struct auxiliary_device *auxdev,
        auxiliary_set_drvdata(auxdev, priv);
 
        for (i = 0; i < intel_vsec_dev->num_resources; i++) {
-               struct intel_pmt_entry *entry = &priv->entry[i].entry;
+               struct intel_pmt_entry *entry = &priv->entry[priv->num_entries].entry;
 
                ret = intel_pmt_dev_create(entry, &pmt_crashlog_ns, intel_vsec_dev, i);
                if (ret < 0)
index 256ec6d..9d01a3e 100644 (file)
@@ -9795,7 +9795,7 @@ static int ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg)
                                        GFP_KERNEL);
 
                if (!ioa_cfg->hrrq[i].host_rrq)  {
-                       while (--i > 0)
+                       while (--i >= 0)
                                dma_free_coherent(&pdev->dev,
                                        sizeof(u32) * ioa_cfg->hrrq[i].size,
                                        ioa_cfg->hrrq[i].host_rrq,
@@ -10068,7 +10068,7 @@ static int ipr_request_other_msi_irqs(struct ipr_ioa_cfg *ioa_cfg,
                        ioa_cfg->vectors_info[i].desc,
                        &ioa_cfg->hrrq[i]);
                if (rc) {
-                       while (--i >= 0)
+                       while (--i > 0)
                                free_irq(pci_irq_vector(pdev, i),
                                        &ioa_cfg->hrrq[i]);
                        return rc;
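
The two ipr hunks fix mirrored off-by-one bugs in unwind loops; which comparison is correct depends on whether index 0 was set up by the failing loop itself. A sketch with hypothetical alloc/free steps:

#include <linux/errno.h>

static int alloc_entry(int i);	/* hypothetical */
static void free_entry(int i);	/* hypothetical */

static int example_alloc_all(int n)
{
	int i;

	for (i = 0; i < n; i++) {
		if (alloc_entry(i)) {
			/* Entries 0..i-1 were set up by this loop, so
			 * the unwind must reach index 0: --i >= 0.
			 */
			while (--i >= 0)
				free_entry(i);
			return -ENOMEM;
		}
	}

	return 0;
}

When index 0 is managed outside the loop (ipr requests its first MSI vector separately, so the IRQ loop starts at i = 1), the unwind must stop before 0, which is exactly the second hunk's while (--i > 0).
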
index b1be0dd..f5d7495 100644 (file)
@@ -420,8 +420,6 @@ int lpfc_sli_issue_iocb_wait(struct lpfc_hba *, uint32_t,
                             uint32_t);
 void lpfc_sli_abort_fcp_cmpl(struct lpfc_hba *, struct lpfc_iocbq *,
                             struct lpfc_iocbq *);
-void lpfc_sli4_abort_fcp_cmpl(struct lpfc_hba *h, struct lpfc_iocbq *i,
-                             struct lpfc_wcqe_complete *w);
 
 void lpfc_sli_free_hbq(struct lpfc_hba *, struct hbq_dmabuf *);
 
@@ -630,7 +628,7 @@ void lpfc_nvmet_invalidate_host(struct lpfc_hba *phba,
                        struct lpfc_nodelist *ndlp);
 void lpfc_nvme_abort_fcreq_cmpl(struct lpfc_hba *phba,
                                struct lpfc_iocbq *cmdiocb,
-                               struct lpfc_wcqe_complete *abts_cmpl);
+                               struct lpfc_iocbq *rspiocb);
 void lpfc_create_multixri_pools(struct lpfc_hba *phba);
 void lpfc_create_destroy_pools(struct lpfc_hba *phba);
 void lpfc_move_xri_pvt_to_pbl(struct lpfc_hba *phba, u32 hwqid);
index 9d36b20..13dfe28 100644 (file)
@@ -197,7 +197,7 @@ lpfc_ct_reject_event(struct lpfc_nodelist *ndlp,
        memset(bpl, 0, sizeof(struct ulp_bde64));
        bpl->addrHigh = le32_to_cpu(putPaddrHigh(mp->phys));
        bpl->addrLow = le32_to_cpu(putPaddrLow(mp->phys));
-       bpl->tus.f.bdeFlags = BUFF_TYPE_BLP_64;
+       bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
        bpl->tus.f.bdeSize = (LPFC_CT_PREAMBLE - 4);
        bpl->tus.w = le32_to_cpu(bpl->tus.w);
 
index 07f9a6e..3fababb 100644 (file)
@@ -2998,10 +2998,7 @@ lpfc_cmpl_els_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
                                 ndlp->nlp_DID, ulp_status,
                                 ulp_word4);
 
-               /* Call NLP_EVT_DEVICE_RM if link is down or LOGO is aborted */
                if (lpfc_error_lost_link(ulp_status, ulp_word4)) {
-                       lpfc_disc_state_machine(vport, ndlp, cmdiocb,
-                                               NLP_EVT_DEVICE_RM);
                        skip_recovery = 1;
                        goto out;
                }
@@ -3021,18 +3018,10 @@ lpfc_cmpl_els_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
                spin_unlock_irq(&ndlp->lock);
                lpfc_disc_state_machine(vport, ndlp, cmdiocb,
                                        NLP_EVT_DEVICE_RM);
-               lpfc_els_free_iocb(phba, cmdiocb);
-               lpfc_nlp_put(ndlp);
-
-               /* Presume the node was released. */
-               return;
+               goto out_rsrc_free;
        }
 
 out:
-       /* Driver is done with the IO.  */
-       lpfc_els_free_iocb(phba, cmdiocb);
-       lpfc_nlp_put(ndlp);
-
        /* At this point, the LOGO processing is complete. NOTE: For a
         * pt2pt topology, we are assuming the NPortID will only change
         * on link up processing. For a LOGO / PLOGI initiated by the
@@ -3059,6 +3048,10 @@ out:
                                 ndlp->nlp_DID, ulp_status,
                                 ulp_word4, tmo,
                                 vport->num_disc_nodes);
+
+               lpfc_els_free_iocb(phba, cmdiocb);
+               lpfc_nlp_put(ndlp);
+
                lpfc_disc_start(vport);
                return;
        }
@@ -3075,6 +3068,10 @@ out:
                lpfc_disc_state_machine(vport, ndlp, cmdiocb,
                                        NLP_EVT_DEVICE_RM);
        }
+out_rsrc_free:
+       /* Driver is done with the I/O. */
+       lpfc_els_free_iocb(phba, cmdiocb);
+       lpfc_nlp_put(ndlp);
 }
 
 /**
index 8511369..f024415 100644 (file)
@@ -4487,6 +4487,9 @@ struct wqe_common {
 #define wqe_sup_SHIFT         6
 #define wqe_sup_MASK          0x00000001
 #define wqe_sup_WORD          word11
+#define wqe_ffrq_SHIFT         6
+#define wqe_ffrq_MASK          0x00000001
+#define wqe_ffrq_WORD          word11
 #define wqe_wqec_SHIFT        7
 #define wqe_wqec_MASK         0x00000001
 #define wqe_wqec_WORD         word11
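
The three new wqe_ffrq lines extend lpfc's SHIFT/MASK/WORD triplet convention, which the driver's bf_set()/bf_get() macros consume via token pasting; a later NVMe hunk uses bf_set(wqe_ffrq, ...) to set bit 6 of word11 for admin async-event commands. A minimal standalone sketch of how such triplets drive the accessors (simplified rendering; `my_field` is an illustrative name, not an lpfc field):

/* Sketch: SHIFT/MASK/WORD bit-field accessors via token pasting. */
#include <assert.h>
#include <stdint.h>

struct wqe_words { uint32_t word11; };

#define my_field_SHIFT  6
#define my_field_MASK   0x00000001
#define my_field_WORD   word11

#define bf_set(name, ptr, value)                                          \
        ((ptr)->name##_WORD = (((value) & name##_MASK) << name##_SHIFT) | \
         ((ptr)->name##_WORD & ~(name##_MASK << name##_SHIFT)))
#define bf_get(name, ptr)                                                 \
        (((ptr)->name##_WORD >> name##_SHIFT) & name##_MASK)

int main(void)
{
        struct wqe_words w = { 0 };

        bf_set(my_field, &w, 1);        /* sets only bit 6 of word11 */
        assert(w.word11 == 0x40 && bf_get(my_field, &w) == 1);
        return 0;
}
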
index 93b94c6..750dd1e 100644 (file)
@@ -12188,7 +12188,7 @@ lpfc_sli_enable_msi(struct lpfc_hba *phba)
        rc = pci_enable_msi(phba->pcidev);
        if (!rc)
                lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
-                               "0462 PCI enable MSI mode success.\n");
+                               "0012 PCI enable MSI mode success.\n");
        else {
                lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
                                "0471 PCI enable MSI mode failed (%d)\n", rc);
index 639f866..b86ff9f 100644 (file)
@@ -834,7 +834,8 @@ lpfc_rcv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
                lpfc_nvmet_invalidate_host(phba, ndlp);
 
        if (ndlp->nlp_DID == Fabric_DID) {
-               if (vport->port_state <= LPFC_FDISC)
+               if (vport->port_state <= LPFC_FDISC ||
+                   vport->fc_flag & FC_PT2PT)
                        goto out;
                lpfc_linkdown_port(vport);
                spin_lock_irq(shost->host_lock);
index 335e906..cd10ee6 100644 (file)
@@ -1065,25 +1065,37 @@ lpfc_nvme_io_cmd_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
                        nCmd->rcv_rsplen = wcqe->parameter;
                        nCmd->status = 0;
 
+                       /* Get the NVME cmd details for this unique error. */
+                       cp = (struct nvme_fc_cmd_iu *)nCmd->cmdaddr;
+                       ep = (struct nvme_fc_ersp_iu *)nCmd->rspaddr;
+
                        /* Check if this is really an ERSP */
                        if (nCmd->rcv_rsplen == LPFC_NVME_ERSP_LEN) {
                                lpfc_ncmd->status = IOSTAT_SUCCESS;
                                lpfc_ncmd->result = 0;
 
                                lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME,
-                                        "6084 NVME Completion ERSP: "
-                                        "xri %x placed x%x\n",
-                                        lpfc_ncmd->cur_iocbq.sli4_xritag,
-                                        wcqe->total_data_placed);
+                                       "6084 NVME FCP_ERR ERSP: "
+                                       "xri %x placed x%x opcode x%x cmd_id "
+                                       "x%x cqe_status x%x\n",
+                                       lpfc_ncmd->cur_iocbq.sli4_xritag,
+                                       wcqe->total_data_placed,
+                                       cp->sqe.common.opcode,
+                                       cp->sqe.common.command_id,
+                                       ep->cqe.status);
                                break;
                        }
                        lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
                                         "6081 NVME Completion Protocol Error: "
                                         "xri %x status x%x result x%x "
-                                        "placed x%x\n",
+                                        "placed x%x opcode x%x cmd_id x%x, "
+                                        "cqe_status x%x\n",
                                         lpfc_ncmd->cur_iocbq.sli4_xritag,
                                         lpfc_ncmd->status, lpfc_ncmd->result,
-                                        wcqe->total_data_placed);
+                                        wcqe->total_data_placed,
+                                        cp->sqe.common.opcode,
+                                        cp->sqe.common.command_id,
+                                        ep->cqe.status);
                        break;
                case IOSTAT_LOCAL_REJECT:
                        /* Let fall through to set command final state. */
@@ -1195,7 +1207,8 @@ lpfc_nvme_prep_io_cmd(struct lpfc_vport *vport,
 {
        struct lpfc_hba *phba = vport->phba;
        struct nvmefc_fcp_req *nCmd = lpfc_ncmd->nvmeCmd;
-       struct lpfc_iocbq *pwqeq = &(lpfc_ncmd->cur_iocbq);
+       struct nvme_common_command *sqe;
+       struct lpfc_iocbq *pwqeq = &lpfc_ncmd->cur_iocbq;
        union lpfc_wqe128 *wqe = &pwqeq->wqe;
        uint32_t req_len;
 
@@ -1252,8 +1265,14 @@ lpfc_nvme_prep_io_cmd(struct lpfc_vport *vport,
                cstat->control_requests++;
        }
 
-       if (pnode->nlp_nvme_info & NLP_NVME_NSLER)
+       if (pnode->nlp_nvme_info & NLP_NVME_NSLER) {
                bf_set(wqe_erp, &wqe->generic.wqe_com, 1);
+               sqe = &((struct nvme_fc_cmd_iu *)
+                       nCmd->cmdaddr)->sqe.common;
+               if (sqe->opcode == nvme_admin_async_event)
+                       bf_set(wqe_ffrq, &wqe->generic.wqe_com, 1);
+       }
+
        /*
         * Finish initializing those WQE fields that are independent
         * of the nvme_cmnd request_buffer
@@ -1787,7 +1806,7 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
  * lpfc_nvme_abort_fcreq_cmpl - Complete an NVME FCP abort request.
  * @phba: Pointer to HBA context object
  * @cmdiocb: Pointer to command iocb object.
- * @abts_cmpl: Pointer to wcqe complete object.
+ * @rspiocb: Pointer to response iocb object.
  *
  * This is the callback function for any NVME FCP IO that was aborted.
  *
@@ -1796,8 +1815,10 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
  **/
 void
 lpfc_nvme_abort_fcreq_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
-                          struct lpfc_wcqe_complete *abts_cmpl)
+                          struct lpfc_iocbq *rspiocb)
 {
+       struct lpfc_wcqe_complete *abts_cmpl = &rspiocb->wcqe_cmpl;
+
        lpfc_printf_log(phba, KERN_INFO, LOG_NVME,
                        "6145 ABORT_XRI_CN completing on rpi x%x "
                        "original iotag x%x, abort cmd iotag x%x "
@@ -1840,6 +1861,7 @@ lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport,
        struct lpfc_nvme_fcpreq_priv *freqpriv;
        unsigned long flags;
        int ret_val;
+       struct nvme_fc_cmd_iu *cp;
 
        /* Validate pointers. LLDD fault handling with transport does
         * have timing races.
@@ -1963,10 +1985,16 @@ lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport,
                return;
        }
 
+       /*
+        * Get Command Id from cmd to plug into response. This
+        * code is not needed in the next NVME Transport drop.
+        */
+       cp = (struct nvme_fc_cmd_iu *)lpfc_nbuf->nvmeCmd->cmdaddr;
        lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_ABTS,
                         "6138 Transport Abort NVME Request Issued for "
-                        "ox_id x%x\n",
-                        nvmereq_wqe->sli4_xritag);
+                        "ox_id x%x nvme opcode x%x nvme cmd_id x%x\n",
+                        nvmereq_wqe->sli4_xritag, cp->sqe.common.opcode,
+                        cp->sqe.common.command_id);
        return;
 
 out_unlock:
index d439682..ba5e401 100644 (file)
@@ -6062,6 +6062,9 @@ lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
        int status;
        u32 logit = LOG_FCP;
 
+       if (!rport)
+               return FAILED;
+
        rdata = rport->dd_data;
        if (!rdata || !rdata->pnode) {
                lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
@@ -6140,6 +6143,9 @@ lpfc_target_reset_handler(struct scsi_cmnd *cmnd)
        unsigned long flags;
        DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq);
 
+       if (!rport)
+               return FAILED;
+
        rdata = rport->dd_data;
        if (!rdata || !rdata->pnode) {
                lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
index 6ed696c..80ac3a0 100644 (file)
@@ -1930,7 +1930,7 @@ lpfc_issue_cmf_sync_wqe(struct lpfc_hba *phba, u32 ms, u64 total)
        sync_buf = __lpfc_sli_get_iocbq(phba);
        if (!sync_buf) {
                lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT,
-                               "6213 No available WQEs for CMF_SYNC_WQE\n");
+                               "6244 No available WQEs for CMF_SYNC_WQE\n");
                ret_val = ENOMEM;
                goto out_unlock;
        }
@@ -3805,7 +3805,7 @@ lpfc_sli_process_sol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
                                                set_job_ulpword4(cmdiocbp,
                                                                 IOERR_ABORT_REQUESTED);
                                                /*
-                                                * For SLI4, irsiocb contains
+                                                * For SLI4, irspiocb contains
                                                 * NO_XRI in sli_xritag, it
                                                 * shall not affect releasing
                                                 * sgl (xri) process.
@@ -3823,7 +3823,7 @@ lpfc_sli_process_sol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
                                        }
                                }
                        }
-                       (cmdiocbp->cmd_cmpl) (phba, cmdiocbp, saveq);
+                       cmdiocbp->cmd_cmpl(phba, cmdiocbp, saveq);
                } else
                        lpfc_sli_release_iocbq(phba, cmdiocbp);
        } else {
@@ -4063,8 +4063,7 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba,
                                cmdiocbq->cmd_flag &= ~LPFC_DRIVER_ABORTED;
                        if (cmdiocbq->cmd_cmpl) {
                                spin_unlock_irqrestore(&phba->hbalock, iflag);
-                               (cmdiocbq->cmd_cmpl)(phba, cmdiocbq,
-                                                     &rspiocbq);
+                               cmdiocbq->cmd_cmpl(phba, cmdiocbq, &rspiocbq);
                                spin_lock_irqsave(&phba->hbalock, iflag);
                        }
                        break;
@@ -10288,7 +10287,7 @@ __lpfc_sli_issue_iocb_s3(struct lpfc_hba *phba, uint32_t ring_number,
  * @flag: Flag indicating if this command can be put into txq.
  *
  * __lpfc_sli_issue_fcp_io_s3 is wrapper function to invoke lockless func to
- * send  an iocb command to an HBA with SLI-4 interface spec.
+ * send  an iocb command to an HBA with SLI-3 interface spec.
  *
  * This function takes the hbalock before invoking the lockless version.
  * The function will return success after it successfully submit the wqe to
@@ -12740,7 +12739,7 @@ lpfc_sli_wake_iocb_wait(struct lpfc_hba *phba,
                cmdiocbq->cmd_cmpl = cmdiocbq->wait_cmd_cmpl;
                cmdiocbq->wait_cmd_cmpl = NULL;
                if (cmdiocbq->cmd_cmpl)
-                       (cmdiocbq->cmd_cmpl)(phba, cmdiocbq, NULL);
+                       cmdiocbq->cmd_cmpl(phba, cmdiocbq, NULL);
                else
                        lpfc_sli_release_iocbq(phba, cmdiocbq);
                return;
@@ -12754,9 +12753,9 @@ lpfc_sli_wake_iocb_wait(struct lpfc_hba *phba,
 
        /* Set the exchange busy flag for task management commands */
        if ((cmdiocbq->cmd_flag & LPFC_IO_FCP) &&
-               !(cmdiocbq->cmd_flag & LPFC_IO_LIBDFC)) {
+           !(cmdiocbq->cmd_flag & LPFC_IO_LIBDFC)) {
                lpfc_cmd = container_of(cmdiocbq, struct lpfc_io_buf,
-                       cur_iocbq);
+                                       cur_iocbq);
                if (rspiocbq && (rspiocbq->cmd_flag & LPFC_EXCHANGE_BUSY))
                        lpfc_cmd->flags |= LPFC_SBUF_XBUSY;
                else
@@ -13896,7 +13895,7 @@ void lpfc_sli4_els_xri_abort_event_proc(struct lpfc_hba *phba)
  * @irspiocbq: Pointer to work-queue completion queue entry.
  *
  * This routine handles an ELS work-queue completion event and construct
- * a pseudo response ELS IODBQ from the SLI4 ELS WCQE for the common
+ * a pseudo response ELS IOCBQ from the SLI4 ELS WCQE for the common
  * discovery engine to handle.
  *
  * Return: Pointer to the receive IOCBQ, NULL otherwise.
@@ -13940,7 +13939,7 @@ lpfc_sli4_els_preprocess_rspiocbq(struct lpfc_hba *phba,
 
        if (bf_get(lpfc_wcqe_c_xb, wcqe)) {
                spin_lock_irqsave(&phba->hbalock, iflags);
-               cmdiocbq->cmd_flag |= LPFC_EXCHANGE_BUSY;
+               irspiocbq->cmd_flag |= LPFC_EXCHANGE_BUSY;
                spin_unlock_irqrestore(&phba->hbalock, iflags);
        }
 
@@ -14799,7 +14798,7 @@ lpfc_sli4_fp_handle_fcp_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
                /* Pass the cmd_iocb and the wcqe to the upper layer */
                memcpy(&cmdiocbq->wcqe_cmpl, wcqe,
                       sizeof(struct lpfc_wcqe_complete));
-               (cmdiocbq->cmd_cmpl)(phba, cmdiocbq, cmdiocbq);
+               cmdiocbq->cmd_cmpl(phba, cmdiocbq, cmdiocbq);
        } else {
                lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
                                "0375 FCP cmdiocb not callback function "
@@ -18956,7 +18955,7 @@ lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *vport,
 
        /* Free iocb created in lpfc_prep_seq */
        list_for_each_entry_safe(curr_iocb, next_iocb,
-               &iocbq->list, list) {
+                                &iocbq->list, list) {
                list_del_init(&curr_iocb->list);
                lpfc_sli_release_iocbq(phba, curr_iocb);
        }
index 4fab79e..2ab6f7d 100644 (file)
@@ -20,7 +20,7 @@
  * included with this package.                                     *
  *******************************************************************/
 
-#define LPFC_DRIVER_VERSION "14.2.0.3"
+#define LPFC_DRIVER_VERSION "14.2.0.4"
 #define LPFC_DRIVER_NAME               "lpfc"
 
 /* Used for SLI 2/3 */
index 37d46ae..9a1ae52 100644 (file)
@@ -5369,6 +5369,7 @@ static int _base_assign_fw_reported_qd(struct MPT3SAS_ADAPTER *ioc)
        Mpi2ConfigReply_t mpi_reply;
        Mpi2SasIOUnitPage1_t *sas_iounit_pg1 = NULL;
        Mpi26PCIeIOUnitPage1_t pcie_iounit_pg1;
+       u16 depth;
        int sz;
        int rc = 0;
 
@@ -5380,7 +5381,7 @@ static int _base_assign_fw_reported_qd(struct MPT3SAS_ADAPTER *ioc)
                goto out;
        /* sas iounit page 1 */
        sz = offsetof(Mpi2SasIOUnitPage1_t, PhyData);
-       sas_iounit_pg1 = kzalloc(sz, GFP_KERNEL);
+       sas_iounit_pg1 = kzalloc(sizeof(Mpi2SasIOUnitPage1_t), GFP_KERNEL);
        if (!sas_iounit_pg1) {
                pr_err("%s: failure at %s:%d/%s()!\n",
                    ioc->name, __FILE__, __LINE__, __func__);
@@ -5393,16 +5394,16 @@ static int _base_assign_fw_reported_qd(struct MPT3SAS_ADAPTER *ioc)
                    ioc->name, __FILE__, __LINE__, __func__);
                goto out;
        }
-       ioc->max_wideport_qd =
-           (le16_to_cpu(sas_iounit_pg1->SASWideMaxQueueDepth)) ?
-           le16_to_cpu(sas_iounit_pg1->SASWideMaxQueueDepth) :
-           MPT3SAS_SAS_QUEUE_DEPTH;
-       ioc->max_narrowport_qd =
-           (le16_to_cpu(sas_iounit_pg1->SASNarrowMaxQueueDepth)) ?
-           le16_to_cpu(sas_iounit_pg1->SASNarrowMaxQueueDepth) :
-           MPT3SAS_SAS_QUEUE_DEPTH;
-       ioc->max_sata_qd = (sas_iounit_pg1->SATAMaxQDepth) ?
-           sas_iounit_pg1->SATAMaxQDepth : MPT3SAS_SATA_QUEUE_DEPTH;
+
+       depth = le16_to_cpu(sas_iounit_pg1->SASWideMaxQueueDepth);
+       ioc->max_wideport_qd = (depth ? depth : MPT3SAS_SAS_QUEUE_DEPTH);
+
+       depth = le16_to_cpu(sas_iounit_pg1->SASNarrowMaxQueueDepth);
+       ioc->max_narrowport_qd = (depth ? depth : MPT3SAS_SAS_QUEUE_DEPTH);
+
+       depth = sas_iounit_pg1->SATAMaxQDepth;
+       ioc->max_sata_qd = (depth ? depth : MPT3SAS_SATA_QUEUE_DEPTH);
+
        /* pcie iounit page 1 */
        rc = mpt3sas_config_get_pcie_iounit_pg1(ioc, &mpi_reply,
            &pcie_iounit_pg1, sizeof(Mpi26PCIeIOUnitPage1_t));
index bfce601..836ddc4 100644 (file)
@@ -4031,7 +4031,7 @@ pmcraid_register_interrupt_handler(struct pmcraid_instance *pinstance)
        return 0;
 
 out_unwind:
-       while (--i > 0)
+       while (--i >= 0)
                free_irq(pci_irq_vector(pdev, i), &pinstance->hrrq_vector[i]);
        pci_free_irq_vectors(pdev);
        return rc;
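
Both this pmcraid hunk and the ipr hunk near the top fix the same unwind off-by-one, in opposite directions, because the correct bound mirrors the index the request loop starts from: pmcraid requests vectors from index 0, while ipr's loop starts at 1 (vector 0 is requested elsewhere). A minimal sketch of the pattern, with hypothetical request_one()/free_one() helpers rather than driver code:

/* Sketch: the unwind bound must mirror the request loop's start index. */
int request_one(int i);         /* hypothetical acquire helper */
void free_one(int i);           /* hypothetical release helper */

int acquire_from(int start, int n)
{
        int i, rc;

        for (i = start; i < n; i++) {
                rc = request_one(i);
                if (rc) {
                        /* Free only what this loop acquired:
                         * start == 0 needs --i >= 0 (pmcraid),
                         * start == 1 needs --i > 0  (ipr).
                         */
                        while (--i >= start)
                                free_one(i);
                        return rc;
                }
        }
        return 0;
}
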
index 895b56c..a1a2ac0 100644 (file)
@@ -3072,7 +3072,7 @@ static void sd_read_cpr(struct scsi_disk *sdkp)
                goto out;
 
        /* We must have at least a 64B header and one 32B range descriptor */
-       vpd_len = get_unaligned_be16(&buffer[2]) + 3;
+       vpd_len = get_unaligned_be16(&buffer[2]) + 4;
        if (vpd_len > buf_len || vpd_len < 64 + 32 || (vpd_len & 31)) {
                sd_printk(KERN_ERR, sdkp,
                          "Invalid Concurrent Positioning Ranges VPD page\n");
index 51a82f7..9d16cf9 100644 (file)
@@ -331,8 +331,8 @@ struct PVSCSIRingReqDesc {
        u8      tag;
        u8      bus;
        u8      target;
-       u8      vcpuHint;
-       u8      unused[59];
+       u16     vcpuHint;
+       u8      unused[58];
 } __packed;
 
 /*
index b7a9554..1b6d46b 100644 (file)
@@ -107,7 +107,7 @@ struct mlx5_vdpa_virtqueue {
 
        /* Resources for implementing the notification channel from the device
         * to the driver. fwqp is the firmware end of an RC connection; the
-        * other end is vqqp used by the driver. cq is is where completions are
+        * other end is vqqp used by the driver. cq is where completions are
         * reported.
         */
        struct mlx5_vdpa_cq cq;
@@ -1814,12 +1814,13 @@ static virtio_net_ctrl_ack handle_ctrl_vlan(struct mlx5_vdpa_dev *mvdev, u8 cmd)
 
                id = mlx5vdpa16_to_cpu(mvdev, vlan);
                mac_vlan_del(ndev, ndev->config.mac, id, true);
+               status = VIRTIO_NET_OK;
                break;
        default:
-       break;
-}
+               break;
+       }
 
-return status;
+       return status;
 }
 
 static void mlx5_cvq_kick_handler(struct work_struct *work)
index d503848..776ad74 100644 (file)
@@ -1345,9 +1345,9 @@ static int vduse_create_dev(struct vduse_dev_config *config,
 
        dev->minor = ret;
        dev->msg_timeout = VDUSE_MSG_DEFAULT_TIMEOUT;
-       dev->dev = device_create(vduse_class, NULL,
-                                MKDEV(MAJOR(vduse_major), dev->minor),
-                                dev, "%s", config->name);
+       dev->dev = device_create_with_groups(vduse_class, NULL,
+                               MKDEV(MAJOR(vduse_major), dev->minor),
+                               dev, vduse_dev_groups, "%s", config->name);
        if (IS_ERR(dev->dev)) {
                ret = PTR_ERR(dev->dev);
                goto err_dev;
@@ -1596,7 +1596,6 @@ static int vduse_init(void)
                return PTR_ERR(vduse_class);
 
        vduse_class->devnode = vduse_devnode;
-       vduse_class->dev_groups = vduse_dev_groups;
 
        ret = alloc_chrdev_region(&vduse_major, 0, VDUSE_DEV_MAX, "vduse");
        if (ret)
index 935a1d0..5ad2596 100644 (file)
@@ -499,6 +499,8 @@ static long vhost_vdpa_vring_ioctl(struct vhost_vdpa *v, unsigned int cmd,
                ops->set_vq_ready(vdpa, idx, s.num);
                return 0;
        case VHOST_VDPA_GET_VRING_GROUP:
+               if (!ops->get_vq_group)
+                       return -EOPNOTSUPP;
                s.index = idx;
                s.num = ops->get_vq_group(vdpa, idx);
                if (s.num >= vdpa->ngroups)
index 14e2043..eab55ac 100644 (file)
@@ -292,7 +292,7 @@ __vringh_iov(struct vringh *vrh, u16 i,
             int (*copy)(const struct vringh *vrh,
                         void *dst, const void *src, size_t len))
 {
-       int err, count = 0, up_next, desc_max;
+       int err, count = 0, indirect_count = 0, up_next, desc_max;
        struct vring_desc desc, *descs;
        struct vringh_range range = { -1ULL, 0 }, slowrange;
        bool slow = false;
@@ -349,7 +349,12 @@ __vringh_iov(struct vringh *vrh, u16 i,
                        continue;
                }
 
-               if (count++ == vrh->vring.num) {
+               if (up_next == -1)
+                       count++;
+               else
+                       indirect_count++;
+
+               if (count > vrh->vring.num || indirect_count > desc_max) {
                        vringh_bad("Descriptor loop in %p", descs);
                        err = -ELOOP;
                        goto fail;
@@ -411,6 +416,7 @@ __vringh_iov(struct vringh *vrh, u16 i,
                                i = return_from_indirect(vrh, &up_next,
                                                         &descs, &desc_max);
                                slow = false;
+                               indirect_count = 0;
                        } else
                                break;
                }
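
The two-counter change bounds each descriptor space separately: direct descriptors chained through the ring may not exceed the ring size, descriptors inside one indirect table may not exceed that table's entry count (desc_max), and the indirect counter resets when the walk returns from a table. A minimal sketch of that guard in a simplified chain walker (toy types, not the kernel's iov machinery):

/* Sketch: separate loop caps for direct and indirect descriptor chains. */
struct toy_desc { unsigned int next; int has_next; };

int walk_chain(const struct toy_desc *descs, unsigned int i, int in_indirect,
               unsigned int ring_num, unsigned int desc_max)
{
        unsigned int count = 0, indirect_count = 0;

        for (;;) {
                if (in_indirect)
                        indirect_count++;       /* capped by table size */
                else
                        count++;                /* capped by ring size */

                /* A looping chain would otherwise spin forever. */
                if (count > ring_num || indirect_count > desc_max)
                        return -1;              /* -ELOOP in the kernel */

                if (!descs[i].has_next)
                        return 0;
                i = descs[i].next;
        }
}
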
index b5adf6a..a6dc8b5 100644 (file)
@@ -6,12 +6,6 @@ config VIRTIO
          bus, such as CONFIG_VIRTIO_PCI, CONFIG_VIRTIO_MMIO, CONFIG_RPMSG
          or CONFIG_S390_GUEST.
 
-config ARCH_HAS_RESTRICTED_VIRTIO_MEMORY_ACCESS
-       bool
-       help
-         This option is selected if the architecture may need to enforce
-         VIRTIO_F_ACCESS_PLATFORM
-
 config VIRTIO_PCI_LIB
        tristate
        help
index ef04a96..6bace84 100644 (file)
@@ -5,6 +5,7 @@
 #include <linux/module.h>
 #include <linux/idr.h>
 #include <linux/of.h>
+#include <linux/platform-feature.h>
 #include <uapi/linux/virtio_ids.h>
 
 /* Unique numbering for virtio devices. */
@@ -170,12 +171,10 @@ EXPORT_SYMBOL_GPL(virtio_add_status);
 static int virtio_features_ok(struct virtio_device *dev)
 {
        unsigned int status;
-       int ret;
 
        might_sleep();
 
-       ret = arch_has_restricted_virtio_memory_access();
-       if (ret) {
+       if (platform_has(PLATFORM_VIRTIO_RESTRICTED_MEM_ACCESS)) {
                if (!virtio_has_feature(dev, VIRTIO_F_VERSION_1)) {
                        dev_warn(&dev->dev,
                                 "device must provide VIRTIO_F_VERSION_1\n");
index f9a36bc..c9bec38 100644 (file)
@@ -255,7 +255,7 @@ static void vm_set_status(struct virtio_device *vdev, u8 status)
 
        /*
         * Per memory-barriers.txt, wmb() is not needed to guarantee
-        * that the the cache coherent memory writes have completed
+        * that the cache coherent memory writes have completed
         * before writing to the MMIO region.
         */
        writel(status, vm_dev->base + VIRTIO_MMIO_STATUS);
@@ -701,6 +701,7 @@ static int vm_cmdline_set(const char *device,
        if (!vm_cmdline_parent_registered) {
                err = device_register(&vm_cmdline_parent);
                if (err) {
+                       put_device(&vm_cmdline_parent);
                        pr_err("Failed to register parent device!\n");
                        return err;
                }
index a0fa14f..b790f30 100644 (file)
@@ -469,7 +469,7 @@ void vp_modern_set_status(struct virtio_pci_modern_device *mdev,
 
        /*
         * Per memory-barriers.txt, wmb() is not needed to guarantee
-        * that the the cache coherent memory writes have completed
+        * that the cache coherent memory writes have completed
         * before writing to the MMIO region.
         */
        vp_iowrite8(status, &cfg->device_status);
index 120d32f..bfd5f4f 100644 (file)
@@ -335,4 +335,24 @@ config XEN_UNPOPULATED_ALLOC
          having to balloon out RAM regions in order to obtain physical memory
          space to create such mappings.
 
+config XEN_GRANT_DMA_IOMMU
+       bool
+       select IOMMU_API
+
+config XEN_GRANT_DMA_OPS
+       bool
+       select DMA_OPS
+
+config XEN_VIRTIO
+       bool "Xen virtio support"
+       depends on VIRTIO
+       select XEN_GRANT_DMA_OPS
+       select XEN_GRANT_DMA_IOMMU if OF
+       help
+         Enable virtio support for running as Xen guest. Depending on the
+         guest type this will require special support on the backend side
+         (qemu or kernel, depending on the virtio device types used).
+
+         If in doubt, say n.
+
 endmenu
index 5aae66e..c0503f1 100644 (file)
@@ -39,3 +39,5 @@ xen-gntalloc-y                                := gntalloc.o
 xen-privcmd-y                          := privcmd.o privcmd-buf.o
 obj-$(CONFIG_XEN_FRONT_PGDIR_SHBUF)    += xen-front-pgdir-shbuf.o
 obj-$(CONFIG_XEN_UNPOPULATED_ALLOC)    += unpopulated-alloc.o
+obj-$(CONFIG_XEN_GRANT_DMA_OPS)                += grant-dma-ops.o
+obj-$(CONFIG_XEN_GRANT_DMA_IOMMU)      += grant-dma-iommu.o
diff --git a/drivers/xen/grant-dma-iommu.c b/drivers/xen/grant-dma-iommu.c
new file mode 100644 (file)
index 0000000..16b8bc0
--- /dev/null
@@ -0,0 +1,78 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Stub IOMMU driver which does nothing.
+ * The main purpose of it being present is to reuse generic IOMMU device tree
+ * bindings by Xen grant DMA-mapping layer.
+ *
+ * Copyright (C) 2022 EPAM Systems Inc.
+ */
+
+#include <linux/iommu.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+
+struct grant_dma_iommu_device {
+       struct device *dev;
+       struct iommu_device iommu;
+};
+
+/* Nothing is really needed here */
+static const struct iommu_ops grant_dma_iommu_ops;
+
+static const struct of_device_id grant_dma_iommu_of_match[] = {
+       { .compatible = "xen,grant-dma" },
+       { },
+};
+
+static int grant_dma_iommu_probe(struct platform_device *pdev)
+{
+       struct grant_dma_iommu_device *mmu;
+       int ret;
+
+       mmu = devm_kzalloc(&pdev->dev, sizeof(*mmu), GFP_KERNEL);
+       if (!mmu)
+               return -ENOMEM;
+
+       mmu->dev = &pdev->dev;
+
+       ret = iommu_device_register(&mmu->iommu, &grant_dma_iommu_ops, &pdev->dev);
+       if (ret)
+               return ret;
+
+       platform_set_drvdata(pdev, mmu);
+
+       return 0;
+}
+
+static int grant_dma_iommu_remove(struct platform_device *pdev)
+{
+       struct grant_dma_iommu_device *mmu = platform_get_drvdata(pdev);
+
+       platform_set_drvdata(pdev, NULL);
+       iommu_device_unregister(&mmu->iommu);
+
+       return 0;
+}
+
+static struct platform_driver grant_dma_iommu_driver = {
+       .driver = {
+               .name = "grant-dma-iommu",
+               .of_match_table = grant_dma_iommu_of_match,
+       },
+       .probe = grant_dma_iommu_probe,
+       .remove = grant_dma_iommu_remove,
+};
+
+static int __init grant_dma_iommu_init(void)
+{
+       struct device_node *iommu_np;
+
+       iommu_np = of_find_matching_node(NULL, grant_dma_iommu_of_match);
+       if (!iommu_np)
+               return 0;
+
+       of_node_put(iommu_np);
+
+       return platform_driver_register(&grant_dma_iommu_driver);
+}
+subsys_initcall(grant_dma_iommu_init);
diff --git a/drivers/xen/grant-dma-ops.c b/drivers/xen/grant-dma-ops.c
new file mode 100644 (file)
index 0000000..fc01424
--- /dev/null
@@ -0,0 +1,346 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Xen grant DMA-mapping layer - contains special DMA-mapping routines
+ * for providing grant references as DMA addresses to be used by frontends
+ * (e.g. virtio) in Xen guests
+ *
+ * Copyright (c) 2021, Juergen Gross <jgross@suse.com>
+ */
+
+#include <linux/module.h>
+#include <linux/dma-map-ops.h>
+#include <linux/of.h>
+#include <linux/pfn.h>
+#include <linux/xarray.h>
+#include <xen/xen.h>
+#include <xen/xen-ops.h>
+#include <xen/grant_table.h>
+
+struct xen_grant_dma_data {
+       /* The ID of backend domain */
+       domid_t backend_domid;
+       /* Is device behaving sane? */
+       bool broken;
+};
+
+static DEFINE_XARRAY(xen_grant_dma_devices);
+
+#define XEN_GRANT_DMA_ADDR_OFF (1ULL << 63)
+
+static inline dma_addr_t grant_to_dma(grant_ref_t grant)
+{
+       return XEN_GRANT_DMA_ADDR_OFF | ((dma_addr_t)grant << PAGE_SHIFT);
+}
+
+static inline grant_ref_t dma_to_grant(dma_addr_t dma)
+{
+       return (grant_ref_t)((dma & ~XEN_GRANT_DMA_ADDR_OFF) >> PAGE_SHIFT);
+}
+
+static struct xen_grant_dma_data *find_xen_grant_dma_data(struct device *dev)
+{
+       struct xen_grant_dma_data *data;
+
+       xa_lock(&xen_grant_dma_devices);
+       data = xa_load(&xen_grant_dma_devices, (unsigned long)dev);
+       xa_unlock(&xen_grant_dma_devices);
+
+       return data;
+}
+
+/*
+ * DMA ops for Xen frontends (e.g. virtio).
+ *
+ * Used to act as a kind of software IOMMU for Xen guests by using grants as
+ * DMA addresses.
+ * Such a DMA address is formed by using the grant reference as a frame
+ * number and setting the highest address bit (this bit is for the backend
+ * to be able to distinguish it from e.g. a mmio address).
+ */
+static void *xen_grant_dma_alloc(struct device *dev, size_t size,
+                                dma_addr_t *dma_handle, gfp_t gfp,
+                                unsigned long attrs)
+{
+       struct xen_grant_dma_data *data;
+       unsigned int i, n_pages = PFN_UP(size);
+       unsigned long pfn;
+       grant_ref_t grant;
+       void *ret;
+
+       data = find_xen_grant_dma_data(dev);
+       if (!data)
+               return NULL;
+
+       if (unlikely(data->broken))
+               return NULL;
+
+       ret = alloc_pages_exact(n_pages * PAGE_SIZE, gfp);
+       if (!ret)
+               return NULL;
+
+       pfn = virt_to_pfn(ret);
+
+       if (gnttab_alloc_grant_reference_seq(n_pages, &grant)) {
+               free_pages_exact(ret, n_pages * PAGE_SIZE);
+               return NULL;
+       }
+
+       for (i = 0; i < n_pages; i++) {
+               gnttab_grant_foreign_access_ref(grant + i, data->backend_domid,
+                               pfn_to_gfn(pfn + i), 0);
+       }
+
+       *dma_handle = grant_to_dma(grant);
+
+       return ret;
+}
+
+static void xen_grant_dma_free(struct device *dev, size_t size, void *vaddr,
+                              dma_addr_t dma_handle, unsigned long attrs)
+{
+       struct xen_grant_dma_data *data;
+       unsigned int i, n_pages = PFN_UP(size);
+       grant_ref_t grant;
+
+       data = find_xen_grant_dma_data(dev);
+       if (!data)
+               return;
+
+       if (unlikely(data->broken))
+               return;
+
+       grant = dma_to_grant(dma_handle);
+
+       for (i = 0; i < n_pages; i++) {
+               if (unlikely(!gnttab_end_foreign_access_ref(grant + i))) {
+                       dev_alert(dev, "Grant still in use by backend domain, disabled for further use\n");
+                       data->broken = true;
+                       return;
+               }
+       }
+
+       gnttab_free_grant_reference_seq(grant, n_pages);
+
+       free_pages_exact(vaddr, n_pages * PAGE_SIZE);
+}
+
+static struct page *xen_grant_dma_alloc_pages(struct device *dev, size_t size,
+                                             dma_addr_t *dma_handle,
+                                             enum dma_data_direction dir,
+                                             gfp_t gfp)
+{
+       void *vaddr;
+
+       vaddr = xen_grant_dma_alloc(dev, size, dma_handle, gfp, 0);
+       if (!vaddr)
+               return NULL;
+
+       return virt_to_page(vaddr);
+}
+
+static void xen_grant_dma_free_pages(struct device *dev, size_t size,
+                                    struct page *vaddr, dma_addr_t dma_handle,
+                                    enum dma_data_direction dir)
+{
+       xen_grant_dma_free(dev, size, page_to_virt(vaddr), dma_handle, 0);
+}
+
+static dma_addr_t xen_grant_dma_map_page(struct device *dev, struct page *page,
+                                        unsigned long offset, size_t size,
+                                        enum dma_data_direction dir,
+                                        unsigned long attrs)
+{
+       struct xen_grant_dma_data *data;
+       unsigned int i, n_pages = PFN_UP(size);
+       grant_ref_t grant;
+       dma_addr_t dma_handle;
+
+       if (WARN_ON(dir == DMA_NONE))
+               return DMA_MAPPING_ERROR;
+
+       data = find_xen_grant_dma_data(dev);
+       if (!data)
+               return DMA_MAPPING_ERROR;
+
+       if (unlikely(data->broken))
+               return DMA_MAPPING_ERROR;
+
+       if (gnttab_alloc_grant_reference_seq(n_pages, &grant))
+               return DMA_MAPPING_ERROR;
+
+       for (i = 0; i < n_pages; i++) {
+               gnttab_grant_foreign_access_ref(grant + i, data->backend_domid,
+                               xen_page_to_gfn(page) + i, dir == DMA_TO_DEVICE);
+       }
+
+       dma_handle = grant_to_dma(grant) + offset;
+
+       return dma_handle;
+}
+
+static void xen_grant_dma_unmap_page(struct device *dev, dma_addr_t dma_handle,
+                                    size_t size, enum dma_data_direction dir,
+                                    unsigned long attrs)
+{
+       struct xen_grant_dma_data *data;
+       unsigned int i, n_pages = PFN_UP(size);
+       grant_ref_t grant;
+
+       if (WARN_ON(dir == DMA_NONE))
+               return;
+
+       data = find_xen_grant_dma_data(dev);
+       if (!data)
+               return;
+
+       if (unlikely(data->broken))
+               return;
+
+       grant = dma_to_grant(dma_handle);
+
+       for (i = 0; i < n_pages; i++) {
+               if (unlikely(!gnttab_end_foreign_access_ref(grant + i))) {
+                       dev_alert(dev, "Grant still in use by backend domain, disabled for further use\n");
+                       data->broken = true;
+                       return;
+               }
+       }
+
+       gnttab_free_grant_reference_seq(grant, n_pages);
+}
+
+static void xen_grant_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
+                                  int nents, enum dma_data_direction dir,
+                                  unsigned long attrs)
+{
+       struct scatterlist *s;
+       unsigned int i;
+
+       if (WARN_ON(dir == DMA_NONE))
+               return;
+
+       for_each_sg(sg, s, nents, i)
+               xen_grant_dma_unmap_page(dev, s->dma_address, sg_dma_len(s), dir,
+                               attrs);
+}
+
+static int xen_grant_dma_map_sg(struct device *dev, struct scatterlist *sg,
+                               int nents, enum dma_data_direction dir,
+                               unsigned long attrs)
+{
+       struct scatterlist *s;
+       unsigned int i;
+
+       if (WARN_ON(dir == DMA_NONE))
+               return -EINVAL;
+
+       for_each_sg(sg, s, nents, i) {
+               s->dma_address = xen_grant_dma_map_page(dev, sg_page(s), s->offset,
+                               s->length, dir, attrs);
+               if (s->dma_address == DMA_MAPPING_ERROR)
+                       goto out;
+
+               sg_dma_len(s) = s->length;
+       }
+
+       return nents;
+
+out:
+       xen_grant_dma_unmap_sg(dev, sg, i, dir, attrs | DMA_ATTR_SKIP_CPU_SYNC);
+       sg_dma_len(sg) = 0;
+
+       return -EIO;
+}
+
+static int xen_grant_dma_supported(struct device *dev, u64 mask)
+{
+       return mask == DMA_BIT_MASK(64);
+}
+
+static const struct dma_map_ops xen_grant_dma_ops = {
+       .alloc = xen_grant_dma_alloc,
+       .free = xen_grant_dma_free,
+       .alloc_pages = xen_grant_dma_alloc_pages,
+       .free_pages = xen_grant_dma_free_pages,
+       .mmap = dma_common_mmap,
+       .get_sgtable = dma_common_get_sgtable,
+       .map_page = xen_grant_dma_map_page,
+       .unmap_page = xen_grant_dma_unmap_page,
+       .map_sg = xen_grant_dma_map_sg,
+       .unmap_sg = xen_grant_dma_unmap_sg,
+       .dma_supported = xen_grant_dma_supported,
+};
+
+bool xen_is_grant_dma_device(struct device *dev)
+{
+       struct device_node *iommu_np;
+       bool has_iommu;
+
+       /* XXX Handle only DT devices for now */
+       if (!dev->of_node)
+               return false;
+
+       iommu_np = of_parse_phandle(dev->of_node, "iommus", 0);
+       has_iommu = iommu_np && of_device_is_compatible(iommu_np, "xen,grant-dma");
+       of_node_put(iommu_np);
+
+       return has_iommu;
+}
+
+void xen_grant_setup_dma_ops(struct device *dev)
+{
+       struct xen_grant_dma_data *data;
+       struct of_phandle_args iommu_spec;
+
+       data = find_xen_grant_dma_data(dev);
+       if (data) {
+               dev_err(dev, "Xen grant DMA data is already created\n");
+               return;
+       }
+
+       /* XXX ACPI device unsupported for now */
+       if (!dev->of_node)
+               goto err;
+
+       if (of_parse_phandle_with_args(dev->of_node, "iommus", "#iommu-cells",
+                       0, &iommu_spec)) {
+               dev_err(dev, "Cannot parse iommus property\n");
+               goto err;
+       }
+
+       if (!of_device_is_compatible(iommu_spec.np, "xen,grant-dma") ||
+                       iommu_spec.args_count != 1) {
+               dev_err(dev, "Incompatible IOMMU node\n");
+               of_node_put(iommu_spec.np);
+               goto err;
+       }
+
+       of_node_put(iommu_spec.np);
+
+       data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+       if (!data)
+               goto err;
+
+       /*
+        * The endpoint ID here means the ID of the domain where the corresponding
+        * backend is running
+        */
+       data->backend_domid = iommu_spec.args[0];
+
+       if (xa_err(xa_store(&xen_grant_dma_devices, (unsigned long)dev, data,
+                       GFP_KERNEL))) {
+               dev_err(dev, "Cannot store Xen grant DMA data\n");
+               goto err;
+       }
+
+       dev->dma_ops = &xen_grant_dma_ops;
+
+       return;
+
+err:
+       dev_err(dev, "Cannot set up Xen grant DMA ops, retain platform DMA ops\n");
+}
+
+MODULE_DESCRIPTION("Xen grant DMA-mapping layer");
+MODULE_AUTHOR("Juergen Gross <jgross@suse.com>");
+MODULE_LICENSE("GPL");
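
The heart of grant-dma-ops.c is the address encoding: a grant reference serves as the page frame number of a synthetic DMA address with bit 63 set, so the backend can distinguish grant addresses from real machine addresses, while intra-page offsets ride in the low bits. A standalone round-trip check of that encoding (4 KiB pages assumed; constants mirror those in the file above):

/* Sketch: grant reference <-> DMA address encoding round trip. */
#include <assert.h>
#include <stdint.h>

#define PAGE_SHIFT 12                           /* assuming 4 KiB pages */
#define XEN_GRANT_DMA_ADDR_OFF (1ULL << 63)

typedef uint32_t grant_ref_t;
typedef uint64_t dma_addr_t;

static dma_addr_t grant_to_dma(grant_ref_t grant)
{
        return XEN_GRANT_DMA_ADDR_OFF | ((dma_addr_t)grant << PAGE_SHIFT);
}

static grant_ref_t dma_to_grant(dma_addr_t dma)
{
        return (grant_ref_t)((dma & ~XEN_GRANT_DMA_ADDR_OFF) >> PAGE_SHIFT);
}

int main(void)
{
        grant_ref_t g = 0x1234;
        dma_addr_t d = grant_to_dma(g) + 0x80;  /* page offset, as in map_page */

        assert(d >> 63);                        /* marker bit set */
        assert(dma_to_grant(d) == g);           /* offset bits shift away */
        return 0;
}
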
index 7a18292..738029d 100644 (file)
@@ -33,6 +33,7 @@
 
 #define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt
 
+#include <linux/bitmap.h>
 #include <linux/memblock.h>
 #include <linux/sched.h>
 #include <linux/mm.h>
 
 static grant_ref_t **gnttab_list;
 static unsigned int nr_grant_frames;
+
+/*
+ * Handling of free grants:
+ *
+ * Free grants are in a simple list anchored in gnttab_free_head. They are
+ * linked by grant ref, the last element contains GNTTAB_LIST_END. The number
+ * of free entries is stored in gnttab_free_count.
+ * Additionally there is a bitmap of free entries anchored in
+ * gnttab_free_bitmap. This is being used for simplifying allocation of
+ * multiple consecutive grants, which is needed e.g. for support of virtio.
+ * gnttab_last_free is used to add free entries of new frames at the end of
+ * the free list.
+ * gnttab_free_tail_ptr specifies the variable which references the start
+ * of consecutive free grants ending with gnttab_last_free. This pointer is
+ * updated in a rather defensive way, in order to avoid performance hits in
+ * hot paths.
+ * All those variables are protected by gnttab_list_lock.
+ */
 static int gnttab_free_count;
-static grant_ref_t gnttab_free_head;
+static unsigned int gnttab_size;
+static grant_ref_t gnttab_free_head = GNTTAB_LIST_END;
+static grant_ref_t gnttab_last_free = GNTTAB_LIST_END;
+static grant_ref_t *gnttab_free_tail_ptr;
+static unsigned long *gnttab_free_bitmap;
 static DEFINE_SPINLOCK(gnttab_list_lock);
+
 struct grant_frames xen_auto_xlat_grant_frames;
 static unsigned int xen_gnttab_version;
 module_param_named(version, xen_gnttab_version, uint, 0);
@@ -168,16 +192,116 @@ static int get_free_entries(unsigned count)
 
        ref = head = gnttab_free_head;
        gnttab_free_count -= count;
-       while (count-- > 1)
-               head = gnttab_entry(head);
+       while (count--) {
+               bitmap_clear(gnttab_free_bitmap, head, 1);
+               if (gnttab_free_tail_ptr == __gnttab_entry(head))
+                       gnttab_free_tail_ptr = &gnttab_free_head;
+               if (count)
+                       head = gnttab_entry(head);
+       }
        gnttab_free_head = gnttab_entry(head);
        gnttab_entry(head) = GNTTAB_LIST_END;
 
+       if (!gnttab_free_count) {
+               gnttab_last_free = GNTTAB_LIST_END;
+               gnttab_free_tail_ptr = NULL;
+       }
+
        spin_unlock_irqrestore(&gnttab_list_lock, flags);
 
        return ref;
 }
 
+static int get_seq_entry_count(void)
+{
+       if (gnttab_last_free == GNTTAB_LIST_END || !gnttab_free_tail_ptr ||
+           *gnttab_free_tail_ptr == GNTTAB_LIST_END)
+               return 0;
+
+       return gnttab_last_free - *gnttab_free_tail_ptr + 1;
+}
+
+/* Rebuilds the free grant list and tries to find count consecutive entries. */
+static int get_free_seq(unsigned int count)
+{
+       int ret = -ENOSPC;
+       unsigned int from, to;
+       grant_ref_t *last;
+
+       gnttab_free_tail_ptr = &gnttab_free_head;
+       last = &gnttab_free_head;
+
+       for (from = find_first_bit(gnttab_free_bitmap, gnttab_size);
+            from < gnttab_size;
+            from = find_next_bit(gnttab_free_bitmap, gnttab_size, to + 1)) {
+               to = find_next_zero_bit(gnttab_free_bitmap, gnttab_size,
+                                       from + 1);
+               if (ret < 0 && to - from >= count) {
+                       ret = from;
+                       bitmap_clear(gnttab_free_bitmap, ret, count);
+                       from += count;
+                       gnttab_free_count -= count;
+                       if (from == to)
+                               continue;
+               }
+
+               /*
+                * Recreate the free list in order to have it properly sorted.
+                * This is needed to make sure that the free tail has the maximum
+                * possible size.
+                */
+               while (from < to) {
+                       *last = from;
+                       last = __gnttab_entry(from);
+                       gnttab_last_free = from;
+                       from++;
+               }
+               if (to < gnttab_size)
+                       gnttab_free_tail_ptr = __gnttab_entry(to - 1);
+       }
+
+       *last = GNTTAB_LIST_END;
+       if (gnttab_last_free != gnttab_size - 1)
+               gnttab_free_tail_ptr = NULL;
+
+       return ret;
+}
+
+static int get_free_entries_seq(unsigned int count)
+{
+       unsigned long flags;
+       int ret = 0;
+
+       spin_lock_irqsave(&gnttab_list_lock, flags);
+
+       if (gnttab_free_count < count) {
+               ret = gnttab_expand(count - gnttab_free_count);
+               if (ret < 0)
+                       goto out;
+       }
+
+       if (get_seq_entry_count() < count) {
+               ret = get_free_seq(count);
+               if (ret >= 0)
+                       goto out;
+               ret = gnttab_expand(count - get_seq_entry_count());
+               if (ret < 0)
+                       goto out;
+       }
+
+       ret = *gnttab_free_tail_ptr;
+       *gnttab_free_tail_ptr = gnttab_entry(ret + count - 1);
+       gnttab_free_count -= count;
+       if (!gnttab_free_count)
+               gnttab_free_tail_ptr = NULL;
+       bitmap_clear(gnttab_free_bitmap, ret, count);
+
+ out:
+       spin_unlock_irqrestore(&gnttab_list_lock, flags);
+
+       return ret;
+}
+
 static void do_free_callbacks(void)
 {
        struct gnttab_free_callback *callback, *next;
@@ -204,21 +328,51 @@ static inline void check_free_callbacks(void)
                do_free_callbacks();
 }
 
-static void put_free_entry(grant_ref_t ref)
+static void put_free_entry_locked(grant_ref_t ref)
 {
-       unsigned long flags;
-
        if (unlikely(ref < GNTTAB_NR_RESERVED_ENTRIES))
                return;
 
-       spin_lock_irqsave(&gnttab_list_lock, flags);
        gnttab_entry(ref) = gnttab_free_head;
        gnttab_free_head = ref;
+       if (!gnttab_free_count)
+               gnttab_last_free = ref;
+       if (gnttab_free_tail_ptr == &gnttab_free_head)
+               gnttab_free_tail_ptr = __gnttab_entry(ref);
        gnttab_free_count++;
+       bitmap_set(gnttab_free_bitmap, ref, 1);
+}
+
+static void put_free_entry(grant_ref_t ref)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&gnttab_list_lock, flags);
+       put_free_entry_locked(ref);
        check_free_callbacks();
        spin_unlock_irqrestore(&gnttab_list_lock, flags);
 }
 
+static void gnttab_set_free(unsigned int start, unsigned int n)
+{
+       unsigned int i;
+
+       for (i = start; i < start + n - 1; i++)
+               gnttab_entry(i) = i + 1;
+
+       gnttab_entry(i) = GNTTAB_LIST_END;
+       if (!gnttab_free_count) {
+               gnttab_free_head = start;
+               gnttab_free_tail_ptr = &gnttab_free_head;
+       } else {
+               gnttab_entry(gnttab_last_free) = start;
+       }
+       gnttab_free_count += n;
+       gnttab_last_free = i;
+
+       bitmap_set(gnttab_free_bitmap, start, n);
+}
+
 /*
  * Following applies to gnttab_update_entry_v1 and gnttab_update_entry_v2.
  * Introducing a valid entry into the grant table:
@@ -450,23 +604,31 @@ void gnttab_free_grant_references(grant_ref_t head)
 {
        grant_ref_t ref;
        unsigned long flags;
-       int count = 1;
-       if (head == GNTTAB_LIST_END)
-               return;
+
        spin_lock_irqsave(&gnttab_list_lock, flags);
-       ref = head;
-       while (gnttab_entry(ref) != GNTTAB_LIST_END) {
-               ref = gnttab_entry(ref);
-               count++;
+       while (head != GNTTAB_LIST_END) {
+               ref = gnttab_entry(head);
+               put_free_entry_locked(head);
+               head = ref;
        }
-       gnttab_entry(ref) = gnttab_free_head;
-       gnttab_free_head = head;
-       gnttab_free_count += count;
        check_free_callbacks();
        spin_unlock_irqrestore(&gnttab_list_lock, flags);
 }
 EXPORT_SYMBOL_GPL(gnttab_free_grant_references);
 
+void gnttab_free_grant_reference_seq(grant_ref_t head, unsigned int count)
+{
+       unsigned long flags;
+       unsigned int i;
+
+       spin_lock_irqsave(&gnttab_list_lock, flags);
+       for (i = count; i > 0; i--)
+               put_free_entry_locked(head + i - 1);
+       check_free_callbacks();
+       spin_unlock_irqrestore(&gnttab_list_lock, flags);
+}
+EXPORT_SYMBOL_GPL(gnttab_free_grant_reference_seq);
+
 int gnttab_alloc_grant_references(u16 count, grant_ref_t *head)
 {
        int h = get_free_entries(count);
@@ -480,6 +642,24 @@ int gnttab_alloc_grant_references(u16 count, grant_ref_t *head)
 }
 EXPORT_SYMBOL_GPL(gnttab_alloc_grant_references);
 
+int gnttab_alloc_grant_reference_seq(unsigned int count, grant_ref_t *first)
+{
+       int h;
+
+       if (count == 1)
+               h = get_free_entries(1);
+       else
+               h = get_free_entries_seq(count);
+
+       if (h < 0)
+               return -ENOSPC;
+
+       *first = h;
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(gnttab_alloc_grant_reference_seq);
+
 int gnttab_empty_grant_references(const grant_ref_t *private_head)
 {
        return (*private_head == GNTTAB_LIST_END);
@@ -572,16 +752,13 @@ static int grow_gnttab_list(unsigned int more_frames)
                        goto grow_nomem;
        }
 
+       gnttab_set_free(gnttab_size, extra_entries);
 
-       for (i = grefs_per_frame * nr_grant_frames;
-            i < grefs_per_frame * new_nr_grant_frames - 1; i++)
-               gnttab_entry(i) = i + 1;
-
-       gnttab_entry(i) = gnttab_free_head;
-       gnttab_free_head = grefs_per_frame * nr_grant_frames;
-       gnttab_free_count += extra_entries;
+       if (!gnttab_free_tail_ptr)
+               gnttab_free_tail_ptr = __gnttab_entry(gnttab_size);
 
        nr_grant_frames = new_nr_grant_frames;
+       gnttab_size += extra_entries;
 
        check_free_callbacks();
 
@@ -1424,20 +1601,20 @@ static int gnttab_expand(unsigned int req_entries)
 int gnttab_init(void)
 {
        int i;
-       unsigned long max_nr_grant_frames;
+       unsigned long max_nr_grant_frames, max_nr_grefs;
        unsigned int max_nr_glist_frames, nr_glist_frames;
-       unsigned int nr_init_grefs;
        int ret;
 
        gnttab_request_version();
        max_nr_grant_frames = gnttab_max_grant_frames();
+       max_nr_grefs = max_nr_grant_frames *
+                       gnttab_interface->grefs_per_grant_frame;
        nr_grant_frames = 1;
 
        /* Determine the maximum number of frames required for the
         * grant reference free list on the current hypervisor.
         */
-       max_nr_glist_frames = (max_nr_grant_frames *
-                              gnttab_interface->grefs_per_grant_frame / RPP);
+       max_nr_glist_frames = max_nr_grefs / RPP;
 
        gnttab_list = kmalloc_array(max_nr_glist_frames,
                                    sizeof(grant_ref_t *),
@@ -1454,6 +1631,12 @@ int gnttab_init(void)
                }
        }
 
+       gnttab_free_bitmap = bitmap_zalloc(max_nr_grefs, GFP_KERNEL);
+       if (!gnttab_free_bitmap) {
+               ret = -ENOMEM;
+               goto ini_nomem;
+       }
+
        ret = arch_gnttab_init(max_nr_grant_frames,
                               nr_status_frames(max_nr_grant_frames));
        if (ret < 0)
@@ -1464,15 +1647,10 @@ int gnttab_init(void)
                goto ini_nomem;
        }
 
-       nr_init_grefs = nr_grant_frames *
-                       gnttab_interface->grefs_per_grant_frame;
-
-       for (i = GNTTAB_NR_RESERVED_ENTRIES; i < nr_init_grefs - 1; i++)
-               gnttab_entry(i) = i + 1;
+       gnttab_size = nr_grant_frames * gnttab_interface->grefs_per_grant_frame;
 
-       gnttab_entry(nr_init_grefs - 1) = GNTTAB_LIST_END;
-       gnttab_free_count = nr_init_grefs - GNTTAB_NR_RESERVED_ENTRIES;
-       gnttab_free_head  = GNTTAB_NR_RESERVED_ENTRIES;
+       gnttab_set_free(GNTTAB_NR_RESERVED_ENTRIES,
+                       gnttab_size - GNTTAB_NR_RESERVED_ENTRIES);
 
        printk("Grant table initialized\n");
        return 0;
@@ -1481,6 +1659,7 @@ int gnttab_init(void)
        for (i--; i >= 0; i--)
                free_page((unsigned long)gnttab_list[i]);
        kfree(gnttab_list);
+       bitmap_free(gnttab_free_bitmap);
        return ret;
 }
 EXPORT_SYMBOL_GPL(gnttab_init);
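
The reworked bookkeeping keeps the linked free list for single allocations but adds a free-entry bitmap so multi-page users (the virtio grant mappings above) can find runs of consecutive references; get_free_seq() rebuilds the sorted list from that bitmap when the current tail run is too short. A minimal sketch of the consecutive-run search over a toy bitmap (plain bools instead of the kernel's find_next_bit() helpers, and no locking):

/* Sketch: claim `count` consecutive free entries from a free-map. */
#include <stdbool.h>

#define TABLE_SIZE 256
static bool entry_free[TABLE_SIZE];     /* true = reference is free */

int alloc_seq(unsigned int count)
{
        unsigned int i, run = 0;

        for (i = 0; i < TABLE_SIZE; i++) {
                run = entry_free[i] ? run + 1 : 0;
                if (run == count) {
                        unsigned int start = i - count + 1, j;

                        for (j = start; j <= i; j++)
                                entry_free[j] = false;  /* claim the run */
                        return (int)start;
                }
        }
        return -1;      /* caller would grow the table, as gnttab_expand() does */
}
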
index 34742c6..f17c4c0 100644 (file)
@@ -261,7 +261,6 @@ int __init xen_xlate_map_ballooned_pages(xen_pfn_t **gfns, void **virt,
 
        return 0;
 }
-EXPORT_SYMBOL_GPL(xen_xlate_map_ballooned_pages);
 
 struct remap_pfn {
        struct mm_struct *mm;
index 1c8dc69..cebba4e 100644 (file)
@@ -62,12 +62,12 @@ void v9fs_cache_inode_get_cookie(struct inode *inode)
        version = cpu_to_le32(v9inode->qid.version);
        path = cpu_to_le64(v9inode->qid.path);
        v9ses = v9fs_inode2v9ses(inode);
-       v9inode->netfs_ctx.cache =
+       v9inode->netfs.cache =
                fscache_acquire_cookie(v9fs_session_cache(v9ses),
                                       0,
                                       &path, sizeof(path),
                                       &version, sizeof(version),
-                                      i_size_read(&v9inode->vfs_inode));
+                                      i_size_read(&v9inode->netfs.inode));
 
        p9_debug(P9_DEBUG_FSC, "inode %p get cookie %p\n",
                 inode, v9fs_inode_cookie(v9inode));
index e28ddf7..0129de2 100644 (file)
@@ -625,7 +625,7 @@ static void v9fs_inode_init_once(void *foo)
        struct v9fs_inode *v9inode = (struct v9fs_inode *)foo;
 
        memset(&v9inode->qid, 0, sizeof(v9inode->qid));
-       inode_init_once(&v9inode->vfs_inode);
+       inode_init_once(&v9inode->netfs.inode);
 }
 
 /**
index ec0e8df..6acabc2 100644 (file)
@@ -109,11 +109,7 @@ struct v9fs_session_info {
 #define V9FS_INO_INVALID_ATTR 0x01
 
 struct v9fs_inode {
-       struct {
-               /* These must be contiguous */
-               struct inode    vfs_inode;      /* the VFS's inode record */
-               struct netfs_i_context netfs_ctx; /* Netfslib context */
-       };
+       struct netfs_inode netfs; /* Netfslib context and vfs inode */
        struct p9_qid qid;
        unsigned int cache_validity;
        struct p9_fid *writeback_fid;
@@ -122,13 +118,13 @@ struct v9fs_inode {
 
 static inline struct v9fs_inode *V9FS_I(const struct inode *inode)
 {
-       return container_of(inode, struct v9fs_inode, vfs_inode);
+       return container_of(inode, struct v9fs_inode, netfs.inode);
 }
 
 static inline struct fscache_cookie *v9fs_inode_cookie(struct v9fs_inode *v9inode)
 {
 #ifdef CONFIG_9P_FSCACHE
-       return netfs_i_cookie(&v9inode->vfs_inode);
+       return netfs_i_cookie(&v9inode->netfs);
 #else
        return NULL;
 #endif
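
This and the surrounding 9p (and later afs) hunks are mechanical fallout from netfslib folding its per-inode context together with the VFS inode into a single `struct netfs_inode`: filesystems embed it (first, as v9fs does) and the old `vfs_inode` field becomes `netfs.inode`, while container_of() still recovers the filesystem inode from a VFS inode pointer. A minimal sketch of the embedding and accessor, with toy types mirroring the patched V9FS_I() above:

/* Sketch: embedded netfs context and the container_of() accessor. */
#include <stddef.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct inode { long i_ino; };
struct netfs_inode { struct inode inode; /* + netfs context fields */ };

struct v9fs_inode_sketch {
        struct netfs_inode netfs;       /* embedded first, as in v9fs */
        /* 9p-specific fields (qid, fids, ...) would follow */
};

static struct v9fs_inode_sketch *V9FS_I_sketch(struct inode *inode)
{
        /* offsetof() with a nested designator walks netfs.inode in one hop. */
        return container_of(inode, struct v9fs_inode_sketch, netfs.inode);
}

int main(void)
{
        struct v9fs_inode_sketch v = { .netfs.inode.i_ino = 42 };

        return V9FS_I_sketch(&v.netfs.inode)->netfs.inode.i_ino == 42 ? 0 : 1;
}
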
index 8ce82ff..a8f512b 100644 (file)
@@ -66,13 +66,12 @@ static int v9fs_init_request(struct netfs_io_request *rreq, struct file *file)
 }
 
 /**
- * v9fs_req_cleanup - Cleanup request initialized by v9fs_init_request
- * @mapping: unused mapping of request to cleanup
- * @priv: private data to cleanup, a fid, guaranted non-null.
+ * v9fs_free_request - Cleanup request initialized by v9fs_init_rreq
+ * @rreq: The I/O request to clean up
  */
-static void v9fs_req_cleanup(struct address_space *mapping, void *priv)
+static void v9fs_free_request(struct netfs_io_request *rreq)
 {
-       struct p9_fid *fid = priv;
+       struct p9_fid *fid = rreq->netfs_priv;
 
        p9_client_clunk(fid);
 }
@@ -94,9 +93,9 @@ static int v9fs_begin_cache_operation(struct netfs_io_request *rreq)
 
 const struct netfs_request_ops v9fs_req_ops = {
        .init_request           = v9fs_init_request,
+       .free_request           = v9fs_free_request,
        .begin_cache_operation  = v9fs_begin_cache_operation,
        .issue_read             = v9fs_issue_read,
-       .cleanup                = v9fs_req_cleanup,
 };
 
 /**
@@ -140,7 +139,7 @@ static void v9fs_write_to_cache_done(void *priv, ssize_t transferred_or_error,
            transferred_or_error != -ENOBUFS) {
                version = cpu_to_le32(v9inode->qid.version);
                fscache_invalidate(v9fs_inode_cookie(v9inode), &version,
-                                  i_size_read(&v9inode->vfs_inode), 0);
+                                  i_size_read(&v9inode->netfs.inode), 0);
        }
 }
 
@@ -274,7 +273,7 @@ static int v9fs_write_begin(struct file *filp, struct address_space *mapping,
         * file.  We need to do this before we get a lock on the page in case
         * there's more than one writer competing for the same cache block.
         */
-       retval = netfs_write_begin(filp, mapping, pos, len, &folio, fsdata);
+       retval = netfs_write_begin(&v9inode->netfs, filp, mapping, pos, len, &folio, fsdata);
        if (retval < 0)
                return retval;
 
index 55367ec..419d2f3 100644 (file)
@@ -234,7 +234,7 @@ struct inode *v9fs_alloc_inode(struct super_block *sb)
        v9inode->writeback_fid = NULL;
        v9inode->cache_validity = 0;
        mutex_init(&v9inode->v_mutex);
-       return &v9inode->vfs_inode;
+       return &v9inode->netfs.inode;
 }
 
 /**
@@ -252,7 +252,8 @@ void v9fs_free_inode(struct inode *inode)
  */
 static void v9fs_set_netfs_context(struct inode *inode)
 {
-       netfs_i_context_init(inode, &v9fs_req_ops);
+       struct v9fs_inode *v9inode = V9FS_I(inode);
+       netfs_inode_init(&v9inode->netfs, &v9fs_req_ops);
 }
 
 int v9fs_init_inode(struct v9fs_session_info *v9ses,
index 1b4d580..a484fa6 100644 (file)
@@ -30,7 +30,7 @@ void afs_invalidate_mmap_work(struct work_struct *work)
 {
        struct afs_vnode *vnode = container_of(work, struct afs_vnode, cb_work);
 
-       unmap_mapping_pages(vnode->vfs_inode.i_mapping, 0, 0, false);
+       unmap_mapping_pages(vnode->netfs.inode.i_mapping, 0, 0, false);
 }
 
 void afs_server_init_callback_work(struct work_struct *work)
index 79f6b74..56ae5cd 100644 (file)
@@ -109,7 +109,7 @@ struct afs_lookup_cookie {
  */
 static void afs_dir_read_cleanup(struct afs_read *req)
 {
-       struct address_space *mapping = req->vnode->vfs_inode.i_mapping;
+       struct address_space *mapping = req->vnode->netfs.inode.i_mapping;
        struct folio *folio;
        pgoff_t last = req->nr_pages - 1;
 
@@ -153,7 +153,7 @@ static bool afs_dir_check_folio(struct afs_vnode *dvnode, struct folio *folio,
                block = kmap_local_folio(folio, offset);
                if (block->hdr.magic != AFS_DIR_MAGIC) {
                        printk("kAFS: %s(%lx): [%llx] bad magic %zx/%zx is %04hx\n",
-                              __func__, dvnode->vfs_inode.i_ino,
+                              __func__, dvnode->netfs.inode.i_ino,
                               pos, offset, size, ntohs(block->hdr.magic));
                        trace_afs_dir_check_failed(dvnode, pos + offset, i_size);
                        kunmap_local(block);
@@ -183,7 +183,7 @@ error:
 static void afs_dir_dump(struct afs_vnode *dvnode, struct afs_read *req)
 {
        union afs_xdr_dir_block *block;
-       struct address_space *mapping = dvnode->vfs_inode.i_mapping;
+       struct address_space *mapping = dvnode->netfs.inode.i_mapping;
        struct folio *folio;
        pgoff_t last = req->nr_pages - 1;
        size_t offset, size;
@@ -217,7 +217,7 @@ static void afs_dir_dump(struct afs_vnode *dvnode, struct afs_read *req)
  */
 static int afs_dir_check(struct afs_vnode *dvnode, struct afs_read *req)
 {
-       struct address_space *mapping = dvnode->vfs_inode.i_mapping;
+       struct address_space *mapping = dvnode->netfs.inode.i_mapping;
        struct folio *folio;
        pgoff_t last = req->nr_pages - 1;
        int ret = 0;
@@ -269,7 +269,7 @@ static int afs_dir_open(struct inode *inode, struct file *file)
 static struct afs_read *afs_read_dir(struct afs_vnode *dvnode, struct key *key)
        __acquires(&dvnode->validate_lock)
 {
-       struct address_space *mapping = dvnode->vfs_inode.i_mapping;
+       struct address_space *mapping = dvnode->netfs.inode.i_mapping;
        struct afs_read *req;
        loff_t i_size;
        int nr_pages, i;
@@ -287,7 +287,7 @@ static struct afs_read *afs_read_dir(struct afs_vnode *dvnode, struct key *key)
        req->cleanup = afs_dir_read_cleanup;
 
 expand:
-       i_size = i_size_read(&dvnode->vfs_inode);
+       i_size = i_size_read(&dvnode->netfs.inode);
        if (i_size < 2048) {
                ret = afs_bad(dvnode, afs_file_error_dir_small);
                goto error;
@@ -305,7 +305,7 @@ expand:
        req->actual_len = i_size; /* May change */
        req->len = nr_pages * PAGE_SIZE; /* We can ask for more than there is */
        req->data_version = dvnode->status.data_version; /* May change */
-       iov_iter_xarray(&req->def_iter, READ, &dvnode->vfs_inode.i_mapping->i_pages,
+       iov_iter_xarray(&req->def_iter, READ, &dvnode->netfs.inode.i_mapping->i_pages,
                        0, i_size);
        req->iter = &req->def_iter;
 
@@ -897,7 +897,7 @@ static struct inode *afs_do_lookup(struct inode *dir, struct dentry *dentry,
 
 out_op:
        if (op->error == 0) {
-               inode = &op->file[1].vnode->vfs_inode;
+               inode = &op->file[1].vnode->netfs.inode;
                op->file[1].vnode = NULL;
        }
 
@@ -1139,7 +1139,7 @@ static int afs_d_revalidate(struct dentry *dentry, unsigned int flags)
        afs_stat_v(dir, n_reval);
 
        /* search the directory for this vnode */
-       ret = afs_do_lookup_one(&dir->vfs_inode, dentry, &fid, key, &dir_version);
+       ret = afs_do_lookup_one(&dir->netfs.inode, dentry, &fid, key, &dir_version);
        switch (ret) {
        case 0:
                /* the filename maps to something */
@@ -1170,7 +1170,7 @@ static int afs_d_revalidate(struct dentry *dentry, unsigned int flags)
                        _debug("%pd: file deleted (uq %u -> %u I:%u)",
                               dentry, fid.unique,
                               vnode->fid.unique,
-                              vnode->vfs_inode.i_generation);
+                              vnode->netfs.inode.i_generation);
                        goto not_found;
                }
                goto out_valid;
@@ -1368,7 +1368,7 @@ static void afs_dir_remove_subdir(struct dentry *dentry)
        if (d_really_is_positive(dentry)) {
                struct afs_vnode *vnode = AFS_FS_I(d_inode(dentry));
 
-               clear_nlink(&vnode->vfs_inode);
+               clear_nlink(&vnode->netfs.inode);
                set_bit(AFS_VNODE_DELETED, &vnode->flags);
                clear_bit(AFS_VNODE_CB_PROMISED, &vnode->flags);
                clear_bit(AFS_VNODE_DIR_VALID, &vnode->flags);
@@ -1487,8 +1487,8 @@ static void afs_dir_remove_link(struct afs_operation *op)
                /* Already done */
        } else if (test_bit(AFS_VNODE_DIR_VALID, &dvnode->flags)) {
                write_seqlock(&vnode->cb_lock);
-               drop_nlink(&vnode->vfs_inode);
-               if (vnode->vfs_inode.i_nlink == 0) {
+               drop_nlink(&vnode->netfs.inode);
+               if (vnode->netfs.inode.i_nlink == 0) {
                        set_bit(AFS_VNODE_DELETED, &vnode->flags);
                        __afs_break_callback(vnode, afs_cb_break_for_unlink);
                }
@@ -1504,7 +1504,7 @@ static void afs_dir_remove_link(struct afs_operation *op)
                        op->error = ret;
        }
 
-       _debug("nlink %d [val %d]", vnode->vfs_inode.i_nlink, op->error);
+       _debug("nlink %d [val %d]", vnode->netfs.inode.i_nlink, op->error);
 }
 
 static void afs_unlink_success(struct afs_operation *op)
@@ -1680,8 +1680,8 @@ static void afs_link_success(struct afs_operation *op)
        afs_update_dentry_version(op, dvp, op->dentry);
        if (op->dentry_2->d_parent == op->dentry->d_parent)
                afs_update_dentry_version(op, dvp, op->dentry_2);
-       ihold(&vp->vnode->vfs_inode);
-       d_instantiate(op->dentry, &vp->vnode->vfs_inode);
+       ihold(&vp->vnode->netfs.inode);
+       d_instantiate(op->dentry, &vp->vnode->netfs.inode);
 }
 
 static void afs_link_put(struct afs_operation *op)
index d98e109..0ab7752 100644 (file)
@@ -109,7 +109,7 @@ static void afs_clear_contig_bits(union afs_xdr_dir_block *block,
  */
 static struct folio *afs_dir_get_folio(struct afs_vnode *vnode, pgoff_t index)
 {
-       struct address_space *mapping = vnode->vfs_inode.i_mapping;
+       struct address_space *mapping = vnode->netfs.inode.i_mapping;
        struct folio *folio;
 
        folio = __filemap_get_folio(mapping, index,
@@ -216,7 +216,7 @@ void afs_edit_dir_add(struct afs_vnode *vnode,
 
        _enter(",,{%d,%s},", name->len, name->name);
 
-       i_size = i_size_read(&vnode->vfs_inode);
+       i_size = i_size_read(&vnode->netfs.inode);
        if (i_size > AFS_DIR_BLOCK_SIZE * AFS_DIR_MAX_BLOCKS ||
            (i_size & (AFS_DIR_BLOCK_SIZE - 1))) {
                clear_bit(AFS_VNODE_DIR_VALID, &vnode->flags);
@@ -336,7 +336,7 @@ found_space:
        if (b < AFS_DIR_BLOCKS_WITH_CTR)
                meta->meta.alloc_ctrs[b] -= need_slots;
 
-       inode_inc_iversion_raw(&vnode->vfs_inode);
+       inode_inc_iversion_raw(&vnode->netfs.inode);
        afs_stat_v(vnode, n_dir_cr);
        _debug("Insert %s in %u[%u]", name->name, b, slot);
 
@@ -383,7 +383,7 @@ void afs_edit_dir_remove(struct afs_vnode *vnode,
 
        _enter(",,{%d,%s},", name->len, name->name);
 
-       i_size = i_size_read(&vnode->vfs_inode);
+       i_size = i_size_read(&vnode->netfs.inode);
        if (i_size < AFS_DIR_BLOCK_SIZE ||
            i_size > AFS_DIR_BLOCK_SIZE * AFS_DIR_MAX_BLOCKS ||
            (i_size & (AFS_DIR_BLOCK_SIZE - 1))) {
@@ -463,7 +463,7 @@ found_dirent:
        if (b < AFS_DIR_BLOCKS_WITH_CTR)
                meta->meta.alloc_ctrs[b] += need_slots;
 
-       inode_set_iversion_raw(&vnode->vfs_inode, vnode->status.data_version);
+       inode_set_iversion_raw(&vnode->netfs.inode, vnode->status.data_version);
        afs_stat_v(vnode, n_dir_rm);
        _debug("Remove %s from %u[%u]", name->name, b, slot);
 
index 45cfd50..bb5807e 100644 (file)
@@ -131,7 +131,7 @@ int afs_sillyrename(struct afs_vnode *dvnode, struct afs_vnode *vnode,
                        goto out;
        } while (!d_is_negative(sdentry));
 
-       ihold(&vnode->vfs_inode);
+       ihold(&vnode->netfs.inode);
 
        ret = afs_do_silly_rename(dvnode, vnode, dentry, sdentry, key);
        switch (ret) {
@@ -148,7 +148,7 @@ int afs_sillyrename(struct afs_vnode *dvnode, struct afs_vnode *vnode,
                d_drop(sdentry);
        }
 
-       iput(&vnode->vfs_inode);
+       iput(&vnode->netfs.inode);
        dput(sdentry);
 out:
        _leave(" = %d", ret);
index f120bcb..d7d9402 100644 (file)
@@ -76,7 +76,7 @@ struct inode *afs_iget_pseudo_dir(struct super_block *sb, bool root)
        /* there shouldn't be an existing inode */
        BUG_ON(!(inode->i_state & I_NEW));
 
-       netfs_i_context_init(inode, NULL);
+       netfs_inode_init(&vnode->netfs, NULL);
        inode->i_size           = 0;
        inode->i_mode           = S_IFDIR | S_IRUGO | S_IXUGO;
        if (root) {
index a8e8832..42118a4 100644 (file)
@@ -194,7 +194,7 @@ int afs_release(struct inode *inode, struct file *file)
                afs_put_wb_key(af->wb);
 
        if ((file->f_mode & FMODE_WRITE)) {
-               i_size = i_size_read(&vnode->vfs_inode);
+               i_size = i_size_read(&vnode->netfs.inode);
                afs_set_cache_aux(vnode, &aux);
                fscache_unuse_cookie(afs_vnode_cache(vnode), &aux, &i_size);
        } else {
@@ -325,7 +325,7 @@ static void afs_issue_read(struct netfs_io_subrequest *subreq)
        fsreq->iter     = &fsreq->def_iter;
 
        iov_iter_xarray(&fsreq->def_iter, READ,
-                       &fsreq->vnode->vfs_inode.i_mapping->i_pages,
+                       &fsreq->vnode->netfs.inode.i_mapping->i_pages,
                        fsreq->pos, fsreq->len);
 
        afs_fetch_data(fsreq->vnode, fsreq);
@@ -382,17 +382,17 @@ static int afs_check_write_begin(struct file *file, loff_t pos, unsigned len,
        return test_bit(AFS_VNODE_DELETED, &vnode->flags) ? -ESTALE : 0;
 }
 
-static void afs_priv_cleanup(struct address_space *mapping, void *netfs_priv)
+static void afs_free_request(struct netfs_io_request *rreq)
 {
-       key_put(netfs_priv);
+       key_put(rreq->netfs_priv);
 }
 
 const struct netfs_request_ops afs_req_ops = {
        .init_request           = afs_init_request,
+       .free_request           = afs_free_request,
        .begin_cache_operation  = afs_begin_cache_operation,
        .check_write_begin      = afs_check_write_begin,
        .issue_read             = afs_issue_read,
-       .cleanup                = afs_priv_cleanup,
 };
 
 int afs_write_inode(struct inode *inode, struct writeback_control *wbc)
index d222dfb..7a3803c 100644 (file)
@@ -232,14 +232,14 @@ int afs_put_operation(struct afs_operation *op)
        if (op->file[1].modification && op->file[1].vnode != op->file[0].vnode)
                clear_bit(AFS_VNODE_MODIFYING, &op->file[1].vnode->flags);
        if (op->file[0].put_vnode)
-               iput(&op->file[0].vnode->vfs_inode);
+               iput(&op->file[0].vnode->netfs.inode);
        if (op->file[1].put_vnode)
-               iput(&op->file[1].vnode->vfs_inode);
+               iput(&op->file[1].vnode->netfs.inode);
 
        if (op->more_files) {
                for (i = 0; i < op->nr_files - 2; i++)
                        if (op->more_files[i].put_vnode)
-                               iput(&op->more_files[i].vnode->vfs_inode);
+                               iput(&op->more_files[i].vnode->netfs.inode);
                kfree(op->more_files);
        }
 
index 30b0662..89630ac 100644 (file)
@@ -58,7 +58,7 @@ static noinline void dump_vnode(struct afs_vnode *vnode, struct afs_vnode *paren
  */
 static void afs_set_netfs_context(struct afs_vnode *vnode)
 {
-       netfs_i_context_init(&vnode->vfs_inode, &afs_req_ops);
+       netfs_inode_init(&vnode->netfs, &afs_req_ops);
 }
 
 /*
@@ -96,7 +96,7 @@ static int afs_inode_init_from_status(struct afs_operation *op,
        inode->i_flags |= S_NOATIME;
        inode->i_uid = make_kuid(&init_user_ns, status->owner);
        inode->i_gid = make_kgid(&init_user_ns, status->group);
-       set_nlink(&vnode->vfs_inode, status->nlink);
+       set_nlink(&vnode->netfs.inode, status->nlink);
 
        switch (status->type) {
        case AFS_FTYPE_FILE:
@@ -139,7 +139,7 @@ static int afs_inode_init_from_status(struct afs_operation *op,
        afs_set_netfs_context(vnode);
 
        vnode->invalid_before   = status->data_version;
-       inode_set_iversion_raw(&vnode->vfs_inode, status->data_version);
+       inode_set_iversion_raw(&vnode->netfs.inode, status->data_version);
 
        if (!vp->scb.have_cb) {
                /* it's a symlink we just created (the fileserver
@@ -163,7 +163,7 @@ static void afs_apply_status(struct afs_operation *op,
 {
        struct afs_file_status *status = &vp->scb.status;
        struct afs_vnode *vnode = vp->vnode;
-       struct inode *inode = &vnode->vfs_inode;
+       struct inode *inode = &vnode->netfs.inode;
        struct timespec64 t;
        umode_t mode;
        bool data_changed = false;
@@ -246,7 +246,7 @@ static void afs_apply_status(struct afs_operation *op,
                 * idea of what the size should be that's not the same as
                 * what's on the server.
                 */
-               vnode->netfs_ctx.remote_i_size = status->size;
+               vnode->netfs.remote_i_size = status->size;
                if (change_size) {
                        afs_set_i_size(vnode, status->size);
                        inode->i_ctime = t;
@@ -289,7 +289,7 @@ void afs_vnode_commit_status(struct afs_operation *op, struct afs_vnode_param *v
                 */
                if (vp->scb.status.abort_code == VNOVNODE) {
                        set_bit(AFS_VNODE_DELETED, &vnode->flags);
-                       clear_nlink(&vnode->vfs_inode);
+                       clear_nlink(&vnode->netfs.inode);
                        __afs_break_callback(vnode, afs_cb_break_for_deleted);
                        op->flags &= ~AFS_OPERATION_DIR_CONFLICT;
                }
@@ -306,8 +306,8 @@ void afs_vnode_commit_status(struct afs_operation *op, struct afs_vnode_param *v
                if (vp->scb.have_cb)
                        afs_apply_callback(op, vp);
        } else if (vp->op_unlinked && !(op->flags & AFS_OPERATION_DIR_CONFLICT)) {
-               drop_nlink(&vnode->vfs_inode);
-               if (vnode->vfs_inode.i_nlink == 0) {
+               drop_nlink(&vnode->netfs.inode);
+               if (vnode->netfs.inode.i_nlink == 0) {
                        set_bit(AFS_VNODE_DELETED, &vnode->flags);
                        __afs_break_callback(vnode, afs_cb_break_for_deleted);
                }
@@ -326,7 +326,7 @@ static void afs_fetch_status_success(struct afs_operation *op)
        struct afs_vnode *vnode = vp->vnode;
        int ret;
 
-       if (vnode->vfs_inode.i_state & I_NEW) {
+       if (vnode->netfs.inode.i_state & I_NEW) {
                ret = afs_inode_init_from_status(op, vp, vnode);
                op->error = ret;
                if (ret == 0)
@@ -430,7 +430,7 @@ static void afs_get_inode_cache(struct afs_vnode *vnode)
        struct afs_vnode_cache_aux aux;
 
        if (vnode->status.type != AFS_FTYPE_FILE) {
-               vnode->netfs_ctx.cache = NULL;
+               vnode->netfs.cache = NULL;
                return;
        }
 
@@ -457,7 +457,7 @@ static void afs_get_inode_cache(struct afs_vnode *vnode)
 struct inode *afs_iget(struct afs_operation *op, struct afs_vnode_param *vp)
 {
        struct afs_vnode_param *dvp = &op->file[0];
-       struct super_block *sb = dvp->vnode->vfs_inode.i_sb;
+       struct super_block *sb = dvp->vnode->netfs.inode.i_sb;
        struct afs_vnode *vnode;
        struct inode *inode;
        int ret;
@@ -582,10 +582,10 @@ static void afs_zap_data(struct afs_vnode *vnode)
        /* nuke all the non-dirty pages that aren't locked, mapped or being
         * written back in a regular file and completely discard the pages in a
         * directory or symlink */
-       if (S_ISREG(vnode->vfs_inode.i_mode))
-               invalidate_remote_inode(&vnode->vfs_inode);
+       if (S_ISREG(vnode->netfs.inode.i_mode))
+               invalidate_remote_inode(&vnode->netfs.inode);
        else
-               invalidate_inode_pages2(vnode->vfs_inode.i_mapping);
+               invalidate_inode_pages2(vnode->netfs.inode.i_mapping);
 }
 
 /*
@@ -683,8 +683,8 @@ int afs_validate(struct afs_vnode *vnode, struct key *key)
               key_serial(key));
 
        if (unlikely(test_bit(AFS_VNODE_DELETED, &vnode->flags))) {
-               if (vnode->vfs_inode.i_nlink)
-                       clear_nlink(&vnode->vfs_inode);
+               if (vnode->netfs.inode.i_nlink)
+                       clear_nlink(&vnode->netfs.inode);
                goto valid;
        }
 
@@ -826,7 +826,7 @@ void afs_evict_inode(struct inode *inode)
 static void afs_setattr_success(struct afs_operation *op)
 {
        struct afs_vnode_param *vp = &op->file[0];
-       struct inode *inode = &vp->vnode->vfs_inode;
+       struct inode *inode = &vp->vnode->netfs.inode;
        loff_t old_i_size = i_size_read(inode);
 
        op->setattr.old_i_size = old_i_size;
@@ -843,7 +843,7 @@ static void afs_setattr_success(struct afs_operation *op)
 static void afs_setattr_edit_file(struct afs_operation *op)
 {
        struct afs_vnode_param *vp = &op->file[0];
-       struct inode *inode = &vp->vnode->vfs_inode;
+       struct inode *inode = &vp->vnode->netfs.inode;
 
        if (op->setattr.attr->ia_valid & ATTR_SIZE) {
                loff_t size = op->setattr.attr->ia_size;
@@ -875,7 +875,7 @@ int afs_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
                ATTR_MTIME | ATTR_MTIME_SET | ATTR_TIMES_SET | ATTR_TOUCH;
        struct afs_operation *op;
        struct afs_vnode *vnode = AFS_FS_I(d_inode(dentry));
-       struct inode *inode = &vnode->vfs_inode;
+       struct inode *inode = &vnode->netfs.inode;
        loff_t i_size;
        int ret;
 
index a309959..a6f25d9 100644 (file)
@@ -619,12 +619,7 @@ enum afs_lock_state {
  * leak from one inode to another.
  */
 struct afs_vnode {
-       struct {
-               /* These must be contiguous */
-               struct inode    vfs_inode;      /* the VFS's inode record */
-               struct netfs_i_context netfs_ctx; /* Netfslib context */
-       };
-
+       struct netfs_inode      netfs;          /* Netfslib context and vfs inode */
        struct afs_volume       *volume;        /* volume on which vnode resides */
        struct afs_fid          fid;            /* the file identifier for this inode */
        struct afs_file_status  status;         /* AFS status info for this file */
@@ -675,7 +670,7 @@ struct afs_vnode {
 static inline struct fscache_cookie *afs_vnode_cache(struct afs_vnode *vnode)
 {
 #ifdef CONFIG_AFS_FSCACHE
-       return netfs_i_cookie(&vnode->vfs_inode);
+       return netfs_i_cookie(&vnode->netfs);
 #else
        return NULL;
 #endif
@@ -685,7 +680,7 @@ static inline void afs_vnode_set_cache(struct afs_vnode *vnode,
                                       struct fscache_cookie *cookie)
 {
 #ifdef CONFIG_AFS_FSCACHE
-       vnode->netfs_ctx.cache = cookie;
+       vnode->netfs.cache = cookie;
 #endif
 }
 
@@ -892,7 +887,7 @@ static inline void afs_invalidate_cache(struct afs_vnode *vnode, unsigned int fl
 
        afs_set_cache_aux(vnode, &aux);
        fscache_invalidate(afs_vnode_cache(vnode), &aux,
-                          i_size_read(&vnode->vfs_inode), flags);
+                          i_size_read(&vnode->netfs.inode), flags);
 }
 
 /*
@@ -1217,7 +1212,7 @@ static inline struct afs_net *afs_i2net(struct inode *inode)
 
 static inline struct afs_net *afs_v2net(struct afs_vnode *vnode)
 {
-       return afs_i2net(&vnode->vfs_inode);
+       return afs_i2net(&vnode->netfs.inode);
 }
 
 static inline struct afs_net *afs_sock2net(struct sock *sk)
@@ -1593,12 +1588,12 @@ extern void yfs_fs_store_opaque_acl2(struct afs_operation *);
  */
 static inline struct afs_vnode *AFS_FS_I(struct inode *inode)
 {
-       return container_of(inode, struct afs_vnode, vfs_inode);
+       return container_of(inode, struct afs_vnode, netfs.inode);
 }
 
 static inline struct inode *AFS_VNODE_TO_I(struct afs_vnode *vnode)
 {
-       return &vnode->vfs_inode;
+       return &vnode->netfs.inode;
 }
 
 /*
@@ -1621,8 +1616,8 @@ static inline void afs_update_dentry_version(struct afs_operation *op,
  */
 static inline void afs_set_i_size(struct afs_vnode *vnode, u64 size)
 {
-       i_size_write(&vnode->vfs_inode, size);
-       vnode->vfs_inode.i_blocks = ((size + 1023) >> 10) << 1;
+       i_size_write(&vnode->netfs.inode, size);
+       vnode->netfs.inode.i_blocks = ((size + 1023) >> 10) << 1;
 }
 
 /*
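
This struct change is the heart of the series.  The old anonymous-struct
trick only worked because vfs_inode and netfs_ctx were guaranteed to be
laid out contiguously, letting netfslib find its context by pointer
arithmetic past the inode; embedding a single struct netfs_inode (which
itself contains the struct inode) makes the relationship explicit, and
both layers recover their container with container_of(), as the updated
AFS_FS_I() above shows.  A freestanding sketch of the pattern that
compiles in userspace -- the type names and fields are illustrative, not
the kernel's definitions:

    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct inode { long i_ino; };

    struct netfs_inode {            /* library context wrapping the inode */
            struct inode inode;     /* first member in the kernel's layout,
                                     * though container_of() works either way */
            long remote_i_size;
    };

    struct fs_vnode {               /* filesystem-private inode record */
            struct netfs_inode netfs;
            int flags;
    };

    int main(void)
    {
            struct fs_vnode v = { .netfs.inode.i_ino = 42 };
            struct inode *inode = &v.netfs.inode;

            /* Either layer recovers its container from the bare inode. */
            struct netfs_inode *ctx =
                    container_of(inode, struct netfs_inode, inode);
            struct fs_vnode *vnode =
                    container_of(inode, struct fs_vnode, netfs.inode);

            printf("ino=%ld same=%d\n", ctx->inode.i_ino, vnode == &v);
            return 0;
    }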
index 1fea195..95d7130 100644 (file)
@@ -659,7 +659,7 @@ static void afs_i_init_once(void *_vnode)
        struct afs_vnode *vnode = _vnode;
 
        memset(vnode, 0, sizeof(*vnode));
-       inode_init_once(&vnode->vfs_inode);
+       inode_init_once(&vnode->netfs.inode);
        mutex_init(&vnode->io_lock);
        init_rwsem(&vnode->validate_lock);
        spin_lock_init(&vnode->wb_lock);
@@ -700,8 +700,8 @@ static struct inode *afs_alloc_inode(struct super_block *sb)
        init_rwsem(&vnode->rmdir_lock);
        INIT_WORK(&vnode->cb_work, afs_invalidate_mmap_work);
 
-       _leave(" = %p", &vnode->vfs_inode);
-       return &vnode->vfs_inode;
+       _leave(" = %p", &vnode->netfs.inode);
+       return &vnode->netfs.inode;
 }
 
 static void afs_free_inode(struct inode *inode)
index 94a3d24..cc665ce 100644 (file)
@@ -9,8 +9,7 @@
 #include <linux/slab.h>
 #include "internal.h"
 
-unsigned __read_mostly afs_volume_gc_delay = 10;
-unsigned __read_mostly afs_volume_record_life = 60 * 60;
+static unsigned __read_mostly afs_volume_record_life = 60 * 60;
 
 /*
  * Insert a volume into a cell.  If there's an existing volume record, that is
index 2236b21..2c885b2 100644 (file)
@@ -60,7 +60,7 @@ int afs_write_begin(struct file *file, struct address_space *mapping,
         * file.  We need to do this before we get a lock on the page in case
         * there's more than one writer competing for the same cache block.
         */
-       ret = netfs_write_begin(file, mapping, pos, len, &folio, fsdata);
+       ret = netfs_write_begin(&vnode->netfs, file, mapping, pos, len, &folio, fsdata);
        if (ret < 0)
                return ret;
 
@@ -146,10 +146,10 @@ int afs_write_end(struct file *file, struct address_space *mapping,
 
        write_end_pos = pos + copied;
 
-       i_size = i_size_read(&vnode->vfs_inode);
+       i_size = i_size_read(&vnode->netfs.inode);
        if (write_end_pos > i_size) {
                write_seqlock(&vnode->cb_lock);
-               i_size = i_size_read(&vnode->vfs_inode);
+               i_size = i_size_read(&vnode->netfs.inode);
                if (write_end_pos > i_size)
                        afs_set_i_size(vnode, write_end_pos);
                write_sequnlock(&vnode->cb_lock);
@@ -257,7 +257,7 @@ static void afs_redirty_pages(struct writeback_control *wbc,
  */
 static void afs_pages_written_back(struct afs_vnode *vnode, loff_t start, unsigned int len)
 {
-       struct address_space *mapping = vnode->vfs_inode.i_mapping;
+       struct address_space *mapping = vnode->netfs.inode.i_mapping;
        struct folio *folio;
        pgoff_t end;
 
@@ -354,7 +354,6 @@ static const struct afs_operation_ops afs_store_data_operation = {
 static int afs_store_data(struct afs_vnode *vnode, struct iov_iter *iter, loff_t pos,
                          bool laundering)
 {
-       struct netfs_i_context *ictx = &vnode->netfs_ctx;
        struct afs_operation *op;
        struct afs_wb_key *wbk = NULL;
        loff_t size = iov_iter_count(iter);
@@ -385,9 +384,9 @@ static int afs_store_data(struct afs_vnode *vnode, struct iov_iter *iter, loff_t
        op->store.write_iter = iter;
        op->store.pos = pos;
        op->store.size = size;
-       op->store.i_size = max(pos + size, ictx->remote_i_size);
+       op->store.i_size = max(pos + size, vnode->netfs.remote_i_size);
        op->store.laundering = laundering;
-       op->mtime = vnode->vfs_inode.i_mtime;
+       op->mtime = vnode->netfs.inode.i_mtime;
        op->flags |= AFS_OPERATION_UNINTR;
        op->ops = &afs_store_data_operation;
 
@@ -554,7 +553,7 @@ static ssize_t afs_write_back_from_locked_folio(struct address_space *mapping,
        struct iov_iter iter;
        unsigned long priv;
        unsigned int offset, to, len, max_len;
-       loff_t i_size = i_size_read(&vnode->vfs_inode);
+       loff_t i_size = i_size_read(&vnode->netfs.inode);
        bool new_content = test_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags);
        bool caching = fscache_cookie_enabled(afs_vnode_cache(vnode));
        long count = wbc->nr_to_write;
@@ -845,7 +844,7 @@ ssize_t afs_file_write(struct kiocb *iocb, struct iov_iter *from)
        _enter("{%llx:%llu},{%zu},",
               vnode->fid.vid, vnode->fid.vnode, count);
 
-       if (IS_SWAPFILE(&vnode->vfs_inode)) {
+       if (IS_SWAPFILE(&vnode->netfs.inode)) {
                printk(KERN_INFO
                       "AFS: Attempt to write to active swap file!\n");
                return -EBUSY;
@@ -958,8 +957,8 @@ void afs_prune_wb_keys(struct afs_vnode *vnode)
        /* Discard unused keys */
        spin_lock(&vnode->wb_lock);
 
-       if (!mapping_tagged(&vnode->vfs_inode.i_data, PAGECACHE_TAG_WRITEBACK) &&
-           !mapping_tagged(&vnode->vfs_inode.i_data, PAGECACHE_TAG_DIRTY)) {
+       if (!mapping_tagged(&vnode->netfs.inode.i_data, PAGECACHE_TAG_WRITEBACK) &&
+           !mapping_tagged(&vnode->netfs.inode.i_data, PAGECACHE_TAG_DIRTY)) {
                list_for_each_entry_safe(wbk, tmp, &vnode->wb_keys, vnode_link) {
                        if (refcount_read(&wbk->usage) == 1)
                                list_move(&wbk->vnode_link, &graveyard);
@@ -1034,6 +1033,6 @@ static void afs_write_to_cache(struct afs_vnode *vnode,
                               bool caching)
 {
        fscache_write_to_cache(afs_vnode_cache(vnode),
-                              vnode->vfs_inode.i_mapping, start, len, i_size,
+                              vnode->netfs.inode.i_mapping, start, len, i_size,
                               afs_write_to_cache_done, vnode, caching);
 }
index e5221be..6dee888 100644 (file)
@@ -394,11 +394,10 @@ static int ceph_init_request(struct netfs_io_request *rreq, struct file *file)
        return 0;
 }
 
-static void ceph_readahead_cleanup(struct address_space *mapping, void *priv)
+static void ceph_netfs_free_request(struct netfs_io_request *rreq)
 {
-       struct inode *inode = mapping->host;
-       struct ceph_inode_info *ci = ceph_inode(inode);
-       int got = (uintptr_t)priv;
+       struct ceph_inode_info *ci = ceph_inode(rreq->inode);
+       int got = (uintptr_t)rreq->netfs_priv;
 
        if (got)
                ceph_put_cap_refs(ci, got);
@@ -406,12 +405,12 @@ static void ceph_readahead_cleanup(struct address_space *mapping, void *priv)
 
 const struct netfs_request_ops ceph_netfs_ops = {
        .init_request           = ceph_init_request,
+       .free_request           = ceph_netfs_free_request,
        .begin_cache_operation  = ceph_begin_cache_operation,
        .issue_read             = ceph_netfs_issue_read,
        .expand_readahead       = ceph_netfs_expand_readahead,
        .clamp_length           = ceph_netfs_clamp_length,
        .check_write_begin      = ceph_netfs_check_write_begin,
-       .cleanup                = ceph_readahead_cleanup,
 };
 
 #ifdef CONFIG_CEPH_FSCACHE
@@ -1322,10 +1321,11 @@ static int ceph_write_begin(struct file *file, struct address_space *mapping,
                            struct page **pagep, void **fsdata)
 {
        struct inode *inode = file_inode(file);
+       struct ceph_inode_info *ci = ceph_inode(inode);
        struct folio *folio = NULL;
        int r;
 
-       r = netfs_write_begin(file, inode->i_mapping, pos, len, &folio, NULL);
+       r = netfs_write_begin(&ci->netfs, file, inode->i_mapping, pos, len, &folio, NULL);
        if (r == 0)
                folio_wait_fscache(folio);
        if (r < 0) {
@@ -1798,7 +1798,7 @@ enum {
 static int __ceph_pool_perm_get(struct ceph_inode_info *ci,
                                s64 pool, struct ceph_string *pool_ns)
 {
-       struct ceph_fs_client *fsc = ceph_inode_to_client(&ci->vfs_inode);
+       struct ceph_fs_client *fsc = ceph_inode_to_client(&ci->netfs.inode);
        struct ceph_mds_client *mdsc = fsc->mdsc;
        struct ceph_osd_request *rd_req = NULL, *wr_req = NULL;
        struct rb_node **p, *parent;
@@ -1913,7 +1913,7 @@ static int __ceph_pool_perm_get(struct ceph_inode_info *ci,
                                     0, false, true);
        err = ceph_osdc_start_request(&fsc->client->osdc, rd_req, false);
 
-       wr_req->r_mtime = ci->vfs_inode.i_mtime;
+       wr_req->r_mtime = ci->netfs.inode.i_mtime;
        err2 = ceph_osdc_start_request(&fsc->client->osdc, wr_req, false);
 
        if (!err)
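
One detail worth flagging in the ceph_netfs_free_request() hunk above:
the cap-reference count is not a pointer at all, but an integer smuggled
through the void *netfs_priv slot via uintptr_t casts, so no allocation
is needed for per-request state.  A userspace sketch of the idiom
(function names are illustrative):

    #include <stdint.h>
    #include <stdio.h>

    /* Pack a small integer into a void * slot and recover it intact. */
    static void *encode_refs(int got)  { return (void *)(uintptr_t)got; }
    static int decode_refs(void *priv) { return (int)(uintptr_t)priv; }

    int main(void)
    {
            void *priv = encode_refs(7);   /* stands in for rreq->netfs_priv */

            if (decode_refs(priv))         /* mirrors "if (got)" above */
                    printf("dropping %d cap refs\n", decode_refs(priv));
            return 0;
    }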
index ddea999..177d8e8 100644 (file)
@@ -29,9 +29,9 @@ void ceph_fscache_register_inode_cookie(struct inode *inode)
        if (!(inode->i_state & I_NEW))
                return;
 
-       WARN_ON_ONCE(ci->netfs_ctx.cache);
+       WARN_ON_ONCE(ci->netfs.cache);
 
-       ci->netfs_ctx.cache =
+       ci->netfs.cache =
                fscache_acquire_cookie(fsc->fscache, 0,
                                       &ci->i_vino, sizeof(ci->i_vino),
                                       &ci->i_version, sizeof(ci->i_version),
index 7255b79..dc502da 100644 (file)
@@ -28,7 +28,7 @@ void ceph_fscache_invalidate(struct inode *inode, bool dio_write);
 
 static inline struct fscache_cookie *ceph_fscache_cookie(struct ceph_inode_info *ci)
 {
-       return netfs_i_cookie(&ci->vfs_inode);
+       return netfs_i_cookie(&ci->netfs);
 }
 
 static inline void ceph_fscache_resize(struct inode *inode, loff_t to)
index bf2e940..38c9303 100644 (file)
@@ -492,7 +492,7 @@ static void __cap_set_timeouts(struct ceph_mds_client *mdsc,
        struct ceph_mount_options *opt = mdsc->fsc->mount_options;
        ci->i_hold_caps_max = round_jiffies(jiffies +
                                            opt->caps_wanted_delay_max * HZ);
-       dout("__cap_set_timeouts %p %lu\n", &ci->vfs_inode,
+       dout("__cap_set_timeouts %p %lu\n", &ci->netfs.inode,
             ci->i_hold_caps_max - jiffies);
 }
 
@@ -507,7 +507,7 @@ static void __cap_set_timeouts(struct ceph_mds_client *mdsc,
 static void __cap_delay_requeue(struct ceph_mds_client *mdsc,
                                struct ceph_inode_info *ci)
 {
-       dout("__cap_delay_requeue %p flags 0x%lx at %lu\n", &ci->vfs_inode,
+       dout("__cap_delay_requeue %p flags 0x%lx at %lu\n", &ci->netfs.inode,
             ci->i_ceph_flags, ci->i_hold_caps_max);
        if (!mdsc->stopping) {
                spin_lock(&mdsc->cap_delay_lock);
@@ -531,7 +531,7 @@ no_change:
 static void __cap_delay_requeue_front(struct ceph_mds_client *mdsc,
                                      struct ceph_inode_info *ci)
 {
-       dout("__cap_delay_requeue_front %p\n", &ci->vfs_inode);
+       dout("__cap_delay_requeue_front %p\n", &ci->netfs.inode);
        spin_lock(&mdsc->cap_delay_lock);
        ci->i_ceph_flags |= CEPH_I_FLUSH;
        if (!list_empty(&ci->i_cap_delay_list))
@@ -548,7 +548,7 @@ static void __cap_delay_requeue_front(struct ceph_mds_client *mdsc,
 static void __cap_delay_cancel(struct ceph_mds_client *mdsc,
                               struct ceph_inode_info *ci)
 {
-       dout("__cap_delay_cancel %p\n", &ci->vfs_inode);
+       dout("__cap_delay_cancel %p\n", &ci->netfs.inode);
        if (list_empty(&ci->i_cap_delay_list))
                return;
        spin_lock(&mdsc->cap_delay_lock);
@@ -568,7 +568,7 @@ static void __check_cap_issue(struct ceph_inode_info *ci, struct ceph_cap *cap,
         * Each time we receive FILE_CACHE anew, we increment
         * i_rdcache_gen.
         */
-       if (S_ISREG(ci->vfs_inode.i_mode) &&
+       if (S_ISREG(ci->netfs.inode.i_mode) &&
            (issued & (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO)) &&
            (had & (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO)) == 0) {
                ci->i_rdcache_gen++;
@@ -583,14 +583,14 @@ static void __check_cap_issue(struct ceph_inode_info *ci, struct ceph_cap *cap,
        if ((issued & CEPH_CAP_FILE_SHARED) != (had & CEPH_CAP_FILE_SHARED)) {
                if (issued & CEPH_CAP_FILE_SHARED)
                        atomic_inc(&ci->i_shared_gen);
-               if (S_ISDIR(ci->vfs_inode.i_mode)) {
-                       dout(" marking %p NOT complete\n", &ci->vfs_inode);
+               if (S_ISDIR(ci->netfs.inode.i_mode)) {
+                       dout(" marking %p NOT complete\n", &ci->netfs.inode);
                        __ceph_dir_clear_complete(ci);
                }
        }
 
        /* Wipe saved layout if we're losing DIR_CREATE caps */
-       if (S_ISDIR(ci->vfs_inode.i_mode) && (had & CEPH_CAP_DIR_CREATE) &&
+       if (S_ISDIR(ci->netfs.inode.i_mode) && (had & CEPH_CAP_DIR_CREATE) &&
                !(issued & CEPH_CAP_DIR_CREATE)) {
             ceph_put_string(rcu_dereference_raw(ci->i_cached_layout.pool_ns));
             memset(&ci->i_cached_layout, 0, sizeof(ci->i_cached_layout));
@@ -771,7 +771,7 @@ static int __cap_is_valid(struct ceph_cap *cap)
 
        if (cap->cap_gen < gen || time_after_eq(jiffies, ttl)) {
                dout("__cap_is_valid %p cap %p issued %s "
-                    "but STALE (gen %u vs %u)\n", &cap->ci->vfs_inode,
+                    "but STALE (gen %u vs %u)\n", &cap->ci->netfs.inode,
                     cap, ceph_cap_string(cap->issued), cap->cap_gen, gen);
                return 0;
        }
@@ -797,7 +797,7 @@ int __ceph_caps_issued(struct ceph_inode_info *ci, int *implemented)
                if (!__cap_is_valid(cap))
                        continue;
                dout("__ceph_caps_issued %p cap %p issued %s\n",
-                    &ci->vfs_inode, cap, ceph_cap_string(cap->issued));
+                    &ci->netfs.inode, cap, ceph_cap_string(cap->issued));
                have |= cap->issued;
                if (implemented)
                        *implemented |= cap->implemented;
@@ -844,12 +844,12 @@ static void __touch_cap(struct ceph_cap *cap)
 
        spin_lock(&s->s_cap_lock);
        if (!s->s_cap_iterator) {
-               dout("__touch_cap %p cap %p mds%d\n", &cap->ci->vfs_inode, cap,
+               dout("__touch_cap %p cap %p mds%d\n", &cap->ci->netfs.inode, cap,
                     s->s_mds);
                list_move_tail(&cap->session_caps, &s->s_caps);
        } else {
                dout("__touch_cap %p cap %p mds%d NOP, iterating over caps\n",
-                    &cap->ci->vfs_inode, cap, s->s_mds);
+                    &cap->ci->netfs.inode, cap, s->s_mds);
        }
        spin_unlock(&s->s_cap_lock);
 }
@@ -867,7 +867,7 @@ int __ceph_caps_issued_mask(struct ceph_inode_info *ci, int mask, int touch)
 
        if ((have & mask) == mask) {
                dout("__ceph_caps_issued_mask ino 0x%llx snap issued %s"
-                    " (mask %s)\n", ceph_ino(&ci->vfs_inode),
+                    " (mask %s)\n", ceph_ino(&ci->netfs.inode),
                     ceph_cap_string(have),
                     ceph_cap_string(mask));
                return 1;
@@ -879,7 +879,7 @@ int __ceph_caps_issued_mask(struct ceph_inode_info *ci, int mask, int touch)
                        continue;
                if ((cap->issued & mask) == mask) {
                        dout("__ceph_caps_issued_mask ino 0x%llx cap %p issued %s"
-                            " (mask %s)\n", ceph_ino(&ci->vfs_inode), cap,
+                            " (mask %s)\n", ceph_ino(&ci->netfs.inode), cap,
                             ceph_cap_string(cap->issued),
                             ceph_cap_string(mask));
                        if (touch)
@@ -891,7 +891,7 @@ int __ceph_caps_issued_mask(struct ceph_inode_info *ci, int mask, int touch)
                have |= cap->issued;
                if ((have & mask) == mask) {
                        dout("__ceph_caps_issued_mask ino 0x%llx combo issued %s"
-                            " (mask %s)\n", ceph_ino(&ci->vfs_inode),
+                            " (mask %s)\n", ceph_ino(&ci->netfs.inode),
                             ceph_cap_string(cap->issued),
                             ceph_cap_string(mask));
                        if (touch) {
@@ -919,7 +919,7 @@ int __ceph_caps_issued_mask(struct ceph_inode_info *ci, int mask, int touch)
 int __ceph_caps_issued_mask_metric(struct ceph_inode_info *ci, int mask,
                                   int touch)
 {
-       struct ceph_fs_client *fsc = ceph_sb_to_client(ci->vfs_inode.i_sb);
+       struct ceph_fs_client *fsc = ceph_sb_to_client(ci->netfs.inode.i_sb);
        int r;
 
        r = __ceph_caps_issued_mask(ci, mask, touch);
@@ -950,7 +950,7 @@ int __ceph_caps_revoking_other(struct ceph_inode_info *ci,
 
 int ceph_caps_revoking(struct ceph_inode_info *ci, int mask)
 {
-       struct inode *inode = &ci->vfs_inode;
+       struct inode *inode = &ci->netfs.inode;
        int ret;
 
        spin_lock(&ci->i_ceph_lock);
@@ -969,8 +969,8 @@ int __ceph_caps_used(struct ceph_inode_info *ci)
        if (ci->i_rd_ref)
                used |= CEPH_CAP_FILE_RD;
        if (ci->i_rdcache_ref ||
-           (S_ISREG(ci->vfs_inode.i_mode) &&
-            ci->vfs_inode.i_data.nrpages))
+           (S_ISREG(ci->netfs.inode.i_mode) &&
+            ci->netfs.inode.i_data.nrpages))
                used |= CEPH_CAP_FILE_CACHE;
        if (ci->i_wr_ref)
                used |= CEPH_CAP_FILE_WR;
@@ -993,11 +993,11 @@ int __ceph_caps_file_wanted(struct ceph_inode_info *ci)
        const int WR_SHIFT = ffs(CEPH_FILE_MODE_WR);
        const int LAZY_SHIFT = ffs(CEPH_FILE_MODE_LAZY);
        struct ceph_mount_options *opt =
-               ceph_inode_to_client(&ci->vfs_inode)->mount_options;
+               ceph_inode_to_client(&ci->netfs.inode)->mount_options;
        unsigned long used_cutoff = jiffies - opt->caps_wanted_delay_max * HZ;
        unsigned long idle_cutoff = jiffies - opt->caps_wanted_delay_min * HZ;
 
-       if (S_ISDIR(ci->vfs_inode.i_mode)) {
+       if (S_ISDIR(ci->netfs.inode.i_mode)) {
                int want = 0;
 
                /* use used_cutoff here, to keep dir's wanted caps longer */
@@ -1050,7 +1050,7 @@ int __ceph_caps_file_wanted(struct ceph_inode_info *ci)
 int __ceph_caps_wanted(struct ceph_inode_info *ci)
 {
        int w = __ceph_caps_file_wanted(ci) | __ceph_caps_used(ci);
-       if (S_ISDIR(ci->vfs_inode.i_mode)) {
+       if (S_ISDIR(ci->netfs.inode.i_mode)) {
                /* we want EXCL if holding caps of dir ops */
                if (w & CEPH_CAP_ANY_DIR_OPS)
                        w |= CEPH_CAP_FILE_EXCL;
@@ -1116,9 +1116,9 @@ void __ceph_remove_cap(struct ceph_cap *cap, bool queue_release)
 
        lockdep_assert_held(&ci->i_ceph_lock);
 
-       dout("__ceph_remove_cap %p from %p\n", cap, &ci->vfs_inode);
+       dout("__ceph_remove_cap %p from %p\n", cap, &ci->netfs.inode);
 
-       mdsc = ceph_inode_to_client(&ci->vfs_inode)->mdsc;
+       mdsc = ceph_inode_to_client(&ci->netfs.inode)->mdsc;
 
        /* remove from inode's cap rbtree, and clear auth cap */
        rb_erase(&cap->ci_node, &ci->i_caps);
@@ -1169,7 +1169,7 @@ void __ceph_remove_cap(struct ceph_cap *cap, bool queue_release)
                 * keep i_snap_realm.
                 */
                if (ci->i_wr_ref == 0 && ci->i_snap_realm)
-                       ceph_change_snap_realm(&ci->vfs_inode, NULL);
+                       ceph_change_snap_realm(&ci->netfs.inode, NULL);
 
                __cap_delay_cancel(mdsc, ci);
        }
@@ -1188,11 +1188,11 @@ void ceph_remove_cap(struct ceph_cap *cap, bool queue_release)
 
        lockdep_assert_held(&ci->i_ceph_lock);
 
-       fsc = ceph_inode_to_client(&ci->vfs_inode);
+       fsc = ceph_inode_to_client(&ci->netfs.inode);
        WARN_ON_ONCE(ci->i_auth_cap == cap &&
                     !list_empty(&ci->i_dirty_item) &&
                     !fsc->blocklisted &&
-                    !ceph_inode_is_shutdown(&ci->vfs_inode));
+                    !ceph_inode_is_shutdown(&ci->netfs.inode));
 
        __ceph_remove_cap(cap, queue_release);
 }
@@ -1343,7 +1343,7 @@ static void __prep_cap(struct cap_msg_args *arg, struct ceph_cap *cap,
                       int flushing, u64 flush_tid, u64 oldest_flush_tid)
 {
        struct ceph_inode_info *ci = cap->ci;
-       struct inode *inode = &ci->vfs_inode;
+       struct inode *inode = &ci->netfs.inode;
        int held, revoking;
 
        lockdep_assert_held(&ci->i_ceph_lock);
@@ -1440,7 +1440,7 @@ static void __prep_cap(struct cap_msg_args *arg, struct ceph_cap *cap,
 static void __send_cap(struct cap_msg_args *arg, struct ceph_inode_info *ci)
 {
        struct ceph_msg *msg;
-       struct inode *inode = &ci->vfs_inode;
+       struct inode *inode = &ci->netfs.inode;
 
        msg = ceph_msg_new(CEPH_MSG_CLIENT_CAPS, CAP_MSG_SIZE, GFP_NOFS, false);
        if (!msg) {
@@ -1528,7 +1528,7 @@ static void __ceph_flush_snaps(struct ceph_inode_info *ci,
                __releases(ci->i_ceph_lock)
                __acquires(ci->i_ceph_lock)
 {
-       struct inode *inode = &ci->vfs_inode;
+       struct inode *inode = &ci->netfs.inode;
        struct ceph_mds_client *mdsc = session->s_mdsc;
        struct ceph_cap_snap *capsnap;
        u64 oldest_flush_tid = 0;
@@ -1622,7 +1622,7 @@ static void __ceph_flush_snaps(struct ceph_inode_info *ci,
 void ceph_flush_snaps(struct ceph_inode_info *ci,
                      struct ceph_mds_session **psession)
 {
-       struct inode *inode = &ci->vfs_inode;
+       struct inode *inode = &ci->netfs.inode;
        struct ceph_mds_client *mdsc = ceph_inode_to_client(inode)->mdsc;
        struct ceph_mds_session *session = NULL;
        int mds;
@@ -1682,8 +1682,8 @@ int __ceph_mark_dirty_caps(struct ceph_inode_info *ci, int mask,
                           struct ceph_cap_flush **pcf)
 {
        struct ceph_mds_client *mdsc =
-               ceph_sb_to_client(ci->vfs_inode.i_sb)->mdsc;
-       struct inode *inode = &ci->vfs_inode;
+               ceph_sb_to_client(ci->netfs.inode.i_sb)->mdsc;
+       struct inode *inode = &ci->netfs.inode;
        int was = ci->i_dirty_caps;
        int dirty = 0;
 
@@ -1696,7 +1696,7 @@ int __ceph_mark_dirty_caps(struct ceph_inode_info *ci, int mask,
                return 0;
        }
 
-       dout("__mark_dirty_caps %p %s dirty %s -> %s\n", &ci->vfs_inode,
+       dout("__mark_dirty_caps %p %s dirty %s -> %s\n", &ci->netfs.inode,
             ceph_cap_string(mask), ceph_cap_string(was),
             ceph_cap_string(was | mask));
        ci->i_dirty_caps |= mask;
@@ -1712,7 +1712,7 @@ int __ceph_mark_dirty_caps(struct ceph_inode_info *ci, int mask,
                                ci->i_snap_realm->cached_context);
                }
                dout(" inode %p now dirty snapc %p auth cap %p\n",
-                    &ci->vfs_inode, ci->i_head_snapc, ci->i_auth_cap);
+                    &ci->netfs.inode, ci->i_head_snapc, ci->i_auth_cap);
                BUG_ON(!list_empty(&ci->i_dirty_item));
                spin_lock(&mdsc->cap_dirty_lock);
                list_add(&ci->i_dirty_item, &session->s_cap_dirty);
@@ -1875,7 +1875,7 @@ static int try_nonblocking_invalidate(struct inode *inode)
 
 bool __ceph_should_report_size(struct ceph_inode_info *ci)
 {
-       loff_t size = i_size_read(&ci->vfs_inode);
+       loff_t size = i_size_read(&ci->netfs.inode);
        /* mds will adjust max size according to the reported size */
        if (ci->i_flushing_caps & CEPH_CAP_FILE_WR)
                return false;
@@ -1900,7 +1900,7 @@ bool __ceph_should_report_size(struct ceph_inode_info *ci)
 void ceph_check_caps(struct ceph_inode_info *ci, int flags,
                     struct ceph_mds_session *session)
 {
-       struct inode *inode = &ci->vfs_inode;
+       struct inode *inode = &ci->netfs.inode;
        struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(inode->i_sb);
        struct ceph_cap *cap;
        u64 flush_tid, oldest_flush_tid;
@@ -2467,7 +2467,7 @@ static void __kick_flushing_caps(struct ceph_mds_client *mdsc,
        __releases(ci->i_ceph_lock)
        __acquires(ci->i_ceph_lock)
 {
-       struct inode *inode = &ci->vfs_inode;
+       struct inode *inode = &ci->netfs.inode;
        struct ceph_cap *cap;
        struct ceph_cap_flush *cf;
        int ret;
@@ -2560,7 +2560,7 @@ void ceph_early_kick_flushing_caps(struct ceph_mds_client *mdsc,
                cap = ci->i_auth_cap;
                if (!(cap && cap->session == session)) {
                        pr_err("%p auth cap %p not mds%d ???\n",
-                               &ci->vfs_inode, cap, session->s_mds);
+                               &ci->netfs.inode, cap, session->s_mds);
                        spin_unlock(&ci->i_ceph_lock);
                        continue;
                }
@@ -2610,7 +2610,7 @@ void ceph_kick_flushing_caps(struct ceph_mds_client *mdsc,
                cap = ci->i_auth_cap;
                if (!(cap && cap->session == session)) {
                        pr_err("%p auth cap %p not mds%d ???\n",
-                               &ci->vfs_inode, cap, session->s_mds);
+                               &ci->netfs.inode, cap, session->s_mds);
                        spin_unlock(&ci->i_ceph_lock);
                        continue;
                }
@@ -2630,7 +2630,7 @@ void ceph_kick_flushing_inode_caps(struct ceph_mds_session *session,
 
        lockdep_assert_held(&ci->i_ceph_lock);
 
-       dout("%s %p flushing %s\n", __func__, &ci->vfs_inode,
+       dout("%s %p flushing %s\n", __func__, &ci->netfs.inode,
             ceph_cap_string(ci->i_flushing_caps));
 
        if (!list_empty(&ci->i_cap_flush_list)) {
@@ -2673,10 +2673,10 @@ void ceph_take_cap_refs(struct ceph_inode_info *ci, int got,
        }
        if (got & CEPH_CAP_FILE_BUFFER) {
                if (ci->i_wb_ref == 0)
-                       ihold(&ci->vfs_inode);
+                       ihold(&ci->netfs.inode);
                ci->i_wb_ref++;
                dout("%s %p wb %d -> %d (?)\n", __func__,
-                    &ci->vfs_inode, ci->i_wb_ref-1, ci->i_wb_ref);
+                    &ci->netfs.inode, ci->i_wb_ref-1, ci->i_wb_ref);
        }
 }
 
@@ -3004,7 +3004,7 @@ int ceph_get_caps(struct file *filp, int need, int want, loff_t endoff, int *got
                        return ret;
                }
 
-               if (S_ISREG(ci->vfs_inode.i_mode) &&
+               if (S_ISREG(ci->netfs.inode.i_mode) &&
                    ci->i_inline_version != CEPH_INLINE_NONE &&
                    (_got & (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO)) &&
                    i_size_read(inode) > 0) {
@@ -3094,7 +3094,7 @@ enum put_cap_refs_mode {
 static void __ceph_put_cap_refs(struct ceph_inode_info *ci, int had,
                                enum put_cap_refs_mode mode)
 {
-       struct inode *inode = &ci->vfs_inode;
+       struct inode *inode = &ci->netfs.inode;
        int last = 0, put = 0, flushsnaps = 0, wake = 0;
        bool check_flushsnaps = false;
 
@@ -3202,7 +3202,7 @@ void ceph_put_cap_refs_no_check_caps(struct ceph_inode_info *ci, int had)
 void ceph_put_wrbuffer_cap_refs(struct ceph_inode_info *ci, int nr,
                                struct ceph_snap_context *snapc)
 {
-       struct inode *inode = &ci->vfs_inode;
+       struct inode *inode = &ci->netfs.inode;
        struct ceph_cap_snap *capsnap = NULL, *iter;
        int put = 0;
        bool last = false;
@@ -3698,7 +3698,7 @@ static void handle_cap_flush_ack(struct inode *inode, u64 flush_tid,
                                     session->s_mds,
                                     &list_first_entry(&session->s_cap_flushing,
                                                struct ceph_inode_info,
-                                               i_flushing_item)->vfs_inode);
+                                               i_flushing_item)->netfs.inode);
                        }
                }
                mdsc->num_cap_flushing--;
@@ -4345,7 +4345,7 @@ unsigned long ceph_check_delayed_caps(struct ceph_mds_client *mdsc)
                        break;
                list_del_init(&ci->i_cap_delay_list);
 
-               inode = igrab(&ci->vfs_inode);
+               inode = igrab(&ci->netfs.inode);
                if (inode) {
                        spin_unlock(&mdsc->cap_delay_lock);
                        dout("check_delayed_caps on %p\n", inode);
@@ -4373,7 +4373,7 @@ static void flush_dirty_session_caps(struct ceph_mds_session *s)
        while (!list_empty(&s->s_cap_dirty)) {
                ci = list_first_entry(&s->s_cap_dirty, struct ceph_inode_info,
                                      i_dirty_item);
-               inode = &ci->vfs_inode;
+               inode = &ci->netfs.inode;
                ihold(inode);
                dout("flush_dirty_caps %llx.%llx\n", ceph_vinop(inode));
                spin_unlock(&mdsc->cap_dirty_lock);
@@ -4407,7 +4407,7 @@ void __ceph_touch_fmode(struct ceph_inode_info *ci,
 
 void ceph_get_fmode(struct ceph_inode_info *ci, int fmode, int count)
 {
-       struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(ci->vfs_inode.i_sb);
+       struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(ci->netfs.inode.i_sb);
        int bits = (fmode << 1) | 1;
        bool already_opened = false;
        int i;
@@ -4441,7 +4441,7 @@ void ceph_get_fmode(struct ceph_inode_info *ci, int fmode, int count)
  */
 void ceph_put_fmode(struct ceph_inode_info *ci, int fmode, int count)
 {
-       struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(ci->vfs_inode.i_sb);
+       struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(ci->netfs.inode.i_sb);
        int bits = (fmode << 1) | 1;
        bool is_closed = true;
        int i;
@@ -4656,7 +4656,7 @@ int ceph_purge_inode_cap(struct inode *inode, struct ceph_cap *cap, bool *invali
        lockdep_assert_held(&ci->i_ceph_lock);
 
        dout("removing cap %p, ci is %p, inode is %p\n",
-            cap, ci, &ci->vfs_inode);
+            cap, ci, &ci->netfs.inode);
 
        is_auth = (cap == ci->i_auth_cap);
        __ceph_remove_cap(cap, false);
index 8c8226c..da59e83 100644 (file)
@@ -205,7 +205,7 @@ static int ceph_init_file_info(struct inode *inode, struct file *file,
 {
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct ceph_mount_options *opt =
-               ceph_inode_to_client(&ci->vfs_inode)->mount_options;
+               ceph_inode_to_client(&ci->netfs.inode)->mount_options;
        struct ceph_file_info *fi;
        int ret;
 
index b7e9cac..56c53ab 100644 (file)
@@ -176,7 +176,7 @@ static struct ceph_inode_frag *__get_or_create_frag(struct ceph_inode_info *ci,
        rb_insert_color(&frag->node, &ci->i_fragtree);
 
        dout("get_or_create_frag added %llx.%llx frag %x\n",
-            ceph_vinop(&ci->vfs_inode), f);
+            ceph_vinop(&ci->netfs.inode), f);
        return frag;
 }
 
@@ -457,10 +457,10 @@ struct inode *ceph_alloc_inode(struct super_block *sb)
        if (!ci)
                return NULL;
 
-       dout("alloc_inode %p\n", &ci->vfs_inode);
+       dout("alloc_inode %p\n", &ci->netfs.inode);
 
        /* Set parameters for the netfs library */
-       netfs_i_context_init(&ci->vfs_inode, &ceph_netfs_ops);
+       netfs_inode_init(&ci->netfs, &ceph_netfs_ops);
 
        spin_lock_init(&ci->i_ceph_lock);
 
@@ -547,7 +547,7 @@ struct inode *ceph_alloc_inode(struct super_block *sb)
        INIT_WORK(&ci->i_work, ceph_inode_work);
        ci->i_work_mask = 0;
        memset(&ci->i_btime, '\0', sizeof(ci->i_btime));
-       return &ci->vfs_inode;
+       return &ci->netfs.inode;
 }
 
 void ceph_free_inode(struct inode *inode)
@@ -1978,7 +1978,7 @@ static void ceph_inode_work(struct work_struct *work)
 {
        struct ceph_inode_info *ci = container_of(work, struct ceph_inode_info,
                                                 i_work);
-       struct inode *inode = &ci->vfs_inode;
+       struct inode *inode = &ci->netfs.inode;
 
        if (test_and_clear_bit(CEPH_I_WORK_WRITEBACK, &ci->i_work_mask)) {
                dout("writeback %p\n", inode);
index f5d110d..33f517d 100644 (file)
@@ -1564,7 +1564,7 @@ int ceph_iterate_session_caps(struct ceph_mds_session *session,
        p = session->s_caps.next;
        while (p != &session->s_caps) {
                cap = list_entry(p, struct ceph_cap, session_caps);
-               inode = igrab(&cap->ci->vfs_inode);
+               inode = igrab(&cap->ci->netfs.inode);
                if (!inode) {
                        p = p->next;
                        continue;
@@ -1622,7 +1622,7 @@ static int remove_session_caps_cb(struct inode *inode, struct ceph_cap *cap,
        int iputs;
 
        dout("removing cap %p, ci is %p, inode is %p\n",
-            cap, ci, &ci->vfs_inode);
+            cap, ci, &ci->netfs.inode);
        spin_lock(&ci->i_ceph_lock);
        iputs = ceph_purge_inode_cap(inode, cap, &invalidate);
        spin_unlock(&ci->i_ceph_lock);
index 322ee5a..864cdaa 100644 (file)
@@ -521,7 +521,7 @@ static bool has_new_snaps(struct ceph_snap_context *o,
 static void ceph_queue_cap_snap(struct ceph_inode_info *ci,
                                struct ceph_cap_snap **pcapsnap)
 {
-       struct inode *inode = &ci->vfs_inode;
+       struct inode *inode = &ci->netfs.inode;
        struct ceph_snap_context *old_snapc, *new_snapc;
        struct ceph_cap_snap *capsnap = *pcapsnap;
        struct ceph_buffer *old_blob = NULL;
@@ -652,7 +652,7 @@ update_snapc:
 int __ceph_finish_cap_snap(struct ceph_inode_info *ci,
                            struct ceph_cap_snap *capsnap)
 {
-       struct inode *inode = &ci->vfs_inode;
+       struct inode *inode = &ci->netfs.inode;
        struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(inode->i_sb);
 
        BUG_ON(capsnap->writing);
@@ -712,7 +712,7 @@ static void queue_realm_cap_snaps(struct ceph_snap_realm *realm)
 
        spin_lock(&realm->inodes_with_caps_lock);
        list_for_each_entry(ci, &realm->inodes_with_caps, i_snap_realm_item) {
-               struct inode *inode = igrab(&ci->vfs_inode);
+               struct inode *inode = igrab(&ci->netfs.inode);
                if (!inode)
                        continue;
                spin_unlock(&realm->inodes_with_caps_lock);
@@ -904,7 +904,7 @@ static void flush_snaps(struct ceph_mds_client *mdsc)
        while (!list_empty(&mdsc->snap_flush_list)) {
                ci = list_first_entry(&mdsc->snap_flush_list,
                                struct ceph_inode_info, i_snap_flush_item);
-               inode = &ci->vfs_inode;
+               inode = &ci->netfs.inode;
                ihold(inode);
                spin_unlock(&mdsc->snap_flush_lock);
                ceph_flush_snaps(ci, &session);
index b73b4f7..4014080 100644 (file)
@@ -876,7 +876,7 @@ mempool_t *ceph_wb_pagevec_pool;
 static void ceph_inode_init_once(void *foo)
 {
        struct ceph_inode_info *ci = foo;
-       inode_init_once(&ci->vfs_inode);
+       inode_init_once(&ci->netfs.inode);
 }
 
 static int __init init_caches(void)
index dd7dac0..f59dac6 100644 (file)
@@ -316,11 +316,7 @@ struct ceph_inode_xattrs_info {
  * Ceph inode.
  */
 struct ceph_inode_info {
-       struct {
-               /* These must be contiguous */
-               struct inode vfs_inode;
-               struct netfs_i_context netfs_ctx; /* Netfslib context */
-       };
+       struct netfs_inode netfs; /* Netfslib context and vfs inode */
        struct ceph_vino i_vino;   /* ceph ino + snap */
 
        spinlock_t i_ceph_lock;
@@ -436,7 +432,7 @@ struct ceph_inode_info {
 static inline struct ceph_inode_info *
 ceph_inode(const struct inode *inode)
 {
-       return container_of(inode, struct ceph_inode_info, vfs_inode);
+       return container_of(inode, struct ceph_inode_info, netfs.inode);
 }
 
 static inline struct ceph_fs_client *
@@ -1316,7 +1312,7 @@ static inline void __ceph_update_quota(struct ceph_inode_info *ci,
        has_quota = __ceph_has_quota(ci, QUOTA_GET_ANY);
 
        if (had_quota != has_quota)
-               ceph_adjust_quota_realms_count(&ci->vfs_inode, has_quota);
+               ceph_adjust_quota_realms_count(&ci->netfs.inode, has_quota);
 }
 
 extern void ceph_handle_quota(struct ceph_mds_client *mdsc,
index 8c2dc2c..f141f52 100644 (file)
@@ -57,7 +57,7 @@ static bool ceph_vxattrcb_layout_exists(struct ceph_inode_info *ci)
 static ssize_t ceph_vxattrcb_layout(struct ceph_inode_info *ci, char *val,
                                    size_t size)
 {
-       struct ceph_fs_client *fsc = ceph_sb_to_client(ci->vfs_inode.i_sb);
+       struct ceph_fs_client *fsc = ceph_sb_to_client(ci->netfs.inode.i_sb);
        struct ceph_osd_client *osdc = &fsc->client->osdc;
        struct ceph_string *pool_ns;
        s64 pool = ci->i_layout.pool_id;
@@ -69,7 +69,7 @@ static ssize_t ceph_vxattrcb_layout(struct ceph_inode_info *ci, char *val,
 
        pool_ns = ceph_try_get_string(ci->i_layout.pool_ns);
 
-       dout("ceph_vxattrcb_layout %p\n", &ci->vfs_inode);
+       dout("ceph_vxattrcb_layout %p\n", &ci->netfs.inode);
        down_read(&osdc->lock);
        pool_name = ceph_pg_pool_name_by_id(osdc->osdmap, pool);
        if (pool_name) {
@@ -161,7 +161,7 @@ static ssize_t ceph_vxattrcb_layout_pool(struct ceph_inode_info *ci,
                                         char *val, size_t size)
 {
        ssize_t ret;
-       struct ceph_fs_client *fsc = ceph_sb_to_client(ci->vfs_inode.i_sb);
+       struct ceph_fs_client *fsc = ceph_sb_to_client(ci->netfs.inode.i_sb);
        struct ceph_osd_client *osdc = &fsc->client->osdc;
        s64 pool = ci->i_layout.pool_id;
        const char *pool_name;
@@ -313,7 +313,7 @@ static ssize_t ceph_vxattrcb_snap_btime(struct ceph_inode_info *ci, char *val,
 static ssize_t ceph_vxattrcb_cluster_fsid(struct ceph_inode_info *ci,
                                          char *val, size_t size)
 {
-       struct ceph_fs_client *fsc = ceph_sb_to_client(ci->vfs_inode.i_sb);
+       struct ceph_fs_client *fsc = ceph_sb_to_client(ci->netfs.inode.i_sb);
 
        return ceph_fmt_xattr(val, size, "%pU", &fsc->client->fsid);
 }
@@ -321,7 +321,7 @@ static ssize_t ceph_vxattrcb_cluster_fsid(struct ceph_inode_info *ci,
 static ssize_t ceph_vxattrcb_client_id(struct ceph_inode_info *ci,
                                       char *val, size_t size)
 {
-       struct ceph_fs_client *fsc = ceph_sb_to_client(ci->vfs_inode.i_sb);
+       struct ceph_fs_client *fsc = ceph_sb_to_client(ci->netfs.inode.i_sb);
 
        return ceph_fmt_xattr(val, size, "client%lld",
                              ceph_client_gid(fsc->client));
@@ -629,7 +629,7 @@ static int __set_xattr(struct ceph_inode_info *ci,
        }
 
        dout("__set_xattr_val added %llx.%llx xattr %p %.*s=%.*s\n",
-            ceph_vinop(&ci->vfs_inode), xattr, name_len, name, val_len, val);
+            ceph_vinop(&ci->netfs.inode), xattr, name_len, name, val_len, val);
 
        return 0;
 }
@@ -871,7 +871,7 @@ struct ceph_buffer *__ceph_build_xattrs_blob(struct ceph_inode_info *ci)
        struct ceph_buffer *old_blob = NULL;
        void *dest;
 
-       dout("__build_xattrs_blob %p\n", &ci->vfs_inode);
+       dout("__build_xattrs_blob %p\n", &ci->netfs.inode);
        if (ci->i_xattrs.dirty) {
                int need = __get_required_blob_size(ci, 0, 0);
 
index 12c8728..8f2e003 100644 (file)
@@ -377,7 +377,7 @@ cifs_alloc_inode(struct super_block *sb)
        cifs_inode->flags = 0;
        spin_lock_init(&cifs_inode->writers_lock);
        cifs_inode->writers = 0;
-       cifs_inode->vfs_inode.i_blkbits = 14;  /* 2**14 = CIFS_MAX_MSGSIZE */
+       cifs_inode->netfs.inode.i_blkbits = 14;  /* 2**14 = CIFS_MAX_MSGSIZE */
        cifs_inode->server_eof = 0;
        cifs_inode->uniqueid = 0;
        cifs_inode->createtime = 0;
@@ -389,12 +389,12 @@ cifs_alloc_inode(struct super_block *sb)
         * Can not set i_flags here - they get immediately overwritten to zero
         * by the VFS.
         */
-       /* cifs_inode->vfs_inode.i_flags = S_NOATIME | S_NOCMTIME; */
+       /* cifs_inode->netfs.inode.i_flags = S_NOATIME | S_NOCMTIME; */
        INIT_LIST_HEAD(&cifs_inode->openFileList);
        INIT_LIST_HEAD(&cifs_inode->llist);
        INIT_LIST_HEAD(&cifs_inode->deferred_closes);
        spin_lock_init(&cifs_inode->deferred_lock);
-       return &cifs_inode->vfs_inode;
+       return &cifs_inode->netfs.inode;
 }
 
 static void
@@ -1086,7 +1086,7 @@ struct file_system_type cifs_fs_type = {
 };
 MODULE_ALIAS_FS("cifs");
 
-static struct file_system_type smb3_fs_type = {
+struct file_system_type smb3_fs_type = {
        .owner = THIS_MODULE,
        .name = "smb3",
        .init_fs_context = smb3_init_fs_context,
@@ -1418,7 +1418,7 @@ cifs_init_once(void *inode)
 {
        struct cifsInodeInfo *cifsi = inode;
 
-       inode_init_once(&cifsi->vfs_inode);
+       inode_init_once(&cifsi->netfs.inode);
        init_rwsem(&cifsi->lock_sem);
 }
 
index dd7e070..b17be47 100644 (file)
@@ -38,7 +38,7 @@ static inline unsigned long cifs_get_time(struct dentry *dentry)
        return (unsigned long) dentry->d_fsdata;
 }
 
-extern struct file_system_type cifs_fs_type;
+extern struct file_system_type cifs_fs_type, smb3_fs_type;
 extern const struct address_space_operations cifs_addr_ops;
 extern const struct address_space_operations cifs_addr_ops_smallbuf;
 
index f873379..e773716 100644 (file)
@@ -1479,20 +1479,16 @@ void cifsFileInfo_put(struct cifsFileInfo *cifs_file);
 #define CIFS_CACHE_RW_FLG      (CIFS_CACHE_READ_FLG | CIFS_CACHE_WRITE_FLG)
 #define CIFS_CACHE_RHW_FLG     (CIFS_CACHE_RW_FLG | CIFS_CACHE_HANDLE_FLG)
 
-#define CIFS_CACHE_READ(cinode) ((cinode->oplock & CIFS_CACHE_READ_FLG) || (CIFS_SB(cinode->vfs_inode.i_sb)->mnt_cifs_flags & CIFS_MOUNT_RO_CACHE))
+#define CIFS_CACHE_READ(cinode) ((cinode->oplock & CIFS_CACHE_READ_FLG) || (CIFS_SB(cinode->netfs.inode.i_sb)->mnt_cifs_flags & CIFS_MOUNT_RO_CACHE))
 #define CIFS_CACHE_HANDLE(cinode) (cinode->oplock & CIFS_CACHE_HANDLE_FLG)
-#define CIFS_CACHE_WRITE(cinode) ((cinode->oplock & CIFS_CACHE_WRITE_FLG) || (CIFS_SB(cinode->vfs_inode.i_sb)->mnt_cifs_flags & CIFS_MOUNT_RW_CACHE))
+#define CIFS_CACHE_WRITE(cinode) ((cinode->oplock & CIFS_CACHE_WRITE_FLG) || (CIFS_SB(cinode->netfs.inode.i_sb)->mnt_cifs_flags & CIFS_MOUNT_RW_CACHE))
 
 /*
  * One of these for each file inode
  */
 
 struct cifsInodeInfo {
-       struct {
-               /* These must be contiguous */
-               struct inode    vfs_inode;      /* the VFS's inode record */
-               struct netfs_i_context netfs_ctx; /* Netfslib context */
-       };
+       struct netfs_inode netfs; /* Netfslib context and vfs inode */
        bool can_cache_brlcks;
        struct list_head llist; /* locks held by this inode */
        /*
@@ -1531,7 +1527,7 @@ struct cifsInodeInfo {
 static inline struct cifsInodeInfo *
 CIFS_I(struct inode *inode)
 {
-       return container_of(inode, struct cifsInodeInfo, vfs_inode);
+       return container_of(inode, struct cifsInodeInfo, netfs.inode);
 }
 
 static inline struct cifs_sb_info *
index d46702f..1849e34 100644 (file)
@@ -97,6 +97,10 @@ static int reconn_set_ipaddr_from_hostname(struct TCP_Server_Info *server)
        if (!server->hostname)
                return -EINVAL;
 
+       /* if server hostname isn't populated, there's nothing to do here */
+       if (server->hostname[0] == '\0')
+               return 0;
+
        len = strlen(server->hostname) + 3;
 
        unc = kmalloc(len, GFP_KERNEL);
index 1618e05..e64cda7 100644 (file)
@@ -2004,7 +2004,7 @@ struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode,
                                        bool fsuid_only)
 {
        struct cifsFileInfo *open_file = NULL;
-       struct cifs_sb_info *cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);
+       struct cifs_sb_info *cifs_sb = CIFS_SB(cifs_inode->netfs.inode.i_sb);
 
        /* only filter by fsuid on multiuser mounts */
        if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
@@ -2060,7 +2060,7 @@ cifs_get_writable_file(struct cifsInodeInfo *cifs_inode, int flags,
                return rc;
        }
 
-       cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);
+       cifs_sb = CIFS_SB(cifs_inode->netfs.inode.i_sb);
 
        /* only filter by fsuid on multiuser mounts */
        if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
@@ -4669,14 +4669,14 @@ bool is_size_safe_to_change(struct cifsInodeInfo *cifsInode, __u64 end_of_file)
                /* This inode is open for write at least once */
                struct cifs_sb_info *cifs_sb;
 
-               cifs_sb = CIFS_SB(cifsInode->vfs_inode.i_sb);
+               cifs_sb = CIFS_SB(cifsInode->netfs.inode.i_sb);
                if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO) {
                        /* since no page cache to corrupt on directio
                        we can change size safely */
                        return true;
                }
 
-               if (i_size_read(&cifsInode->vfs_inode) < end_of_file)
+               if (i_size_read(&cifsInode->netfs.inode) < end_of_file)
                        return true;
 
                return false;
index a638b29..23ef56f 100644 (file)
@@ -101,13 +101,13 @@ void cifs_fscache_get_inode_cookie(struct inode *inode)
        struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
        struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
 
-       cifs_fscache_fill_coherency(&cifsi->vfs_inode, &cd);
+       cifs_fscache_fill_coherency(&cifsi->netfs.inode, &cd);
 
-       cifsi->netfs_ctx.cache =
+       cifsi->netfs.cache =
                fscache_acquire_cookie(tcon->fscache, 0,
                                       &cifsi->uniqueid, sizeof(cifsi->uniqueid),
                                       &cd, sizeof(cd),
-                                      i_size_read(&cifsi->vfs_inode));
+                                      i_size_read(&cifsi->netfs.inode));
 }
 
 void cifs_fscache_unuse_inode_cookie(struct inode *inode, bool update)
@@ -131,7 +131,7 @@ void cifs_fscache_release_inode_cookie(struct inode *inode)
        if (cookie) {
                cifs_dbg(FYI, "%s: (0x%p)\n", __func__, cookie);
                fscache_relinquish_cookie(cookie, false);
-               cifsi->netfs_ctx.cache = NULL;
+               cifsi->netfs.cache = NULL;
        }
 }
 
index 52355c0..aa3b941 100644 (file)
@@ -52,16 +52,16 @@ void cifs_fscache_fill_coherency(struct inode *inode,
        struct cifsInodeInfo *cifsi = CIFS_I(inode);
 
        memset(cd, 0, sizeof(*cd));
-       cd->last_write_time_sec   = cpu_to_le64(cifsi->vfs_inode.i_mtime.tv_sec);
-       cd->last_write_time_nsec  = cpu_to_le32(cifsi->vfs_inode.i_mtime.tv_nsec);
-       cd->last_change_time_sec  = cpu_to_le64(cifsi->vfs_inode.i_ctime.tv_sec);
-       cd->last_change_time_nsec = cpu_to_le32(cifsi->vfs_inode.i_ctime.tv_nsec);
+       cd->last_write_time_sec   = cpu_to_le64(cifsi->netfs.inode.i_mtime.tv_sec);
+       cd->last_write_time_nsec  = cpu_to_le32(cifsi->netfs.inode.i_mtime.tv_nsec);
+       cd->last_change_time_sec  = cpu_to_le64(cifsi->netfs.inode.i_ctime.tv_sec);
+       cd->last_change_time_nsec = cpu_to_le32(cifsi->netfs.inode.i_ctime.tv_nsec);
 }
 
 
 static inline struct fscache_cookie *cifs_inode_cookie(struct inode *inode)
 {
-       return netfs_i_cookie(inode);
+       return netfs_i_cookie(&CIFS_I(inode)->netfs);
 }
 
 static inline void cifs_invalidate_cache(struct inode *inode, unsigned int flags)
index 2f9e7d2..81da81e 100644 (file)
@@ -115,7 +115,7 @@ cifs_revalidate_cache(struct inode *inode, struct cifs_fattr *fattr)
                 __func__, cifs_i->uniqueid);
        set_bit(CIFS_INO_INVALID_MAPPING, &cifs_i->flags);
        /* Invalidate fscache cookie */
-       cifs_fscache_fill_coherency(&cifs_i->vfs_inode, &cd);
+       cifs_fscache_fill_coherency(&cifs_i->netfs.inode, &cd);
        fscache_invalidate(cifs_inode_cookie(inode), &cd, i_size_read(inode), 0);
 }
 
@@ -2499,7 +2499,7 @@ int cifs_fiemap(struct inode *inode, struct fiemap_extent_info *fei, u64 start,
                u64 len)
 {
        struct cifsInodeInfo *cifs_i = CIFS_I(inode);
-       struct cifs_sb_info *cifs_sb = CIFS_SB(cifs_i->vfs_inode.i_sb);
+       struct cifs_sb_info *cifs_sb = CIFS_SB(cifs_i->netfs.inode.i_sb);
        struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
        struct TCP_Server_Info *server = tcon->ses->server;
        struct cifsFileInfo *cfile;
index 35962a1..c69e124 100644 (file)
@@ -537,11 +537,11 @@ void cifs_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock)
        if (oplock == OPLOCK_EXCLUSIVE) {
                cinode->oplock = CIFS_CACHE_WRITE_FLG | CIFS_CACHE_READ_FLG;
                cifs_dbg(FYI, "Exclusive Oplock granted on inode %p\n",
-                        &cinode->vfs_inode);
+                        &cinode->netfs.inode);
        } else if (oplock == OPLOCK_READ) {
                cinode->oplock = CIFS_CACHE_READ_FLG;
                cifs_dbg(FYI, "Level II Oplock granted on inode %p\n",
-                        &cinode->vfs_inode);
+                        &cinode->netfs.inode);
        } else
                cinode->oplock = 0;
 }
@@ -1211,18 +1211,23 @@ static struct super_block *__cifs_get_super(void (*f)(struct super_block *, void
                .data = data,
                .sb = NULL,
        };
+       struct file_system_type **fs_type = (struct file_system_type *[]) {
+               &cifs_fs_type, &smb3_fs_type, NULL,
+       };
 
-       iterate_supers_type(&cifs_fs_type, f, &sd);
-
-       if (!sd.sb)
-               return ERR_PTR(-EINVAL);
-       /*
-        * Grab an active reference in order to prevent automounts (DFS links)
-        * of expiring and then freeing up our cifs superblock pointer while
-        * we're doing failover.
-        */
-       cifs_sb_active(sd.sb);
-       return sd.sb;
+       for (; *fs_type; fs_type++) {
+               iterate_supers_type(*fs_type, f, &sd);
+               if (sd.sb) {
+                       /*
+                        * Grab an active reference in order to prevent automounts (DFS links)
+                        * of expiring and then freeing up our cifs superblock pointer while
+                        * we're doing failover.
+                        */
+                       cifs_sb_active(sd.sb);
+                       return sd.sb;
+               }
+       }
+       return ERR_PTR(-EINVAL);
 }
 
 static void __cifs_put_super(struct super_block *sb)
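
For reference, the rewritten __cifs_get_super() walks a NULL-terminated compound-literal array so that supers of both the "cifs" and "smb3" file_system_type registrations are searched. The idiom in isolation, as a hedged standalone sketch (names mirror the patch but the types are stand-ins):

    #include <stdio.h>

    struct file_system_type { const char *name; };

    static struct file_system_type cifs_fs_type = { "cifs" };
    static struct file_system_type smb3_fs_type = { "smb3" };

    int main(void)
    {
            /* Compound literal: an anonymous, NULL-terminated array of
             * pointers that lives for the enclosing block. */
            struct file_system_type **fs_type = (struct file_system_type *[]) {
                    &cifs_fs_type, &smb3_fs_type, NULL,
            };

            for (; *fs_type; fs_type++)
                    printf("searching supers of %s\n", (*fs_type)->name);
            return 0;
    }
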
index 3b7915a..0bece97 100644 (file)
@@ -301,7 +301,10 @@ cifs_ses_add_channel(struct cifs_sb_info *cifs_sb, struct cifs_ses *ses,
        /* Auth */
        ctx.domainauto = ses->domainAuto;
        ctx.domainname = ses->domainName;
-       ctx.server_hostname = ses->server->hostname;
+
+       /* no hostname for extra channels */
+       ctx.server_hostname = "";
+
        ctx.username = ses->user_name;
        ctx.password = ses->password;
        ctx.sectype = ses->sectype;
index 98a76fa..8543caf 100644 (file)
@@ -4260,15 +4260,15 @@ smb2_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock,
        if (oplock == SMB2_OPLOCK_LEVEL_BATCH) {
                cinode->oplock = CIFS_CACHE_RHW_FLG;
                cifs_dbg(FYI, "Batch Oplock granted on inode %p\n",
-                        &cinode->vfs_inode);
+                        &cinode->netfs.inode);
        } else if (oplock == SMB2_OPLOCK_LEVEL_EXCLUSIVE) {
                cinode->oplock = CIFS_CACHE_RW_FLG;
                cifs_dbg(FYI, "Exclusive Oplock granted on inode %p\n",
-                        &cinode->vfs_inode);
+                        &cinode->netfs.inode);
        } else if (oplock == SMB2_OPLOCK_LEVEL_II) {
                cinode->oplock = CIFS_CACHE_READ_FLG;
                cifs_dbg(FYI, "Level II Oplock granted on inode %p\n",
-                        &cinode->vfs_inode);
+                        &cinode->netfs.inode);
        } else
                cinode->oplock = 0;
 }
@@ -4307,7 +4307,7 @@ smb21_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock,
 
        cinode->oplock = new_oplock;
        cifs_dbg(FYI, "%s Lease granted on inode %p\n", message,
-                &cinode->vfs_inode);
+                &cinode->netfs.inode);
 }
 
 static void
index 0e8c852..eaf975f 100644 (file)
@@ -288,6 +288,9 @@ smb2_reconnect(__le16 smb2_command, struct cifs_tcon *tcon,
                        mutex_unlock(&ses->session_mutex);
                        rc = -EHOSTDOWN;
                        goto failed;
+               } else if (rc) {
+                       mutex_unlock(&ses->session_mutex);
+                       goto out;
                }
        } else {
                mutex_unlock(&ses->session_mutex);
index 360ce36..e6b9322 100644 (file)
@@ -1549,7 +1549,7 @@ static int __ext2_write_inode(struct inode *inode, int do_sync)
        if (IS_ERR(raw_inode))
                return -EIO;
 
-       /* For fields not not tracking in the in-memory inode,
+       /* For fields not tracking in the in-memory inode,
         * initialise them to zero for new inodes. */
        if (ei->i_state & EXT2_STATE_NEW)
                memset(raw_inode, 0, EXT2_SB(sb)->s_inode_size);
index a21d8f1..0522136 100644 (file)
@@ -120,6 +120,7 @@ static bool inode_io_list_move_locked(struct inode *inode,
                                      struct list_head *head)
 {
        assert_spin_locked(&wb->list_lock);
+       assert_spin_locked(&inode->i_lock);
 
        list_move(&inode->i_io_list, head);
 
@@ -1365,9 +1366,9 @@ static int move_expired_inodes(struct list_head *delaying_queue,
                inode = wb_inode(delaying_queue->prev);
                if (inode_dirtied_after(inode, dirtied_before))
                        break;
+               spin_lock(&inode->i_lock);
                list_move(&inode->i_io_list, &tmp);
                moved++;
-               spin_lock(&inode->i_lock);
                inode->i_state |= I_SYNC_QUEUED;
                spin_unlock(&inode->i_lock);
                if (sb_is_blkdev_sb(inode->i_sb))
@@ -1383,7 +1384,12 @@ static int move_expired_inodes(struct list_head *delaying_queue,
                goto out;
        }
 
-       /* Move inodes from one superblock together */
+       /*
+        * Although the inode's i_io_list is moved from 'tmp' to 'dispatch_queue',
+        * we don't take inode->i_lock here because it is just pointless overhead.
+        * The inode is already marked I_SYNC_QUEUED, so writeback list handling is
+        * fully under our control.
+        */
        while (!list_empty(&tmp)) {
                sb = wb_inode(tmp.prev)->i_sb;
                list_for_each_prev_safe(pos, node, &tmp) {
@@ -1826,8 +1832,8 @@ static long writeback_sb_inodes(struct super_block *sb,
                         * We'll have another go at writing back this inode
                         * when we completed a full scan of b_io.
                         */
-                       spin_unlock(&inode->i_lock);
                        requeue_io(inode, wb);
+                       spin_unlock(&inode->i_lock);
                        trace_writeback_sb_inodes_requeue(inode);
                        continue;
                }
@@ -2358,6 +2364,7 @@ void __mark_inode_dirty(struct inode *inode, int flags)
 {
        struct super_block *sb = inode->i_sb;
        int dirtytime = 0;
+       struct bdi_writeback *wb = NULL;
 
        trace_writeback_mark_inode_dirty(inode, flags);
 
@@ -2409,6 +2416,17 @@ void __mark_inode_dirty(struct inode *inode, int flags)
                        inode->i_state &= ~I_DIRTY_TIME;
                inode->i_state |= flags;
 
+               /*
+                * Grab inode's wb early because it requires dropping i_lock and we
+                * need to make sure following checks happen atomically with dirty
+                * list handling so that we don't move inodes under flush worker's
+                * hands.
+                */
+               if (!was_dirty) {
+                       wb = locked_inode_to_wb_and_lock_list(inode);
+                       spin_lock(&inode->i_lock);
+               }
+
                /*
                 * If the inode is queued for writeback by flush worker, just
                 * update its dirty state. Once the flush worker is done with
@@ -2416,7 +2434,7 @@ void __mark_inode_dirty(struct inode *inode, int flags)
                 * list, based upon its state.
                 */
                if (inode->i_state & I_SYNC_QUEUED)
-                       goto out_unlock_inode;
+                       goto out_unlock;
 
                /*
                 * Only add valid (hashed) inodes to the superblock's
@@ -2424,22 +2442,19 @@ void __mark_inode_dirty(struct inode *inode, int flags)
                 */
                if (!S_ISBLK(inode->i_mode)) {
                        if (inode_unhashed(inode))
-                               goto out_unlock_inode;
+                               goto out_unlock;
                }
                if (inode->i_state & I_FREEING)
-                       goto out_unlock_inode;
+                       goto out_unlock;
 
                /*
                 * If the inode was already on b_dirty/b_io/b_more_io, don't
                 * reposition it (that would break b_dirty time-ordering).
                 */
                if (!was_dirty) {
-                       struct bdi_writeback *wb;
                        struct list_head *dirty_list;
                        bool wakeup_bdi = false;
 
-                       wb = locked_inode_to_wb_and_lock_list(inode);
-
                        inode->dirtied_when = jiffies;
                        if (dirtytime)
                                inode->dirtied_time_when = jiffies;
@@ -2453,6 +2468,7 @@ void __mark_inode_dirty(struct inode *inode, int flags)
                                                               dirty_list);
 
                        spin_unlock(&wb->list_lock);
+                       spin_unlock(&inode->i_lock);
                        trace_writeback_dirty_inode_enqueue(inode);
 
                        /*
@@ -2467,6 +2483,9 @@ void __mark_inode_dirty(struct inode *inode, int flags)
                        return;
                }
        }
+out_unlock:
+       if (wb)
+               spin_unlock(&wb->list_lock);
 out_unlock_inode:
        spin_unlock(&inode->i_lock);
 }
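
The writeback hunks above tighten one rule: inode->i_io_list may only be manipulated with inode->i_lock held (now asserted in inode_io_list_move_locked()), so __mark_inode_dirty() grabs the wb list lock first and re-acquires i_lock before touching the dirty lists. A hedged userspace analogue of the resulting lock order, with pthread mutexes standing in for spinlocks (all names illustrative):

    #include <pthread.h>

    struct item {
            pthread_mutex_t lock;           /* plays inode->i_lock */
            struct item *next;
    };

    struct wb_list {
            pthread_mutex_t list_lock;      /* plays wb->list_lock */
            struct item *head;
    };

    void move_onto(struct wb_list *wb, struct item *it)
    {
            pthread_mutex_lock(&wb->list_lock);     /* list lock first... */
            pthread_mutex_lock(&it->lock);          /* ...then the item lock */
            it->next = wb->head;                    /* move under both locks */
            wb->head = it;
            pthread_mutex_unlock(&it->lock);
            pthread_mutex_unlock(&wb->list_lock);
    }
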
index 9d9b422..bd4da9c 100644 (file)
@@ -27,7 +27,7 @@
  * Inode locking rules:
  *
  * inode->i_lock protects:
- *   inode->i_state, inode->i_hash, __iget()
+ *   inode->i_state, inode->i_hash, __iget(), inode->i_io_list
  * Inode LRU list locks protect:
  *   inode->i_sb->s_inode_lru, inode->i_lru
  * inode->i_sb->s_inode_list_lock protects:
index 8742d22..42f892c 100644 (file)
@@ -155,7 +155,7 @@ static void netfs_rreq_expand(struct netfs_io_request *rreq,
 void netfs_readahead(struct readahead_control *ractl)
 {
        struct netfs_io_request *rreq;
-       struct netfs_i_context *ctx = netfs_i_context(ractl->mapping->host);
+       struct netfs_inode *ctx = netfs_inode(ractl->mapping->host);
        int ret;
 
        _enter("%lx,%x", readahead_index(ractl), readahead_count(ractl));
@@ -215,7 +215,7 @@ int netfs_read_folio(struct file *file, struct folio *folio)
 {
        struct address_space *mapping = folio_file_mapping(folio);
        struct netfs_io_request *rreq;
-       struct netfs_i_context *ctx = netfs_i_context(mapping->host);
+       struct netfs_inode *ctx = netfs_inode(mapping->host);
        int ret;
 
        _enter("%lx", folio_index(folio));
@@ -297,6 +297,7 @@ zero_out:
 
 /**
  * netfs_write_begin - Helper to prepare for writing
+ * @ctx: The netfs context
  * @file: The file to read from
  * @mapping: The mapping to read from
  * @pos: File position at which the write will begin
@@ -326,12 +327,12 @@ zero_out:
  *
  * This is usable whether or not caching is enabled.
  */
-int netfs_write_begin(struct file *file, struct address_space *mapping,
+int netfs_write_begin(struct netfs_inode *ctx,
+                     struct file *file, struct address_space *mapping,
                      loff_t pos, unsigned int len, struct folio **_folio,
                      void **_fsdata)
 {
        struct netfs_io_request *rreq;
-       struct netfs_i_context *ctx = netfs_i_context(file_inode(file ));
        struct folio *folio;
        unsigned int fgp_flags = FGP_LOCK | FGP_WRITE | FGP_CREAT | FGP_STABLE;
        pgoff_t index = pos >> PAGE_SHIFT;
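
netfs_write_begin() now takes the netfs_inode explicitly rather than deriving it from file_inode(file), as the removed line above shows. A hedged sketch of a caller after this change ("myfs" names are hypothetical; the signature follows the declaration in this patch):

    static int myfs_prepare_write(struct file *file,
                                  struct address_space *mapping,
                                  loff_t pos, unsigned int len,
                                  struct folio **foliop, void **fsdata)
    {
            /* Hand netfslib the wrapping netfs_inode directly. */
            return netfs_write_begin(netfs_inode(file_inode(file)),
                                     file, mapping, pos, len,
                                     foliop, fsdata);
    }
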
index b7b0e3d..43fac1b 100644 (file)
@@ -91,7 +91,7 @@ static inline void netfs_stat_d(atomic_t *stat)
 /*
  * Miscellaneous functions.
  */
-static inline bool netfs_is_cache_enabled(struct netfs_i_context *ctx)
+static inline bool netfs_is_cache_enabled(struct netfs_inode *ctx)
 {
 #if IS_ENABLED(CONFIG_FSCACHE)
        struct fscache_cookie *cookie = ctx->cache;
index e86107b..e17cdf5 100644 (file)
@@ -18,7 +18,7 @@ struct netfs_io_request *netfs_alloc_request(struct address_space *mapping,
 {
        static atomic_t debug_ids;
        struct inode *inode = file ? file_inode(file) : mapping->host;
-       struct netfs_i_context *ctx = netfs_i_context(inode);
+       struct netfs_inode *ctx = netfs_inode(inode);
        struct netfs_io_request *rreq;
        int ret;
 
@@ -75,10 +75,10 @@ static void netfs_free_request(struct work_struct *work)
        struct netfs_io_request *rreq =
                container_of(work, struct netfs_io_request, work);
 
-       netfs_clear_subrequests(rreq, false);
-       if (rreq->netfs_priv)
-               rreq->netfs_ops->cleanup(rreq->mapping, rreq->netfs_priv);
        trace_netfs_rreq(rreq, netfs_rreq_trace_free);
+       netfs_clear_subrequests(rreq, false);
+       if (rreq->netfs_ops->free_request)
+               rreq->netfs_ops->free_request(rreq);
        if (rreq->cache_resources.ops)
                rreq->cache_resources.ops->end_operation(&rreq->cache_resources);
        kfree(rreq);
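
The old ->cleanup(mapping, netfs_priv) hook is replaced by ->free_request(rreq), called from the tracepoint-first teardown above. A hedged sketch of a filesystem-side implementation ("myfs" names are hypothetical):

    static void myfs_free_request(struct netfs_io_request *rreq)
    {
            /* Private state now hangs off the request itself;
             * kfree(NULL) is a no-op if nothing was attached. */
            kfree(rreq->netfs_priv);
    }

    static const struct netfs_request_ops myfs_netfs_ops = {
            .free_request   = myfs_free_request,
            /* other ops elided */
    };
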
index f172412..9cb2d59 100644 (file)
@@ -309,11 +309,12 @@ nfsd_file_put(struct nfsd_file *nf)
        if (test_bit(NFSD_FILE_HASHED, &nf->nf_flags) == 0) {
                nfsd_file_flush(nf);
                nfsd_file_put_noref(nf);
-       } else {
+       } else if (nf->nf_file) {
                nfsd_file_put_noref(nf);
-               if (nf->nf_file)
-                       nfsd_file_schedule_laundrette();
-       }
+               nfsd_file_schedule_laundrette();
+       } else
+               nfsd_file_put_noref(nf);
+
        if (atomic_long_read(&nfsd_filecache_count) >= NFSD_FILE_LRU_LIMIT)
                nfsd_file_gc();
 }
index a74aef9..09d1307 100644 (file)
@@ -79,6 +79,7 @@
 #include <linux/capability.h>
 #include <linux/quotaops.h>
 #include <linux/blkdev.h>
+#include <linux/sched/mm.h>
 #include "../internal.h" /* ugh */
 
 #include <linux/uaccess.h>
@@ -425,9 +426,11 @@ EXPORT_SYMBOL(mark_info_dirty);
 int dquot_acquire(struct dquot *dquot)
 {
        int ret = 0, ret2 = 0;
+       unsigned int memalloc;
        struct quota_info *dqopt = sb_dqopt(dquot->dq_sb);
 
        mutex_lock(&dquot->dq_lock);
+       memalloc = memalloc_nofs_save();
        if (!test_bit(DQ_READ_B, &dquot->dq_flags)) {
                ret = dqopt->ops[dquot->dq_id.type]->read_dqblk(dquot);
                if (ret < 0)
@@ -458,6 +461,7 @@ int dquot_acquire(struct dquot *dquot)
        smp_mb__before_atomic();
        set_bit(DQ_ACTIVE_B, &dquot->dq_flags);
 out_iolock:
+       memalloc_nofs_restore(memalloc);
        mutex_unlock(&dquot->dq_lock);
        return ret;
 }
@@ -469,9 +473,11 @@ EXPORT_SYMBOL(dquot_acquire);
 int dquot_commit(struct dquot *dquot)
 {
        int ret = 0;
+       unsigned int memalloc;
        struct quota_info *dqopt = sb_dqopt(dquot->dq_sb);
 
        mutex_lock(&dquot->dq_lock);
+       memalloc = memalloc_nofs_save();
        if (!clear_dquot_dirty(dquot))
                goto out_lock;
        /* Inactive dquot can be only if there was error during read/init
@@ -481,6 +487,7 @@ int dquot_commit(struct dquot *dquot)
        else
                ret = -EIO;
 out_lock:
+       memalloc_nofs_restore(memalloc);
        mutex_unlock(&dquot->dq_lock);
        return ret;
 }
@@ -492,9 +499,11 @@ EXPORT_SYMBOL(dquot_commit);
 int dquot_release(struct dquot *dquot)
 {
        int ret = 0, ret2 = 0;
+       unsigned int memalloc;
        struct quota_info *dqopt = sb_dqopt(dquot->dq_sb);
 
        mutex_lock(&dquot->dq_lock);
+       memalloc = memalloc_nofs_save();
        /* Check whether we are not racing with some other dqget() */
        if (dquot_is_busy(dquot))
                goto out_dqlock;
@@ -510,6 +519,7 @@ int dquot_release(struct dquot *dquot)
        }
        clear_bit(DQ_ACTIVE_B, &dquot->dq_flags);
 out_dqlock:
+       memalloc_nofs_restore(memalloc);
        mutex_unlock(&dquot->dq_lock);
        return ret;
 }
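
All three quota paths gain the same scoped-allocation guard: between memalloc_nofs_save() and memalloc_nofs_restore(), every allocation made by the task implicitly behaves as GFP_NOFS, so nothing called under dq_lock can recurse into filesystem reclaim and deadlock. The idiom in isolation, as a minimal kernel-style sketch (do_work_under_lock() is a hypothetical helper):

    #include <linux/sched/mm.h>

    static int example(void)
    {
            unsigned int memalloc = memalloc_nofs_save();
            int ret;

            ret = do_work_under_lock();     /* hypothetical helper; any
                                             * allocations it makes are
                                             * implicitly GFP_NOFS here */
            memalloc_nofs_restore(memalloc);
            return ret;
    }
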
index bcb21ae..0532997 100644 (file)
@@ -110,15 +110,51 @@ static inline void zonefs_i_size_write(struct inode *inode, loff_t isize)
        }
 }
 
-static int zonefs_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
-                             unsigned int flags, struct iomap *iomap,
-                             struct iomap *srcmap)
+static int zonefs_read_iomap_begin(struct inode *inode, loff_t offset,
+                                  loff_t length, unsigned int flags,
+                                  struct iomap *iomap, struct iomap *srcmap)
 {
        struct zonefs_inode_info *zi = ZONEFS_I(inode);
        struct super_block *sb = inode->i_sb;
        loff_t isize;
 
-       /* All I/Os should always be within the file maximum size */
+       /*
+        * All blocks are always mapped below EOF. If reading past EOF,
+        * act as if there is a hole up to the file maximum size.
+        */
+       mutex_lock(&zi->i_truncate_mutex);
+       iomap->bdev = inode->i_sb->s_bdev;
+       iomap->offset = ALIGN_DOWN(offset, sb->s_blocksize);
+       isize = i_size_read(inode);
+       if (iomap->offset >= isize) {
+               iomap->type = IOMAP_HOLE;
+               iomap->addr = IOMAP_NULL_ADDR;
+               iomap->length = length;
+       } else {
+               iomap->type = IOMAP_MAPPED;
+               iomap->addr = (zi->i_zsector << SECTOR_SHIFT) + iomap->offset;
+               iomap->length = isize - iomap->offset;
+       }
+       mutex_unlock(&zi->i_truncate_mutex);
+
+       trace_zonefs_iomap_begin(inode, iomap);
+
+       return 0;
+}
+
+static const struct iomap_ops zonefs_read_iomap_ops = {
+       .iomap_begin    = zonefs_read_iomap_begin,
+};
+
+static int zonefs_write_iomap_begin(struct inode *inode, loff_t offset,
+                                   loff_t length, unsigned int flags,
+                                   struct iomap *iomap, struct iomap *srcmap)
+{
+       struct zonefs_inode_info *zi = ZONEFS_I(inode);
+       struct super_block *sb = inode->i_sb;
+       loff_t isize;
+
+       /* All write I/Os should always be within the file maximum size */
        if (WARN_ON_ONCE(offset + length > zi->i_max_size))
                return -EIO;
 
@@ -128,7 +164,7 @@ static int zonefs_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
         * operation.
         */
        if (WARN_ON_ONCE(zi->i_ztype == ZONEFS_ZTYPE_SEQ &&
-                        (flags & IOMAP_WRITE) && !(flags & IOMAP_DIRECT)))
+                        !(flags & IOMAP_DIRECT)))
                return -EIO;
 
        /*
@@ -137,47 +173,44 @@ static int zonefs_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
          * write pointer) and unwritten beyond.
         */
        mutex_lock(&zi->i_truncate_mutex);
+       iomap->bdev = inode->i_sb->s_bdev;
+       iomap->offset = ALIGN_DOWN(offset, sb->s_blocksize);
+       iomap->addr = (zi->i_zsector << SECTOR_SHIFT) + iomap->offset;
        isize = i_size_read(inode);
-       if (offset >= isize)
+       if (iomap->offset >= isize) {
                iomap->type = IOMAP_UNWRITTEN;
-       else
+               iomap->length = zi->i_max_size - iomap->offset;
+       } else {
                iomap->type = IOMAP_MAPPED;
-       if (flags & IOMAP_WRITE)
-               length = zi->i_max_size - offset;
-       else
-               length = min(length, isize - offset);
+               iomap->length = isize - iomap->offset;
+       }
        mutex_unlock(&zi->i_truncate_mutex);
 
-       iomap->offset = ALIGN_DOWN(offset, sb->s_blocksize);
-       iomap->length = ALIGN(offset + length, sb->s_blocksize) - iomap->offset;
-       iomap->bdev = inode->i_sb->s_bdev;
-       iomap->addr = (zi->i_zsector << SECTOR_SHIFT) + iomap->offset;
-
        trace_zonefs_iomap_begin(inode, iomap);
 
        return 0;
 }
 
-static const struct iomap_ops zonefs_iomap_ops = {
-       .iomap_begin    = zonefs_iomap_begin,
+static const struct iomap_ops zonefs_write_iomap_ops = {
+       .iomap_begin    = zonefs_write_iomap_begin,
 };
 
 static int zonefs_read_folio(struct file *unused, struct folio *folio)
 {
-       return iomap_read_folio(folio, &zonefs_iomap_ops);
+       return iomap_read_folio(folio, &zonefs_read_iomap_ops);
 }
 
 static void zonefs_readahead(struct readahead_control *rac)
 {
-       iomap_readahead(rac, &zonefs_iomap_ops);
+       iomap_readahead(rac, &zonefs_read_iomap_ops);
 }
 
 /*
  * Map blocks for page writeback. This is used only on conventional zone files,
  * which implies that the page range can only be within the fixed inode size.
  */
-static int zonefs_map_blocks(struct iomap_writepage_ctx *wpc,
-                            struct inode *inode, loff_t offset)
+static int zonefs_write_map_blocks(struct iomap_writepage_ctx *wpc,
+                                  struct inode *inode, loff_t offset)
 {
        struct zonefs_inode_info *zi = ZONEFS_I(inode);
 
@@ -191,12 +224,12 @@ static int zonefs_map_blocks(struct iomap_writepage_ctx *wpc,
            offset < wpc->iomap.offset + wpc->iomap.length)
                return 0;
 
-       return zonefs_iomap_begin(inode, offset, zi->i_max_size - offset,
-                                 IOMAP_WRITE, &wpc->iomap, NULL);
+       return zonefs_write_iomap_begin(inode, offset, zi->i_max_size - offset,
+                                       IOMAP_WRITE, &wpc->iomap, NULL);
 }
 
 static const struct iomap_writeback_ops zonefs_writeback_ops = {
-       .map_blocks             = zonefs_map_blocks,
+       .map_blocks             = zonefs_write_map_blocks,
 };
 
 static int zonefs_writepage(struct page *page, struct writeback_control *wbc)
@@ -226,7 +259,8 @@ static int zonefs_swap_activate(struct swap_info_struct *sis,
                return -EINVAL;
        }
 
-       return iomap_swapfile_activate(sis, swap_file, span, &zonefs_iomap_ops);
+       return iomap_swapfile_activate(sis, swap_file, span,
+                                      &zonefs_read_iomap_ops);
 }
 
 static const struct address_space_operations zonefs_file_aops = {
@@ -647,7 +681,7 @@ static vm_fault_t zonefs_filemap_page_mkwrite(struct vm_fault *vmf)
 
        /* Serialize against truncates */
        filemap_invalidate_lock_shared(inode->i_mapping);
-       ret = iomap_page_mkwrite(vmf, &zonefs_iomap_ops);
+       ret = iomap_page_mkwrite(vmf, &zonefs_write_iomap_ops);
        filemap_invalidate_unlock_shared(inode->i_mapping);
 
        sb_end_pagefault(inode->i_sb);
@@ -899,7 +933,7 @@ static ssize_t zonefs_file_dio_write(struct kiocb *iocb, struct iov_iter *from)
        if (append)
                ret = zonefs_file_dio_append(iocb, from);
        else
-               ret = iomap_dio_rw(iocb, from, &zonefs_iomap_ops,
+               ret = iomap_dio_rw(iocb, from, &zonefs_write_iomap_ops,
                                   &zonefs_write_dio_ops, 0, NULL, 0);
        if (zi->i_ztype == ZONEFS_ZTYPE_SEQ &&
            (ret > 0 || ret == -EIOCBQUEUED)) {
@@ -948,7 +982,7 @@ static ssize_t zonefs_file_buffered_write(struct kiocb *iocb,
        if (ret <= 0)
                goto inode_unlock;
 
-       ret = iomap_file_buffered_write(iocb, from, &zonefs_iomap_ops);
+       ret = iomap_file_buffered_write(iocb, from, &zonefs_write_iomap_ops);
        if (ret > 0)
                iocb->ki_pos += ret;
        else if (ret == -EIO)
@@ -1041,7 +1075,7 @@ static ssize_t zonefs_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
                        goto inode_unlock;
                }
                file_accessed(iocb->ki_filp);
-               ret = iomap_dio_rw(iocb, to, &zonefs_iomap_ops,
+               ret = iomap_dio_rw(iocb, to, &zonefs_read_iomap_ops,
                                   &zonefs_read_dio_ops, 0, NULL, 0);
        } else {
                ret = generic_file_read_iter(iocb, to);
@@ -1085,7 +1119,8 @@ static int zonefs_seq_file_write_open(struct inode *inode)
 
                if (sbi->s_mount_opts & ZONEFS_MNTOPT_EXPLICIT_OPEN) {
 
-                       if (wro > sbi->s_max_wro_seq_files) {
+                       if (sbi->s_max_wro_seq_files
+                           && wro > sbi->s_max_wro_seq_files) {
                                atomic_dec(&sbi->s_wro_seq_files);
                                ret = -EBUSY;
                                goto unlock;
@@ -1760,12 +1795,6 @@ static int zonefs_fill_super(struct super_block *sb, void *data, int silent)
 
        atomic_set(&sbi->s_wro_seq_files, 0);
        sbi->s_max_wro_seq_files = bdev_max_open_zones(sb->s_bdev);
-       if (!sbi->s_max_wro_seq_files &&
-           sbi->s_mount_opts & ZONEFS_MNTOPT_EXPLICIT_OPEN) {
-               zonefs_info(sb, "No open zones limit. Ignoring explicit_open mount option\n");
-               sbi->s_mount_opts &= ~ZONEFS_MNTOPT_EXPLICIT_OPEN;
-       }
-
        atomic_set(&sbi->s_active_seq_files, 0);
        sbi->s_max_active_seq_files = bdev_max_active_zones(sb->s_bdev);
 
@@ -1790,6 +1819,14 @@ static int zonefs_fill_super(struct super_block *sb, void *data, int silent)
        zonefs_info(sb, "Mounting %u zones",
                    blkdev_nr_zones(sb->s_bdev->bd_disk));
 
+       if (!sbi->s_max_wro_seq_files &&
+           !sbi->s_max_active_seq_files &&
+           sbi->s_mount_opts & ZONEFS_MNTOPT_EXPLICIT_OPEN) {
+               zonefs_info(sb,
+                       "No open and active zone limits. Ignoring explicit_open mount option\n");
+               sbi->s_mount_opts &= ~ZONEFS_MNTOPT_EXPLICIT_OPEN;
+       }
+
        /* Create root directory inode */
        ret = -ENOMEM;
        inode = new_inode(sb);
index 302506b..8e47d48 100644 (file)
@@ -44,6 +44,7 @@ mandatory-y += msi.h
 mandatory-y += pci.h
 mandatory-y += percpu.h
 mandatory-y += pgalloc.h
+mandatory-y += platform-feature.h
 mandatory-y += preempt.h
 mandatory-y += rwonce.h
 mandatory-y += sections.h
diff --git a/include/asm-generic/platform-feature.h b/include/asm-generic/platform-feature.h
new file mode 100644 (file)
index 0000000..4b0af3d
--- /dev/null
@@ -0,0 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_GENERIC_PLATFORM_FEATURE_H
+#define _ASM_GENERIC_PLATFORM_FEATURE_H
+
+/* Number of arch specific feature flags. */
+#define PLATFORM_ARCH_FEAT_N   0
+
+#endif /* _ASM_GENERIC_PLATFORM_FEATURE_H */
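
Together with linux/platform-feature.h below, this provides a small global feature bitmap. A hedged usage sketch built only from the declarations this patch adds (the call sites themselves are hypothetical):

    #include <linux/platform-feature.h>

    static void example_platform_setup(void)
    {
            /* Record once, early, that virtio needs restricted
             * memory-access handling on this platform. */
            platform_set(PLATFORM_VIRTIO_RESTRICTED_MEM_ACCESS);
    }

    static bool example_driver_check(void)
    {
            return platform_has(PLATFORM_VIRTIO_RESTRICTED_MEM_ACCESS);
    }
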
index 1cf3738..992ee98 100644 (file)
@@ -403,7 +403,6 @@ enum {
 extern int bioset_init(struct bio_set *, unsigned int, unsigned int, int flags);
 extern void bioset_exit(struct bio_set *);
 extern int biovec_init_pool(mempool_t *pool, int pool_entries);
-extern int bioset_init_from_src(struct bio_set *bs, struct bio_set *src);
 
 struct bio *bio_alloc_bioset(struct block_device *bdev, unsigned short nr_vecs,
                             unsigned int opf, gfp_t gfp_mask,
index a436705..2f991a4 100644 (file)
@@ -4,7 +4,7 @@
  *
  * Implements the standard CRC ITU-T V.41:
  *   Width 16
- *   Poly  0x1021 (x^16 + x^12 + x^15 + 1)
+ *   Poly  0x1021 (x^16 + x^12 + x^5 + 1)
  *   Init  0
  */
 
index 732de90..0f2a59c 100644 (file)
@@ -822,7 +822,6 @@ struct ata_port {
        struct ata_queued_cmd   qcmd[ATA_MAX_QUEUE + 1];
        u64                     qc_active;
        int                     nr_active_links; /* #links with active qcs */
-       unsigned int            sas_last_tag;   /* track next tag hw expects */
 
        struct ata_link         link;           /* host default link */
        struct ata_link         *slave_link;    /* see ata_slave_link_init() */
index b34ff2c..c29ab4c 100644 (file)
@@ -227,6 +227,7 @@ struct page {
  * struct folio - Represents a contiguous set of bytes.
  * @flags: Identical to the page flags.
  * @lru: Least Recently Used list; tracks how recently this folio was used.
+ * @mlock_count: Number of times this folio has been pinned by mlock().
  * @mapping: The file this page belongs to, or refers to the anon_vma for
  *    anonymous memory.
  * @index: Offset within the file, in units of pages.  For anonymous memory,
@@ -255,10 +256,14 @@ struct folio {
                        unsigned long flags;
                        union {
                                struct list_head lru;
+       /* private: avoid cluttering the output */
                                struct {
                                        void *__filler;
+       /* public: */
                                        unsigned int mlock_count;
+       /* private: */
                                };
+       /* public: */
                        };
                        struct address_space *mapping;
                        pgoff_t index;
index 77fa6a6..097cdd6 100644 (file)
@@ -119,9 +119,10 @@ typedef void (*netfs_io_terminated_t)(void *priv, ssize_t transferred_or_error,
                                      bool was_async);
 
 /*
- * Per-inode description.  This must be directly after the inode struct.
+ * Per-inode context.  This wraps the VFS inode.
  */
-struct netfs_i_context {
+struct netfs_inode {
+       struct inode            inode;          /* The VFS inode */
        const struct netfs_request_ops *ops;
 #if IS_ENABLED(CONFIG_FSCACHE)
        struct fscache_cookie   *cache;
@@ -205,7 +206,9 @@ struct netfs_io_request {
  */
 struct netfs_request_ops {
        int (*init_request)(struct netfs_io_request *rreq, struct file *file);
+       void (*free_request)(struct netfs_io_request *rreq);
        int (*begin_cache_operation)(struct netfs_io_request *rreq);
+
        void (*expand_readahead)(struct netfs_io_request *rreq);
        bool (*clamp_length)(struct netfs_io_subrequest *subreq);
        void (*issue_read)(struct netfs_io_subrequest *subreq);
@@ -213,7 +216,6 @@ struct netfs_request_ops {
        int (*check_write_begin)(struct file *file, loff_t pos, unsigned len,
                                 struct folio *folio, void **_fsdata);
        void (*done)(struct netfs_io_request *rreq);
-       void (*cleanup)(struct address_space *mapping, void *netfs_priv);
 };
 
 /*
@@ -256,7 +258,7 @@ struct netfs_cache_ops {
         * boundary as appropriate.
         */
        enum netfs_io_source (*prepare_read)(struct netfs_io_subrequest *subreq,
-                                              loff_t i_size);
+                                            loff_t i_size);
 
        /* Prepare a write operation, working out what part of the write we can
         * actually do.
@@ -276,7 +278,8 @@ struct netfs_cache_ops {
 struct readahead_control;
 extern void netfs_readahead(struct readahead_control *);
 int netfs_read_folio(struct file *, struct folio *);
-extern int netfs_write_begin(struct file *, struct address_space *,
+extern int netfs_write_begin(struct netfs_inode *,
+                            struct file *, struct address_space *,
                             loff_t, unsigned int, struct folio **,
                             void **);
 
@@ -288,71 +291,56 @@ extern void netfs_put_subrequest(struct netfs_io_subrequest *subreq,
 extern void netfs_stats_show(struct seq_file *);
 
 /**
- * netfs_i_context - Get the netfs inode context from the inode
+ * netfs_inode - Get the netfs inode context from the inode
  * @inode: The inode to query
  *
  * Get the netfs lib inode context from the network filesystem's inode.  The
  * context struct wraps the VFS inode struct.
  */
-static inline struct netfs_i_context *netfs_i_context(struct inode *inode)
-{
-       return (void *)inode + sizeof(*inode);
-}
-
-/**
- * netfs_inode - Get the netfs inode from the inode context
- * @ctx: The context to query
- *
- * Get the netfs inode from the netfs library's inode context.  The VFS inode
- * is expected to directly precede the context struct.
- */
-static inline struct inode *netfs_inode(struct netfs_i_context *ctx)
+static inline struct netfs_inode *netfs_inode(struct inode *inode)
 {
-       return (void *)ctx - sizeof(struct inode);
+       return container_of(inode, struct netfs_inode, inode);
 }
 
 /**
- * netfs_i_context_init - Initialise a netfs lib context
- * @inode: The inode with which the context is associated
+ * netfs_inode_init - Initialise a netfslib inode context
+ * @inode: The netfs inode to initialise
  * @ops: The netfs's operations list
  *
  * Initialise the netfs library context struct, which wraps the VFS inode
  * struct.
  */
-static inline void netfs_i_context_init(struct inode *inode,
-                                       const struct netfs_request_ops *ops)
+static inline void netfs_inode_init(struct netfs_inode *ctx,
+                                   const struct netfs_request_ops *ops)
 {
-       struct netfs_i_context *ctx = netfs_i_context(inode);
-
-       memset(ctx, 0, sizeof(*ctx));
        ctx->ops = ops;
-       ctx->remote_i_size = i_size_read(inode);
+       ctx->remote_i_size = i_size_read(&ctx->inode);
+#if IS_ENABLED(CONFIG_FSCACHE)
+       ctx->cache = NULL;
+#endif
 }
 
 /**
  * netfs_resize_file - Note that a file got resized
- * @inode: The inode being resized
+ * @ctx: The netfs inode being resized
  * @new_i_size: The new file size
  *
  * Inform the netfs lib that a file got resized so that it can adjust its state.
  */
-static inline void netfs_resize_file(struct inode *inode, loff_t new_i_size)
+static inline void netfs_resize_file(struct netfs_inode *ctx, loff_t new_i_size)
 {
-       struct netfs_i_context *ctx = netfs_i_context(inode);
-
        ctx->remote_i_size = new_i_size;
 }
 
 /**
  * netfs_i_cookie - Get the cache cookie from the inode
- * @inode: The inode to query
+ * @ctx: The netfs inode to query
  *
  * Get the caching cookie (if enabled) from the network filesystem's inode.
  */
-static inline struct fscache_cookie *netfs_i_cookie(struct inode *inode)
+static inline struct fscache_cookie *netfs_i_cookie(struct netfs_inode *ctx)
 {
 #if IS_ENABLED(CONFIG_FSCACHE)
-       struct netfs_i_context *ctx = netfs_i_context(inode);
        return ctx->cache;
 #else
        return NULL;
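
With the context now embedding the VFS inode, a filesystem typically declares struct netfs_inode as the first member of its own inode and initialises it once the VFS fields are populated, since netfs_inode_init() snapshots i_size from ctx->inode. A hedged sketch ("myfs" names are hypothetical):

    struct myfs_inode {
            struct netfs_inode netfs;   /* netfs context + VFS inode */
            /* fs-private fields follow */
    };

    static void myfs_setup_inode(struct myfs_inode *mi,
                                 const struct netfs_request_ops *ops)
    {
            /* Call after i_size etc. are set on mi->netfs.inode. */
            netfs_inode_init(&mi->netfs, ops);
    }
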
diff --git a/include/linux/platform-feature.h b/include/linux/platform-feature.h
new file mode 100644 (file)
index 0000000..b2f48be
--- /dev/null
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _PLATFORM_FEATURE_H
+#define _PLATFORM_FEATURE_H
+
+#include <linux/bitops.h>
+#include <asm/platform-feature.h>
+
+/* The platform features start with the architecture-specific ones. */
+
+/* Used to enable platform specific DMA handling for virtio devices. */
+#define PLATFORM_VIRTIO_RESTRICTED_MEM_ACCESS  (0 + PLATFORM_ARCH_FEAT_N)
+
+#define PLATFORM_FEAT_N                                (1 + PLATFORM_ARCH_FEAT_N)
+
+void platform_set(unsigned int feature);
+void platform_clear(unsigned int feature);
+bool platform_has(unsigned int feature);
+
+#endif /* _PLATFORM_FEATURE_H */
index fae0c84..20e389a 100644 (file)
@@ -13,7 +13,7 @@
 struct notifier_block;
 
 void add_device_randomness(const void *buf, size_t len);
-void add_bootloader_randomness(const void *buf, size_t len);
+void __init add_bootloader_randomness(const void *buf, size_t len);
 void add_input_randomness(unsigned int type, unsigned int code,
                          unsigned int value) __latent_entropy;
 void add_interrupt_randomness(int irq) __latent_entropy;
@@ -74,7 +74,6 @@ static inline unsigned long get_random_canary(void)
 
 int __init random_init(const char *command_line);
 bool rng_is_initialized(void);
-bool rng_has_arch_random(void);
 int wait_for_random_bytes(void);
 
 /* Calls wait_for_random_bytes() and then calls get_random_bytes(buf, nbytes).
index 4417f66..5860f32 100644 (file)
@@ -243,7 +243,7 @@ extern void xdr_init_encode(struct xdr_stream *xdr, struct xdr_buf *buf,
 extern __be32 *xdr_reserve_space(struct xdr_stream *xdr, size_t nbytes);
 extern int xdr_reserve_space_vec(struct xdr_stream *xdr, struct kvec *vec,
                size_t nbytes);
-extern void xdr_commit_encode(struct xdr_stream *xdr);
+extern void __xdr_commit_encode(struct xdr_stream *xdr);
 extern void xdr_truncate_encode(struct xdr_stream *xdr, size_t len);
 extern int xdr_restrict_buflen(struct xdr_stream *xdr, int newbuflen);
 extern void xdr_write_pages(struct xdr_stream *xdr, struct page **pages,
@@ -306,6 +306,20 @@ xdr_reset_scratch_buffer(struct xdr_stream *xdr)
        xdr_set_scratch_buffer(xdr, NULL, 0);
 }
 
+/**
+ * xdr_commit_encode - Ensure all data is written to xdr->buf
+ * @xdr: pointer to xdr_stream
+ *
+ * Handle encoding across page boundaries by giving the caller a
+ * temporary location to write to, then later copying the data into
+ * place. __xdr_commit_encode() does that copying.
+ */
+static inline void xdr_commit_encode(struct xdr_stream *xdr)
+{
+       if (unlikely(xdr->scratch.iov_len))
+               __xdr_commit_encode(xdr);
+}
+
 /**
  * xdr_stream_remaining - Return the number of bytes remaining in the stream
  * @xdr: pointer to struct xdr_stream
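
The split keeps the common case cheap: the test is inlined at every call site and only the rare scratch-copy path costs a function call. The shape of the pattern in isolation, as a hedged standalone sketch (names are illustrative):

    #include <stddef.h>

    #define unlikely(x) __builtin_expect(!!(x), 0)

    struct stream { size_t scratch_len; };

    void __slow_commit(struct stream *s);   /* out-of-line rare path */

    static inline void commit(struct stream *s)
    {
            /* Inline fast path: usually scratch is unused and this
             * whole call folds to a single compare-and-branch. */
            if (unlikely(s->scratch_len))
                    __slow_commit(s);
    }
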
index 4700a88..7b4a13d 100644 (file)
@@ -178,7 +178,8 @@ struct vdpa_map_file {
  *                             for the device
  *                             @vdev: vdpa device
  *                             Returns virtqueue algin requirement
- * @get_vq_group:              Get the group id for a specific virtqueue
+ * @get_vq_group:              Get the group id for a specific
+ *                             virtqueue (optional)
  *                             @vdev: vdpa device
  *                             @idx: virtqueue index
  *                             Returns u32: group id for this virtqueue
@@ -243,7 +244,7 @@ struct vdpa_map_file {
  *                             Returns the iova range supported by
  *                             the device.
  * @set_group_asid:            Set address space identifier for a
- *                             virtqueue group
+ *                             virtqueue group (optional)
  *                             @vdev: vdpa device
  *                             @group: virtqueue group
  *                             @asid: address space id for this group
index 9a36051..49c7c32 100644 (file)
@@ -604,13 +604,4 @@ static inline void virtio_cwrite64(struct virtio_device *vdev,
                _r;                                                     \
        })
 
-#ifdef CONFIG_ARCH_HAS_RESTRICTED_VIRTIO_MEMORY_ACCESS
-int arch_has_restricted_virtio_memory_access(void);
-#else
-static inline int arch_has_restricted_virtio_memory_access(void)
-{
-       return 0;
-}
-#endif /* CONFIG_ARCH_HAS_RESTRICTED_VIRTIO_MEMORY_ACCESS */
-
 #endif /* _LINUX_VIRTIO_CONFIG_H */
index 7fee9b6..62e75dd 100644 (file)
@@ -406,7 +406,7 @@ alloc_workqueue(const char *fmt, unsigned int flags, int max_active, ...);
  * alloc_ordered_workqueue - allocate an ordered workqueue
  * @fmt: printf format for the name of the workqueue
  * @flags: WQ_* flags (only WQ_FREEZABLE and WQ_MEM_RECLAIM are meaningful)
- * @args...: args for @fmt
+ * @args: args for @fmt
  *
  * Allocate an ordered workqueue.  An ordered workqueue executes at
  * most one work item at any given time in the queued order.  They are
@@ -445,7 +445,7 @@ extern bool mod_delayed_work_on(int cpu, struct workqueue_struct *wq,
                        struct delayed_work *dwork, unsigned long delay);
 extern bool queue_rcu_work(struct workqueue_struct *wq, struct rcu_work *rwork);
 
-extern void flush_workqueue(struct workqueue_struct *wq);
+extern void __flush_workqueue(struct workqueue_struct *wq);
 extern void drain_workqueue(struct workqueue_struct *wq);
 
 extern int schedule_on_each_cpu(work_func_t func);
@@ -563,15 +563,23 @@ static inline bool schedule_work(struct work_struct *work)
        return queue_work(system_wq, work);
 }
 
+/*
+ * Detect attempt to flush system-wide workqueues at compile time when possible.
+ *
+ * See https://lkml.kernel.org/r/49925af7-78a8-a3dd-bce6-cfc02e1a9236@I-love.SAKURA.ne.jp
+ * for reasons and steps for converting system-wide workqueues into local workqueues.
+ */
+extern void __warn_flushing_systemwide_wq(void)
+       __compiletime_warning("Please avoid flushing system-wide workqueues.");
+
 /**
  * flush_scheduled_work - ensure that any scheduled work has run to completion.
  *
  * Forces execution of the kernel-global workqueue and blocks until its
  * completion.
  *
- * Think twice before calling this function!  It's very easy to get into
- * trouble if you don't take great care.  Either of the following situations
- * will lead to deadlock:
+ * It's very easy to get into trouble if you don't take great care.
+ * Either of the following situations will lead to deadlock:
  *
  *     One of the work items currently on the workqueue needs to acquire
  *     a lock held by your code or its caller.
@@ -586,11 +594,51 @@ static inline bool schedule_work(struct work_struct *work)
  * need to know that a particular work item isn't queued and isn't running.
  * In such cases you should use cancel_delayed_work_sync() or
  * cancel_work_sync() instead.
+ *
+ * Please stop calling this function! A conversion to stop flushing system-wide
+ * workqueues is in progress. This function will be removed after all in-tree
+ * users stopped calling this function.
  */
-static inline void flush_scheduled_work(void)
-{
-       flush_workqueue(system_wq);
-}
+/*
+ * The background of commit 771c035372a036f8 ("deprecate the
+ * '__deprecated' attribute warnings entirely and for good") is that,
+ * since Linus builds all modules between every single pull he does,
+ * the standard kernel build needs to be _clean_ in order to be able to
+ * notice when new problems happen. Therefore, don't emit warning while
+ * there are in-tree users.
+ */
+#define flush_scheduled_work()                                         \
+({                                                                     \
+       if (0)                                                          \
+               __warn_flushing_systemwide_wq();                        \
+       __flush_workqueue(system_wq);                                   \
+})
+
+/*
+ * Although there is no longer in-tree caller, for now just emit warning
+ * in order to give out-of-tree callers time to update.
+ */
+#define flush_workqueue(wq)                                            \
+({                                                                     \
+       struct workqueue_struct *_wq = (wq);                            \
+                                                                       \
+       if ((__builtin_constant_p(_wq == system_wq) &&                  \
+            _wq == system_wq) ||                                       \
+           (__builtin_constant_p(_wq == system_highpri_wq) &&          \
+            _wq == system_highpri_wq) ||                               \
+           (__builtin_constant_p(_wq == system_long_wq) &&             \
+            _wq == system_long_wq) ||                                  \
+           (__builtin_constant_p(_wq == system_unbound_wq) &&          \
+            _wq == system_unbound_wq) ||                               \
+           (__builtin_constant_p(_wq == system_freezable_wq) &&        \
+            _wq == system_freezable_wq) ||                             \
+           (__builtin_constant_p(_wq == system_power_efficient_wq) &&  \
+            _wq == system_power_efficient_wq) ||                       \
+           (__builtin_constant_p(_wq == system_freezable_power_efficient_wq) && \
+            _wq == system_freezable_power_efficient_wq))               \
+               __warn_flushing_systemwide_wq();                        \
+       __flush_workqueue(_wq);                                         \
+})
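
The detection relies on __builtin_constant_p() folding under optimisation: when the compiler can prove the argument is one of the system-wide queues, the call to the warning-attributed function survives and emits a diagnostic; otherwise it is eliminated. A userspace reproduction (GCC, compile with -O1 or higher; names are stand-ins, and the `warning` attribute plays the kernel's __compiletime_warning):

    struct workqueue { int dummy; };
    extern struct workqueue *system_q;

    __attribute__((warning("avoid flushing the system-wide queue")))
    void warn_systemwide_flush(void);

    void do_flush(struct workqueue *wq);

    #define flush(wq)                                               \
    ({                                                              \
            struct workqueue *_wq = (wq);                           \
            if (__builtin_constant_p(_wq == system_q) &&            \
                _wq == system_q)                                    \
                    warn_systemwide_flush();                        \
            do_flush(_wq);                                          \
    })

    void caller(struct workqueue *other)
    {
            flush(other);           /* no diagnostic */
            flush(system_q);        /* warns at compile time */
    }
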
 
 /**
  * schedule_delayed_work_on - queue work in global workqueue on CPU after delay
index 72feab5..c29e11b 100644 (file)
@@ -1508,6 +1508,7 @@ void *xas_find_marked(struct xa_state *, unsigned long max, xa_mark_t);
 void xas_init_marks(const struct xa_state *);
 
 bool xas_nomem(struct xa_state *, gfp_t);
+void xas_destroy(struct xa_state *);
 void xas_pause(struct xa_state *);
 
 void xas_create_range(struct xa_state *);
index 021778a..6484095 100644 (file)
@@ -612,5 +612,6 @@ int flow_indr_dev_setup_offload(struct net_device *dev, struct Qdisc *sch,
                                enum tc_setup_type type, void *data,
                                struct flow_block_offload *bo,
                                void (*cleanup)(struct flow_block_cb *block_cb));
+bool flow_indr_dev_exists(void);
 
 #endif /* _NET_FLOW_OFFLOAD_H */
index 5b38bf1..de9dcc5 100644 (file)
@@ -1063,7 +1063,7 @@ int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr);
 int ip6_append_data(struct sock *sk,
                    int getfrag(void *from, char *to, int offset, int len,
                                int odd, struct sk_buff *skb),
-                   void *from, int length, int transhdrlen,
+                   void *from, size_t length, int transhdrlen,
                    struct ipcm6_cookie *ipc6, struct flowi6 *fl6,
                    struct rt6_info *rt, unsigned int flags);
 
@@ -1079,7 +1079,7 @@ struct sk_buff *__ip6_make_skb(struct sock *sk, struct sk_buff_head *queue,
 struct sk_buff *ip6_make_skb(struct sock *sk,
                             int getfrag(void *from, char *to, int offset,
                                         int len, int odd, struct sk_buff *skb),
-                            void *from, int length, int transhdrlen,
+                            void *from, size_t length, int transhdrlen,
                             struct ipcm6_cookie *ipc6,
                             struct rt6_info *rt, unsigned int flags,
                             struct inet_cork_full *cork);
index 20af9d3..279ae0f 100644 (file)
@@ -1090,7 +1090,6 @@ struct nft_stats {
 
 struct nft_hook {
        struct list_head        list;
-       bool                    inactive;
        struct nf_hook_ops      ops;
        struct rcu_head         rcu;
 };
index 7971478..3568b6a 100644 (file)
@@ -92,7 +92,7 @@ int nft_flow_rule_offload_commit(struct net *net);
        NFT_OFFLOAD_MATCH(__key, __base, __field, __len, __reg)         \
        memset(&(__reg)->mask, 0xff, (__reg)->len);
 
-int nft_chain_offload_priority(struct nft_base_chain *basechain);
+bool nft_chain_offload_support(const struct nft_base_chain *basechain);
 
 int nft_offload_init(void);
 void nft_offload_exit(void);
index 6154a2e..262d520 100644 (file)
@@ -22,7 +22,7 @@ struct pool_workqueue;
  */
 TRACE_EVENT(workqueue_queue_work,
 
-       TP_PROTO(unsigned int req_cpu, struct pool_workqueue *pwq,
+       TP_PROTO(int req_cpu, struct pool_workqueue *pwq,
                 struct work_struct *work),
 
        TP_ARGS(req_cpu, pwq, work),
@@ -31,8 +31,8 @@ TRACE_EVENT(workqueue_queue_work,
                __field( void *,        work    )
                __field( void *,        function)
                __string( workqueue,    pwq->wq->name)
-               __field( unsigned int,  req_cpu )
-               __field( unsigned int,  cpu     )
+               __field( int,   req_cpu )
+               __field( int,   cpu     )
        ),
 
        TP_fast_assign(
@@ -43,7 +43,7 @@ TRACE_EVENT(workqueue_queue_work,
                __entry->cpu            = pwq->pool->cpu;
        ),
 
-       TP_printk("work struct=%p function=%ps workqueue=%s req_cpu=%u cpu=%u",
+       TP_printk("work struct=%p function=%ps workqueue=%s req_cpu=%d cpu=%d",
                  __entry->work, __entry->function, __get_str(workqueue),
                  __entry->req_cpu, __entry->cpu)
 );
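The signedness change matters because unbound pools store a negative CPU
sentinel; formatted with %u, it used to show up in traces as a huge bogus
number. A runnable user-space illustration (the value -1 stands in for the
unbound-pool sentinel):

    #include <stdio.h>

    int main(void)
    {
            int cpu = -1;   /* sentinel used by unbound worker pools */

            printf("req_cpu=%u\n", (unsigned int)cpu);      /* 4294967295 */
            printf("req_cpu=%d\n", cpu);                    /* -1 */
            return 0;
    }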
index ac39328..bb8f808 100644 (file)
@@ -39,7 +39,7 @@
 /* TLS socket options */
 #define TLS_TX                 1       /* Set transmit parameters */
 #define TLS_RX                 2       /* Set receive parameters */
-#define TLS_TX_ZEROCOPY_SENDFILE       3       /* transmit zerocopy sendfile */
+#define TLS_TX_ZEROCOPY_RO     3       /* TX zerocopy (only sendfile now) */
 
 /* Supported versions */
 #define TLS_VERSION_MINOR(ver) ((ver) & 0xFF)
@@ -161,7 +161,7 @@ enum {
        TLS_INFO_CIPHER,
        TLS_INFO_TXCONF,
        TLS_INFO_RXCONF,
-       TLS_INFO_ZC_SENDFILE,
+       TLS_INFO_ZC_RO_TX,
        __TLS_INFO_MAX,
 };
 #define TLS_INFO_MAX (__TLS_INFO_MAX - 1)
diff --git a/include/xen/arm/xen-ops.h b/include/xen/arm/xen-ops.h
new file mode 100644 (file)
index 0000000..b0766a6
--- /dev/null
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_ARM_XEN_OPS_H
+#define _ASM_ARM_XEN_OPS_H
+
+#include <xen/swiotlb-xen.h>
+#include <xen/xen-ops.h>
+
+static inline void xen_setup_dma_ops(struct device *dev)
+{
+#ifdef CONFIG_XEN
+       if (xen_is_grant_dma_device(dev))
+               xen_grant_setup_dma_ops(dev);
+       else if (xen_swiotlb_detect())
+               dev->dma_ops = &xen_swiotlb_dma_ops;
+#endif
+}
+
+#endif /* _ASM_ARM_XEN_OPS_H */
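The new helper centralizes the choice between the grant-DMA ops and
swiotlb-xen. Presumably an arch's DMA setup path calls it once per device,
roughly as in this hedged sketch (the surrounding hook is illustrative, not
part of this patch):

    #include <xen/arm/xen-ops.h>

    /* Hypothetical arch hook: pick the Xen DMA ops for a new device. */
    static void my_arch_setup_device_dma(struct device *dev)
    {
            /* no-op without CONFIG_XEN; grant-DMA wins over swiotlb-xen */
            xen_setup_dma_ops(dev);
    }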
index 527c990..e279be3 100644 (file)
@@ -127,10 +127,14 @@ int gnttab_try_end_foreign_access(grant_ref_t ref);
  */
 int gnttab_alloc_grant_references(u16 count, grant_ref_t *pprivate_head);
 
+int gnttab_alloc_grant_reference_seq(unsigned int count, grant_ref_t *first);
+
 void gnttab_free_grant_reference(grant_ref_t ref);
 
 void gnttab_free_grant_references(grant_ref_t head);
 
+void gnttab_free_grant_reference_seq(grant_ref_t head, unsigned int count);
+
 int gnttab_empty_grant_references(const grant_ref_t *pprivate_head);
 
 int gnttab_claim_grant_reference(grant_ref_t *pprivate_head);
index c7c1b46..8054696 100644 (file)
@@ -214,4 +214,17 @@ static inline void xen_preemptible_hcall_end(void) { }
 
 #endif /* CONFIG_XEN_PV && !CONFIG_PREEMPTION */
 
+#ifdef CONFIG_XEN_GRANT_DMA_OPS
+void xen_grant_setup_dma_ops(struct device *dev);
+bool xen_is_grant_dma_device(struct device *dev);
+#else
+static inline void xen_grant_setup_dma_ops(struct device *dev)
+{
+}
+static inline bool xen_is_grant_dma_device(struct device *dev)
+{
+       return false;
+}
+#endif /* CONFIG_XEN_GRANT_DMA_OPS */
+
 #endif /* INCLUDE_XEN_OPS_H */
index a99bab8..0780a81 100644 (file)
@@ -52,6 +52,14 @@ bool xen_biovec_phys_mergeable(const struct bio_vec *vec1,
 extern u64 xen_saved_max_mem_size;
 #endif
 
+#include <linux/platform-feature.h>
+
+static inline void xen_set_restricted_virtio_memory_access(void)
+{
+       if (IS_ENABLED(CONFIG_XEN_VIRTIO) && xen_domain())
+               platform_set(PLATFORM_VIRTIO_RESTRICTED_MEM_ACCESS);
+}
+
 #ifdef CONFIG_XEN_UNPOPULATED_ALLOC
 int xen_alloc_unpopulated_pages(unsigned int nr_pages, struct page **pages);
 void xen_free_unpopulated_pages(unsigned int nr_pages, struct page **pages);
index c984afc..c7900e8 100644 (file)
@@ -885,6 +885,15 @@ config CC_IMPLICIT_FALLTHROUGH
        default "-Wimplicit-fallthrough=5" if CC_IS_GCC && $(cc-option,-Wimplicit-fallthrough=5)
        default "-Wimplicit-fallthrough" if CC_IS_CLANG && $(cc-option,-Wunreachable-code-fallthrough)
 
+# Currently, disable gcc-12 array-bounds globally.
+# We may want to target only particular configurations some day.
+config GCC12_NO_ARRAY_BOUNDS
+       def_bool y
+
+config CC_NO_ARRAY_BOUNDS
+       bool
+       default y if CC_IS_GCC && GCC_VERSION >= 120000 && GCC_VERSION < 130000 && GCC12_NO_ARRAY_BOUNDS
+
 #
 # For architectures that know their GCC __int128 support is sound
 #
index 318789c..a7e1f49 100644 (file)
@@ -7,7 +7,7 @@ obj-y     = fork.o exec_domain.o panic.o \
            cpu.o exit.o softirq.o resource.o \
            sysctl.o capability.o ptrace.o user.o \
            signal.o sys.o umh.o workqueue.o pid.o task_work.o \
-           extable.o params.o \
+           extable.o params.o platform-feature.o \
            kthread.o sys_ni.o nsproxy.o \
            notifier.o ksysfs.o cred.o reboot.o \
            async.o range.o smpboot.o ucount.o regset.o
index 7bccaa4..63d0ac7 100644 (file)
@@ -6054,6 +6054,7 @@ static int btf_check_func_arg_match(struct bpf_verifier_env *env,
                                    struct bpf_reg_state *regs,
                                    bool ptr_to_mem_ok)
 {
+       enum bpf_prog_type prog_type = resolve_prog_type(env->prog);
        struct bpf_verifier_log *log = &env->log;
        u32 i, nargs, ref_id, ref_obj_id = 0;
        bool is_kfunc = btf_is_kernel(btf);
@@ -6171,7 +6172,7 @@ static int btf_check_func_arg_match(struct bpf_verifier_env *env,
                                return -EINVAL;
                        }
                        /* rest of the arguments can be anything, like normal kfunc */
-               } else if (btf_get_prog_ctx_type(log, btf, t, env->prog->type, i)) {
+               } else if (btf_get_prog_ctx_type(log, btf, t, prog_type, i)) {
                        /* If function expects ctx type in BTF check that caller
                         * is passing PTR_TO_CTX.
                         */
index ac74063..2caafd1 100644 (file)
@@ -564,7 +564,7 @@ static void add_dma_entry(struct dma_debug_entry *entry, unsigned long attrs)
 
        rc = active_cacheline_insert(entry);
        if (rc == -ENOMEM) {
-               pr_err("cacheline tracking ENOMEM, dma-debug disabled\n");
+               pr_err_once("cacheline tracking ENOMEM, dma-debug disabled\n");
                global_disable = true;
        } else if (rc == -EEXIST && !(attrs & DMA_ATTR_SKIP_CPU_SYNC)) {
                err_printk(entry->dev, entry,
index dfa1de8..cb50f8d 100644 (file)
@@ -192,7 +192,7 @@ void __init swiotlb_update_mem_attributes(void)
 }
 
 static void swiotlb_init_io_tlb_mem(struct io_tlb_mem *mem, phys_addr_t start,
-                                   unsigned long nslabs, bool late_alloc)
+               unsigned long nslabs, unsigned int flags, bool late_alloc)
 {
        void *vaddr = phys_to_virt(start);
        unsigned long bytes = nslabs << IO_TLB_SHIFT, i;
@@ -203,8 +203,7 @@ static void swiotlb_init_io_tlb_mem(struct io_tlb_mem *mem, phys_addr_t start,
        mem->index = 0;
        mem->late_alloc = late_alloc;
 
-       if (swiotlb_force_bounce)
-               mem->force_bounce = true;
+       mem->force_bounce = swiotlb_force_bounce || (flags & SWIOTLB_FORCE);
 
        spin_lock_init(&mem->lock);
        for (i = 0; i < mem->nslabs; i++) {
@@ -275,8 +274,7 @@ retry:
                panic("%s: Failed to allocate %zu bytes align=0x%lx\n",
                      __func__, alloc_size, PAGE_SIZE);
 
-       swiotlb_init_io_tlb_mem(mem, __pa(tlb), nslabs, false);
-       mem->force_bounce = flags & SWIOTLB_FORCE;
+       swiotlb_init_io_tlb_mem(mem, __pa(tlb), nslabs, flags, false);
 
        if (flags & SWIOTLB_VERBOSE)
                swiotlb_print_info();
@@ -348,7 +346,7 @@ retry:
 
        set_memory_decrypted((unsigned long)vstart,
                             (nslabs << IO_TLB_SHIFT) >> PAGE_SHIFT);
-       swiotlb_init_io_tlb_mem(mem, virt_to_phys(vstart), nslabs, true);
+       swiotlb_init_io_tlb_mem(mem, virt_to_phys(vstart), nslabs, 0, true);
 
        swiotlb_print_info();
        return 0;
@@ -835,8 +833,8 @@ static int rmem_swiotlb_device_init(struct reserved_mem *rmem,
 
                set_memory_decrypted((unsigned long)phys_to_virt(rmem->base),
                                     rmem->size >> PAGE_SHIFT);
-               swiotlb_init_io_tlb_mem(mem, rmem->base, nslabs, false);
-               mem->force_bounce = true;
+               swiotlb_init_io_tlb_mem(mem, rmem->base, nslabs, SWIOTLB_FORCE,
+                               false);
                mem->for_alloc = true;
 
                rmem->priv = mem;
index 9d09f48..2e0f75b 100644 (file)
@@ -9,12 +9,6 @@ static int xfer_to_guest_mode_work(struct kvm_vcpu *vcpu, unsigned long ti_work)
                int ret;
 
                if (ti_work & (_TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL)) {
-                       clear_notify_signal();
-                       if (task_work_pending(current))
-                               task_work_run();
-               }
-
-               if (ti_work & _TIF_SIGPENDING) {
                        kvm_handle_signal_exit(vcpu);
                        return -EINTR;
                }
diff --git a/kernel/platform-feature.c b/kernel/platform-feature.c
new file mode 100644 (file)
index 0000000..cb6a6c3
--- /dev/null
@@ -0,0 +1,27 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/bitops.h>
+#include <linux/cache.h>
+#include <linux/export.h>
+#include <linux/platform-feature.h>
+
+#define PLATFORM_FEAT_ARRAY_SZ  BITS_TO_LONGS(PLATFORM_FEAT_N)
+static unsigned long __read_mostly platform_features[PLATFORM_FEAT_ARRAY_SZ];
+
+void platform_set(unsigned int feature)
+{
+       set_bit(feature, platform_features);
+}
+EXPORT_SYMBOL_GPL(platform_set);
+
+void platform_clear(unsigned int feature)
+{
+       clear_bit(feature, platform_features);
+}
+EXPORT_SYMBOL_GPL(platform_clear);
+
+bool platform_has(unsigned int feature)
+{
+       return test_bit(feature, platform_features);
+}
+EXPORT_SYMBOL_GPL(platform_has);
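The API is a plain global bitmap, so a feature set once during early boot can
be tested cheaply from any context afterwards. A hedged usage sketch, reusing
the PLATFORM_VIRTIO_RESTRICTED_MEM_ACCESS bit that this series introduces (the
initcall and consumer are hypothetical):

    #include <linux/init.h>
    #include <linux/platform-feature.h>

    static int __init my_platform_quirks_init(void)
    {
            /* boot-time decision, e.g. made by the hypervisor setup code */
            platform_set(PLATFORM_VIRTIO_RESTRICTED_MEM_ACCESS);
            return 0;
    }
    early_initcall(my_platform_quirks_init);

    /* later, on a hot path */
    static bool restricted_virtio_access(void)
    {
            return platform_has(PLATFORM_VIRTIO_RESTRICTED_MEM_ACCESS);
    }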
index a091145..b5a71d1 100644 (file)
@@ -315,6 +315,43 @@ static int sys_off_notify(struct notifier_block *nb,
        return handler->sys_off_cb(&data);
 }
 
+static struct sys_off_handler platform_sys_off_handler;
+
+static struct sys_off_handler *alloc_sys_off_handler(int priority)
+{
+       struct sys_off_handler *handler;
+       gfp_t flags;
+
+       /*
+        * Platforms like m68k can't allocate the sys_off handler dynamically
+        * at early boot time because the memory allocator isn't available yet.
+        */
+       if (priority == SYS_OFF_PRIO_PLATFORM) {
+               handler = &platform_sys_off_handler;
+               if (handler->cb_data)
+                       return ERR_PTR(-EBUSY);
+       } else {
+               if (system_state > SYSTEM_RUNNING)
+                       flags = GFP_ATOMIC;
+               else
+                       flags = GFP_KERNEL;
+
+               handler = kzalloc(sizeof(*handler), flags);
+               if (!handler)
+                       return ERR_PTR(-ENOMEM);
+       }
+
+       return handler;
+}
+
+static void free_sys_off_handler(struct sys_off_handler *handler)
+{
+       if (handler == &platform_sys_off_handler)
+               memset(handler, 0, sizeof(*handler));
+       else
+               kfree(handler);
+}
+
 /**
  *     register_sys_off_handler - Register sys-off handler
  *     @mode: Sys-off mode
@@ -345,9 +382,9 @@ register_sys_off_handler(enum sys_off_mode mode,
        struct sys_off_handler *handler;
        int err;
 
-       handler = kzalloc(sizeof(*handler), GFP_KERNEL);
-       if (!handler)
-               return ERR_PTR(-ENOMEM);
+       handler = alloc_sys_off_handler(priority);
+       if (IS_ERR(handler))
+               return handler;
 
        switch (mode) {
        case SYS_OFF_MODE_POWER_OFF_PREPARE:
@@ -364,7 +401,7 @@ register_sys_off_handler(enum sys_off_mode mode,
                break;
 
        default:
-               kfree(handler);
+               free_sys_off_handler(handler);
                return ERR_PTR(-EINVAL);
        }
 
@@ -391,7 +428,7 @@ register_sys_off_handler(enum sys_off_mode mode,
        }
 
        if (err) {
-               kfree(handler);
+               free_sys_off_handler(handler);
                return ERR_PTR(err);
        }
 
@@ -409,7 +446,7 @@ void unregister_sys_off_handler(struct sys_off_handler *handler)
 {
        int err;
 
-       if (!handler)
+       if (IS_ERR_OR_NULL(handler))
                return;
 
        if (handler->blocking)
@@ -422,7 +459,7 @@ void unregister_sys_off_handler(struct sys_off_handler *handler)
        /* sanity check, shall never happen */
        WARN_ON(err);
 
-       kfree(handler);
+       free_sys_off_handler(handler);
 }
 EXPORT_SYMBOL_GPL(unregister_sys_off_handler);
 
@@ -584,7 +621,23 @@ static void do_kernel_power_off_prepare(void)
  */
 void do_kernel_power_off(void)
 {
+       struct sys_off_handler *sys_off = NULL;
+
+       /*
+        * Register a sys-off handler for the legacy PM callback. This allows
+        * legacy PM callbacks to temporarily co-exist with the new sys-off API.
+        *
+        * TODO: Remove the legacy handlers once all legacy PM users have been
+        *       switched to the sys-off based APIs.
+        */
+       if (pm_power_off)
+               sys_off = register_sys_off_handler(SYS_OFF_MODE_POWER_OFF,
+                                                  SYS_OFF_PRIO_DEFAULT,
+                                                  legacy_pm_power_off, NULL);
+
        atomic_notifier_call_chain(&power_off_handler_list, 0, NULL);
+
+       unregister_sys_off_handler(sys_off);
 }
 
 /**
@@ -595,7 +648,8 @@ void do_kernel_power_off(void)
  */
 bool kernel_can_power_off(void)
 {
-       return !atomic_notifier_call_chain_is_empty(&power_off_handler_list);
+       return !atomic_notifier_call_chain_is_empty(&power_off_handler_list) ||
+               pm_power_off;
 }
 EXPORT_SYMBOL_GPL(kernel_can_power_off);
 
@@ -630,7 +684,6 @@ SYSCALL_DEFINE4(reboot, int, magic1, int, magic2, unsigned int, cmd,
                void __user *, arg)
 {
        struct pid_namespace *pid_ns = task_active_pid_ns(current);
-       struct sys_off_handler *sys_off = NULL;
        char buffer[256];
        int ret = 0;
 
@@ -655,21 +708,6 @@ SYSCALL_DEFINE4(reboot, int, magic1, int, magic2, unsigned int, cmd,
        if (ret)
                return ret;
 
-       /*
-        * Register sys-off handlers for legacy PM callback. This allows
-        * legacy PM callbacks temporary co-exist with the new sys-off API.
-        *
-        * TODO: Remove legacy handlers once all legacy PM users will be
-        *       switched to the sys-off based APIs.
-        */
-       if (pm_power_off) {
-               sys_off = register_sys_off_handler(SYS_OFF_MODE_POWER_OFF,
-                                                  SYS_OFF_PRIO_DEFAULT,
-                                                  legacy_pm_power_off, NULL);
-               if (IS_ERR(sys_off))
-                       return PTR_ERR(sys_off);
-       }
-
        /* Instead of trying to make the power_off code look like
         * halt when pm_power_off is not set do it the easy way.
         */
@@ -727,7 +765,6 @@ SYSCALL_DEFINE4(reboot, int, magic1, int, magic2, unsigned int, cmd,
                break;
        }
        mutex_unlock(&system_transition_mutex);
-       unregister_sys_off_handler(sys_off);
        return ret;
 }
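With the legacy bridge moved into do_kernel_power_off(), every path that
powers the machine off (not just sys_reboot) now sees a handler backed by
pm_power_off. The intended replacement for drivers that poke pm_power_off
directly is the registration API used above; a hedged sketch with a
hypothetical PMIC callback:

    #include <linux/err.h>
    #include <linux/notifier.h>
    #include <linux/reboot.h>

    static int my_pmic_power_off(struct sys_off_data *data)
    {
            /* tell the PMIC to cut power; data->cb_data holds driver state */
            return NOTIFY_DONE;
    }

    static struct sys_off_handler *my_handler;

    static int my_pmic_probe(void)
    {
            my_handler = register_sys_off_handler(SYS_OFF_MODE_POWER_OFF,
                                                  SYS_OFF_PRIO_DEFAULT,
                                                  my_pmic_power_off, NULL);
            return PTR_ERR_OR_ZERO(my_handler);
    }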
 
index 10b157a..7a13e6a 100644 (file)
@@ -2263,11 +2263,11 @@ static int copy_user_syms(struct user_syms *us, unsigned long __user *usyms, u32
        int err = -ENOMEM;
        unsigned int i;
 
-       syms = kvmalloc(cnt * sizeof(*syms), GFP_KERNEL);
+       syms = kvmalloc_array(cnt, sizeof(*syms), GFP_KERNEL);
        if (!syms)
                goto error;
 
-       buf = kvmalloc(cnt * KSYM_NAME_LEN, GFP_KERNEL);
+       buf = kvmalloc_array(cnt, KSYM_NAME_LEN, GFP_KERNEL);
        if (!buf)
                goto error;
 
@@ -2464,7 +2464,7 @@ int bpf_kprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *pr
                return -EINVAL;
 
        size = cnt * sizeof(*addrs);
-       addrs = kvmalloc(size, GFP_KERNEL);
+       addrs = kvmalloc_array(cnt, sizeof(*addrs), GFP_KERNEL);
        if (!addrs)
                return -ENOMEM;
 
@@ -2489,7 +2489,7 @@ int bpf_kprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *pr
 
        ucookies = u64_to_user_ptr(attr->link_create.kprobe_multi.cookies);
        if (ucookies) {
-               cookies = kvmalloc(size, GFP_KERNEL);
+               cookies = kvmalloc_array(cnt, sizeof(*addrs), GFP_KERNEL);
                if (!cookies) {
                        err = -ENOMEM;
                        goto error;
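The point of kvmalloc_array() over an open-coded kvmalloc(cnt * size, ...) is
the overflow check on the multiplication: a huge user-supplied cnt would
otherwise wrap to a small allocation that later gets overrun. A hedged
user-space illustration of the same guard (__builtin_mul_overflow mirrors the
check_mul_overflow() test kvmalloc_array() performs internally):

    #include <stdlib.h>

    /* Returns NULL instead of wrapping when n * size overflows size_t. */
    static void *alloc_array(size_t n, size_t size)
    {
            size_t bytes;

            if (__builtin_mul_overflow(n, size, &bytes))
                    return NULL;
            return malloc(bytes);
    }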
index 4056f2a..1ea50f6 100644 (file)
@@ -2788,13 +2788,13 @@ static bool flush_workqueue_prep_pwqs(struct workqueue_struct *wq,
 }
 
 /**
- * flush_workqueue - ensure that any scheduled work has run to completion.
+ * __flush_workqueue - ensure that any scheduled work has run to completion.
  * @wq: workqueue to flush
  *
  * This function sleeps until all work items which were queued on entry
  * have finished execution, but it is not livelocked by new incoming ones.
  */
-void flush_workqueue(struct workqueue_struct *wq)
+void __flush_workqueue(struct workqueue_struct *wq)
 {
        struct wq_flusher this_flusher = {
                .list = LIST_HEAD_INIT(this_flusher.list),
@@ -2943,7 +2943,7 @@ void flush_workqueue(struct workqueue_struct *wq)
 out_unlock:
        mutex_unlock(&wq->mutex);
 }
-EXPORT_SYMBOL(flush_workqueue);
+EXPORT_SYMBOL(__flush_workqueue);
 
 /**
  * drain_workqueue - drain a workqueue
@@ -2971,7 +2971,7 @@ void drain_workqueue(struct workqueue_struct *wq)
                wq->flags |= __WQ_DRAINING;
        mutex_unlock(&wq->mutex);
 reflush:
-       flush_workqueue(wq);
+       __flush_workqueue(wq);
 
        mutex_lock(&wq->mutex);
 
@@ -6111,3 +6111,11 @@ void __init workqueue_init(void)
        wq_online = true;
        wq_watchdog_init();
 }
+
+/*
+ * Despite the name, this is a no-op function which is here only to avoid a
+ * link error. Since the compile-time warning may fail to catch all cases, we
+ * will need to emit a run-time warning from __flush_workqueue().
+ */
+void __warn_flushing_systemwide_wq(void) { }
+EXPORT_SYMBOL(__warn_flushing_systemwide_wq);
index 1974b35..1d26a16 100644 (file)
@@ -7,7 +7,7 @@
 #include <linux/module.h>
 #include <linux/crc-itu-t.h>
 
-/** CRC table for the CRC ITU-T V.41 0x1021 (x^16 + x^12 + x^15 + 1) */
+/* CRC table for the CRC ITU-T V.41 0x1021 (x^16 + x^12 + x^5 + 1) */
 const u16 crc_itu_t_table[256] = {
        0x0000, 0x1021, 0x2042, 0x3063, 0x4084, 0x50a5, 0x60c6, 0x70e7,
        0x8108, 0x9129, 0xa14a, 0xb16b, 0xc18c, 0xd1ad, 0xe1ce, 0xf1ef,
index 6dd5330..0b64695 100644 (file)
@@ -1434,7 +1434,7 @@ static ssize_t iter_xarray_get_pages(struct iov_iter *i,
 {
        unsigned nr, offset;
        pgoff_t index, count;
-       size_t size = maxsize, actual;
+       size_t size = maxsize;
        loff_t pos;
 
        if (!size || !maxpages)
@@ -1461,13 +1461,7 @@ static ssize_t iter_xarray_get_pages(struct iov_iter *i,
        if (nr == 0)
                return 0;
 
-       actual = PAGE_SIZE * nr;
-       actual -= offset;
-       if (nr == count && size > 0) {
-               unsigned last_offset = (nr > 1) ? 0 : offset;
-               actual -= PAGE_SIZE - (last_offset + size);
-       }
-       return actual;
+       return min_t(size_t, nr * PAGE_SIZE - offset, maxsize);
 }
 
 /* must be done on non-empty ITER_IOVEC one */
@@ -1602,7 +1596,7 @@ static ssize_t iter_xarray_get_pages_alloc(struct iov_iter *i,
        struct page **p;
        unsigned nr, offset;
        pgoff_t index, count;
-       size_t size = maxsize, actual;
+       size_t size = maxsize;
        loff_t pos;
 
        if (!size)
@@ -1631,13 +1625,7 @@ static ssize_t iter_xarray_get_pages_alloc(struct iov_iter *i,
        if (nr == 0)
                return 0;
 
-       actual = PAGE_SIZE * nr;
-       actual -= offset;
-       if (nr == count && size > 0) {
-               unsigned last_offset = (nr > 1) ? 0 : offset;
-               actual -= PAGE_SIZE - (last_offset + size);
-       }
-       return actual;
+       return min_t(size_t, nr * PAGE_SIZE - offset, maxsize);
 }
 
 ssize_t iov_iter_get_pages_alloc(struct iov_iter *i,
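The simplified return value reads directly as "bytes covered by the mapped
pages, clamped to the request": nr pages span nr * PAGE_SIZE - offset usable
bytes starting at offset, and the caller never gets more than maxsize. For
example, with PAGE_SIZE 4096, offset 100, nr 2 and maxsize 5000, the pages
cover 8092 bytes and the function returns 5000.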
index fb77f7b..3c1853a 100644 (file)
@@ -769,8 +769,7 @@ static inline int __ptr_to_hashval(const void *ptr, unsigned long *hashval_out)
                static DECLARE_WORK(enable_ptr_key_work, enable_ptr_key_workfn);
                unsigned long flags;
 
-               if (!system_unbound_wq ||
-                   (!rng_is_initialized() && !rng_has_arch_random()) ||
+               if (!system_unbound_wq || !rng_is_initialized() ||
                    !spin_trylock_irqsave(&filling, flags))
                        return -EAGAIN;
 
index 54e646e..ea9ce1f 100644 (file)
@@ -264,9 +264,10 @@ static void xa_node_free(struct xa_node *node)
  * xas_destroy() - Free any resources allocated during the XArray operation.
  * @xas: XArray operation state.
  *
- * This function is now internal-only.
+ * Most users will not need to call this function; it is called for you
+ * by xas_nomem().
  */
-static void xas_destroy(struct xa_state *xas)
+void xas_destroy(struct xa_state *xas)
 {
        struct xa_node *next, *node = xas->xa_alloc;
 
index 9daeaab..ac3775c 100644 (file)
@@ -2991,11 +2991,12 @@ static struct file *do_sync_mmap_readahead(struct vm_fault *vmf)
        struct address_space *mapping = file->f_mapping;
        DEFINE_READAHEAD(ractl, file, ra, mapping, vmf->pgoff);
        struct file *fpin = NULL;
+       unsigned long vm_flags = vmf->vma->vm_flags;
        unsigned int mmap_miss;
 
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
        /* Use the readahead code, even if readahead is disabled */
-       if (vmf->vma->vm_flags & VM_HUGEPAGE) {
+       if (vm_flags & VM_HUGEPAGE) {
                fpin = maybe_unlock_mmap_for_io(vmf, fpin);
                ractl._index &= ~((unsigned long)HPAGE_PMD_NR - 1);
                ra->size = HPAGE_PMD_NR;
@@ -3003,7 +3004,7 @@ static struct file *do_sync_mmap_readahead(struct vm_fault *vmf)
                 * Fetch two PMD folios, so we get the chance to actually
                 * readahead, unless we've been told not to.
                 */
-               if (!(vmf->vma->vm_flags & VM_RAND_READ))
+               if (!(vm_flags & VM_RAND_READ))
                        ra->size *= 2;
                ra->async_size = HPAGE_PMD_NR;
                page_cache_ra_order(&ractl, ra, HPAGE_PMD_ORDER);
@@ -3012,12 +3013,12 @@ static struct file *do_sync_mmap_readahead(struct vm_fault *vmf)
 #endif
 
        /* If we don't want any read-ahead, don't bother */
-       if (vmf->vma->vm_flags & VM_RAND_READ)
+       if (vm_flags & VM_RAND_READ)
                return fpin;
        if (!ra->ra_pages)
                return fpin;
 
-       if (vmf->vma->vm_flags & VM_SEQ_READ) {
+       if (vm_flags & VM_SEQ_READ) {
                fpin = maybe_unlock_mmap_for_io(vmf, fpin);
                page_cache_sync_ra(&ractl, ra->ra_pages);
                return fpin;
index a77c78a..f724800 100644 (file)
@@ -2672,8 +2672,7 @@ out_unlock:
        if (mapping)
                i_mmap_unlock_read(mapping);
 out:
-       /* Free any memory we didn't use */
-       xas_nomem(&xas, 0);
+       xas_destroy(&xas);
        count_vm_event(!ret ? THP_SPLIT_PAGE : THP_SPLIT_PAGE_FAILED);
        return ret;
 }
index 415c39d..57a0151 100644 (file)
@@ -164,12 +164,14 @@ static void read_pages(struct readahead_control *rac)
                while ((folio = readahead_folio(rac)) != NULL) {
                        unsigned long nr = folio_nr_pages(folio);
 
+                       folio_get(folio);
                        rac->ra->size -= nr;
                        if (rac->ra->async_size >= nr) {
                                rac->ra->async_size -= nr;
                                filemap_remove_folio(folio);
                        }
                        folio_unlock(folio);
+                       folio_put(folio);
                }
        } else {
                while ((folio = readahead_folio(rac)) != NULL)
index 73f68d4..929f637 100644 (file)
@@ -595,3 +595,9 @@ int flow_indr_dev_setup_offload(struct net_device *dev,     struct Qdisc *sch,
        return (bo && list_empty(&bo->cb_list)) ? -EOPNOTSUPP : count;
 }
 EXPORT_SYMBOL(flow_indr_dev_setup_offload);
+
+bool flow_indr_dev_exists(void)
+{
+       return !list_empty(&flow_block_indr_dev_list);
+}
+EXPORT_SYMBOL(flow_indr_dev_exists);
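nf_tables uses the new predicate to accept hardware offload on devices that
lack an ndo_setup_tc callback, provided some driver has registered for
indirect flow blocks (see nft_chain_offload_support() below). A hedged
restatement of that viability test:

    static bool dev_can_offload(const struct net_device *dev)
    {
            return dev->netdev_ops->ndo_setup_tc || flow_indr_dev_exists();
    }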
index e8de5e6..545f91b 100644 (file)
@@ -1026,10 +1026,12 @@ void __init inet_hashinfo2_init(struct inet_hashinfo *h, const char *name,
        init_hashinfo_lhash2(h);
 
        /* this one is used for source ports of outgoing connections */
-       table_perturb = kmalloc_array(INET_TABLE_PERTURB_SIZE,
-                                     sizeof(*table_perturb), GFP_KERNEL);
-       if (!table_perturb)
-               panic("TCP: failed to alloc table_perturb");
+       table_perturb = alloc_large_system_hash("Table-perturb",
+                                               sizeof(*table_perturb),
+                                               INET_TABLE_PERTURB_SIZE,
+                                               0, 0, NULL, NULL,
+                                               INET_TABLE_PERTURB_SIZE,
+                                               INET_TABLE_PERTURB_SIZE);
 }
 
 int inet_hashinfo2_init_mod(struct inet_hashinfo *h)
index 7e474a8..3b9cd48 100644 (file)
@@ -629,21 +629,20 @@ static netdev_tx_t ipgre_xmit(struct sk_buff *skb,
        }
 
        if (dev->header_ops) {
-               const int pull_len = tunnel->hlen + sizeof(struct iphdr);
-
                if (skb_cow_head(skb, 0))
                        goto free_skb;
 
                tnl_params = (const struct iphdr *)skb->data;
 
-               if (pull_len > skb_transport_offset(skb))
-                       goto free_skb;
-
                /* Pull skb since ip_tunnel_xmit() needs skb->data pointing
                 * to gre header.
                 */
-               skb_pull(skb, pull_len);
+               skb_pull(skb, tunnel->hlen + sizeof(struct iphdr));
                skb_reset_mac_header(skb);
+
+               if (skb->ip_summed == CHECKSUM_PARTIAL &&
+                   skb_checksum_start(skb) < skb->data)
+                       goto free_skb;
        } else {
                if (skb_cow_head(skb, dev->needed_headroom))
                        goto free_skb;
index 2fe5860..b146ce8 100644 (file)
@@ -304,4 +304,3 @@ void __init xfrm4_protocol_init(void)
 {
        xfrm_input_register_afinfo(&xfrm4_input_afinfo);
 }
-EXPORT_SYMBOL(xfrm4_protocol_init);
index 4081b12..77e3f59 100644 (file)
@@ -1450,7 +1450,7 @@ static int __ip6_append_data(struct sock *sk,
                             struct page_frag *pfrag,
                             int getfrag(void *from, char *to, int offset,
                                         int len, int odd, struct sk_buff *skb),
-                            void *from, int length, int transhdrlen,
+                            void *from, size_t length, int transhdrlen,
                             unsigned int flags, struct ipcm6_cookie *ipc6)
 {
        struct sk_buff *skb, *skb_prev = NULL;
@@ -1798,7 +1798,7 @@ error:
 int ip6_append_data(struct sock *sk,
                    int getfrag(void *from, char *to, int offset, int len,
                                int odd, struct sk_buff *skb),
-                   void *from, int length, int transhdrlen,
+                   void *from, size_t length, int transhdrlen,
                    struct ipcm6_cookie *ipc6, struct flowi6 *fl6,
                    struct rt6_info *rt, unsigned int flags)
 {
@@ -1995,7 +1995,7 @@ EXPORT_SYMBOL_GPL(ip6_flush_pending_frames);
 struct sk_buff *ip6_make_skb(struct sock *sk,
                             int getfrag(void *from, char *to, int offset,
                                         int len, int odd, struct sk_buff *skb),
-                            void *from, int length, int transhdrlen,
+                            void *from, size_t length, int transhdrlen,
                             struct ipcm6_cookie *ipc6, struct rt6_info *rt,
                             unsigned int flags, struct inet_cork_full *cork)
 {
index 29bc4e7..6de0118 100644 (file)
@@ -399,7 +399,6 @@ int __init seg6_hmac_init(void)
 {
        return seg6_hmac_init_algo();
 }
-EXPORT_SYMBOL(seg6_hmac_init);
 
 int __net_init seg6_hmac_net_init(struct net *net)
 {
index 9fbe243..98a3428 100644 (file)
@@ -218,6 +218,7 @@ seg6_lookup_any_nexthop(struct sk_buff *skb, struct in6_addr *nhaddr,
        struct flowi6 fl6;
        int dev_flags = 0;
 
+       memset(&fl6, 0, sizeof(fl6));
        fl6.flowi6_iif = skb->dev->ifindex;
        fl6.daddr = nhaddr ? *nhaddr : hdr->daddr;
        fl6.saddr = hdr->saddr;
index c6ff8bf..9dbd801 100644 (file)
@@ -504,14 +504,15 @@ static int l2tp_ip6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
        struct ipcm6_cookie ipc6;
        int addr_len = msg->msg_namelen;
        int transhdrlen = 4; /* zero session-id */
-       int ulen = len + transhdrlen;
+       int ulen;
        int err;
 
        /* Rough check on arithmetic overflow;
         * a better check is made in ip6_append_data().
         */
-       if (len > INT_MAX)
+       if (len > INT_MAX - transhdrlen)
                return -EMSGSIZE;
+       ulen = len + transhdrlen;
 
        /* Mirror BSD error message compatibility */
        if (msg->msg_flags & MSG_OOB)
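Ordering matters here: computing len + transhdrlen before the bound check can
overflow a signed int (undefined behavior) when len is close to INT_MAX, and
together with the ip6_append_data() switch to size_t above this closes the
wraparound. The guarded form generalizes to any bounded sum, as in this
hedged helper:

    #include <limits.h>
    #include <stddef.h>

    /* Reject before adding, so len + hdrlen never wraps (hdrlen >= 0). */
    static int checked_total(size_t len, int hdrlen, int *total)
    {
            if (len > (size_t)(INT_MAX - hdrlen))
                    return -1;
            *total = (int)len + hdrlen;
            return 0;
    }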
index 746be13..51144fc 100644 (file)
@@ -544,6 +544,7 @@ static int nft_trans_flowtable_add(struct nft_ctx *ctx, int msg_type,
        if (msg_type == NFT_MSG_NEWFLOWTABLE)
                nft_activate_next(ctx->net, flowtable);
 
+       INIT_LIST_HEAD(&nft_trans_flowtable_hooks(trans));
        nft_trans_flowtable(trans) = flowtable;
        nft_trans_commit_list_add_tail(ctx->net, trans);
 
@@ -1914,7 +1915,6 @@ static struct nft_hook *nft_netdev_hook_alloc(struct net *net,
                goto err_hook_dev;
        }
        hook->ops.dev = dev;
-       hook->inactive = false;
 
        return hook;
 
@@ -2166,7 +2166,7 @@ static int nft_basechain_init(struct nft_base_chain *basechain, u8 family,
        chain->flags |= NFT_CHAIN_BASE | flags;
        basechain->policy = NF_ACCEPT;
        if (chain->flags & NFT_CHAIN_HW_OFFLOAD &&
-           nft_chain_offload_priority(basechain) < 0)
+           !nft_chain_offload_support(basechain))
                return -EOPNOTSUPP;
 
        flow_block_init(&basechain->flow_block);
@@ -7332,7 +7332,7 @@ static void __nft_unregister_flowtable_net_hooks(struct net *net,
                nf_unregister_net_hook(net, &hook->ops);
                if (release_netdev) {
                        list_del(&hook->list);
-                       kfree_rcu(hook);
+                       kfree_rcu(hook, rcu);
                }
        }
 }
@@ -7433,11 +7433,15 @@ static int nft_flowtable_update(struct nft_ctx *ctx, const struct nlmsghdr *nlh,
 
        if (nla[NFTA_FLOWTABLE_FLAGS]) {
                flags = ntohl(nla_get_be32(nla[NFTA_FLOWTABLE_FLAGS]));
-               if (flags & ~NFT_FLOWTABLE_MASK)
-                       return -EOPNOTSUPP;
+               if (flags & ~NFT_FLOWTABLE_MASK) {
+                       err = -EOPNOTSUPP;
+                       goto err_flowtable_update_hook;
+               }
                if ((flowtable->data.flags & NFT_FLOWTABLE_HW_OFFLOAD) ^
-                   (flags & NFT_FLOWTABLE_HW_OFFLOAD))
-                       return -EOPNOTSUPP;
+                   (flags & NFT_FLOWTABLE_HW_OFFLOAD)) {
+                       err = -EOPNOTSUPP;
+                       goto err_flowtable_update_hook;
+               }
        } else {
                flags = flowtable->data.flags;
        }
@@ -7618,6 +7622,7 @@ static int nft_delflowtable_hook(struct nft_ctx *ctx,
 {
        const struct nlattr * const *nla = ctx->nla;
        struct nft_flowtable_hook flowtable_hook;
+       LIST_HEAD(flowtable_del_list);
        struct nft_hook *this, *hook;
        struct nft_trans *trans;
        int err;
@@ -7633,7 +7638,7 @@ static int nft_delflowtable_hook(struct nft_ctx *ctx,
                        err = -ENOENT;
                        goto err_flowtable_del_hook;
                }
-               hook->inactive = true;
+               list_move(&hook->list, &flowtable_del_list);
        }
 
        trans = nft_trans_alloc(ctx, NFT_MSG_DELFLOWTABLE,
@@ -7646,6 +7651,7 @@ static int nft_delflowtable_hook(struct nft_ctx *ctx,
        nft_trans_flowtable(trans) = flowtable;
        nft_trans_flowtable_update(trans) = true;
        INIT_LIST_HEAD(&nft_trans_flowtable_hooks(trans));
+       list_splice(&flowtable_del_list, &nft_trans_flowtable_hooks(trans));
        nft_flowtable_hook_release(&flowtable_hook);
 
        nft_trans_commit_list_add_tail(ctx->net, trans);
@@ -7653,13 +7659,7 @@ static int nft_delflowtable_hook(struct nft_ctx *ctx,
        return 0;
 
 err_flowtable_del_hook:
-       list_for_each_entry(this, &flowtable_hook.list, list) {
-               hook = nft_hook_list_find(&flowtable->hook_list, this);
-               if (!hook)
-                       break;
-
-               hook->inactive = false;
-       }
+       list_splice(&flowtable_del_list, &flowtable->hook_list);
        nft_flowtable_hook_release(&flowtable_hook);
 
        return err;
@@ -8329,6 +8329,9 @@ static void nft_commit_release(struct nft_trans *trans)
                nf_tables_chain_destroy(&trans->ctx);
                break;
        case NFT_MSG_DELRULE:
+               if (trans->ctx.chain->flags & NFT_CHAIN_HW_OFFLOAD)
+                       nft_flow_rule_destroy(nft_trans_flow_rule(trans));
+
                nf_tables_rule_destroy(&trans->ctx, nft_trans_rule(trans));
                break;
        case NFT_MSG_DELSET:
@@ -8563,17 +8566,6 @@ void nft_chain_del(struct nft_chain *chain)
        list_del_rcu(&chain->list);
 }
 
-static void nft_flowtable_hooks_del(struct nft_flowtable *flowtable,
-                                   struct list_head *hook_list)
-{
-       struct nft_hook *hook, *next;
-
-       list_for_each_entry_safe(hook, next, &flowtable->hook_list, list) {
-               if (hook->inactive)
-                       list_move(&hook->list, hook_list);
-       }
-}
-
 static void nf_tables_module_autoload_cleanup(struct net *net)
 {
        struct nftables_pernet *nft_net = nft_pernet(net);
@@ -8828,6 +8820,9 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb)
                        nf_tables_rule_notify(&trans->ctx,
                                              nft_trans_rule(trans),
                                              NFT_MSG_NEWRULE);
+                       if (trans->ctx.chain->flags & NFT_CHAIN_HW_OFFLOAD)
+                               nft_flow_rule_destroy(nft_trans_flow_rule(trans));
+
                        nft_trans_destroy(trans);
                        break;
                case NFT_MSG_DELRULE:
@@ -8918,8 +8913,6 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb)
                        break;
                case NFT_MSG_DELFLOWTABLE:
                        if (nft_trans_flowtable_update(trans)) {
-                               nft_flowtable_hooks_del(nft_trans_flowtable(trans),
-                                                       &nft_trans_flowtable_hooks(trans));
                                nf_tables_flowtable_notify(&trans->ctx,
                                                           nft_trans_flowtable(trans),
                                                           &nft_trans_flowtable_hooks(trans),
@@ -9000,7 +8993,6 @@ static int __nf_tables_abort(struct net *net, enum nfnl_abort_action action)
        struct nftables_pernet *nft_net = nft_pernet(net);
        struct nft_trans *trans, *next;
        struct nft_trans_elem *te;
-       struct nft_hook *hook;
 
        if (action == NFNL_ABORT_VALIDATE &&
            nf_tables_validate(net) < 0)
@@ -9131,8 +9123,8 @@ static int __nf_tables_abort(struct net *net, enum nfnl_abort_action action)
                        break;
                case NFT_MSG_DELFLOWTABLE:
                        if (nft_trans_flowtable_update(trans)) {
-                               list_for_each_entry(hook, &nft_trans_flowtable(trans)->hook_list, list)
-                                       hook->inactive = false;
+                               list_splice(&nft_trans_flowtable_hooks(trans),
+                                           &nft_trans_flowtable(trans)->hook_list);
                        } else {
                                trans->ctx.table->use++;
                                nft_clear(trans->ctx.net, nft_trans_flowtable(trans));
index 2d36952..910ef88 100644 (file)
@@ -208,7 +208,7 @@ static int nft_setup_cb_call(enum tc_setup_type type, void *type_data,
        return 0;
 }
 
-int nft_chain_offload_priority(struct nft_base_chain *basechain)
+static int nft_chain_offload_priority(const struct nft_base_chain *basechain)
 {
        if (basechain->ops.priority <= 0 ||
            basechain->ops.priority > USHRT_MAX)
@@ -217,6 +217,27 @@ int nft_chain_offload_priority(struct nft_base_chain *basechain)
        return 0;
 }
 
+bool nft_chain_offload_support(const struct nft_base_chain *basechain)
+{
+       struct net_device *dev;
+       struct nft_hook *hook;
+
+       if (nft_chain_offload_priority(basechain) < 0)
+               return false;
+
+       list_for_each_entry(hook, &basechain->hook_list, list) {
+               if (hook->ops.pf != NFPROTO_NETDEV ||
+                   hook->ops.hooknum != NF_NETDEV_INGRESS)
+                       return false;
+
+               dev = hook->ops.dev;
+               if (!dev->netdev_ops->ndo_setup_tc && !flow_indr_dev_exists())
+                       return false;
+       }
+
+       return true;
+}
+
 static void nft_flow_cls_offload_setup(struct flow_cls_offload *cls_flow,
                                       const struct nft_base_chain *basechain,
                                       const struct nft_rule *rule,
index 4394df4..e5fd699 100644 (file)
@@ -335,7 +335,8 @@ static void nft_nat_inet_eval(const struct nft_expr *expr,
 {
        const struct nft_nat *priv = nft_expr_priv(expr);
 
-       if (priv->family == nft_pf(pkt))
+       if (priv->family == nft_pf(pkt) ||
+           priv->family == NFPROTO_INET)
                nft_nat_eval(expr, regs, pkt);
 }
 
index 1b5d730..868db46 100644 (file)
@@ -373,6 +373,7 @@ static void set_ip_addr(struct sk_buff *skb, struct iphdr *nh,
        update_ip_l4_checksum(skb, nh, *addr, new_addr);
        csum_replace4(&nh->check, *addr, new_addr);
        skb_clear_hash(skb);
+       ovs_ct_clear(skb, NULL);
        *addr = new_addr;
 }
 
@@ -420,6 +421,7 @@ static void set_ipv6_addr(struct sk_buff *skb, u8 l4_proto,
                update_ipv6_checksum(skb, l4_proto, addr, new_addr);
 
        skb_clear_hash(skb);
+       ovs_ct_clear(skb, NULL);
        memcpy(addr, new_addr, sizeof(__be32[4]));
 }
 
@@ -660,6 +662,7 @@ static int set_nsh(struct sk_buff *skb, struct sw_flow_key *flow_key,
 static void set_tp_port(struct sk_buff *skb, __be16 *port,
                        __be16 new_port, __sum16 *check)
 {
+       ovs_ct_clear(skb, NULL);
        inet_proto_csum_replace2(check, skb, *port, new_port, false);
        *port = new_port;
 }
@@ -699,6 +702,7 @@ static int set_udp(struct sk_buff *skb, struct sw_flow_key *flow_key,
                uh->dest = dst;
                flow_key->tp.src = src;
                flow_key->tp.dst = dst;
+               ovs_ct_clear(skb, NULL);
        }
 
        skb_clear_hash(skb);
@@ -761,6 +765,8 @@ static int set_sctp(struct sk_buff *skb, struct sw_flow_key *flow_key,
        sh->checksum = old_csum ^ old_correct_csum ^ new_csum;
 
        skb_clear_hash(skb);
+       ovs_ct_clear(skb, NULL);
+
        flow_key->tp.src = sh->source;
        flow_key->tp.dst = sh->dest;
 
index 4a947c1..4e70df9 100644 (file)
@@ -1342,7 +1342,9 @@ int ovs_ct_clear(struct sk_buff *skb, struct sw_flow_key *key)
 
        nf_ct_put(ct);
        nf_ct_set(skb, NULL, IP_CT_UNTRACKED);
-       ovs_ct_fill_key(skb, key, false);
+
+       if (key)
+               ovs_ct_fill_key(skb, key, false);
 
        return 0;
 }
index df194cc..f87a2d8 100644 (file)
@@ -919,7 +919,7 @@ void xdr_init_encode(struct xdr_stream *xdr, struct xdr_buf *buf, __be32 *p,
 EXPORT_SYMBOL_GPL(xdr_init_encode);
 
 /**
- * xdr_commit_encode - Ensure all data is written to buffer
+ * __xdr_commit_encode - Ensure all data is written to buffer
  * @xdr: pointer to xdr_stream
  *
  * We handle encoding across page boundaries by giving the caller a
@@ -931,26 +931,29 @@ EXPORT_SYMBOL_GPL(xdr_init_encode);
  * required at the end of encoding, or any other time when the xdr_buf
  * data might be read.
  */
-inline void xdr_commit_encode(struct xdr_stream *xdr)
+void __xdr_commit_encode(struct xdr_stream *xdr)
 {
-       int shift = xdr->scratch.iov_len;
+       size_t shift = xdr->scratch.iov_len;
        void *page;
 
-       if (shift == 0)
-               return;
        page = page_address(*xdr->page_ptr);
        memcpy(xdr->scratch.iov_base, page, shift);
        memmove(page, page + shift, (void *)xdr->p - page);
        xdr_reset_scratch_buffer(xdr);
 }
-EXPORT_SYMBOL_GPL(xdr_commit_encode);
+EXPORT_SYMBOL_GPL(__xdr_commit_encode);
 
-static __be32 *xdr_get_next_encode_buffer(struct xdr_stream *xdr,
-               size_t nbytes)
+/*
+ * The buffer space to be reserved crosses the boundary between
+ * xdr->buf->head and xdr->buf->pages, or between two pages
+ * in xdr->buf->pages.
+ */
+static noinline __be32 *xdr_get_next_encode_buffer(struct xdr_stream *xdr,
+                                                  size_t nbytes)
 {
-       __be32 *p;
        int space_left;
        int frag1bytes, frag2bytes;
+       void *p;
 
        if (nbytes > PAGE_SIZE)
                goto out_overflow; /* Bigger buffers require special handling */
@@ -964,6 +967,7 @@ static __be32 *xdr_get_next_encode_buffer(struct xdr_stream *xdr,
                xdr->buf->page_len += frag1bytes;
        xdr->page_ptr++;
        xdr->iov = NULL;
+
        /*
         * If the last encode didn't end exactly on a page boundary, the
         * next one will straddle boundaries.  Encode into the next
@@ -972,14 +976,19 @@ static __be32 *xdr_get_next_encode_buffer(struct xdr_stream *xdr,
         * space at the end of the previous buffer:
         */
        xdr_set_scratch_buffer(xdr, xdr->p, frag1bytes);
-       p = page_address(*xdr->page_ptr);
+
        /*
-        * Note this is where the next encode will start after we've
-        * shifted this one back:
+        * xdr->p is where the next encode will start after
+        * xdr_commit_encode() has shifted this one back:
         */
-       xdr->p = (void *)p + frag2bytes;
+       p = page_address(*xdr->page_ptr);
+       xdr->p = p + frag2bytes;
        space_left = xdr->buf->buflen - xdr->buf->len;
-       xdr->end = (void *)p + min_t(int, space_left, PAGE_SIZE);
+       if (space_left - nbytes >= PAGE_SIZE)
+               xdr->end = p + PAGE_SIZE;
+       else
+               xdr->end = p + space_left - frag1bytes;
+
        xdr->buf->page_len += frag2bytes;
        xdr->buf->len += nbytes;
        return p;
index 5f0155f..11cf7c6 100644 (file)
@@ -478,10 +478,10 @@ svc_rdma_build_writes(struct svc_rdma_write_info *info,
                unsigned int write_len;
                u64 offset;
 
-               seg = &info->wi_chunk->ch_segments[info->wi_seg_no];
-               if (!seg)
+               if (info->wi_seg_no >= info->wi_chunk->ch_segcount)
                        goto out_overflow;
 
+               seg = &info->wi_chunk->ch_segments[info->wi_seg_no];
                write_len = min(remaining, seg->rs_length - info->wi_seg_off);
                if (!write_len)
                        goto out_overflow;
index b91ddc1..da17641 100644 (file)
@@ -544,7 +544,7 @@ static int do_tls_getsockopt(struct sock *sk, int optname,
                rc = do_tls_getsockopt_conf(sk, optval, optlen,
                                            optname == TLS_TX);
                break;
-       case TLS_TX_ZEROCOPY_SENDFILE:
+       case TLS_TX_ZEROCOPY_RO:
                rc = do_tls_getsockopt_tx_zc(sk, optval, optlen);
                break;
        default:
@@ -731,7 +731,7 @@ static int do_tls_setsockopt(struct sock *sk, int optname, sockptr_t optval,
                                            optname == TLS_TX);
                release_sock(sk);
                break;
-       case TLS_TX_ZEROCOPY_SENDFILE:
+       case TLS_TX_ZEROCOPY_RO:
                lock_sock(sk);
                rc = do_tls_setsockopt_tx_zc(sk, optval, optlen);
                release_sock(sk);
@@ -970,7 +970,7 @@ static int tls_get_info(const struct sock *sk, struct sk_buff *skb)
                goto nla_failure;
 
        if (ctx->tx_conf == TLS_HW && ctx->zerocopy_sendfile) {
-               err = nla_put_flag(skb, TLS_INFO_ZC_SENDFILE);
+               err = nla_put_flag(skb, TLS_INFO_ZC_RO_TX);
                if (err)
                        goto nla_failure;
        }
@@ -994,7 +994,7 @@ static size_t tls_get_info_size(const struct sock *sk)
                nla_total_size(sizeof(u16)) +   /* TLS_INFO_CIPHER */
                nla_total_size(sizeof(u16)) +   /* TLS_INFO_RXCONF */
                nla_total_size(sizeof(u16)) +   /* TLS_INFO_TXCONF */
-               nla_total_size(0) +             /* TLS_INFO_ZC_SENDFILE */
+               nla_total_size(0) +             /* TLS_INFO_ZC_RO_TX */
                0;
 
        return size;
index 654dcef..2206e6f 100644 (file)
@@ -490,7 +490,7 @@ static int unix_dgram_peer_wake_me(struct sock *sk, struct sock *other)
         * -ECONNREFUSED. Otherwise, if we haven't queued any skbs
         * to other and its full, we will hang waiting for POLLOUT.
         */
-       if (unix_recvq_full(other) && !sock_flag(other, SOCK_DEAD))
+       if (unix_recvq_full_lockless(other) && !sock_flag(other, SOCK_DEAD))
                return 1;
 
        if (connected)
index e0a4526..19ac872 100644 (file)
@@ -373,7 +373,8 @@ u32 xsk_tx_peek_release_desc_batch(struct xsk_buff_pool *pool, u32 max_entries)
                goto out;
        }
 
-       nb_pkts = xskq_cons_peek_desc_batch(xs->tx, pool, max_entries);
+       max_entries = xskq_cons_nb_entries(xs->tx, max_entries);
+       nb_pkts = xskq_cons_read_desc_batch(xs->tx, pool, max_entries);
        if (!nb_pkts) {
                xs->tx->queue_empty_descs++;
                goto out;
@@ -389,7 +390,7 @@ u32 xsk_tx_peek_release_desc_batch(struct xsk_buff_pool *pool, u32 max_entries)
        if (!nb_pkts)
                goto out;
 
-       xskq_cons_release_n(xs->tx, nb_pkts);
+       xskq_cons_release_n(xs->tx, max_entries);
        __xskq_cons_release(xs->tx);
        xs->sk.sk_write_space(&xs->sk);
 
index a794410..fb20bf7 100644 (file)
@@ -282,14 +282,6 @@ static inline bool xskq_cons_peek_desc(struct xsk_queue *q,
        return xskq_cons_read_desc(q, desc, pool);
 }
 
-static inline u32 xskq_cons_peek_desc_batch(struct xsk_queue *q, struct xsk_buff_pool *pool,
-                                           u32 max)
-{
-       u32 entries = xskq_cons_nb_entries(q, max);
-
-       return xskq_cons_read_desc_batch(q, pool, entries);
-}
-
 /* To improve performance in the xskq_cons_release functions, only update local state here.
  * Reflect this to global state when we get new entries from the ring in
  * xskq_cons_get_entries() and whenever Rx or Tx processing are completed in the NAPI loop.
index 1f01ac6..cac070a 100644 (file)
@@ -251,8 +251,8 @@ $(obj)/%.o: $(src)/%.c $(recordmcount_source) FORCE
 
 # To make this rule robust against "Argument list too long" error,
 # make sure to add the $(obj)/ prefix by a shell command.
-cmd_mod = echo $(call real-search, $*.o, .o, -objs -y -m) | \
-       $(AWK) -v RS='( |\n)' '!x[$$0]++ { print("$(obj)/"$$0) }' > $@
+cmd_mod = printf '%s\n' $(call real-search, $*.o, .o, -objs -y -m) | \
+       $(AWK) '!x[$$0]++ { print("$(obj)/"$$0) }' > $@
 
 $(obj)/%.mod: FORCE
        $(call if_changed,mod)
index da745e2..6ccc2f4 100755 (executable)
@@ -8,11 +8,31 @@
 
 set -e
 
+# catch errors from ${NM}
+set -o pipefail
+
+# Run the last element of a pipeline in the current shell.
+# Without this, the while-loop would be executed in a subshell, and
+# the changes made to 'symbol_types' and 'export_symbols' would be lost.
+shopt -s lastpipe
+
 declare -A symbol_types
 declare -a export_symbols
 
 exit_code=0
 
+# If there is no symbol in the object, ${NM} (both GNU nm and llvm-nm) shows
+# a 'no symbols' diagnostic (but exits with 0). It is harmless and hidden by
+# '2>/dev/null'. However, it suppresses real error messages as well. Add a
+# hand-crafted error message here.
+#
+# TODO:
+# Use --quiet instead of 2>/dev/null when we upgrade the minimum version of
+# binutils to 2.37, llvm to 13.0.0.
+# Then, the following line will be really simple:
+#   ${NM} --quiet ${1} |
+
+{ ${NM} ${1} 2>/dev/null || { echo "${0}: ${NM} failed" >&2; false; } } |
 while read value type name
 do
        # Skip the line if the number of fields is less than 3.
@@ -37,21 +57,7 @@ do
        if [[ ${name} == __ksymtab_* ]]; then
                export_symbols+=(${name#__ksymtab_})
        fi
-
-       # If there is no symbol in the object, ${NM} (both GNU nm and llvm-nm)
-       # shows 'no symbols' diagnostic (but exits with 0). It is harmless and
-       # hidden by '2>/dev/null'. However, it suppresses real error messages
-       # as well. Add a hand-crafted error message here.
-       #
-       # Use --quiet instead of 2>/dev/null when we upgrade the minimum version
-       # of binutils to 2.37, llvm to 13.0.0.
-       #
-       # Then, the following line will be really simple:
-       #   done < <(${NM} --quiet ${1})
-done < <(${NM} ${1} 2>/dev/null || { echo "${0}: ${NM} failed" >&2; false; } )
-
-# Catch error in the process substitution
-wait $!
+done
 
 for name in "${export_symbols[@]}"
 do
index 90e1565..8843ab3 100644 (file)
@@ -24,9 +24,9 @@ class LxConfigDump(gdb.Command):
             filename = arg
 
         try:
-            py_config_ptr = gdb.parse_and_eval("kernel_config_data + 8")
-            py_config_size = gdb.parse_and_eval(
-                    "sizeof(kernel_config_data) - 1 - 8 * 2")
+            py_config_ptr = gdb.parse_and_eval("&kernel_config_data")
+            py_config_ptr_end = gdb.parse_and_eval("&kernel_config_data_end")
+            py_config_size = py_config_ptr_end - py_config_ptr
         except gdb.error as e:
             raise gdb.GdbError("Can't find config, enable CONFIG_IKCONFIG?")
 
index 04c4b96..f1718cc 100644 (file)
@@ -34,9 +34,8 @@ generate_deps() {
        local mod=${1%.ko:}
        shift
        local namespaces="$*"
-       local mod_source_files="`cat $mod.mod | sed -n 1p                      \
-                                             | sed -e 's/\.o/\.c/g'           \
-                                             | sed "s|[^ ]* *|${src_prefix}&|g"`"
+       local mod_source_files=$(sed "s|^\(.*\)\.o$|${src_prefix}\1.c|" $mod.mod)
+
        for ns in $namespaces; do
                echo "Adding namespace $ns to module $mod.ko."
                generate_deps_for_ns $ns "$mod_source_files"
index fbd34b8..7434e9e 100644 (file)
 #include <openssl/err.h>
 #include <openssl/engine.h>
 
+/*
+ * OpenSSL 3.0 deprecates OpenSSL's ENGINE API.
+ *
+ * Remove this if/when that API is no longer used.
+ */
+#pragma GCC diagnostic ignored "-Wdeprecated-declarations"
+
 /*
  * Use CMS if we have openssl-1.0.0 or newer available - otherwise we have to
  * assume that it's not available and its header file is missing and that we
index 0165da3..2b2c8eb 100644 (file)
@@ -283,8 +283,8 @@ int tpm2_seal_trusted(struct tpm_chip *chip,
        /* key properties */
        flags = 0;
        flags |= options->policydigest_len ? 0 : TPM2_OA_USER_WITH_AUTH;
-       flags |= payload->migratable ? (TPM2_OA_FIXED_TPM |
-                                       TPM2_OA_FIXED_PARENT) : 0;
+       flags |= payload->migratable ? 0 : (TPM2_OA_FIXED_TPM |
+                                           TPM2_OA_FIXED_PARENT);
        tpm_buf_append_u32(&buf, flags);
 
        /* policy */
index 3e9e9ac..b7e5032 100644 (file)
@@ -660,6 +660,7 @@ static const struct hda_vendor_id hda_vendor_ids[] = {
        { 0x14f1, "Conexant" },
        { 0x17e8, "Chrontel" },
        { 0x1854, "LG" },
+       { 0x19e5, "Huawei" },
        { 0x1aec, "Wolfson Microelectronics" },
        { 0x1af4, "QEMU" },
        { 0x434d, "C-Media" },
index 0a83eb6..a77165b 100644 (file)
@@ -2525,6 +2525,9 @@ static const struct pci_device_id azx_ids[] = {
          .driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE},
        { PCI_DEVICE(0x8086, 0x51cf),
          .driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE},
+       /* Meteorlake-P */
+       { PCI_DEVICE(0x8086, 0x7e28),
+         .driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE},
        /* Broxton-P(Apollolake) */
        { PCI_DEVICE(0x8086, 0x5a98),
          .driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_BROXTON },
index aa360a0..1248d1a 100644 (file)
@@ -1052,6 +1052,13 @@ static int patch_conexant_auto(struct hda_codec *codec)
                snd_hda_pick_fixup(codec, cxt5051_fixup_models,
                                   cxt5051_fixups, cxt_fixups);
                break;
+       case 0x14f15098:
+               codec->pin_amp_workaround = 1;
+               spec->gen.mixer_nid = 0x22;
+               spec->gen.add_stereo_mix_input = HDA_HINT_STEREO_MIX_AUTO;
+               snd_hda_pick_fixup(codec, cxt5066_fixup_models,
+                                  cxt5066_fixups, cxt_fixups);
+               break;
        case 0x14f150f2:
                codec->power_save_node = 1;
                fallthrough;
index 31fe417..6c209cd 100644 (file)
@@ -4554,6 +4554,7 @@ HDA_CODEC_ENTRY(0x8086281a, "Jasperlake HDMI",    patch_i915_icl_hdmi),
 HDA_CODEC_ENTRY(0x8086281b, "Elkhartlake HDMI",        patch_i915_icl_hdmi),
 HDA_CODEC_ENTRY(0x8086281c, "Alderlake-P HDMI", patch_i915_adlp_hdmi),
 HDA_CODEC_ENTRY(0x8086281f, "Raptorlake-P HDMI",       patch_i915_adlp_hdmi),
+HDA_CODEC_ENTRY(0x8086281d, "Meteorlake HDMI", patch_i915_adlp_hdmi),
 HDA_CODEC_ENTRY(0x80862880, "CedarTrail HDMI", patch_generic_hdmi),
 HDA_CODEC_ENTRY(0x80862882, "Valleyview2 HDMI",        patch_i915_byt_hdmi),
 HDA_CODEC_ENTRY(0x80862883, "Braswell HDMI",   patch_i915_byt_hdmi),
index f3ad454..b0f9541 100644 (file)
@@ -443,6 +443,7 @@ static void alc_fill_eapd_coef(struct hda_codec *codec)
        case 0x10ec0245:
        case 0x10ec0255:
        case 0x10ec0256:
+       case 0x19e58326:
        case 0x10ec0257:
        case 0x10ec0282:
        case 0x10ec0283:
@@ -580,6 +581,7 @@ static void alc_shutup_pins(struct hda_codec *codec)
        switch (codec->core.vendor_id) {
        case 0x10ec0236:
        case 0x10ec0256:
+       case 0x19e58326:
        case 0x10ec0283:
        case 0x10ec0286:
        case 0x10ec0288:
@@ -3247,6 +3249,7 @@ static void alc_disable_headset_jack_key(struct hda_codec *codec)
        case 0x10ec0230:
        case 0x10ec0236:
        case 0x10ec0256:
+       case 0x19e58326:
                alc_write_coef_idx(codec, 0x48, 0x0);
                alc_update_coef_idx(codec, 0x49, 0x0045, 0x0);
                break;
@@ -3275,6 +3278,7 @@ static void alc_enable_headset_jack_key(struct hda_codec *codec)
        case 0x10ec0230:
        case 0x10ec0236:
        case 0x10ec0256:
+       case 0x19e58326:
                alc_write_coef_idx(codec, 0x48, 0xd011);
                alc_update_coef_idx(codec, 0x49, 0x007f, 0x0045);
                break;
@@ -4910,6 +4914,7 @@ static void alc_headset_mode_unplugged(struct hda_codec *codec)
        case 0x10ec0230:
        case 0x10ec0236:
        case 0x10ec0256:
+       case 0x19e58326:
                alc_process_coef_fw(codec, coef0256);
                break;
        case 0x10ec0234:
@@ -5025,6 +5030,7 @@ static void alc_headset_mode_mic_in(struct hda_codec *codec, hda_nid_t hp_pin,
        case 0x10ec0230:
        case 0x10ec0236:
        case 0x10ec0256:
+       case 0x19e58326:
                alc_write_coef_idx(codec, 0x45, 0xc489);
                snd_hda_set_pin_ctl_cache(codec, hp_pin, 0);
                alc_process_coef_fw(codec, coef0256);
@@ -5175,6 +5181,7 @@ static void alc_headset_mode_default(struct hda_codec *codec)
        case 0x10ec0230:
        case 0x10ec0236:
        case 0x10ec0256:
+       case 0x19e58326:
                alc_write_coef_idx(codec, 0x1b, 0x0e4b);
                alc_write_coef_idx(codec, 0x45, 0xc089);
                msleep(50);
@@ -5274,6 +5281,7 @@ static void alc_headset_mode_ctia(struct hda_codec *codec)
        case 0x10ec0230:
        case 0x10ec0236:
        case 0x10ec0256:
+       case 0x19e58326:
                alc_process_coef_fw(codec, coef0256);
                break;
        case 0x10ec0234:
@@ -5388,6 +5396,7 @@ static void alc_headset_mode_omtp(struct hda_codec *codec)
        case 0x10ec0230:
        case 0x10ec0236:
        case 0x10ec0256:
+       case 0x19e58326:
                alc_process_coef_fw(codec, coef0256);
                break;
        case 0x10ec0234:
@@ -5489,6 +5498,7 @@ static void alc_determine_headset_type(struct hda_codec *codec)
        case 0x10ec0230:
        case 0x10ec0236:
        case 0x10ec0256:
+       case 0x19e58326:
                alc_write_coef_idx(codec, 0x1b, 0x0e4b);
                alc_write_coef_idx(codec, 0x06, 0x6104);
                alc_write_coefex_idx(codec, 0x57, 0x3, 0x09a3);
@@ -5783,6 +5793,7 @@ static void alc255_set_default_jack_type(struct hda_codec *codec)
        case 0x10ec0230:
        case 0x10ec0236:
        case 0x10ec0256:
+       case 0x19e58326:
                alc_process_coef_fw(codec, alc256fw);
                break;
        }
@@ -6385,6 +6396,7 @@ static void alc_combo_jack_hp_jd_restart(struct hda_codec *codec)
        case 0x10ec0236:
        case 0x10ec0255:
        case 0x10ec0256:
+       case 0x19e58326:
                alc_update_coef_idx(codec, 0x1b, 0x8000, 1 << 15); /* Reset HP JD */
                alc_update_coef_idx(codec, 0x1b, 0x8000, 0 << 15);
                break;
@@ -9059,6 +9071,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x103c, 0x89c3, "Zbook Studio G9", ALC245_FIXUP_CS35L41_SPI_4_HP_GPIO_LED),
        SND_PCI_QUIRK(0x103c, 0x89c6, "Zbook Fury 17 G9", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
        SND_PCI_QUIRK(0x103c, 0x89ca, "HP", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
+       SND_PCI_QUIRK(0x103c, 0x8a78, "HP Dev One", ALC285_FIXUP_HP_LIMIT_INT_MIC_BOOST),
        SND_PCI_QUIRK(0x1043, 0x103e, "ASUS X540SA", ALC256_FIXUP_ASUS_MIC),
        SND_PCI_QUIRK(0x1043, 0x103f, "ASUS TX300", ALC282_FIXUP_ASUS_TX300),
        SND_PCI_QUIRK(0x1043, 0x106d, "Asus K53BE", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
@@ -9258,6 +9271,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x17aa, 0x3176, "ThinkCentre Station", ALC283_FIXUP_HEADSET_MIC),
        SND_PCI_QUIRK(0x17aa, 0x3178, "ThinkCentre Station", ALC283_FIXUP_HEADSET_MIC),
        SND_PCI_QUIRK(0x17aa, 0x31af, "ThinkCentre Station", ALC623_FIXUP_LENOVO_THINKSTATION_P340),
+       SND_PCI_QUIRK(0x17aa, 0x3802, "Lenovo Yoga DuetITL 2021", ALC287_FIXUP_YOGA7_14ITL_SPEAKERS),
        SND_PCI_QUIRK(0x17aa, 0x3813, "Legion 7i 15IMHG05", ALC287_FIXUP_LEGION_15IMHG05_SPEAKERS),
        SND_PCI_QUIRK(0x17aa, 0x3818, "Lenovo C940", ALC298_FIXUP_LENOVO_SPK_VOLUME),
        SND_PCI_QUIRK(0x17aa, 0x3819, "Lenovo 13s Gen2 ITL", ALC287_FIXUP_13S_GEN2_SPEAKERS),
@@ -10095,6 +10109,7 @@ static int patch_alc269(struct hda_codec *codec)
        case 0x10ec0230:
        case 0x10ec0236:
        case 0x10ec0256:
+       case 0x19e58326:
                spec->codec_variant = ALC269_TYPE_ALC256;
                spec->shutup = alc256_shutup;
                spec->init_hook = alc256_init;
@@ -11545,6 +11560,7 @@ static const struct hda_device_id snd_hda_id_realtek[] = {
        HDA_CODEC_ENTRY(0x10ec0b00, "ALCS1200A", patch_alc882),
        HDA_CODEC_ENTRY(0x10ec1168, "ALC1220", patch_alc882),
        HDA_CODEC_ENTRY(0x10ec1220, "ALC1220", patch_alc882),
+       HDA_CODEC_ENTRY(0x19e58326, "HW8326", patch_alc269),
        {} /* terminator */
 };
 MODULE_DEVICE_TABLE(hdaudio, snd_hda_id_realtek);
index 920190d..dfe85dc 100644 (file)
@@ -444,7 +444,8 @@ static bool cs35l36_volatile_reg(struct device *dev, unsigned int reg)
        }
 }
 
-static DECLARE_TLV_DB_SCALE(dig_vol_tlv, -10200, 25, 0);
+static const DECLARE_TLV_DB_RANGE(dig_vol_tlv, 0, 912,
+                                 TLV_DB_MINMAX_ITEM(-10200, 1200));
 static DECLARE_TLV_DB_SCALE(amp_gain_tlv, 0, 1, 1);
 
 static const char * const cs35l36_pcm_sftramp_text[] =  {
index aff6185..0e93318 100644 (file)
@@ -143,7 +143,7 @@ static const struct snd_kcontrol_new cs42l51_snd_controls[] = {
                        0, 0xA0, 96, adc_att_tlv),
        SOC_DOUBLE_R_SX_TLV("PGA Volume",
                        CS42L51_ALC_PGA_CTL, CS42L51_ALC_PGB_CTL,
-                       0, 0x1A, 30, pga_tlv),
+                       0, 0x19, 30, pga_tlv),
        SOC_SINGLE("Playback Deemphasis Switch", CS42L51_DAC_CTL, 3, 1, 0),
        SOC_SINGLE("Auto-Mute Switch", CS42L51_DAC_CTL, 2, 1, 0),
        SOC_SINGLE("Soft Ramp Switch", CS42L51_DAC_CTL, 1, 1, 0),
index 9b182b5..10e6964 100644 (file)
@@ -137,7 +137,9 @@ static DECLARE_TLV_DB_SCALE(mic_tlv, 1600, 100, 0);
 
 static DECLARE_TLV_DB_SCALE(pga_tlv, -600, 50, 0);
 
-static DECLARE_TLV_DB_SCALE(mix_tlv, -50, 50, 0);
+static DECLARE_TLV_DB_SCALE(pass_tlv, -6000, 50, 0);
+
+static DECLARE_TLV_DB_SCALE(mix_tlv, -5150, 50, 0);
 
 static DECLARE_TLV_DB_SCALE(beep_tlv, -56, 200, 0);
 
@@ -351,7 +353,7 @@ static const struct snd_kcontrol_new cs42l52_snd_controls[] = {
                              CS42L52_SPKB_VOL, 0, 0x40, 0xC0, hl_tlv),
 
        SOC_DOUBLE_R_SX_TLV("Bypass Volume", CS42L52_PASSTHRUA_VOL,
-                             CS42L52_PASSTHRUB_VOL, 0, 0x88, 0x90, pga_tlv),
+                             CS42L52_PASSTHRUB_VOL, 0, 0x88, 0x90, pass_tlv),
 
        SOC_DOUBLE("Bypass Mute", CS42L52_MISC_CTL, 4, 5, 1, 0),
 
@@ -364,7 +366,7 @@ static const struct snd_kcontrol_new cs42l52_snd_controls[] = {
                              CS42L52_ADCB_VOL, 0, 0xA0, 0x78, ipd_tlv),
        SOC_DOUBLE_R_SX_TLV("ADC Mixer Volume",
                             CS42L52_ADCA_MIXER_VOL, CS42L52_ADCB_MIXER_VOL,
-                               0, 0x19, 0x7F, ipd_tlv),
+                               0, 0x19, 0x7F, mix_tlv),
 
        SOC_DOUBLE("ADC Switch", CS42L52_ADC_MISC_CTL, 0, 1, 1, 0),
 
index dc23007..510c942 100644 (file)
@@ -391,9 +391,9 @@ static const struct snd_kcontrol_new cs42l56_snd_controls[] = {
        SOC_DOUBLE("ADC Boost Switch", CS42L56_GAIN_BIAS_CTL, 3, 2, 1, 1),
 
        SOC_DOUBLE_R_SX_TLV("Headphone Volume", CS42L56_HPA_VOLUME,
-                             CS42L56_HPB_VOLUME, 0, 0x84, 0x48, hl_tlv),
+                             CS42L56_HPB_VOLUME, 0, 0x44, 0x48, hl_tlv),
        SOC_DOUBLE_R_SX_TLV("LineOut Volume", CS42L56_LOA_VOLUME,
-                             CS42L56_LOB_VOLUME, 0, 0x84, 0x48, hl_tlv),
+                             CS42L56_LOB_VOLUME, 0, 0x44, 0x48, hl_tlv),
 
        SOC_SINGLE_TLV("Bass Shelving Volume", CS42L56_TONE_CTL,
                        0, 0x00, 1, tone_tlv),
index 7035452..360ca2f 100644 (file)
@@ -348,22 +348,22 @@ static const struct snd_kcontrol_new cs53l30_snd_controls[] = {
        SOC_ENUM("ADC2 NG Delay", adc2_ng_delay_enum),
 
        SOC_SINGLE_SX_TLV("ADC1A PGA Volume",
-                   CS53L30_ADC1A_AFE_CTL, 0, 0x34, 0x18, pga_tlv),
+                   CS53L30_ADC1A_AFE_CTL, 0, 0x34, 0x24, pga_tlv),
        SOC_SINGLE_SX_TLV("ADC1B PGA Volume",
-                   CS53L30_ADC1B_AFE_CTL, 0, 0x34, 0x18, pga_tlv),
+                   CS53L30_ADC1B_AFE_CTL, 0, 0x34, 0x24, pga_tlv),
        SOC_SINGLE_SX_TLV("ADC2A PGA Volume",
-                   CS53L30_ADC2A_AFE_CTL, 0, 0x34, 0x18, pga_tlv),
+                   CS53L30_ADC2A_AFE_CTL, 0, 0x34, 0x24, pga_tlv),
        SOC_SINGLE_SX_TLV("ADC2B PGA Volume",
-                   CS53L30_ADC2B_AFE_CTL, 0, 0x34, 0x18, pga_tlv),
+                   CS53L30_ADC2B_AFE_CTL, 0, 0x34, 0x24, pga_tlv),
 
        SOC_SINGLE_SX_TLV("ADC1A Digital Volume",
-                   CS53L30_ADC1A_DIG_VOL, 0, 0xA0, 0x0C, dig_tlv),
+                   CS53L30_ADC1A_DIG_VOL, 0, 0xA0, 0x6C, dig_tlv),
        SOC_SINGLE_SX_TLV("ADC1B Digital Volume",
-                   CS53L30_ADC1B_DIG_VOL, 0, 0xA0, 0x0C, dig_tlv),
+                   CS53L30_ADC1B_DIG_VOL, 0, 0xA0, 0x6C, dig_tlv),
        SOC_SINGLE_SX_TLV("ADC2A Digital Volume",
-                   CS53L30_ADC2A_DIG_VOL, 0, 0xA0, 0x0C, dig_tlv),
+                   CS53L30_ADC2A_DIG_VOL, 0, 0xA0, 0x6C, dig_tlv),
        SOC_SINGLE_SX_TLV("ADC2B Digital Volume",
-                   CS53L30_ADC2B_DIG_VOL, 0, 0xA0, 0x0C, dig_tlv),
+                   CS53L30_ADC2B_DIG_VOL, 0, 0xA0, 0x6C, dig_tlv),
 };
 
 static const struct snd_soc_dapm_widget cs53l30_dapm_widgets[] = {
index 3f00ead..dd53dfd 100644 (file)
@@ -161,13 +161,16 @@ static int es8328_put_deemph(struct snd_kcontrol *kcontrol,
        if (deemph > 1)
                return -EINVAL;
 
+       if (es8328->deemph == deemph)
+               return 0;
+
        ret = es8328_set_deemph(component);
        if (ret < 0)
                return ret;
 
        es8328->deemph = deemph;
 
-       return 0;
+       return 1;
 }
 
 
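The changed return values follow the ALSA control convention: a put() handler returns 0 when the written value leaves the control unchanged, 1 when it changed (so the core emits a notification to listeners), and a negative errno on failure. A minimal sketch, assuming a driver-private struct priv with a cached deemph field:

    static int example_put_deemph(struct snd_kcontrol *kcontrol,
                                  struct snd_ctl_elem_value *ucontrol)
    {
            unsigned int deemph = ucontrol->value.integer.value[0];

            if (deemph > 1)
                    return -EINVAL;         /* out of range */
            if (priv->deemph == deemph)
                    return 0;               /* unchanged, no event */
            priv->deemph = deemph;
            return 1;                       /* changed, notify */
    }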
index 66bbd8f..08f6c56 100644 (file)
@@ -740,6 +740,8 @@ static int nau8822_set_pll(struct snd_soc_dai *dai, int pll_id, int source,
                pll_param->pll_int, pll_param->pll_frac,
                pll_param->mclk_scaler, pll_param->pre_factor);
 
+       snd_soc_component_update_bits(component,
+               NAU8822_REG_POWER_MANAGEMENT_1, NAU8822_PLL_EN_MASK, NAU8822_PLL_OFF);
        snd_soc_component_update_bits(component,
                NAU8822_REG_PLL_N, NAU8822_PLLMCLK_DIV2 | NAU8822_PLLN_MASK,
                (pll_param->pre_factor ? NAU8822_PLLMCLK_DIV2 : 0) |
@@ -757,6 +759,8 @@ static int nau8822_set_pll(struct snd_soc_dai *dai, int pll_id, int source,
                pll_param->mclk_scaler << NAU8822_MCLKSEL_SFT);
        snd_soc_component_update_bits(component,
                NAU8822_REG_CLOCKING, NAU8822_CLKM_MASK, NAU8822_CLKM_PLL);
+       snd_soc_component_update_bits(component,
+               NAU8822_REG_POWER_MANAGEMENT_1, NAU8822_PLL_EN_MASK, NAU8822_PLL_ON);
 
        return 0;
 }
index 489191f..b45d42c 100644 (file)
@@ -90,6 +90,9 @@
 #define NAU8822_REFIMP_3K                      0x3
 #define NAU8822_IOBUF_EN                       (0x1 << 2)
 #define NAU8822_ABIAS_EN                       (0x1 << 3)
+#define NAU8822_PLL_EN_MASK                    (0x1 << 5)
+#define NAU8822_PLL_ON                         (0x1 << 5)
+#define NAU8822_PLL_OFF                                (0x0 << 5)
 
 /* NAU8822_REG_AUDIO_INTERFACE (0x4) */
 #define NAU8822_AIFMT_MASK                     (0x3 << 3)
index 34cd5a2..5cca893 100644 (file)
@@ -3868,6 +3868,7 @@ static int wm8962_runtime_suspend(struct device *dev)
 #endif
 
 static const struct dev_pm_ops wm8962_pm = {
+       SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, pm_runtime_force_resume)
        SET_RUNTIME_PM_OPS(wm8962_runtime_suspend, wm8962_runtime_resume, NULL)
 };
 
index 7973a75..6d7fd88 100644 (file)
@@ -333,7 +333,7 @@ int wm_adsp_fw_put(struct snd_kcontrol *kcontrol,
        struct snd_soc_component *component = snd_soc_kcontrol_component(kcontrol);
        struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
        struct wm_adsp *dsp = snd_soc_component_get_drvdata(component);
-       int ret = 0;
+       int ret = 1;
 
        if (ucontrol->value.enumerated.item[0] == dsp[e->shift_l].fw)
                return 0;
index fa950dd..e765da9 100644 (file)
@@ -1293,6 +1293,7 @@ static const struct of_device_id fsl_sai_ids[] = {
        { .compatible = "fsl,imx8mm-sai", .data = &fsl_sai_imx8mm_data },
        { .compatible = "fsl,imx8mp-sai", .data = &fsl_sai_imx8mp_data },
        { .compatible = "fsl,imx8ulp-sai", .data = &fsl_sai_imx8ulp_data },
+       { .compatible = "fsl,imx8mn-sai", .data = &fsl_sai_imx8mp_data },
        { /* sentinel */ }
 };
 MODULE_DEVICE_TABLE(of, fsl_sai_ids);
index e71d74e..f4192df 100644 (file)
@@ -54,22 +54,29 @@ static struct snd_soc_dai_link_component cs35l41_components[] = {
        },
 };
 
+/*
+ * Mapping between ACPI instance id and speaker position.
+ *
+ * Four speakers:
+ *         0: Tweeter left, 1: Woofer left
+ *         2: Tweeter right, 3: Woofer right
+ */
 static struct snd_soc_codec_conf cs35l41_codec_conf[] = {
        {
                .dlc = COMP_CODEC_CONF(CS35L41_DEV0_NAME),
-               .name_prefix = "WL",
+               .name_prefix = "TL",
        },
        {
                .dlc = COMP_CODEC_CONF(CS35L41_DEV1_NAME),
-               .name_prefix = "WR",
+               .name_prefix = "WL",
        },
        {
                .dlc = COMP_CODEC_CONF(CS35L41_DEV2_NAME),
-               .name_prefix = "TL",
+               .name_prefix = "TR",
        },
        {
                .dlc = COMP_CODEC_CONF(CS35L41_DEV3_NAME),
-               .name_prefix = "TR",
+               .name_prefix = "WR",
        },
 };
 
@@ -101,6 +108,21 @@ static int cs35l41_init(struct snd_soc_pcm_runtime *rtd)
        return ret;
 }
 
+/*
+ * Channel map:
+ *
+ * TL/WL: ASPRX1 on slot 0, ASPRX2 on slot 1 (default)
+ * TR/WR: ASPRX1 on slot 1, ASPRX2 on slot 0
+ */
+static const struct {
+       unsigned int rx[2];
+} cs35l41_channel_map[] = {
+       {.rx = {0, 1}}, /* TL */
+       {.rx = {0, 1}}, /* WL */
+       {.rx = {1, 0}}, /* TR */
+       {.rx = {1, 0}}, /* WR */
+};
+
 static int cs35l41_hw_params(struct snd_pcm_substream *substream,
                             struct snd_pcm_hw_params *params)
 {
@@ -134,6 +156,16 @@ static int cs35l41_hw_params(struct snd_pcm_substream *substream,
                                ret);
                        return ret;
                }
+
+               /* setup channel map */
+               ret = snd_soc_dai_set_channel_map(codec_dai, 0, NULL,
+                                                 ARRAY_SIZE(cs35l41_channel_map[i].rx),
+                                                 (unsigned int *)cs35l41_channel_map[i].rx);
+               if (ret < 0) {
+                       dev_err(codec_dai->dev, "failed to set channel map, ret %d\n",
+                               ret);
+                       return ret;
+               }
        }
 
        return 0;
index f03a7ae..b41ab7a 100644 (file)
@@ -898,7 +898,7 @@ static int lpass_platform_cdc_dma_mmap(struct snd_pcm_substream *substream,
        struct snd_pcm_runtime *runtime = substream->runtime;
        unsigned long size, offset;
 
-       vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+       vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
        size = vma->vm_end - vma->vm_start;
        offset = vma->vm_pgoff << PAGE_SHIFT;
        return io_remap_pfn_range(vma, vma->vm_start,
index 8d74063..2897609 100644 (file)
@@ -318,7 +318,7 @@ sink_prepare:
                        p->walking = false;
                        if (ret < 0) {
                                /* unprepare the source widget */
-                               if (!widget_ops[widget->id].ipc_unprepare && swidget->prepared) {
+                               if (widget_ops[widget->id].ipc_unprepare && swidget->prepared) {
                                        widget_ops[widget->id].ipc_unprepare(swidget);
                                        swidget->prepared = false;
                                }
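The one-character fix removes an inverted test that would have dereferenced a NULL .ipc_unprepare for widgets that do not implement it. The generic guard-then-call pattern for optional ops, names illustrative:

    if (ops->unprepare && prepared) {
            ops->unprepare(ctx);    /* only call ops that exist */
            prepared = false;
    }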
index 03490a4..6bdfa52 100644 (file)
@@ -150,7 +150,7 @@ static ssize_t sof_msg_inject_dfs_write(struct file *file, const char __user *bu
 {
        struct sof_client_dev *cdev = file->private_data;
        struct sof_msg_inject_priv *priv = cdev->data;
-       size_t size;
+       ssize_t size;
        int ret;
 
        if (*ppos)
@@ -158,8 +158,10 @@ static ssize_t sof_msg_inject_dfs_write(struct file *file, const char __user *bu
 
        size = simple_write_to_buffer(priv->tx_buffer, priv->max_msg_size,
                                      ppos, buffer, count);
+       if (size < 0)
+               return size;
        if (size != count)
-               return size > 0 ? -EFAULT : size;
+               return -EFAULT;
 
        memset(priv->rx_buffer, 0, priv->max_msg_size);
 
@@ -179,7 +181,7 @@ static ssize_t sof_msg_inject_ipc4_dfs_write(struct file *file,
        struct sof_client_dev *cdev = file->private_data;
        struct sof_msg_inject_priv *priv = cdev->data;
        struct sof_ipc4_msg *ipc4_msg = priv->tx_buffer;
-       size_t size;
+       ssize_t size;
        int ret;
 
        if (*ppos)
@@ -192,18 +194,20 @@ static ssize_t sof_msg_inject_ipc4_dfs_write(struct file *file,
        size = simple_write_to_buffer(&ipc4_msg->header_u64,
                                      sizeof(ipc4_msg->header_u64),
                                      ppos, buffer, count);
+       if (size < 0)
+               return size;
        if (size != sizeof(ipc4_msg->header_u64))
-               return size > 0 ? -EFAULT : size;
+               return -EFAULT;
 
        count -= size;
-       if (!count) {
-               /* Copy the payload */
-               size = simple_write_to_buffer(ipc4_msg->data_ptr,
-                                             priv->max_msg_size, ppos, buffer,
-                                             count);
-               if (size != count)
-                       return size > 0 ? -EFAULT : size;
-       }
+       /* Copy the payload */
+       size = simple_write_to_buffer(ipc4_msg->data_ptr,
+                                     priv->max_msg_size, ppos, buffer,
+                                     count);
+       if (size < 0)
+               return size;
+       if (size != count)
+               return -EFAULT;
 
        ipc4_msg->data_size = count;
 
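Both writers now share the same pattern: keep the result of simple_write_to_buffer() in a signed ssize_t so a negative errno is propagated rather than being truncated into a large size_t, and treat a short copy as -EFAULT. In isolation:

    ssize_t size;

    size = simple_write_to_buffer(buf, max_size, ppos, buffer, count);
    if (size < 0)
            return size;            /* propagate -EFAULT and friends */
    if (size != count)
            return -EFAULT;         /* partial copy from userspace */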
index b470404..e692ae0 100644 (file)
@@ -291,6 +291,9 @@ int snd_usb_audioformat_set_sync_ep(struct snd_usb_audio *chip,
        bool is_playback;
        int err;
 
+       if (fmt->sync_ep)
+               return 0; /* already set up */
+
        alts = snd_usb_get_host_interface(chip, fmt->iface, fmt->altsetting);
        if (!alts)
                return 0;
@@ -304,7 +307,7 @@ int snd_usb_audioformat_set_sync_ep(struct snd_usb_audio *chip,
         * Generic sync EP handling
         */
 
-       if (altsd->bNumEndpoints < 2)
+       if (fmt->ep_idx > 0 || altsd->bNumEndpoints < 2)
                return 0;
 
        is_playback = !(get_endpoint(alts, 0)->bEndpointAddress & USB_DIR_IN);
index 78eb41b..4f56e17 100644 (file)
@@ -2658,7 +2658,12 @@ YAMAHA_DEVICE(0x7010, "UB99"),
                                        .nr_rates = 2,
                                        .rate_table = (unsigned int[]) {
                                                44100, 48000
-                                       }
+                                       },
+                                       .sync_ep = 0x82,
+                                       .sync_iface = 0,
+                                       .sync_altsetting = 1,
+                                       .sync_ep_idx = 1,
+                                       .implicit_fb = 1,
                                }
                        },
                        {
index d9aad15..02bb8cb 100644 (file)
@@ -395,6 +395,18 @@ static void test_func_map_prog_compatibility(void)
                                     "./test_attach_probe.o");
 }
 
+static void test_func_replace_global_func(void)
+{
+       const char *prog_name[] = {
+               "freplace/test_pkt_access",
+       };
+
+       test_fexit_bpf2bpf_common("./freplace_global_func.o",
+                                 "./test_pkt_access.o",
+                                 ARRAY_SIZE(prog_name),
+                                 prog_name, false, NULL);
+}
+
 /* NOTE: affects other tests, must run in serial mode */
 void serial_test_fexit_bpf2bpf(void)
 {
@@ -416,4 +428,6 @@ void serial_test_fexit_bpf2bpf(void)
                test_func_replace_multi();
        if (test__start_subtest("fmod_ret_freplace"))
                test_fmod_ret_freplace();
+       if (test__start_subtest("func_replace_global_func"))
+               test_func_replace_global_func();
 }
diff --git a/tools/testing/selftests/bpf/progs/freplace_global_func.c b/tools/testing/selftests/bpf/progs/freplace_global_func.c
new file mode 100644 (file)
index 0000000..96cb61a
--- /dev/null
@@ -0,0 +1,18 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+
+__noinline
+int test_ctx_global_func(struct __sk_buff *skb)
+{
+       volatile int retval = 1;
+       return retval;
+}
+
+SEC("freplace/test_pkt_access")
+int new_test_pkt_access(struct __sk_buff *skb)
+{
+       return test_ctx_global_func(skb);
+}
+
+char _license[] SEC("license") = "GPL";
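For context, a hedged userspace sketch (libbpf; error handling omitted, and the selftest helper test_fexit_bpf2bpf_common does the equivalent) of how such an freplace program is attached to the global function in the target object, assuming target_fd is the fd of the already-loaded test_pkt_access program:

    struct bpf_object *obj = bpf_object__open_file("freplace_global_func.o", NULL);
    struct bpf_program *prog =
            bpf_object__find_program_by_name(obj, "new_test_pkt_access");

    bpf_program__set_attach_target(prog, target_fd, "test_pkt_access");
    bpf_object__load(obj);

    struct bpf_link *link =
            bpf_program__attach_freplace(prog, target_fd, "test_pkt_access");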
index e0b2bb1..3330fb1 100644 (file)
@@ -44,7 +44,7 @@ static inline void nop_loop(void)
 {
        int i;
 
-       for (i = 0; i < 1000000; i++)
+       for (i = 0; i < 100000000; i++)
                asm volatile("nop");
 }
 
@@ -56,12 +56,14 @@ static inline void check_tsc_msr_rdtsc(void)
        tsc_freq = rdmsr(HV_X64_MSR_TSC_FREQUENCY);
        GUEST_ASSERT(tsc_freq > 0);
 
-       /* First, check MSR-based clocksource */
+       /* For increased accuracy, take the mean of rdtsc() before and after rdmsr() */
        r1 = rdtsc();
        t1 = rdmsr(HV_X64_MSR_TIME_REF_COUNT);
+       r1 = (r1 + rdtsc()) / 2;
        nop_loop();
        r2 = rdtsc();
        t2 = rdmsr(HV_X64_MSR_TIME_REF_COUNT);
+       r2 = (r2 + rdtsc()) / 2;
 
        GUEST_ASSERT(r2 > r1 && t2 > t1);
 
@@ -181,12 +183,14 @@ static void host_check_tsc_msr_rdtsc(struct kvm_vm *vm)
        tsc_freq = vcpu_get_msr(vm, VCPU_ID, HV_X64_MSR_TSC_FREQUENCY);
        TEST_ASSERT(tsc_freq > 0, "TSC frequency must be nonzero");
 
-       /* First, check MSR-based clocksource */
+       /* For increased accuracy, take the mean of rdtsc() before and after the ioctl */
        r1 = rdtsc();
        t1 = vcpu_get_msr(vm, VCPU_ID, HV_X64_MSR_TIME_REF_COUNT);
+       r1 = (r1 + rdtsc()) / 2;
        nop_loop();
        r2 = rdtsc();
        t2 = vcpu_get_msr(vm, VCPU_ID, HV_X64_MSR_TIME_REF_COUNT);
+       r2 = (r2 + rdtsc()) / 2;
 
        TEST_ASSERT(t2 > t1, "Time reference MSR is not monotonic (%ld <= %ld)", t1, t2);
 
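The bracketing trick used in both hunks, isolated for clarity (a sketch reusing the test's rdtsc()/rdmsr() helpers): if the reference-counter read costs d cycles, pairing it with the midpoint of two TSC samples bounds the skew between the two clock readings by d/2 instead of d:

    uint64_t r_before = rdtsc();
    uint64_t t = rdmsr(HV_X64_MSR_TIME_REF_COUNT);
    uint64_t r = (r_before + rdtsc()) / 2;  /* midpoint of the bracket */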
index f91bf14..8a69c91 100644 (file)
@@ -2,6 +2,7 @@
 
 CLANG ?= clang
 CCINCLUDE += -I../../bpf
+CCINCLUDE += -I../../../lib
 CCINCLUDE += -I../../../../../usr/include/
 
 TEST_CUSTOM_PROGS = $(OUTPUT)/bpf/nat6to4.o
@@ -10,5 +11,4 @@ all: $(TEST_CUSTOM_PROGS)
 $(OUTPUT)/%.o: %.c
        $(CLANG) -O2 -target bpf -c $< $(CCINCLUDE) -o $@
 
-clean:
-       rm -f $(TEST_CUSTOM_PROGS)
+EXTRA_CLEAN := $(TEST_CUSTOM_PROGS)
index eb8543b..924ecb3 100755 (executable)
@@ -374,6 +374,45 @@ EOF
        return $lret
 }
 
+test_local_dnat_portonly()
+{
+       local family=$1
+       local daddr=$2
+       local lret=0
+       local sr_s
+       local sr_r
+
+ip netns exec "$ns0" nft -f /dev/stdin <<EOF
+table $family nat {
+       chain output {
+               type nat hook output priority 0; policy accept;
+               meta l4proto tcp dnat to :2000
+
+       }
+}
+EOF
+       if [ $? -ne 0 ]; then
+               if [ $family = "inet" ];then
+                       echo "SKIP: inet port test"
+                       test_inet_nat=false
+                       return
+               fi
+               echo "SKIP: Could not add $family dnat hook"
+               return
+       fi
+
+       echo SERVER-$family | ip netns exec "$ns1" timeout 5 socat -u STDIN TCP-LISTEN:2000 &
+       sr_s=$!
+
+       result=$(ip netns exec "$ns0" timeout 1 socat TCP:$daddr:2000 STDOUT)
+
+       if [ "$result" = "SERVER-inet" ];then
+               echo "PASS: inet port rewrite without l3 address"
+       else
+               echo "ERROR: inet port rewrite"
+               ret=1
+       fi
+}
 
 test_masquerade6()
 {
@@ -1148,6 +1187,10 @@ fi
 reset_counters
 test_local_dnat ip
 test_local_dnat6 ip6
+
+reset_counters
+test_local_dnat_portonly inet 10.0.1.99
+
 reset_counters
 $test_inet_nat && test_local_dnat inet
 $test_inet_nat && test_local_dnat6 inet
index bca07b9..7d1b809 100644 (file)
@@ -64,8 +64,8 @@ QEMU_VPORT_RESULT := virtio-serial-device
 ifeq ($(HOST_ARCH),$(ARCH))
 QEMU_MACHINE := -cpu host -machine virt,gic_version=host,accel=kvm
 else
-QEMU_MACHINE := -cpu cortex-a53 -machine virt
-CFLAGS += -march=armv8-a -mtune=cortex-a53
+QEMU_MACHINE := -cpu max -machine virt
+CFLAGS += -march=armv8-a
 endif
 else ifeq ($(ARCH),aarch64_be)
 CHOST := aarch64_be-linux-musl
@@ -76,8 +76,8 @@ QEMU_VPORT_RESULT := virtio-serial-device
 ifeq ($(HOST_ARCH),$(ARCH))
 QEMU_MACHINE := -cpu host -machine virt,gic_version=host,accel=kvm
 else
-QEMU_MACHINE := -cpu cortex-a53 -machine virt
-CFLAGS += -march=armv8-a -mtune=cortex-a53
+QEMU_MACHINE := -cpu max -machine virt
+CFLAGS += -march=armv8-a
 endif
 else ifeq ($(ARCH),arm)
 CHOST := arm-linux-musleabi
@@ -88,8 +88,8 @@ QEMU_VPORT_RESULT := virtio-serial-device
 ifeq ($(HOST_ARCH),$(ARCH))
 QEMU_MACHINE := -cpu host -machine virt,gic_version=host,accel=kvm
 else
-QEMU_MACHINE := -cpu cortex-a15 -machine virt
-CFLAGS += -march=armv7-a -mtune=cortex-a15 -mabi=aapcs-linux
+QEMU_MACHINE := -cpu max -machine virt
+CFLAGS += -march=armv7-a -mabi=aapcs-linux
 endif
 else ifeq ($(ARCH),armeb)
 CHOST := armeb-linux-musleabi
@@ -100,8 +100,8 @@ QEMU_VPORT_RESULT := virtio-serial-device
 ifeq ($(HOST_ARCH),$(ARCH))
 QEMU_MACHINE := -cpu host -machine virt,gic_version=host,accel=kvm
 else
-QEMU_MACHINE := -cpu cortex-a15 -machine virt
-CFLAGS += -march=armv7-a -mabi=aapcs-linux # We don't pass -mtune=cortex-a15 due to a compiler bug on big endian.
+QEMU_MACHINE := -cpu max -machine virt
+CFLAGS += -march=armv7-a -mabi=aapcs-linux
 LDFLAGS += -Wl,--be8
 endif
 else ifeq ($(ARCH),x86_64)
@@ -112,8 +112,7 @@ KERNEL_BZIMAGE := $(KERNEL_BUILD_PATH)/arch/x86/boot/bzImage
 ifeq ($(HOST_ARCH),$(ARCH))
 QEMU_MACHINE := -cpu host -machine q35,accel=kvm
 else
-QEMU_MACHINE := -cpu Skylake-Server -machine q35
-CFLAGS += -march=skylake-avx512
+QEMU_MACHINE := -cpu max -machine q35
 endif
 else ifeq ($(ARCH),i686)
 CHOST := i686-linux-musl
@@ -123,8 +122,7 @@ KERNEL_BZIMAGE := $(KERNEL_BUILD_PATH)/arch/x86/boot/bzImage
 ifeq ($(subst x86_64,i686,$(HOST_ARCH)),$(ARCH))
 QEMU_MACHINE := -cpu host -machine q35,accel=kvm
 else
-QEMU_MACHINE := -cpu coreduo -machine q35
-CFLAGS += -march=prescott
+QEMU_MACHINE := -cpu max -machine q35
 endif
 else ifeq ($(ARCH),mips64)
 CHOST := mips64-linux-musl
@@ -182,7 +180,7 @@ KERNEL_BZIMAGE := $(KERNEL_BUILD_PATH)/vmlinux
 ifeq ($(HOST_ARCH),$(ARCH))
 QEMU_MACHINE := -cpu host,accel=kvm -machine pseries
 else
-QEMU_MACHINE := -machine pseries
+QEMU_MACHINE := -machine pseries -device spapr-rng,rng=rng -object rng-random,id=rng
 endif
 else ifeq ($(ARCH),powerpc64le)
 CHOST := powerpc64le-linux-musl
@@ -192,7 +190,7 @@ KERNEL_BZIMAGE := $(KERNEL_BUILD_PATH)/vmlinux
 ifeq ($(HOST_ARCH),$(ARCH))
 QEMU_MACHINE := -cpu host,accel=kvm -machine pseries
 else
-QEMU_MACHINE := -machine pseries
+QEMU_MACHINE := -machine pseries -device spapr-rng,rng=rng -object rng-random,id=rng
 endif
 else ifeq ($(ARCH),powerpc)
 CHOST := powerpc-linux-musl
@@ -247,7 +245,7 @@ QEMU_VPORT_RESULT := virtio-serial-ccw
 ifeq ($(HOST_ARCH),$(ARCH))
 QEMU_MACHINE := -cpu host,accel=kvm -machine s390-ccw-virtio -append $(KERNEL_CMDLINE)
 else
-QEMU_MACHINE := -machine s390-ccw-virtio -append $(KERNEL_CMDLINE)
+QEMU_MACHINE := -cpu max -machine s390-ccw-virtio -append $(KERNEL_CMDLINE)
 endif
 else
 $(error I only build: x86_64, i686, arm, armeb, aarch64, aarch64_be, mips, mipsel, mips64, mips64el, powerpc64, powerpc64le, powerpc, m68k, riscv64, riscv32, s390x)
index 2a0f48f..c9e1284 100644 (file)
@@ -21,6 +21,7 @@
 #include <sys/utsname.h>
 #include <sys/sendfile.h>
 #include <sys/sysmacros.h>
+#include <sys/random.h>
 #include <linux/random.h>
 #include <linux/version.h>
 
@@ -58,6 +59,8 @@ static void seed_rng(void)
 {
        int bits = 256, fd;
 
+       if (!getrandom(NULL, 0, GRND_NONBLOCK))
+               return;
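The early return leans on a documented getrandom(2) property: a zero-length read returns 0 only once the kernel RNG is initialized, and GRND_NONBLOCK turns the would-block case into an EAGAIN failure, so the call works as a cheap seededness probe. A standalone sketch:

    #include <stdbool.h>
    #include <sys/random.h>

    /* True once the kernel entropy pool is initialized. */
    static bool rng_is_seeded(void)
    {
            return getrandom(NULL, 0, GRND_NONBLOCK) == 0;
    }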
        pretty_message("[+] Fake seeding RNG...");
        fd = open("/dev/random", O_WRONLY);
        if (fd < 0)
index a9b5a52..bad88f4 100644 (file)
@@ -31,6 +31,7 @@ CONFIG_TTY=y
 CONFIG_BINFMT_ELF=y
 CONFIG_BINFMT_SCRIPT=y
 CONFIG_VDSO=y
+CONFIG_STRICT_KERNEL_RWX=y
 CONFIG_VIRTUALIZATION=y
 CONFIG_HYPERVISOR_GUEST=y
 CONFIG_PARAVIRT=y
@@ -65,6 +66,8 @@ CONFIG_PROC_FS=y
 CONFIG_PROC_SYSCTL=y
 CONFIG_SYSFS=y
 CONFIG_TMPFS=y
+CONFIG_RANDOM_TRUST_CPU=y
+CONFIG_RANDOM_TRUST_BOOTLOADER=y
 CONFIG_CONSOLE_LOGLEVEL_DEFAULT=15
 CONFIG_LOG_BUF_SHIFT=18
 CONFIG_PRINTK_TIME=y
index 64ec222..44c4767 100644 (file)
@@ -4300,8 +4300,11 @@ static int kvm_ioctl_create_device(struct kvm *kvm,
                kvm_put_kvm_no_destroy(kvm);
                mutex_lock(&kvm->lock);
                list_del(&dev->vm_node);
+               if (ops->release)
+                       ops->release(dev);
                mutex_unlock(&kvm->lock);
-               ops->destroy(dev);
+               if (ops->destroy)
+                       ops->destroy(dev);
                return ret;
        }
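A hypothetical teardown helper (name ours) showing the contract the fixed error path honors: kvm_device_ops offers two destructor variants, .release, which expects to run with kvm->lock held, and .destroy, which runs unlocked, and a device may implement either one, hence both NULL checks:

    static void example_device_teardown(struct kvm *kvm, struct kvm_device *dev)
    {
            const struct kvm_device_ops *ops = dev->ops;

            mutex_lock(&kvm->lock);
            list_del(&dev->vm_node);
            if (ops->release)
                    ops->release(dev);      /* needs kvm->lock held */
            mutex_unlock(&kvm->lock);
            if (ops->destroy)
                    ops->destroy(dev);      /* called unlocked */
    }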