Merge tag 'for-linus-20190802' of git://git.kernel.dk/linux-block
author Linus Torvalds <torvalds@linux-foundation.org>
Fri, 2 Aug 2019 21:31:26 +0000 (14:31 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Fri, 2 Aug 2019 21:31:26 +0000 (14:31 -0700)
Pull block fixes from Jens Axboe:
 "Here's a small collection of fixes that should go into this series.
  This contains:

   - io_uring potential use-after-free fix (Jackie)

   - loop regression fix (Jan)

   - O_DIRECT fragmented bio regression fix (Damien)

   - Mark Denis as the new floppy maintainer (Denis)

   - ataflop switch fall-through annotation (Gustavo)

   - libata zpodd overflow fix (Kees)

   - libata ahci deferred probe fix (Miquel)

   - nbd invalidation BUG_ON() fix (Munehisa)

   - dasd endless loop fix (Stefan)"

* tag 'for-linus-20190802' of git://git.kernel.dk/linux-block:
  s390/dasd: fix endless loop after read unit address configuration
  block: Fix __blkdev_direct_IO() for bio fragments
  MAINTAINERS: floppy: take over maintainership
  nbd: replace kill_bdev() with __invalidate_device() again
  ata: libahci: do not complain in case of deferred probe
  io_uring: fix KASAN use after free in io_sq_wq_submit_work
  loop: Fix mount(2) failure due to race with LOOP_SET_FD
  libata: zpodd: Fix small read overflow in zpodd_get_mech_type()
  ataflop: Mark expected switch fall-through

113 files changed:
Documentation/vm/hmm.rst
arch/arm/include/asm/dma-mapping.h
arch/arm/mm/Kconfig
arch/arm/mm/dma-mapping.c
arch/arm/mm/init.c
drivers/acpi/device_pm.c
drivers/bluetooth/hci_ath.c
drivers/bluetooth/hci_bcm.c
drivers/bluetooth/hci_intel.c
drivers/bluetooth/hci_ldisc.c
drivers/bluetooth/hci_mrvl.c
drivers/bluetooth/hci_qca.c
drivers/bluetooth/hci_uart.h
drivers/char/ipmi/ipmb_dev_int.c
drivers/clk/at91/clk-generated.c
drivers/clk/mediatek/clk-mt8183.c
drivers/clk/renesas/renesas-cpg-mssr.c
drivers/clk/sprd/Kconfig
drivers/gpio/gpiolib.c
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
drivers/gpu/drm/amd/include/kgd_pp_interface.h
drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
drivers/gpu/drm/amd/powerplay/navi10_ppt.c
drivers/gpu/drm/amd/powerplay/smu_v11_0.c
drivers/gpu/drm/amd/powerplay/vega20_ppt.c
drivers/gpu/drm/msm/adreno/a5xx_gpu.c
drivers/gpu/drm/msm/adreno/a6xx_gpu.c
drivers/gpu/drm/msm/adreno/adreno_gpu.c
drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c
drivers/gpu/drm/msm/msm_drv.c
drivers/gpu/drm/msm/msm_gem.c
drivers/gpu/drm/nouveau/dispnv50/disp.c
drivers/gpu/drm/nouveau/nouveau_svm.c
drivers/infiniband/core/core_priv.h
drivers/infiniband/core/counters.c
drivers/infiniband/core/device.c
drivers/infiniband/core/mad.c
drivers/infiniband/core/user_mad.c
drivers/infiniband/hw/bnxt_re/ib_verbs.c
drivers/infiniband/hw/bnxt_re/qplib_res.c
drivers/infiniband/hw/bnxt_re/qplib_res.h
drivers/infiniband/hw/bnxt_re/qplib_sp.c
drivers/infiniband/hw/bnxt_re/qplib_sp.h
drivers/infiniband/hw/hfi1/chip.c
drivers/infiniband/hw/hfi1/rc.c
drivers/infiniband/hw/hfi1/tid_rdma.c
drivers/infiniband/hw/hfi1/verbs.c
drivers/infiniband/hw/hns/Kconfig
drivers/infiniband/hw/hns/Makefile
drivers/infiniband/hw/hns/hns_roce_db.c
drivers/infiniband/hw/hns/hns_roce_hw_v1.c
drivers/infiniband/hw/mlx5/main.c
drivers/infiniband/hw/mlx5/mlx5_ib.h
drivers/infiniband/hw/mlx5/mr.c
drivers/infiniband/hw/mlx5/odp.c
drivers/infiniband/hw/mlx5/qp.c
drivers/infiniband/hw/qedr/main.c
drivers/infiniband/sw/siw/siw_cm.c
drivers/infiniband/sw/siw/siw_main.c
drivers/infiniband/sw/siw/siw_qp.c
drivers/iommu/virtio-iommu.c
drivers/md/dm-table.c
drivers/mmc/core/queue.c
drivers/mmc/host/dw_mmc.c
drivers/mmc/host/meson-mx-sdio.c
drivers/mmc/host/sdhci-sprd.c
drivers/platform/olpc/olpc-xo175-ec.c
drivers/platform/x86/intel_pmc_core.c
drivers/platform/x86/pcengines-apuv2.c
drivers/vhost/vhost.h
fs/btrfs/backref.c
fs/btrfs/send.c
fs/btrfs/transaction.c
fs/btrfs/transaction.h
fs/dax.c
fs/f2fs/file.c
fs/f2fs/gc.c
fs/f2fs/super.c
fs/gfs2/bmap.c
fs/super.c
include/linux/clk.h
include/linux/gpio/consumer.h
include/linux/hmm.h
include/linux/mod_devicetable.h
include/rdma/ib_verbs.h
include/rdma/rdmavt_qp.h
include/trace/events/dma_fence.h
include/trace/events/napi.h
include/trace/events/qdisc.h
include/trace/events/tegra_apb_dma.h
include/uapi/linux/virtio_iommu.h
kernel/dma/contiguous.c
kernel/dma/mapping.c
kernel/exit.c
kernel/signal.c
kernel/trace/trace_functions_graph.c
mm/balloon_compaction.c
mm/hmm.c
mm/slub.c
sound/core/pcm_native.c
sound/hda/hdac_i915.c
sound/usb/helper.c
tools/testing/selftests/drivers/net/mlxsw/qos_mc_aware.sh
tools/testing/selftests/kmod/kmod.sh
tools/testing/selftests/livepatch/functions.sh
tools/testing/selftests/pidfd/pidfd_test.c
tools/testing/selftests/x86/test_vsyscall.c

diff --git a/Documentation/vm/hmm.rst b/Documentation/vm/hmm.rst
index 7d90964..710ce1c 100644
@@ -237,7 +237,7 @@ The usage pattern is::
       ret = hmm_range_snapshot(&range);
       if (ret) {
           up_read(&mm->mmap_sem);
-          if (ret == -EAGAIN) {
+          if (ret == -EBUSY) {
             /*
              * No need to check hmm_range_wait_until_valid() return value
              * on retry we will get proper error with hmm_range_snapshot()
diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h
index 7e0486a..dba9355 100644
@@ -18,7 +18,9 @@ extern const struct dma_map_ops arm_coherent_dma_ops;
 
 static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
 {
-       return IS_ENABLED(CONFIG_MMU) ? &arm_dma_ops : NULL;
+       if (IS_ENABLED(CONFIG_MMU) && !IS_ENABLED(CONFIG_ARM_LPAE))
+               return &arm_dma_ops;
+       return NULL;
 }
 
 #ifdef __arch_page_to_dma
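
Why returning NULL is enough here: the DMA core treats a missing ops table as "use the generic direct mapping", which is where the new LPAE bounce-buffering support lives. A minimal sketch of that dispatch, modelled on include/linux/dma-mapping.h around v5.3 (abridged and renamed, not the verbatim kernel source):

        /* sketch: NULL ops from get_arch_dma_ops() selects dma-direct */
        static dma_addr_t sketch_map_page(struct device *dev, struct page *page,
                        size_t offset, size_t size, enum dma_data_direction dir,
                        unsigned long attrs)
        {
                const struct dma_map_ops *ops = get_dma_ops(dev);

                if (dma_is_direct(ops))         /* true when ops == NULL */
                        return dma_direct_map_page(dev, page, offset, size,
                                                   dir, attrs);
                return ops->map_page(dev, page, offset, size, dir, attrs);
        }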
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index 820b60a..c54cd7e 100644
@@ -663,6 +663,11 @@ config ARM_LPAE
        depends on MMU && CPU_32v7 && !CPU_32v6 && !CPU_32v5 && \
                !CPU_32v4 && !CPU_32v3
        select PHYS_ADDR_T_64BIT
+       select SWIOTLB
+       select ARCH_HAS_DMA_COHERENT_TO_PFN
+       select ARCH_HAS_DMA_MMAP_PGPROT
+       select ARCH_HAS_SYNC_DMA_FOR_DEVICE
+       select ARCH_HAS_SYNC_DMA_FOR_CPU
        help
          Say Y if you have an ARMv7 processor supporting the LPAE page
          table format and you would like to access memory beyond the
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 4789c60..6774b03 100644
@@ -15,6 +15,7 @@
 #include <linux/init.h>
 #include <linux/device.h>
 #include <linux/dma-mapping.h>
+#include <linux/dma-noncoherent.h>
 #include <linux/dma-contiguous.h>
 #include <linux/highmem.h>
 #include <linux/memblock.h>
@@ -1125,6 +1126,19 @@ int arm_dma_supported(struct device *dev, u64 mask)
 
 static const struct dma_map_ops *arm_get_dma_map_ops(bool coherent)
 {
+       /*
+        * When CONFIG_ARM_LPAE is set, physical address can extend above
+        * 32-bits, which then can't be addressed by devices that only support
+        * 32-bit DMA.
+        * Use the generic dma-direct / swiotlb ops code in that case, as that
+        * handles bounce buffering for us.
+        *
+        * Note: this checks CONFIG_ARM_LPAE instead of CONFIG_SWIOTLB as the
+        * latter is also selected by the Xen code, but that code for now relies
+        * on non-NULL dev_dma_ops.  To be cleaned up later.
+        */
+       if (IS_ENABLED(CONFIG_ARM_LPAE))
+               return NULL;
        return coherent ? &arm_coherent_dma_ops : &arm_dma_ops;
 }
 
@@ -2329,6 +2343,9 @@ void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
        const struct dma_map_ops *dma_ops;
 
        dev->archdata.dma_coherent = coherent;
+#ifdef CONFIG_SWIOTLB
+       dev->dma_coherent = coherent;
+#endif
 
        /*
         * Don't override the dma_ops if they have already been set. Ideally
@@ -2363,3 +2380,47 @@ void arch_teardown_dma_ops(struct device *dev)
        /* Let arch_setup_dma_ops() start again from scratch upon re-probe */
        set_dma_ops(dev, NULL);
 }
+
+#ifdef CONFIG_SWIOTLB
+void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
+               size_t size, enum dma_data_direction dir)
+{
+       __dma_page_cpu_to_dev(phys_to_page(paddr), paddr & (PAGE_SIZE - 1),
+                             size, dir);
+}
+
+void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
+               size_t size, enum dma_data_direction dir)
+{
+       __dma_page_dev_to_cpu(phys_to_page(paddr), paddr & (PAGE_SIZE - 1),
+                             size, dir);
+}
+
+long arch_dma_coherent_to_pfn(struct device *dev, void *cpu_addr,
+               dma_addr_t dma_addr)
+{
+       return dma_to_pfn(dev, dma_addr);
+}
+
+pgprot_t arch_dma_mmap_pgprot(struct device *dev, pgprot_t prot,
+               unsigned long attrs)
+{
+       if (!dev_is_dma_coherent(dev))
+               return __get_dma_pgprot(attrs, prot);
+       return prot;
+}
+
+void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
+               gfp_t gfp, unsigned long attrs)
+{
+       return __dma_alloc(dev, size, dma_handle, gfp,
+                          __get_dma_pgprot(attrs, PAGE_KERNEL), false,
+                          attrs, __builtin_return_address(0));
+}
+
+void arch_dma_free(struct device *dev, size_t size, void *cpu_addr,
+               dma_addr_t dma_handle, unsigned long attrs)
+{
+       __arm_dma_free(dev, size, cpu_addr, dma_handle, attrs, false);
+}
+#endif /* CONFIG_SWIOTLB */
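
The sync/coherency hooks above are the arch side of the dma-direct contract. A rough sketch of the consumer, modelled on kernel/dma/direct.c of the same era (abridged and renamed): dma-direct first syncs any swiotlb bounce slot, then calls the arch cache-maintenance hook for non-coherent devices. This is also why arch_setup_dma_ops() now mirrors the coherency flag into dev->dma_coherent under CONFIG_SWIOTLB: dev_is_dma_coherent() reads that field.

        static void sketch_sync_single_for_device(struct device *dev,
                        dma_addr_t addr, size_t size, enum dma_data_direction dir)
        {
                phys_addr_t paddr = dma_to_phys(dev, addr);

                if (unlikely(is_swiotlb_buffer(paddr)))
                        swiotlb_tbl_sync_single(dev, paddr, size, dir,
                                                SYNC_FOR_DEVICE);
                if (!dev_is_dma_coherent(dev))  /* the new arm hook runs here */
                        arch_sync_dma_for_device(dev, paddr, size, dir);
        }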
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index 4920a20..16d373d 100644
@@ -21,6 +21,7 @@
 #include <linux/dma-contiguous.h>
 #include <linux/sizes.h>
 #include <linux/stop_machine.h>
+#include <linux/swiotlb.h>
 
 #include <asm/cp15.h>
 #include <asm/mach-types.h>
@@ -463,6 +464,10 @@ static void __init free_highpages(void)
  */
 void __init mem_init(void)
 {
+#ifdef CONFIG_ARM_LPAE
+       swiotlb_init(1);
+#endif
+
        set_max_mapnr(pfn_to_page(max_pfn) - mem_map);
 
        /* this will put all unused low memory onto the freelists */
diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c
index 28cffaa..f616b16 100644
@@ -232,13 +232,15 @@ int acpi_device_set_power(struct acpi_device *device, int state)
                if (device->power.flags.power_resources)
                        result = acpi_power_transition(device, target_state);
        } else {
+               int cur_state = device->power.state;
+
                if (device->power.flags.power_resources) {
                        result = acpi_power_transition(device, ACPI_STATE_D0);
                        if (result)
                                goto end;
                }
 
-               if (device->power.state == ACPI_STATE_D0) {
+               if (cur_state == ACPI_STATE_D0) {
                        int psc;
 
                        /* Nothing to do here if _PSC is not present. */
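
The cached cur_state matters because acpi_power_transition() can update device->power.state as a side effect, so the old comparison could see the freshly-set state instead of the state the device was in on entry. Illustrative sequence (comments only, not verbatim kernel logic):

        /* entry: device->power.state == ACPI_STATE_D3hot, target is D0  */
        /* acpi_power_transition(device, ACPI_STATE_D0) succeeds and     */
        /* updates device->power.state to ACPI_STATE_D0 as a side effect */
        /* old check: device->power.state == ACPI_STATE_D0  -> true      */
        /* new check: cur_state == ACPI_STATE_D0            -> false     */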
diff --git a/drivers/bluetooth/hci_ath.c b/drivers/bluetooth/hci_ath.c
index a55be20..dbfe346 100644
@@ -98,6 +98,9 @@ static int ath_open(struct hci_uart *hu)
 
        BT_DBG("hu %p", hu);
 
+       if (!hci_uart_has_flow_control(hu))
+               return -EOPNOTSUPP;
+
        ath = kzalloc(sizeof(*ath), GFP_KERNEL);
        if (!ath)
                return -ENOMEM;
diff --git a/drivers/bluetooth/hci_bcm.c b/drivers/bluetooth/hci_bcm.c
index 8905ad2..ae2624f 100644
@@ -406,6 +406,9 @@ static int bcm_open(struct hci_uart *hu)
 
        bt_dev_dbg(hu->hdev, "hu %p", hu);
 
+       if (!hci_uart_has_flow_control(hu))
+               return -EOPNOTSUPP;
+
        bcm = kzalloc(sizeof(*bcm), GFP_KERNEL);
        if (!bcm)
                return -ENOMEM;
diff --git a/drivers/bluetooth/hci_intel.c b/drivers/bluetooth/hci_intel.c
index 207bae5..31f2515 100644
@@ -391,6 +391,9 @@ static int intel_open(struct hci_uart *hu)
 
        BT_DBG("hu %p", hu);
 
+       if (!hci_uart_has_flow_control(hu))
+               return -EOPNOTSUPP;
+
        intel = kzalloc(sizeof(*intel), GFP_KERNEL);
        if (!intel)
                return -ENOMEM;
diff --git a/drivers/bluetooth/hci_ldisc.c b/drivers/bluetooth/hci_ldisc.c
index 8950e07..85a30fb 100644
@@ -292,6 +292,19 @@ static int hci_uart_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
        return 0;
 }
 
+/* Check the underlying device or tty has flow control support */
+bool hci_uart_has_flow_control(struct hci_uart *hu)
+{
+       /* serdev nodes check if the needed operations are present */
+       if (hu->serdev)
+               return true;
+
+       if (hu->tty->driver->ops->tiocmget && hu->tty->driver->ops->tiocmset)
+               return true;
+
+       return false;
+}
+
 /* Flow control or un-flow control the device */
 void hci_uart_set_flow_control(struct hci_uart *hu, bool enable)
 {
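
The function whose opening lines close this hunk is the main consumer of the new check: hci_uart_set_flow_control() drives the modem-control lines through exactly these tty driver ops, so a tty whose driver lacks them would oops. Illustrative fragment of that usage (abridged, tty being hu->tty):

        unsigned int status;

        status = tty->driver->ops->tiocmget(tty);   /* NULL deref without the guard */
        tty->driver->ops->tiocmset(tty, TIOCM_RTS | TIOCM_DTR, 0);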
diff --git a/drivers/bluetooth/hci_mrvl.c b/drivers/bluetooth/hci_mrvl.c
index f98e5cc..fbc3f7c 100644
@@ -59,6 +59,9 @@ static int mrvl_open(struct hci_uart *hu)
 
        BT_DBG("hu %p", hu);
 
+       if (!hci_uart_has_flow_control(hu))
+               return -EOPNOTSUPP;
+
        mrvl = kzalloc(sizeof(*mrvl), GFP_KERNEL);
        if (!mrvl)
                return -ENOMEM;
diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c
index 9a5c9c1..82a0a36 100644
@@ -473,6 +473,9 @@ static int qca_open(struct hci_uart *hu)
 
        BT_DBG("hu %p qca_open", hu);
 
+       if (!hci_uart_has_flow_control(hu))
+               return -EOPNOTSUPP;
+
        qca = kzalloc(sizeof(struct qca_data), GFP_KERNEL);
        if (!qca)
                return -ENOMEM;
diff --git a/drivers/bluetooth/hci_uart.h b/drivers/bluetooth/hci_uart.h
index f11af39..6ab6311 100644
@@ -104,6 +104,7 @@ int hci_uart_wait_until_sent(struct hci_uart *hu);
 int hci_uart_init_ready(struct hci_uart *hu);
 void hci_uart_init_work(struct work_struct *work);
 void hci_uart_set_baudrate(struct hci_uart *hu, unsigned int speed);
+bool hci_uart_has_flow_control(struct hci_uart *hu);
 void hci_uart_set_flow_control(struct hci_uart *hu, bool enable);
 void hci_uart_set_speeds(struct hci_uart *hu, unsigned int init_speed,
                         unsigned int oper_speed);
diff --git a/drivers/char/ipmi/ipmb_dev_int.c b/drivers/char/ipmi/ipmb_dev_int.c
index 5720433..285e0b8 100644
@@ -76,7 +76,7 @@ static ssize_t ipmb_read(struct file *file, char __user *buf, size_t count,
        struct ipmb_dev *ipmb_dev = to_ipmb_dev(file);
        struct ipmb_request_elem *queue_elem;
        struct ipmb_msg msg;
-       ssize_t ret;
+       ssize_t ret = 0;
 
        memset(&msg, 0, sizeof(msg));
 
diff --git a/drivers/clk/at91/clk-generated.c b/drivers/clk/at91/clk-generated.c
index 44db83a..44a46dc 100644
@@ -141,6 +141,8 @@ static int clk_generated_determine_rate(struct clk_hw *hw,
                        continue;
 
                div = DIV_ROUND_CLOSEST(parent_rate, req->rate);
+               if (div > GENERATED_MAX_DIV + 1)
+                       div = GENERATED_MAX_DIV + 1;
 
                clk_generated_best_diff(req, parent, parent_rate, div,
                                        &best_diff, &best_rate);
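
A quick worked example of the clamp, assuming GENERATED_MAX_DIV is 255 (an 8-bit divisor field, so 256 is the largest divisor the hardware can take):

        /* parent_rate = 200 MHz, req->rate = 32 kHz */
        div = DIV_ROUND_CLOSEST(200000000, 32000);      /* = 6250 */
        if (div > GENERATED_MAX_DIV + 1)                /* 6250 > 256 */
                div = GENERATED_MAX_DIV + 1;            /* settle for 256 */

Without the clamp, the oversized divisor would be passed down and eventually truncated when programmed into the register.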
diff --git a/drivers/clk/mediatek/clk-mt8183.c b/drivers/clk/mediatek/clk-mt8183.c
index 1aa5f40..73b7e23 100644
@@ -25,9 +25,11 @@ static const struct mtk_fixed_clk top_fixed_clks[] = {
        FIXED_CLK(CLK_TOP_UNIVP_192M, "univpll_192m", "univpll", 192000000),
 };
 
+static const struct mtk_fixed_factor top_early_divs[] = {
+       FACTOR(CLK_TOP_CLK13M, "clk13m", "clk26m", 1, 2),
+};
+
 static const struct mtk_fixed_factor top_divs[] = {
-       FACTOR(CLK_TOP_CLK13M, "clk13m", "clk26m", 1,
-               2),
        FACTOR(CLK_TOP_F26M_CK_D2, "csw_f26m_ck_d2", "clk26m", 1,
                2),
        FACTOR(CLK_TOP_SYSPLL_CK, "syspll_ck", "mainpll", 1,
@@ -1148,37 +1150,57 @@ static int clk_mt8183_apmixed_probe(struct platform_device *pdev)
        return of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
 }
 
+static struct clk_onecell_data *top_clk_data;
+
+static void clk_mt8183_top_init_early(struct device_node *node)
+{
+       int i;
+
+       top_clk_data = mtk_alloc_clk_data(CLK_TOP_NR_CLK);
+
+       for (i = 0; i < CLK_TOP_NR_CLK; i++)
+               top_clk_data->clks[i] = ERR_PTR(-EPROBE_DEFER);
+
+       mtk_clk_register_factors(top_early_divs, ARRAY_SIZE(top_early_divs),
+                       top_clk_data);
+
+       of_clk_add_provider(node, of_clk_src_onecell_get, top_clk_data);
+}
+
+CLK_OF_DECLARE_DRIVER(mt8183_topckgen, "mediatek,mt8183-topckgen",
+                       clk_mt8183_top_init_early);
+
 static int clk_mt8183_top_probe(struct platform_device *pdev)
 {
        struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        void __iomem *base;
-       struct clk_onecell_data *clk_data;
        struct device_node *node = pdev->dev.of_node;
 
        base = devm_ioremap_resource(&pdev->dev, res);
        if (IS_ERR(base))
                return PTR_ERR(base);
 
-       clk_data = mtk_alloc_clk_data(CLK_TOP_NR_CLK);
-
        mtk_clk_register_fixed_clks(top_fixed_clks, ARRAY_SIZE(top_fixed_clks),
-               clk_data);
+               top_clk_data);
+
+       mtk_clk_register_factors(top_early_divs, ARRAY_SIZE(top_early_divs),
+               top_clk_data);
 
-       mtk_clk_register_factors(top_divs, ARRAY_SIZE(top_divs), clk_data);
+       mtk_clk_register_factors(top_divs, ARRAY_SIZE(top_divs), top_clk_data);
 
        mtk_clk_register_muxes(top_muxes, ARRAY_SIZE(top_muxes),
-               node, &mt8183_clk_lock, clk_data);
+               node, &mt8183_clk_lock, top_clk_data);
 
        mtk_clk_register_composites(top_aud_muxes, ARRAY_SIZE(top_aud_muxes),
-               base, &mt8183_clk_lock, clk_data);
+               base, &mt8183_clk_lock, top_clk_data);
 
        mtk_clk_register_composites(top_aud_divs, ARRAY_SIZE(top_aud_divs),
-               base, &mt8183_clk_lock, clk_data);
+               base, &mt8183_clk_lock, top_clk_data);
 
        mtk_clk_register_gates(node, top_clks, ARRAY_SIZE(top_clks),
-               clk_data);
+               top_clk_data);
 
-       return of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+       return of_clk_add_provider(node, of_clk_src_onecell_get, top_clk_data);
 }
 
 static int clk_mt8183_infra_probe(struct platform_device *pdev)
diff --git a/drivers/clk/renesas/renesas-cpg-mssr.c b/drivers/clk/renesas/renesas-cpg-mssr.c
index 52bbb9c..d4075b1 100644
@@ -572,17 +572,11 @@ static int cpg_mssr_reset(struct reset_controller_dev *rcdev,
        unsigned int reg = id / 32;
        unsigned int bit = id % 32;
        u32 bitmask = BIT(bit);
-       unsigned long flags;
-       u32 value;
 
        dev_dbg(priv->dev, "reset %u%02u\n", reg, bit);
 
        /* Reset module */
-       spin_lock_irqsave(&priv->rmw_lock, flags);
-       value = readl(priv->base + SRCR(reg));
-       value |= bitmask;
-       writel(value, priv->base + SRCR(reg));
-       spin_unlock_irqrestore(&priv->rmw_lock, flags);
+       writel(bitmask, priv->base + SRCR(reg));
 
        /* Wait for at least one cycle of the RCLK clock (@ ca. 32 kHz) */
        udelay(35);
@@ -599,16 +593,10 @@ static int cpg_mssr_assert(struct reset_controller_dev *rcdev, unsigned long id)
        unsigned int reg = id / 32;
        unsigned int bit = id % 32;
        u32 bitmask = BIT(bit);
-       unsigned long flags;
-       u32 value;
 
        dev_dbg(priv->dev, "assert %u%02u\n", reg, bit);
 
-       spin_lock_irqsave(&priv->rmw_lock, flags);
-       value = readl(priv->base + SRCR(reg));
-       value |= bitmask;
-       writel(value, priv->base + SRCR(reg));
-       spin_unlock_irqrestore(&priv->rmw_lock, flags);
+       writel(bitmask, priv->base + SRCR(reg));
        return 0;
 }
 
diff --git a/drivers/clk/sprd/Kconfig b/drivers/clk/sprd/Kconfig
index 91d3d72..3c219af 100644
@@ -3,6 +3,7 @@ config SPRD_COMMON_CLK
        tristate "Clock support for Spreadtrum SoCs"
        depends on ARCH_SPRD || COMPILE_TEST
        default ARCH_SPRD
+       select REGMAP_MMIO
 
 if SPRD_COMMON_CLK
 
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index 3ee99d0..f497003 100644
@@ -956,9 +956,11 @@ static int lineevent_create(struct gpio_device *gdev, void __user *ip)
        }
 
        if (eflags & GPIOEVENT_REQUEST_RISING_EDGE)
-               irqflags |= IRQF_TRIGGER_RISING;
+               irqflags |= test_bit(FLAG_ACTIVE_LOW, &desc->flags) ?
+                       IRQF_TRIGGER_FALLING : IRQF_TRIGGER_RISING;
        if (eflags & GPIOEVENT_REQUEST_FALLING_EDGE)
-               irqflags |= IRQF_TRIGGER_FALLING;
+               irqflags |= test_bit(FLAG_ACTIVE_LOW, &desc->flags) ?
+                       IRQF_TRIGGER_RISING : IRQF_TRIGGER_FALLING;
        irqflags |= IRQF_ONESHOT;
 
        INIT_KFIFO(le->events);
@@ -1392,12 +1394,17 @@ int gpiochip_add_data_with_key(struct gpio_chip *chip, void *data,
        for (i = 0; i < chip->ngpio; i++) {
                struct gpio_desc *desc = &gdev->descs[i];
 
-               if (chip->get_direction && gpiochip_line_is_valid(chip, i))
-                       desc->flags = !chip->get_direction(chip, i) ?
-                                       (1 << FLAG_IS_OUT) : 0;
-               else
-                       desc->flags = !chip->direction_input ?
-                                       (1 << FLAG_IS_OUT) : 0;
+               if (chip->get_direction && gpiochip_line_is_valid(chip, i)) {
+                       if (!chip->get_direction(chip, i))
+                               set_bit(FLAG_IS_OUT, &desc->flags);
+                       else
+                               clear_bit(FLAG_IS_OUT, &desc->flags);
+               } else {
+                       if (!chip->direction_input)
+                               set_bit(FLAG_IS_OUT, &desc->flags);
+                       else
+                               clear_bit(FLAG_IS_OUT, &desc->flags);
+               }
        }
 
        acpi_gpiochip_add(chip);
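
The first hunk makes the requested edge polarity follow the line's active-low flag, so userspace that thinks in logical levels also gets logical edges. A small self-contained sketch of the uapi view, using a hypothetical chip path and line offset:

        #include <fcntl.h>
        #include <stdio.h>
        #include <string.h>
        #include <sys/ioctl.h>
        #include <unistd.h>
        #include <linux/gpio.h>

        int main(void)
        {
                struct gpioevent_request req;
                struct gpioevent_data ev;
                int fd = open("/dev/gpiochip0", O_RDONLY);      /* hypothetical chip */

                if (fd < 0)
                        return 1;

                memset(&req, 0, sizeof(req));
                req.lineoffset = 4;                             /* hypothetical line */
                req.handleflags = GPIOHANDLE_REQUEST_INPUT |
                                  GPIOHANDLE_REQUEST_ACTIVE_LOW;
                req.eventflags = GPIOEVENT_REQUEST_RISING_EDGE;
                strcpy(req.consumer_label, "edge-demo");
                if (ioctl(fd, GPIO_GET_LINEEVENT_IOCTL, &req) < 0)
                        return 1;

                /* With the fix, this logical rising edge is delivered on the
                 * physical falling edge of the active-low line. */
                if (read(req.fd, &ev, sizeof(ev)) == (ssize_t)sizeof(ev))
                        printf("event %u at %llu ns\n", ev.id,
                               (unsigned long long)ev.timestamp);
                return 0;
        }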
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
index 1d3ee9c..6a5c96e 100644
@@ -1140,7 +1140,8 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
                        adev->asic_type != CHIP_FIJI &&
                        adev->asic_type != CHIP_POLARIS10 &&
                        adev->asic_type != CHIP_POLARIS11 &&
-                       adev->asic_type != CHIP_POLARIS12) ?
+                       adev->asic_type != CHIP_POLARIS12 &&
+                       adev->asic_type != CHIP_VEGAM) ?
                        VI_BO_SIZE_ALIGN : 1;
 
        mapping_flags = AMDGPU_VM_PAGE_READABLE;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index e069de8..4e4094f 100644
@@ -1044,29 +1044,27 @@ static int amdgpu_cs_process_fence_dep(struct amdgpu_cs_parser *p,
                        return r;
                }
 
-               fence = amdgpu_ctx_get_fence(ctx, entity,
-                                            deps[i].handle);
+               fence = amdgpu_ctx_get_fence(ctx, entity, deps[i].handle);
+               amdgpu_ctx_put(ctx);
+
+               if (IS_ERR(fence))
+                       return PTR_ERR(fence);
+               else if (!fence)
+                       continue;
 
                if (chunk->chunk_id == AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES) {
-                       struct drm_sched_fence *s_fence = to_drm_sched_fence(fence);
+                       struct drm_sched_fence *s_fence;
                        struct dma_fence *old = fence;
 
+                       s_fence = to_drm_sched_fence(fence);
                        fence = dma_fence_get(&s_fence->scheduled);
                        dma_fence_put(old);
                }
 
-               if (IS_ERR(fence)) {
-                       r = PTR_ERR(fence);
-                       amdgpu_ctx_put(ctx);
+               r = amdgpu_sync_fence(p->adev, &p->job->sync, fence, true);
+               dma_fence_put(fence);
+               if (r)
                        return r;
-               } else if (fence) {
-                       r = amdgpu_sync_fence(p->adev, &p->job->sync, fence,
-                                       true);
-                       dma_fence_put(fence);
-                       amdgpu_ctx_put(ctx);
-                       if (r)
-                               return r;
-               }
        }
        return 0;
 }
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
index 6d54dec..5652cc7 100644
@@ -707,7 +707,7 @@ static ssize_t amdgpu_debugfs_gpr_read(struct file *f, char __user *buf,
        thread = (*pos & GENMASK_ULL(59, 52)) >> 52;
        bank = (*pos & GENMASK_ULL(61, 60)) >> 60;
 
-       data = kmalloc_array(1024, sizeof(*data), GFP_KERNEL);
+       data = kcalloc(1024, sizeof(*data), GFP_KERNEL);
        if (!data)
                return -ENOMEM;
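
The kmalloc_array() -> kcalloc() switch matters because the GPR dump below may copy out entries the hardware never filled in; kcalloc() returns zeroed memory, so stale kernel heap contents can no longer leak to userspace. The two allocators differ only in __GFP_ZERO, as include/linux/slab.h defines it:

        static inline void *kcalloc(size_t n, size_t size, gfp_t flags)
        {
                return kmalloc_array(n, size, flags | __GFP_ZERO);
        }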
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
index 03ca8c6..2b54656 100644
@@ -159,12 +159,16 @@ static ssize_t amdgpu_get_dpm_state(struct device *dev,
        struct amdgpu_device *adev = ddev->dev_private;
        enum amd_pm_state_type pm;
 
-       if (is_support_sw_smu(adev) && adev->smu.ppt_funcs->get_current_power_state)
-               pm = amdgpu_smu_get_current_power_state(adev);
-       else if (adev->powerplay.pp_funcs->get_current_power_state)
+       if (is_support_sw_smu(adev)) {
+               if (adev->smu.ppt_funcs->get_current_power_state)
+                       pm = amdgpu_smu_get_current_power_state(adev);
+               else
+                       pm = adev->pm.dpm.user_state;
+       } else if (adev->powerplay.pp_funcs->get_current_power_state) {
                pm = amdgpu_dpm_get_current_power_state(adev);
-       else
+       } else {
                pm = adev->pm.dpm.user_state;
+       }
 
        return snprintf(buf, PAGE_SIZE, "%s\n",
                        (pm == POWER_STATE_TYPE_BATTERY) ? "battery" :
@@ -191,7 +195,11 @@ static ssize_t amdgpu_set_dpm_state(struct device *dev,
                goto fail;
        }
 
-       if (adev->powerplay.pp_funcs->dispatch_tasks) {
+       if (is_support_sw_smu(adev)) {
+               mutex_lock(&adev->pm.mutex);
+               adev->pm.dpm.user_state = state;
+               mutex_unlock(&adev->pm.mutex);
+       } else if (adev->powerplay.pp_funcs->dispatch_tasks) {
                amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_ENABLE_USER_STATE, &state);
        } else {
                mutex_lock(&adev->pm.mutex);
@@ -3067,28 +3075,44 @@ static int amdgpu_debugfs_pm_info_pp(struct seq_file *m, struct amdgpu_device *a
        if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_ENABLED_SMC_FEATURES_MASK, (void *)&value64, &size))
                seq_printf(m, "SMC Feature Mask: 0x%016llx\n", value64);
 
-       /* UVD clocks */
-       if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_POWER, (void *)&value, &size)) {
-               if (!value) {
-                       seq_printf(m, "UVD: Disabled\n");
-               } else {
-                       seq_printf(m, "UVD: Enabled\n");
-                       if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_DCLK, (void *)&value, &size))
-                               seq_printf(m, "\t%u MHz (DCLK)\n", value/100);
-                       if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_VCLK, (void *)&value, &size))
-                               seq_printf(m, "\t%u MHz (VCLK)\n", value/100);
+       if (adev->asic_type > CHIP_VEGA20) {
+               /* VCN clocks */
+               if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCN_POWER_STATE, (void *)&value, &size)) {
+                       if (!value) {
+                               seq_printf(m, "VCN: Disabled\n");
+                       } else {
+                               seq_printf(m, "VCN: Enabled\n");
+                               if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_DCLK, (void *)&value, &size))
+                                       seq_printf(m, "\t%u MHz (DCLK)\n", value/100);
+                               if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_VCLK, (void *)&value, &size))
+                                       seq_printf(m, "\t%u MHz (VCLK)\n", value/100);
+                       }
                }
-       }
-       seq_printf(m, "\n");
+               seq_printf(m, "\n");
+       } else {
+               /* UVD clocks */
+               if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_POWER, (void *)&value, &size)) {
+                       if (!value) {
+                               seq_printf(m, "UVD: Disabled\n");
+                       } else {
+                               seq_printf(m, "UVD: Enabled\n");
+                               if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_DCLK, (void *)&value, &size))
+                                       seq_printf(m, "\t%u MHz (DCLK)\n", value/100);
+                               if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_VCLK, (void *)&value, &size))
+                                       seq_printf(m, "\t%u MHz (VCLK)\n", value/100);
+                       }
+               }
+               seq_printf(m, "\n");
 
-       /* VCE clocks */
-       if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCE_POWER, (void *)&value, &size)) {
-               if (!value) {
-                       seq_printf(m, "VCE: Disabled\n");
-               } else {
-                       seq_printf(m, "VCE: Enabled\n");
-                       if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCE_ECCLK, (void *)&value, &size))
-                               seq_printf(m, "\t%u MHz (ECCLK)\n", value/100);
+               /* VCE clocks */
+               if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCE_POWER, (void *)&value, &size)) {
+                       if (!value) {
+                               seq_printf(m, "VCE: Disabled\n");
+                       } else {
+                               seq_printf(m, "VCE: Enabled\n");
+                               if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCE_ECCLK, (void *)&value, &size))
+                                       seq_printf(m, "\t%u MHz (ECCLK)\n", value/100);
+                       }
                }
        }
 
diff --git a/drivers/gpu/drm/amd/include/kgd_pp_interface.h b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
index 9f661bf..5b1ebb7 100644
@@ -123,6 +123,7 @@ enum amd_pp_sensors {
        AMDGPU_PP_SENSOR_ENABLED_SMC_FEATURES_MASK,
        AMDGPU_PP_SENSOR_MIN_FAN_RPM,
        AMDGPU_PP_SENSOR_MAX_FAN_RPM,
+       AMDGPU_PP_SENSOR_VCN_POWER_STATE,
 };
 
 enum amd_pp_task {
diff --git a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
index c097113..0685a33 100644
@@ -306,7 +306,8 @@ int smu_get_power_num_states(struct smu_context *smu,
 
        /* power states are not supported */
        memset(state_info, 0, sizeof(struct pp_states_info));
-       state_info->nums = 0;
+       state_info->nums = 1;
+       state_info->states[0] = POWER_STATE_TYPE_DEFAULT;
 
        return 0;
 }
@@ -337,6 +338,10 @@ int smu_common_read_sensor(struct smu_context *smu, enum amd_pp_sensors sensor,
                *(uint32_t *)data = smu_feature_is_enabled(smu, SMU_FEATURE_DPM_VCE_BIT) ? 1 : 0;
                *size = 4;
                break;
+       case AMDGPU_PP_SENSOR_VCN_POWER_STATE:
+               *(uint32_t *)data = smu_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT) ? 1 : 0;
+               *size = 4;
+               break;
        default:
                ret = -EINVAL;
                break;
@@ -723,6 +728,12 @@ static int smu_sw_init(void *handle)
                return ret;
        }
 
+       ret = smu_register_irq_handler(smu);
+       if (ret) {
+               pr_err("Failed to register smc irq handler!\n");
+               return ret;
+       }
+
        return 0;
 }
 
@@ -732,6 +743,9 @@ static int smu_sw_fini(void *handle)
        struct smu_context *smu = &adev->smu;
        int ret;
 
+       kfree(smu->irq_source);
+       smu->irq_source = NULL;
+
        ret = smu_smc_table_sw_fini(smu);
        if (ret) {
                pr_err("Failed to sw fini smc table!\n");
@@ -1088,10 +1102,6 @@ static int smu_hw_init(void *handle)
        if (ret)
                goto failed;
 
-       ret = smu_register_irq_handler(smu);
-       if (ret)
-               goto failed;
-
        if (!smu->pm_enabled)
                adev->pm.dpm_enabled = false;
        else
@@ -1121,9 +1131,6 @@ static int smu_hw_fini(void *handle)
        kfree(table_context->overdrive_table);
        table_context->overdrive_table = NULL;
 
-       kfree(smu->irq_source);
-       smu->irq_source = NULL;
-
        ret = smu_fini_fb_allocations(smu);
        if (ret)
                return ret;
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
index e32ae9d..18e780f 100644
@@ -1111,6 +1111,7 @@ static int smu10_thermal_get_temperature(struct pp_hwmgr *hwmgr)
 static int smu10_read_sensor(struct pp_hwmgr *hwmgr, int idx,
                          void *value, int *size)
 {
+       struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
        uint32_t sclk, mclk;
        int ret = 0;
 
@@ -1132,6 +1133,10 @@ static int smu10_read_sensor(struct pp_hwmgr *hwmgr, int idx,
        case AMDGPU_PP_SENSOR_GPU_TEMP:
                *((uint32_t *)value) = smu10_thermal_get_temperature(hwmgr);
                break;
+       case AMDGPU_PP_SENSOR_VCN_POWER_STATE:
+               *(uint32_t *)value =  smu10_data->vcn_power_gated ? 0 : 1;
+               *size = 4;
+               break;
        default:
                ret = -EINVAL;
                break;
@@ -1175,18 +1180,22 @@ static int smu10_powergate_sdma(struct pp_hwmgr *hwmgr, bool gate)
 
 static void smu10_powergate_vcn(struct pp_hwmgr *hwmgr, bool bgate)
 {
+       struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
+
        if (bgate) {
                amdgpu_device_ip_set_powergating_state(hwmgr->adev,
                                                AMD_IP_BLOCK_TYPE_VCN,
                                                AMD_PG_STATE_GATE);
                smum_send_msg_to_smc_with_parameter(hwmgr,
                                        PPSMC_MSG_PowerDownVcn, 0);
+               smu10_data->vcn_power_gated = true;
        } else {
                smum_send_msg_to_smc_with_parameter(hwmgr,
                                                PPSMC_MSG_PowerUpVcn, 0);
                amdgpu_device_ip_set_powergating_state(hwmgr->adev,
                                                AMD_IP_BLOCK_TYPE_VCN,
                                                AMD_PG_STATE_UNGATE);
+               smu10_data->vcn_power_gated = false;
        }
 }
 
diff --git a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
index 22e46a2..208e671 100644
@@ -429,7 +429,6 @@ struct smu_table_context
        struct smu_table                *tables;
        uint32_t                        table_count;
        struct smu_table                memory_pool;
-       uint16_t                        software_shutdown_temp;
        uint8_t                         thermal_controller_type;
        uint16_t                        TDPODLimit;
 
diff --git a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
index 4aaad25..cc0a3b2 100644
@@ -23,6 +23,7 @@
 
 #include "pp_debug.h"
 #include <linux/firmware.h>
+#include <linux/pci.h>
 #include "amdgpu.h"
 #include "amdgpu_smu.h"
 #include "atomfirmware.h"
@@ -577,28 +578,20 @@ static int navi10_set_default_dpm_table(struct smu_context *smu)
 static int navi10_dpm_set_uvd_enable(struct smu_context *smu, bool enable)
 {
        int ret = 0;
-       struct smu_power_context *smu_power = &smu->smu_power;
-       struct smu_power_gate *power_gate = &smu_power->power_gate;
 
-       if (enable && power_gate->uvd_gated) {
-               if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UVD_BIT)) {
-                       ret = smu_send_smc_msg_with_param(smu, SMU_MSG_PowerUpVcn, 1);
-                       if (ret)
-                               return ret;
-               }
-               power_gate->uvd_gated = false;
+       if (enable) {
+               ret = smu_send_smc_msg_with_param(smu, SMU_MSG_PowerUpVcn, 1);
+               if (ret)
+                       return ret;
        } else {
-               if (!enable && !power_gate->uvd_gated) {
-                       if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UVD_BIT)) {
-                               ret = smu_send_smc_msg(smu, SMU_MSG_PowerDownVcn);
-                               if (ret)
-                                       return ret;
-                       }
-                       power_gate->uvd_gated = true;
-               }
+               ret = smu_send_smc_msg(smu, SMU_MSG_PowerDownVcn);
+               if (ret)
+                       return ret;
        }
 
-       return 0;
+       ret = smu_feature_set_enabled(smu, SMU_FEATURE_VCN_PG_BIT, enable);
+
+       return ret;
 }
 
 static int navi10_get_current_clk_freq_by_table(struct smu_context *smu,
@@ -1573,7 +1566,7 @@ static int navi10_set_peak_clock_by_device(struct smu_context *smu)
        uint32_t sclk_freq = 0, uclk_freq = 0;
        uint32_t uclk_level = 0;
 
-       switch (adev->rev_id) {
+       switch (adev->pdev->revision) {
        case 0xf0: /* XTX */
        case 0xc0:
                sclk_freq = NAVI10_PEAK_SCLK_XTX;
@@ -1620,6 +1613,22 @@ static int navi10_set_performance_level(struct smu_context *smu, enum amd_dpm_fo
        return ret;
 }
 
+static int navi10_get_thermal_temperature_range(struct smu_context *smu,
+                                               struct smu_temperature_range *range)
+{
+       struct smu_table_context *table_context = &smu->smu_table;
+       struct smu_11_0_powerplay_table *powerplay_table = table_context->power_play_table;
+
+       if (!range || !powerplay_table)
+               return -EINVAL;
+
+       /* The unit is temperature */
+       range->min = 0;
+       range->max = powerplay_table->software_shutdown_temp;
+
+       return 0;
+}
+
 static const struct pptable_funcs navi10_ppt_funcs = {
        .tables_init = navi10_tables_init,
        .alloc_dpm_context = navi10_allocate_dpm_context,
@@ -1657,6 +1666,7 @@ static const struct pptable_funcs navi10_ppt_funcs = {
        .get_ppfeature_status = navi10_get_ppfeature_status,
        .set_ppfeature_status = navi10_set_ppfeature_status,
        .set_performance_level = navi10_set_performance_level,
+       .get_thermal_temperature_range = navi10_get_thermal_temperature_range,
 };
 
 void navi10_set_ppt_funcs(struct smu_context *smu)
diff --git a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
index caca909..ac5b262 100644
@@ -1124,10 +1124,8 @@ static int smu_v11_0_set_thermal_range(struct smu_context *smu,
                                       struct smu_temperature_range *range)
 {
        struct amdgpu_device *adev = smu->adev;
-       int low = SMU_THERMAL_MINIMUM_ALERT_TEMP *
-               SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
-       int high = SMU_THERMAL_MAXIMUM_ALERT_TEMP *
-               SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
+       int low = SMU_THERMAL_MINIMUM_ALERT_TEMP;
+       int high = SMU_THERMAL_MAXIMUM_ALERT_TEMP;
        uint32_t val;
 
        if (!range)
@@ -1138,6 +1136,9 @@ static int smu_v11_0_set_thermal_range(struct smu_context *smu,
        if (high > range->max)
                high = range->max;
 
+       low = max(SMU_THERMAL_MINIMUM_ALERT_TEMP, range->min);
+       high = min(SMU_THERMAL_MAXIMUM_ALERT_TEMP, range->max);
+
        if (low > high)
                return -EINVAL;
 
@@ -1146,8 +1147,8 @@ static int smu_v11_0_set_thermal_range(struct smu_context *smu,
        val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_IH_HW_ENA, 1);
        val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_INTH_MASK, 0);
        val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_INTL_MASK, 0);
-       val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTH, (high / SMU_TEMPERATURE_UNITS_PER_CENTIGRADES));
-       val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTL, (low / SMU_TEMPERATURE_UNITS_PER_CENTIGRADES));
+       val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTH, (high & 0xff));
+       val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTL, (low & 0xff));
        val = val & (~THM_THERMAL_INT_CTRL__THERM_TRIGGER_MASK_MASK);
 
        WREG32_SOC15(THM, 0, mmTHM_THERMAL_INT_CTRL, val);
@@ -1186,7 +1187,10 @@ static int smu_v11_0_start_thermal_control(struct smu_context *smu)
 
        if (!smu->pm_enabled)
                return ret;
+
        ret = smu_get_thermal_temperature_range(smu, &range);
+       if (ret)
+               return ret;
 
        if (smu->smu_table.thermal_controller_type) {
                ret = smu_v11_0_set_thermal_range(smu, &range);
@@ -1202,15 +1206,17 @@ static int smu_v11_0_start_thermal_control(struct smu_context *smu)
                        return ret;
        }
 
-       adev->pm.dpm.thermal.min_temp = range.min;
-       adev->pm.dpm.thermal.max_temp = range.max;
-       adev->pm.dpm.thermal.max_edge_emergency_temp = range.edge_emergency_max;
-       adev->pm.dpm.thermal.min_hotspot_temp = range.hotspot_min;
-       adev->pm.dpm.thermal.max_hotspot_crit_temp = range.hotspot_crit_max;
-       adev->pm.dpm.thermal.max_hotspot_emergency_temp = range.hotspot_emergency_max;
-       adev->pm.dpm.thermal.min_mem_temp = range.mem_min;
-       adev->pm.dpm.thermal.max_mem_crit_temp = range.mem_crit_max;
-       adev->pm.dpm.thermal.max_mem_emergency_temp = range.mem_emergency_max;
+       adev->pm.dpm.thermal.min_temp = range.min * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
+       adev->pm.dpm.thermal.max_temp = range.max * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
+       adev->pm.dpm.thermal.max_edge_emergency_temp = range.edge_emergency_max * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
+       adev->pm.dpm.thermal.min_hotspot_temp = range.hotspot_min * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
+       adev->pm.dpm.thermal.max_hotspot_crit_temp = range.hotspot_crit_max * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
+       adev->pm.dpm.thermal.max_hotspot_emergency_temp = range.hotspot_emergency_max * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
+       adev->pm.dpm.thermal.min_mem_temp = range.mem_min * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
+       adev->pm.dpm.thermal.max_mem_crit_temp = range.mem_crit_max * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
+       adev->pm.dpm.thermal.max_mem_emergency_temp = range.mem_emergency_max * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
+       adev->pm.dpm.thermal.min_temp = range.min * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
+       adev->pm.dpm.thermal.max_temp = range.max * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
 
        return ret;
 }
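
A units sanity check for this block, assuming SMU_TEMPERATURE_UNITS_PER_CENTIGRADES is 1000 (the dpm thermal fields being in millidegrees Celsius): the ppt backends now report plain degrees and the scaling happens exactly once, here.

        /* e.g. a 100 degC software shutdown limit from the powerplay table */
        range.max = 100;                                /* degrees C */
        adev->pm.dpm.thermal.max_temp = 100 * 1000;     /* 100000 mdegC */

The interrupt-threshold path above goes the other way for the same reason: DIG_THERM_INTH/INTL take whole degrees, so the old divide is dropped there.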
diff --git a/drivers/gpu/drm/amd/powerplay/vega20_ppt.c b/drivers/gpu/drm/amd/powerplay/vega20_ppt.c
index dc139a6..dd6fd1c 100644
@@ -450,7 +450,6 @@ static int vega20_store_powerplay_table(struct smu_context *smu)
        memcpy(table_context->driver_pptable, &powerplay_table->smcPPTable,
               sizeof(PPTable_t));
 
-       table_context->software_shutdown_temp = powerplay_table->usSoftwareShutdownTemp;
        table_context->thermal_controller_type = powerplay_table->ucThermalControllerType;
        table_context->TDPODLimit = le32_to_cpu(powerplay_table->OverDrive8Table.ODSettingsMax[ATOM_VEGA20_ODSETTING_POWERPERCENTAGE]);
 
@@ -3234,35 +3233,24 @@ static int vega20_set_watermarks_table(struct smu_context *smu,
        return 0;
 }
 
-static const struct smu_temperature_range vega20_thermal_policy[] =
-{
-       {-273150,  99000, 99000, -273150, 99000, 99000, -273150, 99000, 99000},
-       { 120000, 120000, 120000, 120000, 120000, 120000, 120000, 120000, 120000},
-};
-
 static int vega20_get_thermal_temperature_range(struct smu_context *smu,
                                                struct smu_temperature_range *range)
 {
-
+       struct smu_table_context *table_context = &smu->smu_table;
+       ATOM_Vega20_POWERPLAYTABLE *powerplay_table = table_context->power_play_table;
        PPTable_t *pptable = smu->smu_table.driver_pptable;
 
-       if (!range)
+       if (!range || !powerplay_table)
                return -EINVAL;
 
-       memcpy(range, &vega20_thermal_policy[0], sizeof(struct smu_temperature_range));
-
-       range->max = pptable->TedgeLimit *
-               SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
-       range->edge_emergency_max = (pptable->TedgeLimit + CTF_OFFSET_EDGE) *
-               SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
-       range->hotspot_crit_max = pptable->ThotspotLimit *
-               SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
-       range->hotspot_emergency_max = (pptable->ThotspotLimit + CTF_OFFSET_HOTSPOT) *
-               SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
-       range->mem_crit_max = pptable->ThbmLimit *
-               SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
-       range->mem_emergency_max = (pptable->ThbmLimit + CTF_OFFSET_HBM)*
-               SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
+       /* The unit is temperature */
+       range->min = 0;
+       range->max = powerplay_table->usSoftwareShutdownTemp;
+       range->edge_emergency_max = (pptable->TedgeLimit + CTF_OFFSET_EDGE);
+       range->hotspot_crit_max = pptable->ThotspotLimit;
+       range->hotspot_emergency_max = (pptable->ThotspotLimit + CTF_OFFSET_HOTSPOT);
+       range->mem_crit_max = pptable->ThbmLimit;
+       range->mem_emergency_max = (pptable->ThbmLimit + CTF_OFFSET_HBM);
 
 
        return 0;
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
index 1671db4..e9c55d1 100644
@@ -59,6 +59,7 @@ static void a5xx_submit_in_rb(struct msm_gpu *gpu, struct msm_gem_submit *submit
                case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
                        if (priv->lastctx == ctx)
                                break;
+                       /* fall-thru */
                case MSM_SUBMIT_CMD_BUF:
                        /* copy commands into RB: */
                        obj = submit->bos[submit->cmd[i].idx].obj;
@@ -149,6 +150,7 @@ static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
                case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
                        if (priv->lastctx == ctx)
                                break;
+                       /* fall-thru */
                case MSM_SUBMIT_CMD_BUF:
                        OUT_PKT7(ring, CP_INDIRECT_BUFFER_PFE, 3);
                        OUT_RING(ring, lower_32_bits(submit->cmd[i].iova));
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
index be39cf0..dc8ec2c 100644
@@ -115,6 +115,7 @@ static void a6xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
                case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
                        if (priv->lastctx == ctx)
                                break;
+                       /* fall-thru */
                case MSM_SUBMIT_CMD_BUF:
                        OUT_PKT7(ring, CP_INDIRECT_BUFFER_PFE, 3);
                        OUT_RING(ring, lower_32_bits(submit->cmd[i].iova));
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
index 9acbbc0..048c8be 100644
@@ -428,6 +428,7 @@ void adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
                        /* ignore if there has not been a ctx switch: */
                        if (priv->lastctx == ctx)
                                break;
+                       /* fall-thru */
                case MSM_SUBMIT_CMD_BUF:
                        OUT_PKT3(ring, adreno_is_a430(adreno_gpu) ?
                                CP_INDIRECT_BUFFER_PFE : CP_INDIRECT_BUFFER_PFD, 2);
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
index ff14555..78d5fa2 100644
@@ -439,6 +439,18 @@ static void mdp5_crtc_atomic_disable(struct drm_crtc *crtc,
        mdp5_crtc->enabled = false;
 }
 
+static void mdp5_crtc_vblank_on(struct drm_crtc *crtc)
+{
+       struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
+       struct mdp5_interface *intf = mdp5_cstate->pipeline.intf;
+       u32 count;
+
+       count = intf->mode == MDP5_INTF_DSI_MODE_COMMAND ? 0 : 0xffffffff;
+       drm_crtc_set_max_vblank_count(crtc, count);
+
+       drm_crtc_vblank_on(crtc);
+}
+
 static void mdp5_crtc_atomic_enable(struct drm_crtc *crtc,
                                    struct drm_crtc_state *old_state)
 {
@@ -475,7 +487,7 @@ static void mdp5_crtc_atomic_enable(struct drm_crtc *crtc,
        }
 
        /* Restore vblank irq handling after power is enabled */
-       drm_crtc_vblank_on(crtc);
+       mdp5_crtc_vblank_on(crtc);
 
        mdp5_crtc_mode_set_nofb(crtc);
 
@@ -1028,6 +1040,8 @@ static void mdp5_crtc_reset(struct drm_crtc *crtc)
                mdp5_crtc_destroy_state(crtc, crtc->state);
 
        __drm_atomic_helper_crtc_reset(crtc, &mdp5_cstate->base);
+
+       drm_crtc_vblank_reset(crtc);
 }
 
 static const struct drm_crtc_funcs mdp5_crtc_funcs = {
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c
index 4a60f5f..fec6ef1 100644
@@ -740,7 +740,7 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev)
        dev->driver->get_vblank_timestamp = drm_calc_vbltimestamp_from_scanoutpos;
        dev->driver->get_scanout_position = mdp5_get_scanoutpos;
        dev->driver->get_vblank_counter = mdp5_get_vblank_counter;
-       dev->max_vblank_count = 0xffffffff;
+       dev->max_vblank_count = 0; /* max_vblank_count is set on each CRTC */
        dev->vblank_disable_immediate = true;
 
        return kms;
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index c226156..c356f5c 100644
@@ -1279,7 +1279,8 @@ static int add_gpu_components(struct device *dev,
        if (!np)
                return 0;
 
-       drm_of_component_match_add(dev, matchptr, compare_of, np);
+       if (of_device_is_available(np))
+               drm_of_component_match_add(dev, matchptr, compare_of, np);
 
        of_node_put(np);
 
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index c2114c7..8cf6362 100644
@@ -32,6 +32,46 @@ static bool use_pages(struct drm_gem_object *obj)
        return !msm_obj->vram_node;
 }
 
+/*
+ * Cache sync.. this is a bit over-complicated, to fit the dma-mapping
+ * API.  Really GPU cache is out of scope here (handled on cmdstream)
+ * and all we need to do is invalidate newly allocated pages before
+ * mapping to CPU as uncached/writecombine.
+ *
+ * On top of this, we have the added headache, that depending on
+ * display generation, the display's iommu may be wired up to either
+ * the toplevel drm device (mdss), or to the mdp sub-node, meaning
+ * that here we either have dma-direct or iommu ops.
+ *
+ * Let this be a cautionary tale of abstraction gone wrong.
+ */
+
+static void sync_for_device(struct msm_gem_object *msm_obj)
+{
+       struct device *dev = msm_obj->base.dev->dev;
+
+       if (get_dma_ops(dev)) {
+               dma_sync_sg_for_device(dev, msm_obj->sgt->sgl,
+                       msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
+       } else {
+               dma_map_sg(dev, msm_obj->sgt->sgl,
+                       msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
+       }
+}
+
+static void sync_for_cpu(struct msm_gem_object *msm_obj)
+{
+       struct device *dev = msm_obj->base.dev->dev;
+
+       if (get_dma_ops(dev)) {
+               dma_sync_sg_for_cpu(dev, msm_obj->sgt->sgl,
+                       msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
+       } else {
+               dma_unmap_sg(dev, msm_obj->sgt->sgl,
+                       msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
+       }
+}
+
 /* allocate pages from VRAM carveout, used when no IOMMU: */
 static struct page **get_pages_vram(struct drm_gem_object *obj, int npages)
 {
@@ -97,8 +137,7 @@ static struct page **get_pages(struct drm_gem_object *obj)
                 * because display controller, GPU, etc. are not coherent:
                 */
                if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
-                       dma_sync_sg_for_device(dev->dev, msm_obj->sgt->sgl,
-                                       msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
+                       sync_for_device(msm_obj);
        }
 
        return msm_obj->pages;
@@ -127,9 +166,7 @@ static void put_pages(struct drm_gem_object *obj)
                         * GPU, etc. are not coherent:
                         */
                        if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
-                               dma_sync_sg_for_cpu(obj->dev->dev, msm_obj->sgt->sgl,
-                                            msm_obj->sgt->nents,
-                                            DMA_BIDIRECTIONAL);
+                               sync_for_cpu(msm_obj);
 
                        sg_free_table(msm_obj->sgt);
                        kfree(msm_obj->sgt);
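
The new helpers key off get_dma_ops() to tell the two wirings apart. A minimal sketch of that discriminator, modelled on include/linux/dma-mapping.h around v5.3 (abridged and renamed): a NULL result means the device uses the generic dma-direct path, while an IOMMU-attached device has a real ops table installed.

        static inline const struct dma_map_ops *
        sketch_get_dma_ops(struct device *dev)
        {
                if (dev && dev->dma_ops)
                        return dev->dma_ops;    /* e.g. IOMMU dma ops */
                /* NULL from the arch means dma-direct */
                return get_arch_dma_ops(dev ? dev->bus : NULL);
        }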
diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c
index 8497768..1267038 100644
@@ -780,7 +780,7 @@ nv50_msto_atomic_check(struct drm_encoder *encoder,
                        drm_dp_calc_pbn_mode(crtc_state->adjusted_mode.clock,
                                             connector->display_info.bpc * 3);
 
-       if (drm_atomic_crtc_needs_modeset(crtc_state)) {
+       if (crtc_state->mode_changed) {
                slots = drm_dp_atomic_find_vcpi_slots(state, &mstm->mgr,
                                                      mstc->port,
                                                      asyh->dp.pbn);
diff --git a/drivers/gpu/drm/nouveau/nouveau_svm.c b/drivers/gpu/drm/nouveau/nouveau_svm.c
index 8c92374..a835ceb 100644
@@ -475,6 +475,47 @@ nouveau_svm_fault_cache(struct nouveau_svm *svm,
                fault->inst, fault->addr, fault->access);
 }
 
+static inline bool
+nouveau_range_done(struct hmm_range *range)
+{
+       bool ret = hmm_range_valid(range);
+
+       hmm_range_unregister(range);
+       return ret;
+}
+
+static int
+nouveau_range_fault(struct hmm_mirror *mirror, struct hmm_range *range)
+{
+       long ret;
+
+       range->default_flags = 0;
+       range->pfn_flags_mask = -1UL;
+
+       ret = hmm_range_register(range, mirror,
+                                range->start, range->end,
+                                PAGE_SHIFT);
+       if (ret) {
+               up_read(&range->vma->vm_mm->mmap_sem);
+               return (int)ret;
+       }
+
+       if (!hmm_range_wait_until_valid(range, HMM_RANGE_DEFAULT_TIMEOUT)) {
+               up_read(&range->vma->vm_mm->mmap_sem);
+               return -EAGAIN;
+       }
+
+       ret = hmm_range_fault(range, true);
+       if (ret <= 0) {
+               if (ret == 0)
+                       ret = -EBUSY;
+               up_read(&range->vma->vm_mm->mmap_sem);
+               hmm_range_unregister(range);
+               return ret;
+       }
+       return 0;
+}
+
 static int
 nouveau_svm_fault(struct nvif_notify *notify)
 {
@@ -649,10 +690,10 @@ nouveau_svm_fault(struct nvif_notify *notify)
                range.values = nouveau_svm_pfn_values;
                range.pfn_shift = NVIF_VMM_PFNMAP_V0_ADDR_SHIFT;
 again:
-               ret = hmm_vma_fault(&svmm->mirror, &range, true);
+               ret = nouveau_range_fault(&svmm->mirror, &range);
                if (ret == 0) {
                        mutex_lock(&svmm->mutex);
-                       if (!hmm_vma_range_done(&range)) {
+                       if (!nouveau_range_done(&range)) {
                                mutex_unlock(&svmm->mutex);
                                goto again;
                        }
@@ -666,8 +707,8 @@ again:
                                                NULL);
                        svmm->vmm->vmm.object.client->super = false;
                        mutex_unlock(&svmm->mutex);
+                       up_read(&svmm->mm->mmap_sem);
                }
-               up_read(&svmm->mm->mmap_sem);
 
                /* Cancel any faults in the window whose pages didn't manage
                 * to keep their valid bit, or stay writeable when required.
diff --git a/drivers/infiniband/core/core_priv.h b/drivers/infiniband/core/core_priv.h
index 888d89c..beee7b7 100644
@@ -302,7 +302,9 @@ static inline struct ib_qp *_ib_create_qp(struct ib_device *dev,
                                          struct ib_udata *udata,
                                          struct ib_uobject *uobj)
 {
+       enum ib_qp_type qp_type = attr->qp_type;
        struct ib_qp *qp;
+       bool is_xrc;
 
        if (!dev->ops.create_qp)
                return ERR_PTR(-EOPNOTSUPP);
@@ -320,7 +322,8 @@ static inline struct ib_qp *_ib_create_qp(struct ib_device *dev,
         * and more importantly they are created internally by the driver,
         * see mlx5 create_dev_resources() as an example.
         */
-       if (attr->qp_type < IB_QPT_XRC_INI) {
+       is_xrc = qp_type == IB_QPT_XRC_INI || qp_type == IB_QPT_XRC_TGT;
+       if ((qp_type < IB_QPT_MAX && !is_xrc) || qp_type == IB_QPT_DRIVER) {
                qp->res.type = RDMA_RESTRACK_QP;
                if (uobj)
                        rdma_restrack_uadd(&qp->res);
diff --git a/drivers/infiniband/core/counters.c b/drivers/infiniband/core/counters.c
index 01faef7..45d5164 100644
@@ -393,6 +393,9 @@ u64 rdma_counter_get_hwstat_value(struct ib_device *dev, u8 port, u32 index)
        u64 sum;
 
        port_counter = &dev->port_data[port].port_counter;
+       if (!port_counter->hstats)
+               return 0;
+
        sum = get_running_counters_hwstat_sum(dev, port, index);
        sum += port_counter->hstats->value[index];
 
@@ -594,7 +597,7 @@ void rdma_counter_init(struct ib_device *dev)
        struct rdma_port_counter *port_counter;
        u32 port;
 
-       if (!dev->ops.alloc_hw_stats || !dev->port_data)
+       if (!dev->port_data)
                return;
 
        rdma_for_each_port(dev, port) {
@@ -602,6 +605,9 @@ void rdma_counter_init(struct ib_device *dev)
                port_counter->mode.mode = RDMA_COUNTER_MODE_NONE;
                mutex_init(&port_counter->lock);
 
+               if (!dev->ops.alloc_hw_stats)
+                       continue;
+
                port_counter->hstats = dev->ops.alloc_hw_stats(dev, port);
                if (!port_counter->hstats)
                        goto fail;
@@ -624,9 +630,6 @@ void rdma_counter_release(struct ib_device *dev)
        struct rdma_port_counter *port_counter;
        u32 port;
 
-       if (!dev->ops.alloc_hw_stats)
-               return;
-
        rdma_for_each_port(dev, port) {
                port_counter = &dev->port_data[port].port_counter;
                kfree(port_counter->hstats);
diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c
index 9773145..ea8661a 100644
@@ -94,11 +94,17 @@ static DEFINE_XARRAY_FLAGS(devices, XA_FLAGS_ALLOC);
 static DECLARE_RWSEM(devices_rwsem);
 #define DEVICE_REGISTERED XA_MARK_1
 
-static LIST_HEAD(client_list);
+static u32 highest_client_id;
 #define CLIENT_REGISTERED XA_MARK_1
 static DEFINE_XARRAY_FLAGS(clients, XA_FLAGS_ALLOC);
 static DECLARE_RWSEM(clients_rwsem);
 
+static void ib_client_put(struct ib_client *client)
+{
+       if (refcount_dec_and_test(&client->uses))
+               complete(&client->uses_zero);
+}
+
 /*
  * If client_data is registered then the corresponding client must also still
  * be registered.
@@ -660,6 +666,14 @@ static int add_client_context(struct ib_device *device,
                return 0;
 
        down_write(&device->client_data_rwsem);
+       /*
+        * So long as the client is registered, hold both the client and device
+        * unregistration locks.
+        */
+       if (!refcount_inc_not_zero(&client->uses))
+               goto out_unlock;
+       refcount_inc(&device->refcount);
+
        /*
         * Another caller to add_client_context got here first and has already
         * completely initialized the context.
@@ -683,6 +697,9 @@ static int add_client_context(struct ib_device *device,
        return 0;
 
 out:
+       ib_device_put(device);
+       ib_client_put(client);
+out_unlock:
        up_write(&device->client_data_rwsem);
        return ret;
 }
@@ -702,7 +719,7 @@ static void remove_client_context(struct ib_device *device,
        client_data = xa_load(&device->client_data, client_id);
        xa_clear_mark(&device->client_data, client_id, CLIENT_DATA_REGISTERED);
        client = xa_load(&clients, client_id);
-       downgrade_write(&device->client_data_rwsem);
+       up_write(&device->client_data_rwsem);
 
        /*
         * Notice we cannot be holding any exclusive locks when calling the
@@ -712,17 +729,13 @@ static void remove_client_context(struct ib_device *device,
         *
         * For this reason clients and drivers should not call the
         * unregistration functions while holding any locks.
-        *
-        * It tempting to drop the client_data_rwsem too, but this is required
-        * to ensure that unregister_client does not return until all clients
-        * are completely unregistered, which is required to avoid module
-        * unloading races.
         */
        if (client->remove)
                client->remove(device, client_data);
 
        xa_erase(&device->client_data, client_id);
-       up_read(&device->client_data_rwsem);
+       ib_device_put(device);
+       ib_client_put(client);
 }
 
 static int alloc_port_data(struct ib_device *device)
@@ -1224,7 +1237,7 @@ static int setup_device(struct ib_device *device)
 
 static void disable_device(struct ib_device *device)
 {
-       struct ib_client *client;
+       u32 cid;
 
        WARN_ON(!refcount_read(&device->refcount));
 
@@ -1232,10 +1245,19 @@ static void disable_device(struct ib_device *device)
        xa_clear_mark(&devices, device->index, DEVICE_REGISTERED);
        up_write(&devices_rwsem);
 
+       /*
+        * Remove clients in LIFO order, see assign_client_id. This could be
+        * more efficient if xarray learns to reverse iterate. Since no new
+        * clients can be added to this ib_device past this point we only need
+        * the maximum possible client_id value here.
+        */
        down_read(&clients_rwsem);
-       list_for_each_entry_reverse(client, &client_list, list)
-               remove_client_context(device, client->client_id);
+       cid = highest_client_id;
        up_read(&clients_rwsem);
+       while (cid) {
+               cid--;
+               remove_client_context(device, cid);
+       }
 
        /* Pairs with refcount_set in enable_device */
        ib_device_put(device);
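
The hunk above trades the reverse-iterable client list for a recorded
maximum id that is walked downward, skipping erased slots. A minimal
userspace sketch of the same LIFO walk, with a plain array standing in
for the xarray; every name here is hypothetical:

    #include <stdio.h>

    #define MAX_CLIENTS 8

    static const char *clients[MAX_CLIENTS];   /* NULL == unused slot */
    static unsigned int highest_client_id;     /* one past the last id used */

    static void remove_all_lifo(void)
    {
            unsigned int cid = highest_client_id;

            while (cid) {
                    cid--;
                    if (clients[cid]) {        /* slot may already be erased */
                            printf("removing %s (id %u)\n", clients[cid], cid);
                            clients[cid] = NULL;
                    }
            }
    }

    int main(void)
    {
            clients[0] = "uverbs";
            clients[1] = "cm";
            clients[2] = "ipoib";
            highest_client_id = 3;
            remove_all_lifo();                 /* LIFO: ipoib, cm, uverbs */
            return 0;
    }
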
@@ -1662,30 +1684,31 @@ static int assign_client_id(struct ib_client *client)
        /*
         * The add/remove callbacks must be called in FIFO/LIFO order. To
         * achieve this we assign client_ids so they are sorted in
-        * registration order, and retain a linked list we can reverse iterate
-        * to get the LIFO order. The extra linked list can go away if xarray
-        * learns to reverse iterate.
+        * registration order.
         */
-       if (list_empty(&client_list)) {
-               client->client_id = 0;
-       } else {
-               struct ib_client *last;
-
-               last = list_last_entry(&client_list, struct ib_client, list);
-               client->client_id = last->client_id + 1;
-       }
+       client->client_id = highest_client_id;
        ret = xa_insert(&clients, client->client_id, client, GFP_KERNEL);
        if (ret)
                goto out;
 
+       highest_client_id++;
        xa_set_mark(&clients, client->client_id, CLIENT_REGISTERED);
-       list_add_tail(&client->list, &client_list);
 
 out:
        up_write(&clients_rwsem);
        return ret;
 }
 
+static void remove_client_id(struct ib_client *client)
+{
+       down_write(&clients_rwsem);
+       xa_erase(&clients, client->client_id);
+       for (; highest_client_id; highest_client_id--)
+               if (xa_load(&clients, highest_client_id - 1))
+                       break;
+       up_write(&clients_rwsem);
+}
+
 /**
  * ib_register_client - Register an IB client
  * @client:Client to register
@@ -1705,6 +1728,8 @@ int ib_register_client(struct ib_client *client)
        unsigned long index;
        int ret;
 
+       refcount_set(&client->uses, 1);
+       init_completion(&client->uses_zero);
        ret = assign_client_id(client);
        if (ret)
                return ret;
@@ -1740,21 +1765,30 @@ void ib_unregister_client(struct ib_client *client)
        unsigned long index;
 
        down_write(&clients_rwsem);
+       ib_client_put(client);
        xa_clear_mark(&clients, client->client_id, CLIENT_REGISTERED);
        up_write(&clients_rwsem);
-       /*
-        * Every device still known must be serialized to make sure we are
-        * done with the client callbacks before we return.
-        */
-       down_read(&devices_rwsem);
-       xa_for_each (&devices, index, device)
+
+       /* We do not want to have locks while calling client->remove() */
+       rcu_read_lock();
+       xa_for_each (&devices, index, device) {
+               if (!ib_device_try_get(device))
+                       continue;
+               rcu_read_unlock();
+
                remove_client_context(device, client->client_id);
-       up_read(&devices_rwsem);
 
-       down_write(&clients_rwsem);
-       list_del(&client->list);
-       xa_erase(&clients, client->client_id);
-       up_write(&clients_rwsem);
+               ib_device_put(device);
+               rcu_read_lock();
+       }
+       rcu_read_unlock();
+
+       /*
+        * remove_client_context() is not a fence; it can return even though a
+        * removal is ongoing. Wait until all removals are completed.
+        */
+       wait_for_completion(&client->uses_zero);
+       remove_client_id(client);
 }
 EXPORT_SYMBOL(ib_unregister_client);
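
Taken together, the new uses refcount and uses_zero completion
implement a common teardown idiom: each bound device holds a
reference, new bindings are refused once teardown begins, and
unregistration blocks until the count drains. A single-threaded
userspace sketch of that idiom, with pthread primitives standing in
for refcount_t and struct completion; all names are hypothetical:

    #include <pthread.h>
    #include <stdio.h>

    struct client {
            unsigned int uses;            /* one ref per binding, +1 for registration */
            pthread_mutex_t lock;
            pthread_cond_t uses_zero;     /* signalled when uses drops to zero */
    };

    static void client_init(struct client *c)
    {
            c->uses = 1;                  /* registration itself holds a reference */
            pthread_mutex_init(&c->lock, NULL);
            pthread_cond_init(&c->uses_zero, NULL);
    }

    static int client_get(struct client *c)
    {
            int ok;

            pthread_mutex_lock(&c->lock);
            ok = c->uses > 0;             /* refuse once teardown has begun */
            if (ok)
                    c->uses++;
            pthread_mutex_unlock(&c->lock);
            return ok;
    }

    static void client_put(struct client *c)
    {
            pthread_mutex_lock(&c->lock);
            if (--c->uses == 0)
                    pthread_cond_broadcast(&c->uses_zero);
            pthread_mutex_unlock(&c->lock);
    }

    static void client_unregister(struct client *c)
    {
            client_put(c);                /* drop the registration reference */
            pthread_mutex_lock(&c->lock);
            while (c->uses > 0)           /* wait for in-flight removals */
                    pthread_cond_wait(&c->uses_zero, &c->lock);
            pthread_mutex_unlock(&c->lock);
            printf("all users gone, safe to free\n");
    }

    int main(void)
    {
            struct client c;

            client_init(&c);
            if (client_get(&c))           /* a device binds the client */
                    client_put(&c);       /* ... and later unbinds */
            client_unregister(&c);
            return 0;
    }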
 
index cc99479..9947d16 100644
@@ -3224,18 +3224,18 @@ static int ib_mad_port_open(struct ib_device *device,
        if (has_smi)
                cq_size *= 2;
 
+       port_priv->pd = ib_alloc_pd(device, 0);
+       if (IS_ERR(port_priv->pd)) {
+               dev_err(&device->dev, "Couldn't create ib_mad PD\n");
+               ret = PTR_ERR(port_priv->pd);
+               goto error3;
+       }
+
        port_priv->cq = ib_alloc_cq(port_priv->device, port_priv, cq_size, 0,
                        IB_POLL_UNBOUND_WORKQUEUE);
        if (IS_ERR(port_priv->cq)) {
                dev_err(&device->dev, "Couldn't create ib_mad CQ\n");
                ret = PTR_ERR(port_priv->cq);
-               goto error3;
-       }
-
-       port_priv->pd = ib_alloc_pd(device, 0);
-       if (IS_ERR(port_priv->pd)) {
-               dev_err(&device->dev, "Couldn't create ib_mad PD\n");
-               ret = PTR_ERR(port_priv->pd);
                goto error4;
        }
 
@@ -3278,11 +3278,11 @@ error8:
 error7:
        destroy_mad_qp(&port_priv->qp_info[0]);
 error6:
-       ib_dealloc_pd(port_priv->pd);
-error4:
        ib_free_cq(port_priv->cq);
        cleanup_recv_queue(&port_priv->qp_info[1]);
        cleanup_recv_queue(&port_priv->qp_info[0]);
+error4:
+       ib_dealloc_pd(port_priv->pd);
 error3:
        kfree(port_priv);
 
@@ -3312,8 +3312,8 @@ static int ib_mad_port_close(struct ib_device *device, int port_num)
        destroy_workqueue(port_priv->wq);
        destroy_mad_qp(&port_priv->qp_info[1]);
        destroy_mad_qp(&port_priv->qp_info[0]);
-       ib_dealloc_pd(port_priv->pd);
        ib_free_cq(port_priv->cq);
+       ib_dealloc_pd(port_priv->pd);
        cleanup_recv_queue(&port_priv->qp_info[1]);
        cleanup_recv_queue(&port_priv->qp_info[0]);
        /* XXX: Handle deallocation of MAD registration tables */
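
The reordering above restores the usual unwind discipline: allocate
the PD before the CQ, release in exactly the reverse order, and let
each error label undo only what succeeded before it. A generic sketch
of the pattern; alloc_pd()/alloc_cq() are stand-ins, not the RDMA
core API:

    #include <stdio.h>
    #include <stdlib.h>

    static void *alloc_pd(void)  { return malloc(1); }
    static void *alloc_cq(void)  { return malloc(1); }
    static void free_pd(void *p) { free(p); }
    static void free_cq(void *c) { free(c); }

    static int port_open(void)
    {
            void *pd, *cq;

            pd = alloc_pd();              /* acquired first ... */
            if (!pd)
                    goto err_out;

            cq = alloc_cq();
            if (!cq)
                    goto err_free_pd;

            printf("port open\n");
            free_cq(cq);                  /* normal teardown, reverse order */
            free_pd(pd);
            return 0;

    err_free_pd:
            free_pd(pd);                  /* ... released last */
    err_out:
            return -1;
    }

    int main(void)
    {
            return port_open();
    }
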
index 9f8a480..ffdeaf6 100644
@@ -49,6 +49,7 @@
 #include <linux/sched.h>
 #include <linux/semaphore.h>
 #include <linux/slab.h>
+#include <linux/nospec.h>
 
 #include <linux/uaccess.h>
 
@@ -884,11 +885,14 @@ static int ib_umad_unreg_agent(struct ib_umad_file *file, u32 __user *arg)
 
        if (get_user(id, arg))
                return -EFAULT;
+       if (id >= IB_UMAD_MAX_AGENTS)
+               return -EINVAL;
 
        mutex_lock(&file->port->file_mutex);
        mutex_lock(&file->mutex);
 
-       if (id >= IB_UMAD_MAX_AGENTS || !__get_agent(file, id)) {
+       id = array_index_nospec(id, IB_UMAD_MAX_AGENTS);
+       if (!__get_agent(file, id)) {
                ret = -EINVAL;
                goto out;
        }
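
The two-step change above, an architectural bounds check followed by
a clamp, is the standard Spectre-v1 hardening pattern. A userspace
sketch whose branchless mask mirrors the idea behind
array_index_nospec() without reproducing the kernel's exact
implementation:

    #include <stddef.h>
    #include <stdio.h>

    /* all-ones mask when index < size, zero otherwise -- no branch */
    static size_t index_nospec(size_t index, size_t size)
    {
            size_t mask = (size_t)0 - (index < size);

            return index & mask;
    }

    int main(void)
    {
            int agents[8] = { 10, 11, 12, 13, 14, 15, 16, 17 };
            size_t id = 5;

            if (id >= 8)                  /* architectural check first */
                    return 1;
            id = index_nospec(id, 8);     /* then the speculation-safe clamp */
            printf("agent %d\n", agents[id]);
            return 0;
    }
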
index a91653a..098ab88 100644
@@ -308,6 +308,7 @@ int bnxt_re_del_gid(const struct ib_gid_attr *attr, void **context)
        struct bnxt_re_dev *rdev = to_bnxt_re_dev(attr->device, ibdev);
        struct bnxt_qplib_sgid_tbl *sgid_tbl = &rdev->qplib_res.sgid_tbl;
        struct bnxt_qplib_gid *gid_to_del;
+       u16 vlan_id = 0xFFFF;
 
        /* Delete the entry from the hardware */
        ctx = *context;
@@ -317,7 +318,8 @@ int bnxt_re_del_gid(const struct ib_gid_attr *attr, void **context)
        if (sgid_tbl && sgid_tbl->active) {
                if (ctx->idx >= sgid_tbl->max)
                        return -EINVAL;
-               gid_to_del = &sgid_tbl->tbl[ctx->idx];
+               gid_to_del = &sgid_tbl->tbl[ctx->idx].gid;
+               vlan_id = sgid_tbl->tbl[ctx->idx].vlan_id;
                /* DEL_GID is called in WQ context (netdevice_event_work_handler)
                 * or via the ib_unregister_device path. In the former case QP1
                 * may not be destroyed yet, in which case just return as FW
@@ -335,7 +337,8 @@ int bnxt_re_del_gid(const struct ib_gid_attr *attr, void **context)
                }
                ctx->refcnt--;
                if (!ctx->refcnt) {
-                       rc = bnxt_qplib_del_sgid(sgid_tbl, gid_to_del, true);
+                       rc = bnxt_qplib_del_sgid(sgid_tbl, gid_to_del,
+                                                vlan_id,  true);
                        if (rc) {
                                dev_err(rdev_to_dev(rdev),
                                        "Failed to remove GID: %#x", rc);
index 37928b1..bdbde8e 100644
@@ -488,7 +488,7 @@ static int bnxt_qplib_alloc_sgid_tbl(struct bnxt_qplib_res *res,
                                     struct bnxt_qplib_sgid_tbl *sgid_tbl,
                                     u16 max)
 {
-       sgid_tbl->tbl = kcalloc(max, sizeof(struct bnxt_qplib_gid), GFP_KERNEL);
+       sgid_tbl->tbl = kcalloc(max, sizeof(*sgid_tbl->tbl), GFP_KERNEL);
        if (!sgid_tbl->tbl)
                return -ENOMEM;
 
@@ -526,9 +526,10 @@ static void bnxt_qplib_cleanup_sgid_tbl(struct bnxt_qplib_res *res,
        for (i = 0; i < sgid_tbl->max; i++) {
                if (memcmp(&sgid_tbl->tbl[i], &bnxt_qplib_gid_zero,
                           sizeof(bnxt_qplib_gid_zero)))
-                       bnxt_qplib_del_sgid(sgid_tbl, &sgid_tbl->tbl[i], true);
+                       bnxt_qplib_del_sgid(sgid_tbl, &sgid_tbl->tbl[i].gid,
+                                           sgid_tbl->tbl[i].vlan_id, true);
        }
-       memset(sgid_tbl->tbl, 0, sizeof(struct bnxt_qplib_gid) * sgid_tbl->max);
+       memset(sgid_tbl->tbl, 0, sizeof(*sgid_tbl->tbl) * sgid_tbl->max);
        memset(sgid_tbl->hw_id, -1, sizeof(u16) * sgid_tbl->max);
        memset(sgid_tbl->vlan, 0, sizeof(u8) * sgid_tbl->max);
        sgid_tbl->active = 0;
@@ -537,7 +538,11 @@ static void bnxt_qplib_cleanup_sgid_tbl(struct bnxt_qplib_res *res,
 static void bnxt_qplib_init_sgid_tbl(struct bnxt_qplib_sgid_tbl *sgid_tbl,
                                     struct net_device *netdev)
 {
-       memset(sgid_tbl->tbl, 0, sizeof(struct bnxt_qplib_gid) * sgid_tbl->max);
+       u32 i;
+
+       for (i = 0; i < sgid_tbl->max; i++)
+               sgid_tbl->tbl[i].vlan_id = 0xffff;
+
        memset(sgid_tbl->hw_id, -1, sizeof(u16) * sgid_tbl->max);
 }
 
index 30c42c9..fbda11a 100644
@@ -111,7 +111,7 @@ struct bnxt_qplib_pd_tbl {
 };
 
 struct bnxt_qplib_sgid_tbl {
-       struct bnxt_qplib_gid           *tbl;
+       struct bnxt_qplib_gid_info      *tbl;
        u16                             *hw_id;
        u16                             max;
        u16                             active;
index 48793d3..40296b9 100644
@@ -213,12 +213,12 @@ int bnxt_qplib_get_sgid(struct bnxt_qplib_res *res,
                        index, sgid_tbl->max);
                return -EINVAL;
        }
-       memcpy(gid, &sgid_tbl->tbl[index], sizeof(*gid));
+       memcpy(gid, &sgid_tbl->tbl[index].gid, sizeof(*gid));
        return 0;
 }
 
 int bnxt_qplib_del_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
-                       struct bnxt_qplib_gid *gid, bool update)
+                       struct bnxt_qplib_gid *gid, u16 vlan_id, bool update)
 {
        struct bnxt_qplib_res *res = to_bnxt_qplib(sgid_tbl,
                                                   struct bnxt_qplib_res,
@@ -236,7 +236,8 @@ int bnxt_qplib_del_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
                return -ENOMEM;
        }
        for (index = 0; index < sgid_tbl->max; index++) {
-               if (!memcmp(&sgid_tbl->tbl[index], gid, sizeof(*gid)))
+               if (!memcmp(&sgid_tbl->tbl[index].gid, gid, sizeof(*gid)) &&
+                   vlan_id == sgid_tbl->tbl[index].vlan_id)
                        break;
        }
        if (index == sgid_tbl->max) {
@@ -262,8 +263,9 @@ int bnxt_qplib_del_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
                if (rc)
                        return rc;
        }
-       memcpy(&sgid_tbl->tbl[index], &bnxt_qplib_gid_zero,
+       memcpy(&sgid_tbl->tbl[index].gid, &bnxt_qplib_gid_zero,
               sizeof(bnxt_qplib_gid_zero));
+       sgid_tbl->tbl[index].vlan_id = 0xFFFF;
        sgid_tbl->vlan[index] = 0;
        sgid_tbl->active--;
        dev_dbg(&res->pdev->dev,
@@ -296,7 +298,8 @@ int bnxt_qplib_add_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
        }
        free_idx = sgid_tbl->max;
        for (i = 0; i < sgid_tbl->max; i++) {
-               if (!memcmp(&sgid_tbl->tbl[i], gid, sizeof(*gid))) {
+               if (!memcmp(&sgid_tbl->tbl[i], gid, sizeof(*gid)) &&
+                   sgid_tbl->tbl[i].vlan_id == vlan_id) {
                        dev_dbg(&res->pdev->dev,
                                "SGID entry already exists in entry %d!\n", i);
                        *index = i;
@@ -351,6 +354,7 @@ int bnxt_qplib_add_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
        }
        /* Add GID to the sgid_tbl */
        memcpy(&sgid_tbl->tbl[free_idx], gid, sizeof(*gid));
+       sgid_tbl->tbl[free_idx].vlan_id = vlan_id;
        sgid_tbl->active++;
        if (vlan_id != 0xFFFF)
                sgid_tbl->vlan[free_idx] = 1;
index 0ec3b12..13d9432 100644
@@ -84,6 +84,11 @@ struct bnxt_qplib_gid {
        u8                              data[16];
 };
 
+struct bnxt_qplib_gid_info {
+       struct bnxt_qplib_gid gid;
+       u16 vlan_id;
+};
+
 struct bnxt_qplib_ah {
        struct bnxt_qplib_gid           dgid;
        struct bnxt_qplib_pd            *pd;
@@ -221,7 +226,7 @@ int bnxt_qplib_get_sgid(struct bnxt_qplib_res *res,
                        struct bnxt_qplib_sgid_tbl *sgid_tbl, int index,
                        struct bnxt_qplib_gid *gid);
 int bnxt_qplib_del_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
-                       struct bnxt_qplib_gid *gid, bool update);
+                       struct bnxt_qplib_gid *gid, u16 vlan_id, bool update);
 int bnxt_qplib_add_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
                        struct bnxt_qplib_gid *gid, u8 *mac, u16 vlan_id,
                        bool update, u32 *index);
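
The new vlan_id parameter exists because two table entries may carry
identical GID bytes and differ only by VLAN, so delete and lookup
must match on the (gid, vlan_id) pair. A sketch of that composite-key
search; types and names are illustrative only:

    #include <stdio.h>
    #include <string.h>

    struct gid { unsigned char data[16]; };

    struct gid_info {
            struct gid gid;
            unsigned short vlan_id;       /* 0xFFFF == untagged */
    };

    static int find_gid(const struct gid_info *tbl, int max,
                        const struct gid *gid, unsigned short vlan_id)
    {
            int i;

            for (i = 0; i < max; i++)
                    if (!memcmp(&tbl[i].gid, gid, sizeof(*gid)) &&
                        tbl[i].vlan_id == vlan_id)
                            return i;
            return -1;                    /* no entry with that exact pair */
    }

    int main(void)
    {
            static struct gid_info tbl[2];
            struct gid g = { { 1 } };

            tbl[0].gid = g;  tbl[0].vlan_id = 0xFFFF;   /* untagged entry */
            tbl[1].gid = g;  tbl[1].vlan_id = 100;      /* same GID, VLAN 100 */

            printf("vlan 100 -> index %d\n", find_gid(tbl, 2, &g, 100));
            return 0;
    }
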
index d5b643a..67052dc 100644
@@ -14452,7 +14452,7 @@ void hfi1_deinit_vnic_rsm(struct hfi1_devdata *dd)
                clear_rcvctrl(dd, RCV_CTRL_RCV_RSM_ENABLE_SMASK);
 }
 
-static void init_rxe(struct hfi1_devdata *dd)
+static int init_rxe(struct hfi1_devdata *dd)
 {
        struct rsm_map_table *rmt;
        u64 val;
@@ -14461,6 +14461,9 @@ static void init_rxe(struct hfi1_devdata *dd)
        write_csr(dd, RCV_ERR_MASK, ~0ull);
 
        rmt = alloc_rsm_map_table(dd);
+       if (!rmt)
+               return -ENOMEM;
+
        /* set up QOS, including the QPN map table */
        init_qos(dd, rmt);
        init_fecn_handling(dd, rmt);
@@ -14487,6 +14490,7 @@ static void init_rxe(struct hfi1_devdata *dd)
        val |= ((4ull & RCV_BYPASS_HDR_SIZE_MASK) <<
                RCV_BYPASS_HDR_SIZE_SHIFT);
        write_csr(dd, RCV_BYPASS, val);
+       return 0;
 }
 
 static void init_other(struct hfi1_devdata *dd)
@@ -15024,7 +15028,10 @@ int hfi1_init_dd(struct hfi1_devdata *dd)
                goto bail_cleanup;
 
        /* set initial RXE CSRs */
-       init_rxe(dd);
+       ret = init_rxe(dd);
+       if (ret)
+               goto bail_cleanup;
+
        /* set initial TXE CSRs */
        init_txe(dd);
        /* set initial non-RXE, non-TXE CSRs */
index 0477c14..024a7c2 100644
@@ -1835,7 +1835,6 @@ void hfi1_rc_send_complete(struct rvt_qp *qp, struct hfi1_opa_header *opah)
                    cmp_psn(qp->s_sending_psn, qp->s_sending_hpsn) <= 0)
                        break;
                trdma_clean_swqe(qp, wqe);
-               rvt_qp_wqe_unreserve(qp, wqe);
                trace_hfi1_qp_send_completion(qp, wqe, qp->s_last);
                rvt_qp_complete_swqe(qp,
                                     wqe,
@@ -1882,7 +1881,6 @@ struct rvt_swqe *do_rc_completion(struct rvt_qp *qp,
        if (cmp_psn(wqe->lpsn, qp->s_sending_psn) < 0 ||
            cmp_psn(qp->s_sending_psn, qp->s_sending_hpsn) > 0) {
                trdma_clean_swqe(qp, wqe);
-               rvt_qp_wqe_unreserve(qp, wqe);
                trace_hfi1_qp_send_completion(qp, wqe, qp->s_last);
                rvt_qp_complete_swqe(qp,
                                     wqe,
index 92accca..996fc29 100644
@@ -1620,6 +1620,7 @@ static int hfi1_kern_exp_rcv_alloc_flows(struct tid_rdma_request *req,
                flows[i].req = req;
                flows[i].npagesets = 0;
                flows[i].pagesets[0].mapped =  0;
+               flows[i].resync_npkts = 0;
        }
        req->flows = flows;
        return 0;
@@ -1673,34 +1674,6 @@ static struct tid_rdma_flow *find_flow_ib(struct tid_rdma_request *req,
        return NULL;
 }
 
-static struct tid_rdma_flow *
-__find_flow_ranged(struct tid_rdma_request *req, u16 head, u16 tail,
-                  u32 psn, u16 *fidx)
-{
-       for ( ; CIRC_CNT(head, tail, MAX_FLOWS);
-             tail = CIRC_NEXT(tail, MAX_FLOWS)) {
-               struct tid_rdma_flow *flow = &req->flows[tail];
-               u32 spsn, lpsn;
-
-               spsn = full_flow_psn(flow, flow->flow_state.spsn);
-               lpsn = full_flow_psn(flow, flow->flow_state.lpsn);
-
-               if (cmp_psn(psn, spsn) >= 0 && cmp_psn(psn, lpsn) <= 0) {
-                       if (fidx)
-                               *fidx = tail;
-                       return flow;
-               }
-       }
-       return NULL;
-}
-
-static struct tid_rdma_flow *find_flow(struct tid_rdma_request *req,
-                                      u32 psn, u16 *fidx)
-{
-       return __find_flow_ranged(req, req->setup_head, req->clear_tail, psn,
-                                 fidx);
-}
-
 /* TID RDMA READ functions */
 u32 hfi1_build_tid_rdma_read_packet(struct rvt_swqe *wqe,
                                    struct ib_other_headers *ohdr, u32 *bth1,
@@ -2788,19 +2761,7 @@ static bool handle_read_kdeth_eflags(struct hfi1_ctxtdata *rcd,
                         * to prevent continuous Flow Sequence errors for any
                         * packets that could be still in the fabric.
                         */
-                       flow = find_flow(req, psn, NULL);
-                       if (!flow) {
-                               /*
-                                * We can't find the IB PSN matching the
-                                * received KDETH PSN. The only thing we can
-                                * do at this point is report the error to
-                                * the QP.
-                                */
-                               hfi1_kern_read_tid_flow_free(qp);
-                               spin_unlock(&qp->s_lock);
-                               rvt_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
-                               return ret;
-                       }
+                       flow = &req->flows[req->clear_tail];
                        if (priv->s_flags & HFI1_R_TID_SW_PSN) {
                                diff = cmp_psn(psn,
                                               flow->flow_state.r_next_psn);
index c4b243f..646f615 100644
@@ -54,6 +54,7 @@
 #include <linux/mm.h>
 #include <linux/vmalloc.h>
 #include <rdma/opa_addr.h>
+#include <linux/nospec.h>
 
 #include "hfi.h"
 #include "common.h"
@@ -1536,6 +1537,7 @@ static int hfi1_check_ah(struct ib_device *ibdev, struct rdma_ah_attr *ah_attr)
        sl = rdma_ah_get_sl(ah_attr);
        if (sl >= ARRAY_SIZE(ibp->sl_to_sc))
                return -EINVAL;
+       sl = array_index_nospec(sl, ARRAY_SIZE(ibp->sl_to_sc));
 
        sc5 = ibp->sl_to_sc[sl];
        if (sc_to_vlt(dd, sc5) > num_vls && sc_to_vlt(dd, sc5) != 0xf)
index 8bf847b..5478219 100644
@@ -1,6 +1,6 @@
 # SPDX-License-Identifier: GPL-2.0-only
 config INFINIBAND_HNS
-       tristate "HNS RoCE Driver"
+       bool "HNS RoCE Driver"
        depends on NET_VENDOR_HISILICON
        depends on ARM64 || (COMPILE_TEST && 64BIT)
        ---help---
@@ -11,7 +11,7 @@ config INFINIBAND_HNS
          To compile HIP06 or HIP08 driver as module, choose M here.
 
 config INFINIBAND_HNS_HIP06
-       bool "Hisilicon Hip06 Family RoCE support"
+       tristate "Hisilicon Hip06 Family RoCE support"
        depends on INFINIBAND_HNS && HNS && HNS_DSAF && HNS_ENET
        ---help---
          RoCE driver support for Hisilicon RoCE engine in Hisilicon Hip06 and
@@ -21,7 +21,7 @@ config INFINIBAND_HNS_HIP06
          module will be called hns-roce-hw-v1
 
 config INFINIBAND_HNS_HIP08
-       bool "Hisilicon Hip08 Family RoCE support"
+       tristate "Hisilicon Hip08 Family RoCE support"
        depends on INFINIBAND_HNS && PCI && HNS3
        ---help---
          RoCE driver support for Hisilicon RoCE engine in Hisilicon Hip08 SoC.
index e105945..449a2d8 100644
@@ -9,12 +9,8 @@ hns-roce-objs := hns_roce_main.o hns_roce_cmd.o hns_roce_pd.o \
        hns_roce_ah.o hns_roce_hem.o hns_roce_mr.o hns_roce_qp.o \
        hns_roce_cq.o hns_roce_alloc.o hns_roce_db.o hns_roce_srq.o hns_roce_restrack.o
 
-ifdef CONFIG_INFINIBAND_HNS_HIP06
 hns-roce-hw-v1-objs := hns_roce_hw_v1.o $(hns-roce-objs)
-obj-$(CONFIG_INFINIBAND_HNS) += hns-roce-hw-v1.o
-endif
+obj-$(CONFIG_INFINIBAND_HNS_HIP06) += hns-roce-hw-v1.o
 
-ifdef CONFIG_INFINIBAND_HNS_HIP08
 hns-roce-hw-v2-objs := hns_roce_hw_v2.o hns_roce_hw_v2_dfx.o $(hns-roce-objs)
-obj-$(CONFIG_INFINIBAND_HNS) += hns-roce-hw-v2.o
-endif
+obj-$(CONFIG_INFINIBAND_HNS_HIP08) += hns-roce-hw-v2.o
index 627aa46..c00714c 100644
@@ -12,13 +12,15 @@ int hns_roce_db_map_user(struct hns_roce_ucontext *context,
                         struct ib_udata *udata, unsigned long virt,
                         struct hns_roce_db *db)
 {
+       unsigned long page_addr = virt & PAGE_MASK;
        struct hns_roce_user_db_page *page;
+       unsigned int offset;
        int ret = 0;
 
        mutex_lock(&context->page_mutex);
 
        list_for_each_entry(page, &context->page_list, list)
-               if (page->user_virt == (virt & PAGE_MASK))
+               if (page->user_virt == page_addr)
                        goto found;
 
        page = kmalloc(sizeof(*page), GFP_KERNEL);
@@ -28,8 +30,8 @@ int hns_roce_db_map_user(struct hns_roce_ucontext *context,
        }
 
        refcount_set(&page->refcount, 1);
-       page->user_virt = (virt & PAGE_MASK);
-       page->umem = ib_umem_get(udata, virt & PAGE_MASK, PAGE_SIZE, 0, 0);
+       page->user_virt = page_addr;
+       page->umem = ib_umem_get(udata, page_addr, PAGE_SIZE, 0, 0);
        if (IS_ERR(page->umem)) {
                ret = PTR_ERR(page->umem);
                kfree(page);
@@ -39,10 +41,9 @@ int hns_roce_db_map_user(struct hns_roce_ucontext *context,
        list_add(&page->list, &context->page_list);
 
 found:
-       db->dma = sg_dma_address(page->umem->sg_head.sgl) +
-                 (virt & ~PAGE_MASK);
-       page->umem->sg_head.sgl->offset = virt & ~PAGE_MASK;
-       db->virt_addr = sg_virt(page->umem->sg_head.sgl);
+       offset = virt - page_addr;
+       db->dma = sg_dma_address(page->umem->sg_head.sgl) + offset;
+       db->virt_addr = sg_virt(page->umem->sg_head.sgl) + offset;
        db->u.user_page = page;
        refcount_inc(&page->refcount);
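
The doorbell fix above reduces to page arithmetic: map the aligned
page once, then rebuild both the DMA and CPU addresses from base plus
offset instead of mutating the scatterlist. A self-contained sketch
assuming a 4 KiB page and made-up addresses:

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SIZE 4096UL
    #define PAGE_MASK (~(PAGE_SIZE - 1))

    int main(void)
    {
            uint64_t virt      = 0x7f12345678abUL;   /* user doorbell VA */
            uint64_t page_addr = virt & PAGE_MASK;   /* granule that gets pinned */
            uint64_t offset    = virt - page_addr;   /* always < PAGE_SIZE */
            uint64_t dma_base  = 0x100000UL;         /* pretend sg_dma_address() */

            printf("page 0x%llx offset 0x%llx dma 0x%llx\n",
                   (unsigned long long)page_addr,
                   (unsigned long long)offset,
                   (unsigned long long)(dma_base + offset));
            return 0;
    }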
 
index 81e6ded..c07e387 100644
@@ -750,8 +750,10 @@ static int hns_roce_v1_rsv_lp_qp(struct hns_roce_dev *hr_dev)
        atomic_set(&free_mr->mr_free_cq->ib_cq.usecnt, 0);
 
        pd = rdma_zalloc_drv_obj(ibdev, ib_pd);
-       if (!pd)
+       if (!pd) {
+               ret = -ENOMEM;
                goto alloc_mem_failed;
+       }
 
        pd->device  = ibdev;
        ret = hns_roce_alloc_pd(pd, NULL);
index c2a5780..e12a440 100644
@@ -5802,13 +5802,12 @@ static void mlx5_ib_unbind_slave_port(struct mlx5_ib_dev *ibdev,
                return;
        }
 
-       if (mpi->mdev_events.notifier_call)
-               mlx5_notifier_unregister(mpi->mdev, &mpi->mdev_events);
-       mpi->mdev_events.notifier_call = NULL;
-
        mpi->ibdev = NULL;
 
        spin_unlock(&port->mp.mpi_lock);
+       if (mpi->mdev_events.notifier_call)
+               mlx5_notifier_unregister(mpi->mdev, &mpi->mdev_events);
+       mpi->mdev_events.notifier_call = NULL;
        mlx5_remove_netdev_notifier(ibdev, port_num);
        spin_lock(&port->mp.mpi_lock);
 
index c482f19..f6a5345 100644
@@ -481,6 +481,7 @@ struct mlx5_umr_wr {
        u64                             length;
        int                             access_flags;
        u32                             mkey;
+       u8                              ignore_free_state:1;
 };
 
 static inline const struct mlx5_umr_wr *umr_wr(const struct ib_send_wr *wr)
index 20ece6e..b74fad0 100644
@@ -51,22 +51,12 @@ static void clean_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr);
 static void dereg_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr);
 static int mr_cache_max_order(struct mlx5_ib_dev *dev);
 static int unreg_umr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr);
-static bool umr_can_modify_entity_size(struct mlx5_ib_dev *dev)
-{
-       return !MLX5_CAP_GEN(dev->mdev, umr_modify_entity_size_disabled);
-}
 
 static bool umr_can_use_indirect_mkey(struct mlx5_ib_dev *dev)
 {
        return !MLX5_CAP_GEN(dev->mdev, umr_indirect_mkey_disabled);
 }
 
-static bool use_umr(struct mlx5_ib_dev *dev, int order)
-{
-       return order <= mr_cache_max_order(dev) &&
-               umr_can_modify_entity_size(dev);
-}
-
 static int destroy_mkey(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
 {
        int err = mlx5_core_destroy_mkey(dev->mdev, &mr->mmkey);
@@ -545,13 +535,16 @@ void mlx5_mr_cache_free(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
                return;
 
        c = order2idx(dev, mr->order);
-       if (c < 0 || c >= MAX_MR_CACHE_ENTRIES) {
-               mlx5_ib_warn(dev, "order %d, cache index %d\n", mr->order, c);
-               return;
-       }
+       WARN_ON(c < 0 || c >= MAX_MR_CACHE_ENTRIES);
 
-       if (unreg_umr(dev, mr))
+       if (unreg_umr(dev, mr)) {
+               mr->allocated_from_cache = false;
+               destroy_mkey(dev, mr);
+               ent = &cache->ent[c];
+               if (ent->cur < ent->limit)
+                       queue_work(cache->wq, &ent->work);
                return;
+       }
 
        ent = &cache->ent[c];
        spin_lock_irq(&ent->lock);
@@ -1268,7 +1261,7 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 {
        struct mlx5_ib_dev *dev = to_mdev(pd->device);
        struct mlx5_ib_mr *mr = NULL;
-       bool populate_mtts = false;
+       bool use_umr;
        struct ib_umem *umem;
        int page_shift;
        int npages;
@@ -1300,29 +1293,30 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
        if (err < 0)
                return ERR_PTR(err);
 
-       if (use_umr(dev, order)) {
+       use_umr = !MLX5_CAP_GEN(dev->mdev, umr_modify_entity_size_disabled) &&
+                 (!MLX5_CAP_GEN(dev->mdev, umr_modify_atomic_disabled) ||
+                  !MLX5_CAP_GEN(dev->mdev, atomic));
+
+       if (order <= mr_cache_max_order(dev) && use_umr) {
                mr = alloc_mr_from_cache(pd, umem, virt_addr, length, ncont,
                                         page_shift, order, access_flags);
                if (PTR_ERR(mr) == -EAGAIN) {
                        mlx5_ib_dbg(dev, "cache empty for order %d\n", order);
                        mr = NULL;
                }
-               populate_mtts = false;
        } else if (!MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset)) {
                if (access_flags & IB_ACCESS_ON_DEMAND) {
                        err = -EINVAL;
                        pr_err("Got MR registration for ODP MR > 512MB, not supported for Connect-IB\n");
                        goto error;
                }
-               populate_mtts = true;
+               use_umr = false;
        }
 
        if (!mr) {
-               if (!umr_can_modify_entity_size(dev))
-                       populate_mtts = true;
                mutex_lock(&dev->slow_path_mutex);
                mr = reg_create(NULL, pd, virt_addr, length, umem, ncont,
-                               page_shift, access_flags, populate_mtts);
+                               page_shift, access_flags, !use_umr);
                mutex_unlock(&dev->slow_path_mutex);
        }
 
@@ -1338,7 +1332,7 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 
        update_odp_mr(mr);
 
-       if (!populate_mtts) {
+       if (use_umr) {
                int update_xlt_flags = MLX5_IB_UPD_XLT_ENABLE;
 
                if (access_flags & IB_ACCESS_ON_DEMAND)
@@ -1373,9 +1367,11 @@ static int unreg_umr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
                return 0;
 
        umrwr.wr.send_flags = MLX5_IB_SEND_UMR_DISABLE_MR |
-                             MLX5_IB_SEND_UMR_FAIL_IF_FREE;
+                             MLX5_IB_SEND_UMR_UPDATE_PD_ACCESS;
        umrwr.wr.opcode = MLX5_IB_WR_UMR;
+       umrwr.pd = dev->umrc.pd;
        umrwr.mkey = mr->mmkey.key;
+       umrwr.ignore_free_state = 1;
 
        return mlx5_ib_post_send_wait(dev, &umrwr);
 }
@@ -1577,10 +1573,10 @@ static void clean_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
                mr->sig = NULL;
        }
 
-       mlx5_free_priv_descs(mr);
-
-       if (!allocated_from_cache)
+       if (!allocated_from_cache) {
                destroy_mkey(dev, mr);
+               mlx5_free_priv_descs(mr);
+       }
 }
 
 static void dereg_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
index 5b642d8..81da820 100644
@@ -246,7 +246,7 @@ void mlx5_ib_invalidate_range(struct ib_umem_odp *umem_odp, unsigned long start,
          * overwrite the same MTTs.  Concurrent invalidations might race us,
         * but they will write 0s as well, so no difference in the end result.
         */
-
+       mutex_lock(&umem_odp->umem_mutex);
        for (addr = start; addr < end; addr += BIT(umem_odp->page_shift)) {
                idx = (addr - ib_umem_start(umem_odp)) >> umem_odp->page_shift;
                /*
@@ -278,6 +278,7 @@ void mlx5_ib_invalidate_range(struct ib_umem_odp *umem_odp, unsigned long start,
                                   idx - blk_start_idx + 1, 0,
                                   MLX5_IB_UPD_XLT_ZAP |
                                   MLX5_IB_UPD_XLT_ATOMIC);
+       mutex_unlock(&umem_odp->umem_mutex);
        /*
         * We are now sure that the device will not access the
         * memory. We can safely unmap it, and mark it as dirty if
@@ -1771,7 +1772,7 @@ static void mlx5_ib_prefetch_mr_work(struct work_struct *work)
 
        num_pending_prefetch_dec(to_mdev(w->pd->device), w->sg_list,
                                 w->num_sge, 0);
-       kfree(w);
+       kvfree(w);
 }
 
 int mlx5_ib_advise_mr_prefetch(struct ib_pd *pd,
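
The kfree() to kvfree() switch pairs the free with the allocator:
kvfree() correctly releases memory that came from either kmalloc() or
vmalloc(), which suggests the prefetch work structure can come from
either pool. A userspace analogue of "the free must match the
allocation", using a tagged block header; all names are hypothetical:

    #include <stddef.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* tag each block so the free routine knows which pool produced it */
    struct blk {
            int from_fallback;
            char mem[];
    };

    static void *xv_alloc(size_t n, int use_fallback)
    {
            struct blk *b = malloc(sizeof(*b) + n);  /* both pools faked here */

            if (!b)
                    return NULL;
            b->from_fallback = use_fallback;
            return b->mem;
    }

    static void xv_free(void *p)
    {
            struct blk *b;

            if (!p)
                    return;
            b = (struct blk *)((char *)p - offsetof(struct blk, mem));
            printf("freeing %s block\n",
                   b->from_fallback ? "vmalloc-like" : "kmalloc-like");
            free(b);
    }

    int main(void)
    {
            void *small = xv_alloc(64, 0);
            void *big   = xv_alloc(1 << 20, 1);

            xv_free(small);
            xv_free(big);
            return 0;
    }
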
@@ -1813,7 +1814,7 @@ int mlx5_ib_advise_mr_prefetch(struct ib_pd *pd,
        if (valid_req)
                queue_work(system_unbound_wq, &work->work);
        else
-               kfree(work);
+               kvfree(work);
 
        srcu_read_unlock(&dev->mr_srcu, srcu_key);
 
index 2a97619..379328b 100644
@@ -1713,7 +1713,6 @@ static int create_rss_raw_qp_tir(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
                }
 
                MLX5_SET(tirc, tirc, rx_hash_fn, MLX5_RX_HASH_FN_TOEPLITZ);
-               MLX5_SET(tirc, tirc, rx_hash_symmetric, 1);
                memcpy(rss_key, ucmd.rx_hash_key, len);
                break;
        }
@@ -4295,10 +4294,14 @@ static int set_reg_umr_segment(struct mlx5_ib_dev *dev,
 
        memset(umr, 0, sizeof(*umr));
 
-       if (wr->send_flags & MLX5_IB_SEND_UMR_FAIL_IF_FREE)
-               umr->flags = MLX5_UMR_CHECK_FREE; /* fail if free */
-       else
-               umr->flags = MLX5_UMR_CHECK_NOT_FREE; /* fail if not free */
+       if (!umrwr->ignore_free_state) {
+               if (wr->send_flags & MLX5_IB_SEND_UMR_FAIL_IF_FREE)
+                        /* fail if free */
+                       umr->flags = MLX5_UMR_CHECK_FREE;
+               else
+                       /* fail if not free */
+                       umr->flags = MLX5_UMR_CHECK_NOT_FREE;
+       }
 
        umr->xlt_octowords = cpu_to_be16(get_xlt_octo(umrwr->xlt_size));
        if (wr->send_flags & MLX5_IB_SEND_UMR_UPDATE_XLT) {
index 533157a..f97b3d6 100644
@@ -125,14 +125,20 @@ static ssize_t hw_rev_show(struct device *device, struct device_attribute *attr,
        struct qedr_dev *dev =
                rdma_device_to_drv_device(device, struct qedr_dev, ibdev);
 
-       return scnprintf(buf, PAGE_SIZE, "0x%x\n", dev->pdev->vendor);
+       return scnprintf(buf, PAGE_SIZE, "0x%x\n", dev->attr.hw_ver);
 }
 static DEVICE_ATTR_RO(hw_rev);
 
 static ssize_t hca_type_show(struct device *device,
                             struct device_attribute *attr, char *buf)
 {
-       return scnprintf(buf, PAGE_SIZE, "%s\n", "HCA_TYPE_TO_SET");
+       struct qedr_dev *dev =
+               rdma_device_to_drv_device(device, struct qedr_dev, ibdev);
+
+       return scnprintf(buf, PAGE_SIZE, "FastLinQ QL%x %s\n",
+                        dev->pdev->device,
+                        rdma_protocol_iwarp(&dev->ibdev, 1) ?
+                        "iWARP" : "RoCE");
 }
 static DEVICE_ATTR_RO(hca_type);
 
index a7cde98..9ce8a1b 100644
@@ -220,13 +220,12 @@ static void siw_put_work(struct siw_cm_work *work)
 static void siw_cep_set_inuse(struct siw_cep *cep)
 {
        unsigned long flags;
-       int rv;
 retry:
        spin_lock_irqsave(&cep->lock, flags);
 
        if (cep->in_use) {
                spin_unlock_irqrestore(&cep->lock, flags);
-               rv = wait_event_interruptible(cep->waitq, !cep->in_use);
+               wait_event_interruptible(cep->waitq, !cep->in_use);
                if (signal_pending(current))
                        flush_signals(current);
                goto retry;
index f55c4e8..d0f140d 100644
@@ -612,6 +612,7 @@ static __init int siw_init_module(void)
 
        if (!siw_create_tx_threads()) {
                pr_info("siw: Could not start any TX thread\n");
+               rv = -ENOMEM;
                goto out_error;
        }
        /*
index 11383d9..e27bd5b 100644
@@ -220,12 +220,14 @@ static int siw_qp_enable_crc(struct siw_qp *qp)
 {
        struct siw_rx_stream *c_rx = &qp->rx_stream;
        struct siw_iwarp_tx *c_tx = &qp->tx_ctx;
-       int size = crypto_shash_descsize(siw_crypto_shash) +
-                       sizeof(struct shash_desc);
+       int size;
 
        if (siw_crypto_shash == NULL)
                return -ENOENT;
 
+       size = crypto_shash_descsize(siw_crypto_shash) +
+               sizeof(struct shash_desc);
+
        c_tx->mpa_crc_hd = kzalloc(size, GFP_KERNEL);
        c_rx->mpa_crc_hd = kzalloc(size, GFP_KERNEL);
        if (!c_tx->mpa_crc_hd || !c_rx->mpa_crc_hd) {
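
The hunk above fixes an order-of-operations bug: the descriptor size
was derived from siw_crypto_shash before the NULL check that guards
it. The general rule, sketched with a hypothetical stand-in for the
descsize helper:

    #include <stddef.h>
    #include <stdio.h>

    struct shash { int descsize; };

    static int descsize(const struct shash *s) { return s->descsize; }

    static int enable_crc(const struct shash *alg)
    {
            int size;

            if (alg == NULL)              /* validate first ... */
                    return -1;

            size = descsize(alg) + 16;    /* ... derive sizes only afterwards */
            printf("would allocate %d bytes\n", size);
            return 0;
    }

    int main(void)
    {
            struct shash sha = { .descsize = 32 };

            enable_crc(&sha);
            enable_crc(NULL);             /* safely rejected, no dereference */
            return 0;
    }
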
index 433f4d2..80a740d 100644
@@ -2,7 +2,7 @@
 /*
  * Virtio driver for the paravirtualized IOMMU
  *
- * Copyright (C) 2018 Arm Limited
+ * Copyright (C) 2019 Arm Limited
  */
 
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -47,7 +47,10 @@ struct viommu_dev {
        /* Device configuration */
        struct iommu_domain_geometry    geometry;
        u64                             pgsize_bitmap;
-       u8                              domain_bits;
+       u32                             first_domain;
+       u32                             last_domain;
+       /* Supported MAP flags */
+       u32                             map_flags;
        u32                             probe_size;
 };
 
@@ -62,6 +65,7 @@ struct viommu_domain {
        struct viommu_dev               *viommu;
        struct mutex                    mutex; /* protects viommu pointer */
        unsigned int                    id;
+       u32                             map_flags;
 
        spinlock_t                      mappings_lock;
        struct rb_root_cached           mappings;
@@ -113,6 +117,8 @@ static int viommu_get_req_errno(void *buf, size_t len)
                return -ENOENT;
        case VIRTIO_IOMMU_S_FAULT:
                return -EFAULT;
+       case VIRTIO_IOMMU_S_NOMEM:
+               return -ENOMEM;
        case VIRTIO_IOMMU_S_IOERR:
        case VIRTIO_IOMMU_S_DEVERR:
        default:
@@ -607,15 +613,15 @@ static int viommu_domain_finalise(struct viommu_dev *viommu,
 {
        int ret;
        struct viommu_domain *vdomain = to_viommu_domain(domain);
-       unsigned int max_domain = viommu->domain_bits > 31 ? ~0 :
-                                 (1U << viommu->domain_bits) - 1;
 
        vdomain->viommu         = viommu;
+       vdomain->map_flags      = viommu->map_flags;
 
        domain->pgsize_bitmap   = viommu->pgsize_bitmap;
        domain->geometry        = viommu->geometry;
 
-       ret = ida_alloc_max(&viommu->domain_ids, max_domain, GFP_KERNEL);
+       ret = ida_alloc_range(&viommu->domain_ids, viommu->first_domain,
+                             viommu->last_domain, GFP_KERNEL);
        if (ret >= 0)
                vdomain->id = (unsigned int)ret;
 
@@ -710,7 +716,7 @@ static int viommu_map(struct iommu_domain *domain, unsigned long iova,
                      phys_addr_t paddr, size_t size, int prot)
 {
        int ret;
-       int flags;
+       u32 flags;
        struct virtio_iommu_req_map map;
        struct viommu_domain *vdomain = to_viommu_domain(domain);
 
@@ -718,6 +724,9 @@ static int viommu_map(struct iommu_domain *domain, unsigned long iova,
                (prot & IOMMU_WRITE ? VIRTIO_IOMMU_MAP_F_WRITE : 0) |
                (prot & IOMMU_MMIO ? VIRTIO_IOMMU_MAP_F_MMIO : 0);
 
+       if (flags & ~vdomain->map_flags)
+               return -EINVAL;
+
        ret = viommu_add_mapping(vdomain, iova, paddr, size, flags);
        if (ret)
                return ret;
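
With the per-domain map_flags in place, the map path simply rejects
any request carrying a flag the device did not advertise. The check
is a plain bitmask test, sketched here with illustrative flag values:

    #include <stdint.h>
    #include <stdio.h>

    #define MAP_F_READ  (1U << 0)
    #define MAP_F_WRITE (1U << 1)
    #define MAP_F_MMIO  (1U << 2)

    /* reject a request carrying any flag outside the advertised set */
    static int check_map(uint32_t requested, uint32_t supported)
    {
            if (requested & ~supported)
                    return -1;
            return 0;
    }

    int main(void)
    {
            uint32_t supported = MAP_F_READ | MAP_F_WRITE;   /* no MMIO */

            printf("r/w : %d\n", check_map(MAP_F_READ | MAP_F_WRITE, supported));
            printf("mmio: %d\n", check_map(MAP_F_MMIO, supported));  /* -1 */
            return 0;
    }
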
@@ -1027,7 +1036,8 @@ static int viommu_probe(struct virtio_device *vdev)
                goto err_free_vqs;
        }
 
-       viommu->domain_bits = 32;
+       viommu->map_flags = VIRTIO_IOMMU_MAP_F_READ | VIRTIO_IOMMU_MAP_F_WRITE;
+       viommu->last_domain = ~0U;
 
        /* Optional features */
        virtio_cread_feature(vdev, VIRTIO_IOMMU_F_INPUT_RANGE,
@@ -1038,9 +1048,13 @@ static int viommu_probe(struct virtio_device *vdev)
                             struct virtio_iommu_config, input_range.end,
                             &input_end);
 
-       virtio_cread_feature(vdev, VIRTIO_IOMMU_F_DOMAIN_BITS,
-                            struct virtio_iommu_config, domain_bits,
-                            &viommu->domain_bits);
+       virtio_cread_feature(vdev, VIRTIO_IOMMU_F_DOMAIN_RANGE,
+                            struct virtio_iommu_config, domain_range.start,
+                            &viommu->first_domain);
+
+       virtio_cread_feature(vdev, VIRTIO_IOMMU_F_DOMAIN_RANGE,
+                            struct virtio_iommu_config, domain_range.end,
+                            &viommu->last_domain);
 
        virtio_cread_feature(vdev, VIRTIO_IOMMU_F_PROBE,
                             struct virtio_iommu_config, probe_size,
@@ -1052,6 +1066,9 @@ static int viommu_probe(struct virtio_device *vdev)
                .force_aperture = true,
        };
 
+       if (virtio_has_feature(vdev, VIRTIO_IOMMU_F_MMIO))
+               viommu->map_flags |= VIRTIO_IOMMU_MAP_F_MMIO;
+
        viommu_ops.pgsize_bitmap = viommu->pgsize_bitmap;
 
        virtio_device_ready(vdev);
@@ -1130,9 +1147,10 @@ static void viommu_config_changed(struct virtio_device *vdev)
 
 static unsigned int features[] = {
        VIRTIO_IOMMU_F_MAP_UNMAP,
-       VIRTIO_IOMMU_F_DOMAIN_BITS,
        VIRTIO_IOMMU_F_INPUT_RANGE,
+       VIRTIO_IOMMU_F_DOMAIN_RANGE,
        VIRTIO_IOMMU_F_PROBE,
+       VIRTIO_IOMMU_F_MMIO,
 };
 
 static struct virtio_device_id id_table[] = {
index caaee80..7b6c3ee 100644
@@ -882,23 +882,23 @@ EXPORT_SYMBOL_GPL(dm_table_set_type);
 
 /* validate the dax capability of the target device span */
 int device_supports_dax(struct dm_target *ti, struct dm_dev *dev,
-                                      sector_t start, sector_t len, void *data)
+                       sector_t start, sector_t len, void *data)
 {
        int blocksize = *(int *) data;
 
        return generic_fsdax_supported(dev->dax_dev, dev->bdev, blocksize,
-                       start, len);
+                                      start, len);
 }
 
 /* Check devices support synchronous DAX */
-static int device_synchronous(struct dm_target *ti, struct dm_dev *dev,
-                                      sector_t start, sector_t len, void *data)
+static int device_dax_synchronous(struct dm_target *ti, struct dm_dev *dev,
+                                 sector_t start, sector_t len, void *data)
 {
-       return dax_synchronous(dev->dax_dev);
+       return dev->dax_dev && dax_synchronous(dev->dax_dev);
 }
 
 bool dm_table_supports_dax(struct dm_table *t,
-                         iterate_devices_callout_fn iterate_fn, int *blocksize)
+                          iterate_devices_callout_fn iterate_fn, int *blocksize)
 {
        struct dm_target *ti;
        unsigned i;
@@ -911,7 +911,7 @@ bool dm_table_supports_dax(struct dm_table *t,
                        return false;
 
                if (!ti->type->iterate_devices ||
-                       !ti->type->iterate_devices(ti, iterate_fn, blocksize))
+                   !ti->type->iterate_devices(ti, iterate_fn, blocksize))
                        return false;
        }
 
@@ -1921,7 +1921,7 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
 
        if (dm_table_supports_dax(t, device_supports_dax, &page_size)) {
                blk_queue_flag_set(QUEUE_FLAG_DAX, q);
-               if (dm_table_supports_dax(t, device_synchronous, NULL))
+               if (dm_table_supports_dax(t, device_dax_synchronous, NULL))
                        set_dax_synchronous(t->md->dax_dev);
        }
        else
index e327f80..7102e2e 100644
@@ -10,6 +10,7 @@
 #include <linux/kthread.h>
 #include <linux/scatterlist.h>
 #include <linux/dma-mapping.h>
+#include <linux/backing-dev.h>
 
 #include <linux/mmc/card.h>
 #include <linux/mmc/host.h>
@@ -427,6 +428,10 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card)
                goto free_tag_set;
        }
 
+       if (mmc_host_is_spi(host) && host->use_spi_crc)
+               mq->queue->backing_dev_info->capabilities |=
+                       BDI_CAP_STABLE_WRITES;
+
        mq->queue->queuedata = mq;
        blk_queue_rq_timeout(mq->queue, 60 * HZ);
 
index faaaf52..eea52e2 100644
@@ -2012,8 +2012,7 @@ static void dw_mci_tasklet_func(unsigned long priv)
                                 * delayed. Allowing the transfer to take place
                                 * avoids races and keeps things simple.
                                 */
-                               if ((err != -ETIMEDOUT) &&
-                                   (cmd->opcode == MMC_SEND_TUNING_BLOCK)) {
+                               if (err != -ETIMEDOUT) {
                                        state = STATE_SENDING_DATA;
                                        continue;
                                }
index 2d736e4..ba9a63d 100644
@@ -73,7 +73,7 @@
        #define MESON_MX_SDIO_IRQC_IF_CONFIG_MASK               GENMASK(7, 6)
        #define MESON_MX_SDIO_IRQC_FORCE_DATA_CLK               BIT(8)
        #define MESON_MX_SDIO_IRQC_FORCE_DATA_CMD               BIT(9)
-       #define MESON_MX_SDIO_IRQC_FORCE_DATA_DAT_MASK          GENMASK(10, 13)
+       #define MESON_MX_SDIO_IRQC_FORCE_DATA_DAT_MASK          GENMASK(13, 10)
        #define MESON_MX_SDIO_IRQC_SOFT_RESET                   BIT(15)
        #define MESON_MX_SDIO_IRQC_FORCE_HALT                   BIT(30)
        #define MESON_MX_SDIO_IRQC_HALT_HOLE                    BIT(31)
index 6ee340a..603a5d9 100644
@@ -624,6 +624,7 @@ err_cleanup_host:
        sdhci_cleanup_host(host);
 
 pm_runtime_disable:
+       pm_runtime_put_noidle(&pdev->dev);
        pm_runtime_disable(&pdev->dev);
        pm_runtime_set_suspended(&pdev->dev);
 
index 48d6f0d..83ed1fb 100644
@@ -736,6 +736,12 @@ static const struct of_device_id olpc_xo175_ec_of_match[] = {
 };
 MODULE_DEVICE_TABLE(of, olpc_xo175_ec_of_match);
 
+static const struct spi_device_id olpc_xo175_ec_id_table[] = {
+       { "xo1.75-ec", 0 },
+       {}
+};
+MODULE_DEVICE_TABLE(spi, olpc_xo175_ec_id_table);
+
 static struct spi_driver olpc_xo175_ec_spi_driver = {
        .driver = {
                .name   = "olpc-xo175-ec",
index 235c0b8..c510d0d 100644
@@ -812,6 +812,7 @@ static const struct x86_cpu_id intel_pmc_core_ids[] = {
        INTEL_CPU_FAM6(KABYLAKE_DESKTOP, spt_reg_map),
        INTEL_CPU_FAM6(CANNONLAKE_MOBILE, cnp_reg_map),
        INTEL_CPU_FAM6(ICELAKE_MOBILE, icl_reg_map),
+       INTEL_CPU_FAM6(ICELAKE_NNPI, icl_reg_map),
        {}
 };
 
index b0d3110..e4c68ef 100644
@@ -93,7 +93,7 @@ static struct gpiod_lookup_table gpios_led_table = {
 
 static struct gpio_keys_button apu2_keys_buttons[] = {
        {
-               .code                   = KEY_SETUP,
+               .code                   = KEY_RESTART,
                .active_low             = 1,
                .desc                   = "front button",
                .type                   = EV_KEY,
@@ -255,6 +255,4 @@ MODULE_DESCRIPTION("PC Engines APUv2/APUv3 board GPIO/LED/keys driver");
 MODULE_LICENSE("GPL");
 MODULE_DEVICE_TABLE(dmi, apu_gpio_dmi_table);
 MODULE_ALIAS("platform:pcengines-apuv2");
-MODULE_SOFTDEP("pre: platform:" AMD_FCH_GPIO_DRIVER_NAME);
-MODULE_SOFTDEP("pre: platform:leds-gpio");
-MODULE_SOFTDEP("pre: platform:gpio_keys_polled");
+MODULE_SOFTDEP("pre: platform:" AMD_FCH_GPIO_DRIVER_NAME " platform:leds-gpio platform:gpio_keys_polled");
index 8192963..42a8c2a 100644
@@ -96,7 +96,7 @@ struct vhost_uaddr {
 };
 
 #if defined(CONFIG_MMU_NOTIFIER) && ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 0
-#define VHOST_ARCH_CAN_ACCEL_UACCESS 1
+#define VHOST_ARCH_CAN_ACCEL_UACCESS 0
 #else
 #define VHOST_ARCH_CAN_ACCEL_UACCESS 0
 #endif
index 89116af..e5d8531 100644
@@ -1483,7 +1483,7 @@ int btrfs_check_shared(struct btrfs_root *root, u64 inum, u64 bytenr,
        ulist_init(roots);
        ulist_init(tmp);
 
-       trans = btrfs_attach_transaction(root);
+       trans = btrfs_join_transaction_nostart(root);
        if (IS_ERR(trans)) {
                if (PTR_ERR(trans) != -ENOENT && PTR_ERR(trans) != -EROFS) {
                        ret = PTR_ERR(trans);
index 69b59bf..c3c0c06 100644
@@ -6322,68 +6322,21 @@ static int changed_extent(struct send_ctx *sctx,
 {
        int ret = 0;
 
-       if (sctx->cur_ino != sctx->cmp_key->objectid) {
-
-               if (result == BTRFS_COMPARE_TREE_CHANGED) {
-                       struct extent_buffer *leaf_l;
-                       struct extent_buffer *leaf_r;
-                       struct btrfs_file_extent_item *ei_l;
-                       struct btrfs_file_extent_item *ei_r;
-
-                       leaf_l = sctx->left_path->nodes[0];
-                       leaf_r = sctx->right_path->nodes[0];
-                       ei_l = btrfs_item_ptr(leaf_l,
-                                             sctx->left_path->slots[0],
-                                             struct btrfs_file_extent_item);
-                       ei_r = btrfs_item_ptr(leaf_r,
-                                             sctx->right_path->slots[0],
-                                             struct btrfs_file_extent_item);
-
-                       /*
-                        * We may have found an extent item that has changed
-                        * only its disk_bytenr field and the corresponding
-                        * inode item was not updated. This case happens due to
-                        * very specific timings during relocation when a leaf
-                        * that contains file extent items is COWed while
-                        * relocation is ongoing and its in the stage where it
-                        * updates data pointers. So when this happens we can
-                        * safely ignore it since we know it's the same extent,
-                        * but just at different logical and physical locations
-                        * (when an extent is fully replaced with a new one, we
-                        * know the generation number must have changed too,
-                        * since snapshot creation implies committing the current
-                        * transaction, and the inode item must have been updated
-                        * as well).
-                        * This replacement of the disk_bytenr happens at
-                        * relocation.c:replace_file_extents() through
-                        * relocation.c:btrfs_reloc_cow_block().
-                        */
-                       if (btrfs_file_extent_generation(leaf_l, ei_l) ==
-                           btrfs_file_extent_generation(leaf_r, ei_r) &&
-                           btrfs_file_extent_ram_bytes(leaf_l, ei_l) ==
-                           btrfs_file_extent_ram_bytes(leaf_r, ei_r) &&
-                           btrfs_file_extent_compression(leaf_l, ei_l) ==
-                           btrfs_file_extent_compression(leaf_r, ei_r) &&
-                           btrfs_file_extent_encryption(leaf_l, ei_l) ==
-                           btrfs_file_extent_encryption(leaf_r, ei_r) &&
-                           btrfs_file_extent_other_encoding(leaf_l, ei_l) ==
-                           btrfs_file_extent_other_encoding(leaf_r, ei_r) &&
-                           btrfs_file_extent_type(leaf_l, ei_l) ==
-                           btrfs_file_extent_type(leaf_r, ei_r) &&
-                           btrfs_file_extent_disk_bytenr(leaf_l, ei_l) !=
-                           btrfs_file_extent_disk_bytenr(leaf_r, ei_r) &&
-                           btrfs_file_extent_disk_num_bytes(leaf_l, ei_l) ==
-                           btrfs_file_extent_disk_num_bytes(leaf_r, ei_r) &&
-                           btrfs_file_extent_offset(leaf_l, ei_l) ==
-                           btrfs_file_extent_offset(leaf_r, ei_r) &&
-                           btrfs_file_extent_num_bytes(leaf_l, ei_l) ==
-                           btrfs_file_extent_num_bytes(leaf_r, ei_r))
-                               return 0;
-               }
-
-               inconsistent_snapshot_error(sctx, result, "extent");
-               return -EIO;
-       }
+       /*
+        * We have found an extent item that changed without the inode item
+        * having changed. This can happen either after relocation (where the
+        * disk_bytenr of an extent item is replaced at
+        * relocation.c:replace_file_extents()) or after deduplication into a
+        * file in both the parent and send snapshots (where an extent item can
+        * get modified or replaced with a new one). Note that deduplication
+        * updates the inode item, but it only changes the iversion (sequence
+        * field in the inode item) of the inode, so if a file is deduplicated
+        * the same number of times in both the parent and send snapshots, its
+        * iversion becomes the same in both snapshots, hence the inode item is
+        * the same on both snapshots.
+        */
+       if (sctx->cur_ino != sctx->cmp_key->objectid)
+               return 0;
 
        if (!sctx->cur_inode_new_gen && !sctx->cur_inode_deleted) {
                if (result != BTRFS_COMPARE_TREE_DELETED)
index 3b8ae1a..e3adb71 100644
@@ -28,15 +28,18 @@ static const unsigned int btrfs_blocked_trans_types[TRANS_STATE_MAX] = {
        [TRANS_STATE_COMMIT_START]      = (__TRANS_START | __TRANS_ATTACH),
        [TRANS_STATE_COMMIT_DOING]      = (__TRANS_START |
                                           __TRANS_ATTACH |
-                                          __TRANS_JOIN),
+                                          __TRANS_JOIN |
+                                          __TRANS_JOIN_NOSTART),
        [TRANS_STATE_UNBLOCKED]         = (__TRANS_START |
                                           __TRANS_ATTACH |
                                           __TRANS_JOIN |
-                                          __TRANS_JOIN_NOLOCK),
+                                          __TRANS_JOIN_NOLOCK |
+                                          __TRANS_JOIN_NOSTART),
        [TRANS_STATE_COMPLETED]         = (__TRANS_START |
                                           __TRANS_ATTACH |
                                           __TRANS_JOIN |
-                                          __TRANS_JOIN_NOLOCK),
+                                          __TRANS_JOIN_NOLOCK |
+                                          __TRANS_JOIN_NOSTART),
 };
 
 void btrfs_put_transaction(struct btrfs_transaction *transaction)
@@ -543,7 +546,8 @@ again:
                ret = join_transaction(fs_info, type);
                if (ret == -EBUSY) {
                        wait_current_trans(fs_info);
-                       if (unlikely(type == TRANS_ATTACH))
+                       if (unlikely(type == TRANS_ATTACH ||
+                                    type == TRANS_JOIN_NOSTART))
                                ret = -ENOENT;
                }
        } while (ret == -EBUSY);
@@ -659,6 +663,16 @@ struct btrfs_trans_handle *btrfs_join_transaction_nolock(struct btrfs_root *root
                                 BTRFS_RESERVE_NO_FLUSH, true);
 }
 
+/*
+ * Similar to regular join but it never starts a transaction when none is
+ * running or after waiting for the current one to finish.
+ */
+struct btrfs_trans_handle *btrfs_join_transaction_nostart(struct btrfs_root *root)
+{
+       return start_transaction(root, 0, TRANS_JOIN_NOSTART,
+                                BTRFS_RESERVE_NO_FLUSH, true);
+}
+
 /*
  * btrfs_attach_transaction() - catch the running transaction
  *
@@ -2037,6 +2051,16 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans)
                }
        } else {
                spin_unlock(&fs_info->trans_lock);
+               /*
+                * The previous transaction was aborted and was already removed
+                * from the list of transactions at fs_info->trans_list. So we
+                * abort to prevent writing a new superblock that reflects a
+                * corrupt state (pointing to trees with unwritten nodes/leafs).
+                */
+               if (test_bit(BTRFS_FS_STATE_TRANS_ABORTED, &fs_info->fs_state)) {
+                       ret = -EROFS;
+                       goto cleanup_transaction;
+               }
        }
 
        extwriter_counter_dec(cur_trans, trans->type);
index 527ea94..2c5a6f6 100644 (file)
@@ -94,11 +94,13 @@ struct btrfs_transaction {
 #define __TRANS_JOIN           (1U << 11)
 #define __TRANS_JOIN_NOLOCK    (1U << 12)
 #define __TRANS_DUMMY          (1U << 13)
+#define __TRANS_JOIN_NOSTART   (1U << 14)
 
 #define TRANS_START            (__TRANS_START | __TRANS_FREEZABLE)
 #define TRANS_ATTACH           (__TRANS_ATTACH)
 #define TRANS_JOIN             (__TRANS_JOIN | __TRANS_FREEZABLE)
 #define TRANS_JOIN_NOLOCK      (__TRANS_JOIN_NOLOCK)
+#define TRANS_JOIN_NOSTART     (__TRANS_JOIN_NOSTART)
 
 #define TRANS_EXTWRITERS       (__TRANS_START | __TRANS_ATTACH)
 
@@ -183,6 +185,7 @@ struct btrfs_trans_handle *btrfs_start_transaction_fallback_global_rsv(
                                        int min_factor);
 struct btrfs_trans_handle *btrfs_join_transaction(struct btrfs_root *root);
 struct btrfs_trans_handle *btrfs_join_transaction_nolock(struct btrfs_root *root);
+struct btrfs_trans_handle *btrfs_join_transaction_nostart(struct btrfs_root *root);
 struct btrfs_trans_handle *btrfs_attach_transaction(struct btrfs_root *root);
 struct btrfs_trans_handle *btrfs_attach_transaction_barrier(
                                        struct btrfs_root *root);
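
Note: the hunks above gate each transaction state with a bitmask of join
types that are blocked once that state is reached, and JOIN_NOSTART is now
blocked from COMMIT_DOING onward. A minimal userspace sketch of the same
scheme (names hypothetical, not the kernel code):

#include <stdio.h>

/* hypothetical join-type bits, mirroring the __TRANS_* flags above */
#define T_START        (1U << 0)
#define T_ATTACH       (1U << 1)
#define T_JOIN         (1U << 2)
#define T_JOIN_NOLOCK  (1U << 3)
#define T_JOIN_NOSTART (1U << 4)

enum state { COMMIT_START, COMMIT_DOING, UNBLOCKED, COMPLETED, STATE_MAX };

/* which join types must wait (are "blocked") in each state */
static const unsigned int blocked[STATE_MAX] = {
	[COMMIT_START] = T_START | T_ATTACH,
	[COMMIT_DOING] = T_START | T_ATTACH | T_JOIN | T_JOIN_NOSTART,
	[UNBLOCKED]    = T_START | T_ATTACH | T_JOIN | T_JOIN_NOLOCK |
			 T_JOIN_NOSTART,
	[COMPLETED]    = T_START | T_ATTACH | T_JOIN | T_JOIN_NOLOCK |
			 T_JOIN_NOSTART,
};

static int may_join(enum state s, unsigned int type)
{
	return !(blocked[s] & type);	/* one AND decides admission */
}

int main(void)
{
	printf("JOIN_NOSTART during COMMIT_DOING: %s\n",
	       may_join(COMMIT_DOING, T_JOIN_NOSTART) ? "joins" : "waits");
	return 0;
}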
index a237141..b64964e 100644 (file)
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -266,7 +266,7 @@ static void wait_entry_unlocked(struct xa_state *xas, void *entry)
 static void put_unlocked_entry(struct xa_state *xas, void *entry)
 {
        /* If we were the only waiter woken, wake the next one */
-       if (entry && dax_is_conflict(entry))
+       if (entry && !dax_is_conflict(entry))
                dax_wake_entry(xas, entry, false);
 }
 
index f8d46df..3e58a6f 100644 (file)
@@ -1653,19 +1653,12 @@ static int f2fs_file_flush(struct file *file, fl_owner_t id)
 static int f2fs_setflags_common(struct inode *inode, u32 iflags, u32 mask)
 {
        struct f2fs_inode_info *fi = F2FS_I(inode);
-       u32 oldflags;
 
        /* Is it quota file? Do not allow user to mess with it */
        if (IS_NOQUOTA(inode))
                return -EPERM;
 
-       oldflags = fi->i_flags;
-
-       if ((iflags ^ oldflags) & (F2FS_APPEND_FL | F2FS_IMMUTABLE_FL))
-               if (!capable(CAP_LINUX_IMMUTABLE))
-                       return -EPERM;
-
-       fi->i_flags = iflags | (oldflags & ~mask);
+       fi->i_flags = iflags | (fi->i_flags & ~mask);
 
        if (fi->i_flags & F2FS_PROJINHERIT_FL)
                set_inode_flag(inode, FI_PROJ_INHERIT);
@@ -1770,7 +1763,8 @@ static int f2fs_ioc_getflags(struct file *filp, unsigned long arg)
 static int f2fs_ioc_setflags(struct file *filp, unsigned long arg)
 {
        struct inode *inode = file_inode(filp);
-       u32 fsflags;
+       struct f2fs_inode_info *fi = F2FS_I(inode);
+       u32 fsflags, old_fsflags;
        u32 iflags;
        int ret;
 
@@ -1794,8 +1788,14 @@ static int f2fs_ioc_setflags(struct file *filp, unsigned long arg)
 
        inode_lock(inode);
 
+       old_fsflags = f2fs_iflags_to_fsflags(fi->i_flags);
+       ret = vfs_ioc_setflags_prepare(inode, old_fsflags, fsflags);
+       if (ret)
+               goto out;
+
        ret = f2fs_setflags_common(inode, iflags,
                        f2fs_fsflags_to_iflags(F2FS_SETTABLE_FS_FL));
+out:
        inode_unlock(inode);
        mnt_drop_write_file(filp);
        return ret;
@@ -2855,52 +2855,32 @@ static inline u32 f2fs_xflags_to_iflags(u32 xflags)
        return iflags;
 }
 
-static int f2fs_ioc_fsgetxattr(struct file *filp, unsigned long arg)
+static void f2fs_fill_fsxattr(struct inode *inode, struct fsxattr *fa)
 {
-       struct inode *inode = file_inode(filp);
        struct f2fs_inode_info *fi = F2FS_I(inode);
-       struct fsxattr fa;
 
-       memset(&fa, 0, sizeof(struct fsxattr));
-       fa.fsx_xflags = f2fs_iflags_to_xflags(fi->i_flags);
+       simple_fill_fsxattr(fa, f2fs_iflags_to_xflags(fi->i_flags));
 
        if (f2fs_sb_has_project_quota(F2FS_I_SB(inode)))
-               fa.fsx_projid = (__u32)from_kprojid(&init_user_ns,
-                                                       fi->i_projid);
-
-       if (copy_to_user((struct fsxattr __user *)arg, &fa, sizeof(fa)))
-               return -EFAULT;
-       return 0;
+               fa->fsx_projid = from_kprojid(&init_user_ns, fi->i_projid);
 }
 
-static int f2fs_ioctl_check_project(struct inode *inode, struct fsxattr *fa)
+static int f2fs_ioc_fsgetxattr(struct file *filp, unsigned long arg)
 {
-       /*
-        * Project Quota ID state is only allowed to change from within the init
-        * namespace. Enforce that restriction only if we are trying to change
-        * the quota ID state. Everything else is allowed in user namespaces.
-        */
-       if (current_user_ns() == &init_user_ns)
-               return 0;
+       struct inode *inode = file_inode(filp);
+       struct fsxattr fa;
 
-       if (__kprojid_val(F2FS_I(inode)->i_projid) != fa->fsx_projid)
-               return -EINVAL;
-
-       if (F2FS_I(inode)->i_flags & F2FS_PROJINHERIT_FL) {
-               if (!(fa->fsx_xflags & FS_XFLAG_PROJINHERIT))
-                       return -EINVAL;
-       } else {
-               if (fa->fsx_xflags & FS_XFLAG_PROJINHERIT)
-                       return -EINVAL;
-       }
+       f2fs_fill_fsxattr(inode, &fa);
 
+       if (copy_to_user((struct fsxattr __user *)arg, &fa, sizeof(fa)))
+               return -EFAULT;
        return 0;
 }
 
 static int f2fs_ioc_fssetxattr(struct file *filp, unsigned long arg)
 {
        struct inode *inode = file_inode(filp);
-       struct fsxattr fa;
+       struct fsxattr fa, old_fa;
        u32 iflags;
        int err;
 
@@ -2923,9 +2903,12 @@ static int f2fs_ioc_fssetxattr(struct file *filp, unsigned long arg)
                return err;
 
        inode_lock(inode);
-       err = f2fs_ioctl_check_project(inode, &fa);
+
+       f2fs_fill_fsxattr(inode, &old_fa);
+       err = vfs_ioc_fssetxattr_check(inode, &old_fa, &fa);
        if (err)
                goto out;
+
        err = f2fs_setflags_common(inode, iflags,
                        f2fs_xflags_to_iflags(F2FS_SUPPORTED_XFLAGS));
        if (err)
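
Note: the refactor above routes flag updates through the common VFS helper
(vfs_ioc_setflags_prepare) instead of an open-coded capability check. The
userspace-visible interface is unchanged; a small sketch of the
GETFLAGS/SETFLAGS round trip this path serves (error handling trimmed; the
kernel side copies an int for these ioctls despite the _IOR(long) encoding):

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/fs.h>

int main(int argc, char **argv)
{
	int fd, flags;

	if (argc < 2 || (fd = open(argv[1], O_RDONLY)) < 0)
		return 1;
	if (ioctl(fd, FS_IOC_GETFLAGS, &flags) < 0)
		return 1;
	flags |= FS_NOATIME_FL;	/* immutable/append would need CAP_LINUX_IMMUTABLE */
	if (ioctl(fd, FS_IOC_SETFLAGS, &flags) < 0)
		perror("FS_IOC_SETFLAGS");	/* the prepare helper may return EPERM */
	close(fd);
	return 0;
}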
index 6691f52..8974672 100644 (file)
@@ -796,6 +796,29 @@ static int move_data_block(struct inode *inode, block_t bidx,
        if (lfs_mode)
                down_write(&fio.sbi->io_order_lock);
 
+       mpage = f2fs_grab_cache_page(META_MAPPING(fio.sbi),
+                                       fio.old_blkaddr, false);
+       if (!mpage)
+               goto up_out;
+
+       fio.encrypted_page = mpage;
+
+       /* read source block in mpage */
+       if (!PageUptodate(mpage)) {
+               err = f2fs_submit_page_bio(&fio);
+               if (err) {
+                       f2fs_put_page(mpage, 1);
+                       goto up_out;
+               }
+               lock_page(mpage);
+               if (unlikely(mpage->mapping != META_MAPPING(fio.sbi) ||
+                                               !PageUptodate(mpage))) {
+                       err = -EIO;
+                       f2fs_put_page(mpage, 1);
+                       goto up_out;
+               }
+       }
+
        f2fs_allocate_data_block(fio.sbi, NULL, fio.old_blkaddr, &newaddr,
                                        &sum, CURSEG_COLD_DATA, NULL, false);
 
@@ -803,44 +826,18 @@ static int move_data_block(struct inode *inode, block_t bidx,
                                newaddr, FGP_LOCK | FGP_CREAT, GFP_NOFS);
        if (!fio.encrypted_page) {
                err = -ENOMEM;
-               goto recover_block;
-       }
-
-       mpage = f2fs_pagecache_get_page(META_MAPPING(fio.sbi),
-                                       fio.old_blkaddr, FGP_LOCK, GFP_NOFS);
-       if (mpage) {
-               bool updated = false;
-
-               if (PageUptodate(mpage)) {
-                       memcpy(page_address(fio.encrypted_page),
-                                       page_address(mpage), PAGE_SIZE);
-                       updated = true;
-               }
                f2fs_put_page(mpage, 1);
-               invalidate_mapping_pages(META_MAPPING(fio.sbi),
-                                       fio.old_blkaddr, fio.old_blkaddr);
-               if (updated)
-                       goto write_page;
-       }
-
-       err = f2fs_submit_page_bio(&fio);
-       if (err)
-               goto put_page_out;
-
-       /* write page */
-       lock_page(fio.encrypted_page);
-
-       if (unlikely(fio.encrypted_page->mapping != META_MAPPING(fio.sbi))) {
-               err = -EIO;
-               goto put_page_out;
-       }
-       if (unlikely(!PageUptodate(fio.encrypted_page))) {
-               err = -EIO;
-               goto put_page_out;
+               goto recover_block;
        }
 
-write_page:
+       /* write target block */
        f2fs_wait_on_page_writeback(fio.encrypted_page, DATA, true, true);
+       memcpy(page_address(fio.encrypted_page),
+                               page_address(mpage), PAGE_SIZE);
+       f2fs_put_page(mpage, 1);
+       invalidate_mapping_pages(META_MAPPING(fio.sbi),
+                               fio.old_blkaddr, fio.old_blkaddr);
+
        set_page_dirty(fio.encrypted_page);
        if (clear_page_dirty_for_io(fio.encrypted_page))
                dec_page_count(fio.sbi, F2FS_DIRTY_META);
@@ -871,11 +868,12 @@ write_page:
 put_page_out:
        f2fs_put_page(fio.encrypted_page, 1);
 recover_block:
-       if (lfs_mode)
-               up_write(&fio.sbi->io_order_lock);
        if (err)
                f2fs_do_replace_block(fio.sbi, &sum, newaddr, fio.old_blkaddr,
                                                                true, true);
+up_out:
+       if (lfs_mode)
+               up_write(&fio.sbi->io_order_lock);
 put_out:
        f2fs_put_dnode(&dn);
 out:
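
Note: the gc.c hunk above reorders the error labels so each failure point
unwinds exactly what it has acquired: the new up_out label releases the
io_order lock without running the block-recovery path, which only late
failures need. A generic sketch of that label-ordering idiom, using a
pthread rwlock as a stand-in for the kernel semaphore:

#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t io_order_lock = PTHREAD_RWLOCK_INITIALIZER;

static int move_block(int lfs_mode, int fail_early, int fail_late)
{
	int err = 0;

	if (lfs_mode)
		pthread_rwlock_wrlock(&io_order_lock);

	if (fail_early) {		/* nothing allocated yet: skip recovery */
		err = -1;
		goto up_out;
	}

	/* ... allocate the replacement block here ... */

	if (fail_late)			/* allocation done: must undo it */
		err = -1;

	if (err)
		printf("recover: roll back block allocation\n");
up_out:
	if (lfs_mode)
		pthread_rwlock_unlock(&io_order_lock);
	return err;
}

int main(void)
{
	move_block(1, 1, 0);	/* early failure: unlock only */
	move_block(1, 0, 1);	/* late failure: recover, then unlock */
	return 0;
}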
index 6de6cda..78a1b87 100644 (file)
@@ -2422,6 +2422,12 @@ static int sanity_check_raw_super(struct f2fs_sb_info *sbi,
        size_t crc_offset = 0;
        __u32 crc = 0;
 
+       if (le32_to_cpu(raw_super->magic) != F2FS_SUPER_MAGIC) {
+               f2fs_info(sbi, "Magic Mismatch, valid(0x%x) - read(0x%x)",
+                         F2FS_SUPER_MAGIC, le32_to_cpu(raw_super->magic));
+               return -EINVAL;
+       }
+
        /* Check checksum_offset and crc in superblock */
        if (__F2FS_HAS_FEATURE(raw_super, F2FS_FEATURE_SB_CHKSUM)) {
                crc_offset = le32_to_cpu(raw_super->checksum_offset);
@@ -2429,26 +2435,20 @@ static int sanity_check_raw_super(struct f2fs_sb_info *sbi,
                        offsetof(struct f2fs_super_block, crc)) {
                        f2fs_info(sbi, "Invalid SB checksum offset: %zu",
                                  crc_offset);
-                       return 1;
+                       return -EFSCORRUPTED;
                }
                crc = le32_to_cpu(raw_super->crc);
                if (!f2fs_crc_valid(sbi, crc, raw_super, crc_offset)) {
                        f2fs_info(sbi, "Invalid SB checksum value: %u", crc);
-                       return 1;
+                       return -EFSCORRUPTED;
                }
        }
 
-       if (F2FS_SUPER_MAGIC != le32_to_cpu(raw_super->magic)) {
-               f2fs_info(sbi, "Magic Mismatch, valid(0x%x) - read(0x%x)",
-                         F2FS_SUPER_MAGIC, le32_to_cpu(raw_super->magic));
-               return 1;
-       }
-
        /* Currently, support only 4KB page cache size */
        if (F2FS_BLKSIZE != PAGE_SIZE) {
                f2fs_info(sbi, "Invalid page_cache_size (%lu), supports only 4KB",
                          PAGE_SIZE);
-               return 1;
+               return -EFSCORRUPTED;
        }
 
        /* Currently, support only 4KB block size */
@@ -2456,14 +2456,14 @@ static int sanity_check_raw_super(struct f2fs_sb_info *sbi,
        if (blocksize != F2FS_BLKSIZE) {
                f2fs_info(sbi, "Invalid blocksize (%u), supports only 4KB",
                          blocksize);
-               return 1;
+               return -EFSCORRUPTED;
        }
 
        /* check log blocks per segment */
        if (le32_to_cpu(raw_super->log_blocks_per_seg) != 9) {
                f2fs_info(sbi, "Invalid log blocks per segment (%u)",
                          le32_to_cpu(raw_super->log_blocks_per_seg));
-               return 1;
+               return -EFSCORRUPTED;
        }
 
        /* Currently, support 512/1024/2048/4096 bytes sector size */
@@ -2473,7 +2473,7 @@ static int sanity_check_raw_super(struct f2fs_sb_info *sbi,
                                F2FS_MIN_LOG_SECTOR_SIZE) {
                f2fs_info(sbi, "Invalid log sectorsize (%u)",
                          le32_to_cpu(raw_super->log_sectorsize));
-               return 1;
+               return -EFSCORRUPTED;
        }
        if (le32_to_cpu(raw_super->log_sectors_per_block) +
                le32_to_cpu(raw_super->log_sectorsize) !=
@@ -2481,7 +2481,7 @@ static int sanity_check_raw_super(struct f2fs_sb_info *sbi,
                f2fs_info(sbi, "Invalid log sectors per block(%u) log sectorsize(%u)",
                          le32_to_cpu(raw_super->log_sectors_per_block),
                          le32_to_cpu(raw_super->log_sectorsize));
-               return 1;
+               return -EFSCORRUPTED;
        }
 
        segment_count = le32_to_cpu(raw_super->segment_count);
@@ -2495,7 +2495,7 @@ static int sanity_check_raw_super(struct f2fs_sb_info *sbi,
        if (segment_count > F2FS_MAX_SEGMENT ||
                                segment_count < F2FS_MIN_SEGMENTS) {
                f2fs_info(sbi, "Invalid segment count (%u)", segment_count);
-               return 1;
+               return -EFSCORRUPTED;
        }
 
        if (total_sections > segment_count ||
@@ -2503,25 +2503,25 @@ static int sanity_check_raw_super(struct f2fs_sb_info *sbi,
                        segs_per_sec > segment_count || !segs_per_sec) {
                f2fs_info(sbi, "Invalid segment/section count (%u, %u x %u)",
                          segment_count, total_sections, segs_per_sec);
-               return 1;
+               return -EFSCORRUPTED;
        }
 
        if ((segment_count / segs_per_sec) < total_sections) {
                f2fs_info(sbi, "Small segment_count (%u < %u * %u)",
                          segment_count, segs_per_sec, total_sections);
-               return 1;
+               return -EFSCORRUPTED;
        }
 
        if (segment_count > (le64_to_cpu(raw_super->block_count) >> 9)) {
                f2fs_info(sbi, "Wrong segment_count / block_count (%u > %llu)",
                          segment_count, le64_to_cpu(raw_super->block_count));
-               return 1;
+               return -EFSCORRUPTED;
        }
 
        if (secs_per_zone > total_sections || !secs_per_zone) {
                f2fs_info(sbi, "Wrong secs_per_zone / total_sections (%u, %u)",
                          secs_per_zone, total_sections);
-               return 1;
+               return -EFSCORRUPTED;
        }
        if (le32_to_cpu(raw_super->extension_count) > F2FS_MAX_EXTENSION ||
                        raw_super->hot_ext_count > F2FS_MAX_EXTENSION ||
@@ -2531,7 +2531,7 @@ static int sanity_check_raw_super(struct f2fs_sb_info *sbi,
                          le32_to_cpu(raw_super->extension_count),
                          raw_super->hot_ext_count,
                          F2FS_MAX_EXTENSION);
-               return 1;
+               return -EFSCORRUPTED;
        }
 
        if (le32_to_cpu(raw_super->cp_payload) >
@@ -2539,7 +2539,7 @@ static int sanity_check_raw_super(struct f2fs_sb_info *sbi,
                f2fs_info(sbi, "Insane cp_payload (%u > %u)",
                          le32_to_cpu(raw_super->cp_payload),
                          blocks_per_seg - F2FS_CP_PACKS);
-               return 1;
+               return -EFSCORRUPTED;
        }
 
        /* check reserved ino info */
@@ -2550,12 +2550,12 @@ static int sanity_check_raw_super(struct f2fs_sb_info *sbi,
                          le32_to_cpu(raw_super->node_ino),
                          le32_to_cpu(raw_super->meta_ino),
                          le32_to_cpu(raw_super->root_ino));
-               return 1;
+               return -EFSCORRUPTED;
        }
 
        /* check CP/SIT/NAT/SSA/MAIN_AREA area boundary */
        if (sanity_check_area_boundary(sbi, bh))
-               return 1;
+               return -EFSCORRUPTED;
 
        return 0;
 }
@@ -2870,10 +2870,10 @@ static int read_raw_super_block(struct f2fs_sb_info *sbi,
                }
 
                /* sanity checking of raw super */
-               if (sanity_check_raw_super(sbi, bh)) {
+               err = sanity_check_raw_super(sbi, bh);
+               if (err) {
                        f2fs_err(sbi, "Can't find valid F2FS filesystem in %dth superblock",
                                 block + 1);
-                       err = -EFSCORRUPTED;
                        brelse(bh);
                        continue;
                }
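
Note: sanity_check_raw_super() used to return 1 for every failure, forcing
the caller to pick a single errno; above it now returns 0 or a specific
negative code that read_raw_super_block() propagates, so a wrong-magic
superblock (-EINVAL, try the next copy) is distinguishable from a damaged
one (-EFSCORRUPTED). A tiny sketch of the convention, with hypothetical
names:

#include <errno.h>
#include <stdio.h>
#include <string.h>

#define EFSCORRUPTED EUCLEAN	/* the kernel aliases this pair */

static int check_super(int magic_ok, int crc_ok)
{
	if (!magic_ok)
		return -EINVAL;		/* wrong filesystem entirely */
	if (!crc_ok)
		return -EFSCORRUPTED;	/* right filesystem, damaged superblock */
	return 0;
}

int main(void)
{
	int err = check_super(1, 0);

	if (err)
		printf("mount fails with %d (%s)\n", err, strerror(-err));
	return 0;
}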
index 79581b9..4df26ef 100644 (file)
@@ -1002,11 +1002,16 @@ static void gfs2_iomap_page_done(struct inode *inode, loff_t pos,
                                 unsigned copied, struct page *page,
                                 struct iomap *iomap)
 {
+       struct gfs2_trans *tr = current->journal_info;
        struct gfs2_inode *ip = GFS2_I(inode);
        struct gfs2_sbd *sdp = GFS2_SB(inode);
 
        if (page && !gfs2_is_stuffed(ip))
                gfs2_page_add_databufs(ip, page, offset_in_page(pos), copied);
+
+       if (tr->tr_num_buf_new)
+               __mark_inode_dirty(inode, I_DIRTY_DATASYNC);
+
        gfs2_trans_end(sdp);
 }
 
@@ -1099,8 +1104,6 @@ static int gfs2_iomap_begin_write(struct inode *inode, loff_t pos,
                tr = current->journal_info;
                if (tr->tr_num_buf_new)
                        __mark_inode_dirty(inode, I_DIRTY_DATASYNC);
-               else
-                       gfs2_trans_add_meta(ip->i_gl, mp->mp_bh[0]);
 
                gfs2_trans_end(sdp);
        }
@@ -1181,10 +1184,16 @@ static int gfs2_iomap_end(struct inode *inode, loff_t pos, loff_t length,
 
        if (ip->i_qadata && ip->i_qadata->qa_qd_num)
                gfs2_quota_unlock(ip);
+
+       if (unlikely(!written))
+               goto out_unlock;
+
        if (iomap->flags & IOMAP_F_SIZE_CHANGED)
                mark_inode_dirty(inode);
-       gfs2_write_unlock(inode);
+       set_bit(GLF_DIRTY, &ip->i_gl->gl_flags);
 
+out_unlock:
+       gfs2_write_unlock(inode);
 out:
        return 0;
 }
index 113c58f..5960578 100644 (file)
@@ -478,13 +478,10 @@ EXPORT_SYMBOL(generic_shutdown_super);
 
 bool mount_capable(struct fs_context *fc)
 {
-       struct user_namespace *user_ns = fc->global ? &init_user_ns
-                                                   : fc->user_ns;
-
        if (!(fc->fs_type->fs_flags & FS_USERNS_MOUNT))
                return capable(CAP_SYS_ADMIN);
        else
-               return ns_capable(user_ns, CAP_SYS_ADMIN);
+               return ns_capable(fc->user_ns, CAP_SYS_ADMIN);
 }
 
 /**
index 3c096c7..853a8f1 100644 (file)
@@ -359,6 +359,7 @@ int __must_check devm_clk_bulk_get(struct device *dev, int num_clks,
 /**
  * devm_clk_bulk_get_optional - managed get multiple optional consumer clocks
  * @dev: device for clock "consumer"
+ * @num_clks: the number of clk_bulk_data
  * @clks: pointer to the clk_bulk_data table of consumer
  *
  * Behaves the same as devm_clk_bulk_get() except where there is no clock
index 9ddcf50..a7f08fb 100644 (file)
@@ -247,7 +247,7 @@ static inline void gpiod_put(struct gpio_desc *desc)
        might_sleep();
 
        /* GPIO can never have been requested */
-       WARN_ON(1);
+       WARN_ON(desc);
 }
 
 static inline void devm_gpiod_unhinge(struct device *dev,
@@ -256,7 +256,7 @@ static inline void devm_gpiod_unhinge(struct device *dev,
        might_sleep();
 
        /* GPIO can never have been requested */
-       WARN_ON(1);
+       WARN_ON(desc);
 }
 
 static inline void gpiod_put_array(struct gpio_descs *descs)
@@ -264,7 +264,7 @@ static inline void gpiod_put_array(struct gpio_descs *descs)
        might_sleep();
 
        /* GPIO can never have been requested */
-       WARN_ON(1);
+       WARN_ON(descs);
 }
 
 static inline struct gpio_desc *__must_check
@@ -317,7 +317,7 @@ static inline void devm_gpiod_put(struct device *dev, struct gpio_desc *desc)
        might_sleep();
 
        /* GPIO can never have been requested */
-       WARN_ON(1);
+       WARN_ON(desc);
 }
 
 static inline void devm_gpiod_put_array(struct device *dev,
@@ -326,32 +326,32 @@ static inline void devm_gpiod_put_array(struct device *dev,
        might_sleep();
 
        /* GPIO can never have been requested */
-       WARN_ON(1);
+       WARN_ON(descs);
 }
 
 
 static inline int gpiod_get_direction(const struct gpio_desc *desc)
 {
        /* GPIO can never have been requested */
-       WARN_ON(1);
+       WARN_ON(desc);
        return -ENOSYS;
 }
 static inline int gpiod_direction_input(struct gpio_desc *desc)
 {
        /* GPIO can never have been requested */
-       WARN_ON(1);
+       WARN_ON(desc);
        return -ENOSYS;
 }
 static inline int gpiod_direction_output(struct gpio_desc *desc, int value)
 {
        /* GPIO can never have been requested */
-       WARN_ON(1);
+       WARN_ON(desc);
        return -ENOSYS;
 }
 static inline int gpiod_direction_output_raw(struct gpio_desc *desc, int value)
 {
        /* GPIO can never have been requested */
-       WARN_ON(1);
+       WARN_ON(desc);
        return -ENOSYS;
 }
 
@@ -359,7 +359,7 @@ static inline int gpiod_direction_output_raw(struct gpio_desc *desc, int value)
 static inline int gpiod_get_value(const struct gpio_desc *desc)
 {
        /* GPIO can never have been requested */
-       WARN_ON(1);
+       WARN_ON(desc);
        return 0;
 }
 static inline int gpiod_get_array_value(unsigned int array_size,
@@ -368,13 +368,13 @@ static inline int gpiod_get_array_value(unsigned int array_size,
                                        unsigned long *value_bitmap)
 {
        /* GPIO can never have been requested */
-       WARN_ON(1);
+       WARN_ON(desc_array);
        return 0;
 }
 static inline void gpiod_set_value(struct gpio_desc *desc, int value)
 {
        /* GPIO can never have been requested */
-       WARN_ON(1);
+       WARN_ON(desc);
 }
 static inline int gpiod_set_array_value(unsigned int array_size,
                                        struct gpio_desc **desc_array,
@@ -382,13 +382,13 @@ static inline int gpiod_set_array_value(unsigned int array_size,
                                        unsigned long *value_bitmap)
 {
        /* GPIO can never have been requested */
-       WARN_ON(1);
+       WARN_ON(desc_array);
        return 0;
 }
 static inline int gpiod_get_raw_value(const struct gpio_desc *desc)
 {
        /* GPIO can never have been requested */
-       WARN_ON(1);
+       WARN_ON(desc);
        return 0;
 }
 static inline int gpiod_get_raw_array_value(unsigned int array_size,
@@ -397,13 +397,13 @@ static inline int gpiod_get_raw_array_value(unsigned int array_size,
                                            unsigned long *value_bitmap)
 {
        /* GPIO can never have been requested */
-       WARN_ON(1);
+       WARN_ON(desc_array);
        return 0;
 }
 static inline void gpiod_set_raw_value(struct gpio_desc *desc, int value)
 {
        /* GPIO can never have been requested */
-       WARN_ON(1);
+       WARN_ON(desc);
 }
 static inline int gpiod_set_raw_array_value(unsigned int array_size,
                                            struct gpio_desc **desc_array,
@@ -411,14 +411,14 @@ static inline int gpiod_set_raw_array_value(unsigned int array_size,
                                            unsigned long *value_bitmap)
 {
        /* GPIO can never have been requested */
-       WARN_ON(1);
+       WARN_ON(desc_array);
        return 0;
 }
 
 static inline int gpiod_get_value_cansleep(const struct gpio_desc *desc)
 {
        /* GPIO can never have been requested */
-       WARN_ON(1);
+       WARN_ON(desc);
        return 0;
 }
 static inline int gpiod_get_array_value_cansleep(unsigned int array_size,
@@ -427,13 +427,13 @@ static inline int gpiod_get_array_value_cansleep(unsigned int array_size,
                                     unsigned long *value_bitmap)
 {
        /* GPIO can never have been requested */
-       WARN_ON(1);
+       WARN_ON(desc_array);
        return 0;
 }
 static inline void gpiod_set_value_cansleep(struct gpio_desc *desc, int value)
 {
        /* GPIO can never have been requested */
-       WARN_ON(1);
+       WARN_ON(desc);
 }
 static inline int gpiod_set_array_value_cansleep(unsigned int array_size,
                                            struct gpio_desc **desc_array,
@@ -441,13 +441,13 @@ static inline int gpiod_set_array_value_cansleep(unsigned int array_size,
                                            unsigned long *value_bitmap)
 {
        /* GPIO can never have been requested */
-       WARN_ON(1);
+       WARN_ON(desc_array);
        return 0;
 }
 static inline int gpiod_get_raw_value_cansleep(const struct gpio_desc *desc)
 {
        /* GPIO can never have been requested */
-       WARN_ON(1);
+       WARN_ON(desc);
        return 0;
 }
 static inline int gpiod_get_raw_array_value_cansleep(unsigned int array_size,
@@ -456,14 +456,14 @@ static inline int gpiod_get_raw_array_value_cansleep(unsigned int array_size,
                                               unsigned long *value_bitmap)
 {
        /* GPIO can never have been requested */
-       WARN_ON(1);
+       WARN_ON(desc_array);
        return 0;
 }
 static inline void gpiod_set_raw_value_cansleep(struct gpio_desc *desc,
                                                int value)
 {
        /* GPIO can never have been requested */
-       WARN_ON(1);
+       WARN_ON(desc);
 }
 static inline int gpiod_set_raw_array_value_cansleep(unsigned int array_size,
                                                struct gpio_desc **desc_array,
@@ -471,41 +471,41 @@ static inline int gpiod_set_raw_array_value_cansleep(unsigned int array_size,
                                                unsigned long *value_bitmap)
 {
        /* GPIO can never have been requested */
-       WARN_ON(1);
+       WARN_ON(desc_array);
        return 0;
 }
 
 static inline int gpiod_set_debounce(struct gpio_desc *desc, unsigned debounce)
 {
        /* GPIO can never have been requested */
-       WARN_ON(1);
+       WARN_ON(desc);
        return -ENOSYS;
 }
 
 static inline int gpiod_set_transitory(struct gpio_desc *desc, bool transitory)
 {
        /* GPIO can never have been requested */
-       WARN_ON(1);
+       WARN_ON(desc);
        return -ENOSYS;
 }
 
 static inline int gpiod_is_active_low(const struct gpio_desc *desc)
 {
        /* GPIO can never have been requested */
-       WARN_ON(1);
+       WARN_ON(desc);
        return 0;
 }
 static inline int gpiod_cansleep(const struct gpio_desc *desc)
 {
        /* GPIO can never have been requested */
-       WARN_ON(1);
+       WARN_ON(desc);
        return 0;
 }
 
 static inline int gpiod_to_irq(const struct gpio_desc *desc)
 {
        /* GPIO can never have been requested */
-       WARN_ON(1);
+       WARN_ON(desc);
        return -EINVAL;
 }
 
@@ -513,7 +513,7 @@ static inline int gpiod_set_consumer_name(struct gpio_desc *desc,
                                          const char *name)
 {
        /* GPIO can never have been requested */
-       WARN_ON(1);
+       WARN_ON(desc);
        return -EINVAL;
 }
 
@@ -525,7 +525,7 @@ static inline struct gpio_desc *gpio_to_desc(unsigned gpio)
 static inline int desc_to_gpio(const struct gpio_desc *desc)
 {
        /* GPIO can never have been requested */
-       WARN_ON(1);
+       WARN_ON(desc);
        return -EINVAL;
 }
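
Note: the consumer.h hunks change the !GPIOLIB stubs from WARN_ON(1) to
WARN_ON(desc): with gpiolib compiled out, every gpiod_get() variant fails,
so NULL is the only descriptor an optional-GPIO user can legitimately hold,
and only a non-NULL handle reaching a stub indicates a bug. A sketch of the
stub pattern, assuming a userspace warn() stand-in:

#include <stdio.h>

struct gpio_desc;	/* opaque handle; never obtainable when stubbed out */

#define WARN_ON(cond) \
	((cond) ? (fprintf(stderr, "WARN: %s:%d\n", __FILE__, __LINE__), 1) : 0)

static inline int gpiod_get_value(const struct gpio_desc *desc)
{
	/* NULL is the only value optional-GPIO users can hold here */
	WARN_ON(desc);
	return 0;
}

int main(void)
{
	gpiod_get_value(NULL);				/* silent: fine for optional GPIOs */
	gpiod_get_value((struct gpio_desc *)0x1);	/* warns: caller has a bogus handle */
	return 0;
}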
 
index b8a08b2..7ef56dc 100644 (file)
@@ -484,60 +484,6 @@ long hmm_range_dma_unmap(struct hmm_range *range,
  */
 #define HMM_RANGE_DEFAULT_TIMEOUT 1000
 
-/* This is a temporary helper to avoid merge conflict between trees. */
-static inline bool hmm_vma_range_done(struct hmm_range *range)
-{
-       bool ret = hmm_range_valid(range);
-
-       hmm_range_unregister(range);
-       return ret;
-}
-
-/* This is a temporary helper to avoid merge conflict between trees. */
-static inline int hmm_vma_fault(struct hmm_mirror *mirror,
-                               struct hmm_range *range, bool block)
-{
-       long ret;
-
-       /*
-        * With the old API the driver must set each individual entries with
-        * the requested flags (valid, write, ...). So here we set the mask to
-        * keep intact the entries provided by the driver and zero out the
-        * default_flags.
-        */
-       range->default_flags = 0;
-       range->pfn_flags_mask = -1UL;
-
-       ret = hmm_range_register(range, mirror,
-                                range->start, range->end,
-                                PAGE_SHIFT);
-       if (ret)
-               return (int)ret;
-
-       if (!hmm_range_wait_until_valid(range, HMM_RANGE_DEFAULT_TIMEOUT)) {
-               /*
-                * The mmap_sem was taken by driver we release it here and
-                * returns -EAGAIN which correspond to mmap_sem have been
-                * drop in the old API.
-                */
-               up_read(&range->vma->vm_mm->mmap_sem);
-               return -EAGAIN;
-       }
-
-       ret = hmm_range_fault(range, block);
-       if (ret <= 0) {
-               if (ret == -EBUSY || !ret) {
-                       /* Same as above, drop mmap_sem to match old API. */
-                       up_read(&range->vma->vm_mm->mmap_sem);
-                       ret = -EBUSY;
-               } else if (ret == -EAGAIN)
-                       ret = -EBUSY;
-               hmm_range_unregister(range);
-               return ret;
-       }
-       return 0;
-}
-
 /* Below are for HMM internal use only! Not to be used by device driver! */
 static inline void hmm_mm_init(struct mm_struct *mm)
 {
index b2c1648..5714fd3 100644 (file)
@@ -814,6 +814,7 @@ struct tee_client_device_id {
 /**
  * struct wmi_device_id - WMI device identifier
  * @guid_string: 36 char string of the form fa50ff2b-f2e8-45de-83fa-65417f2f49ba
+ * @context: pointer to driver specific data
  */
 struct wmi_device_id {
        const char guid_string[UUID_STRING_LEN+1];
index c5f8a9f..4f22517 100644 (file)
@@ -2647,7 +2647,9 @@ struct ib_client {
                        const union ib_gid *gid,
                        const struct sockaddr *addr,
                        void *client_data);
-       struct list_head list;
+
+       refcount_t uses;
+       struct completion uses_zero;
        u32 client_id;
 
        /* kverbs are not required by the client */
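
Note: replacing the ib_client list linkage with a refcount_t plus a
completion is the usual kernel teardown pattern: each user takes a
reference, and unregistration waits until the count drops to zero before
freeing. A userspace approximation with C11 atomics and pthreads
(refcount_t/completion semantics simplified):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

struct client {
	atomic_int uses;		/* like refcount_t uses */
	pthread_mutex_t lock;
	pthread_cond_t zero;		/* like struct completion uses_zero */
};

static void client_get(struct client *c)
{
	atomic_fetch_add(&c->uses, 1);
}

static void client_put(struct client *c)
{
	if (atomic_fetch_sub(&c->uses, 1) == 1) {	/* last user */
		pthread_mutex_lock(&c->lock);
		pthread_cond_signal(&c->zero);
		pthread_mutex_unlock(&c->lock);
	}
}

static void client_unregister(struct client *c)
{
	client_put(c);			/* drop the initial reference */
	pthread_mutex_lock(&c->lock);
	while (atomic_load(&c->uses) > 0)
		pthread_cond_wait(&c->zero, &c->lock);
	pthread_mutex_unlock(&c->lock);
	puts("no users left; safe to free");
}

int main(void)
{
	struct client c = { 1, PTHREAD_MUTEX_INITIALIZER,
			    PTHREAD_COND_INITIALIZER };

	client_get(&c);
	client_put(&c);		/* a transient user comes and goes */
	client_unregister(&c);
	return 0;
}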
index 0eeea52..e06c77d 100644 (file)
@@ -608,7 +608,7 @@ static inline void rvt_qp_wqe_reserve(
 /**
  * rvt_qp_wqe_unreserve - clean reserved operation
  * @qp - the rvt qp
- * @wqe - the send wqe
+ * @flags - send wqe flags
  *
  * This decrements the reserve use count.
  *
@@ -620,11 +620,9 @@ static inline void rvt_qp_wqe_reserve(
  * the compiler does not juggle the order of the s_last
  * ring index and the decrementing of s_reserved_used.
  */
-static inline void rvt_qp_wqe_unreserve(
-       struct rvt_qp *qp,
-       struct rvt_swqe *wqe)
+static inline void rvt_qp_wqe_unreserve(struct rvt_qp *qp, int flags)
 {
-       if (unlikely(wqe->wr.send_flags & RVT_SEND_RESERVE_USED)) {
+       if (unlikely(flags & RVT_SEND_RESERVE_USED)) {
                atomic_dec(&qp->s_reserved_used);
                /* insure no compiler re-order up to s_last change */
                smp_mb__after_atomic();
@@ -853,6 +851,7 @@ rvt_qp_complete_swqe(struct rvt_qp *qp,
        u32 byte_len, last;
        int flags = wqe->wr.send_flags;
 
+       rvt_qp_wqe_unreserve(qp, flags);
        rvt_put_qp_swqe(qp, wqe);
 
        need_completion =
index 2212add..64e92d5 100644 (file)
@@ -2,7 +2,7 @@
 #undef TRACE_SYSTEM
 #define TRACE_SYSTEM dma_fence
 
-#if !defined(_TRACE_FENCE_H) || defined(TRACE_HEADER_MULTI_READ)
+#if !defined(_TRACE_DMA_FENCE_H) || defined(TRACE_HEADER_MULTI_READ)
 #define _TRACE_DMA_FENCE_H
 
 #include <linux/tracepoint.h>
index f3a1256..6678cf8 100644 (file)
@@ -3,7 +3,7 @@
 #define TRACE_SYSTEM napi
 
 #if !defined(_TRACE_NAPI_H) || defined(TRACE_HEADER_MULTI_READ)
-#define _TRACE_NAPI_H_
+#define _TRACE_NAPI_H
 
 #include <linux/netdevice.h>
 #include <linux/tracepoint.h>
@@ -38,7 +38,7 @@ TRACE_EVENT(napi_poll,
 
 #undef NO_DEV
 
-#endif /* _TRACE_NAPI_H_ */
+#endif /* _TRACE_NAPI_H */
 
 /* This part must be outside protection */
 #include <trace/define_trace.h>
index 60d0d8b..0d1a9eb 100644 (file)
@@ -2,7 +2,7 @@
 #define TRACE_SYSTEM qdisc
 
 #if !defined(_TRACE_QDISC_H) || defined(TRACE_HEADER_MULTI_READ)
-#define _TRACE_QDISC_H_
+#define _TRACE_QDISC_H
 
 #include <linux/skbuff.h>
 #include <linux/netdevice.h>
@@ -44,7 +44,7 @@ TRACE_EVENT(qdisc_dequeue,
                  __entry->txq_state, __entry->packets, __entry->skbaddr )
 );
 
-#endif /* _TRACE_QDISC_H_ */
+#endif /* _TRACE_QDISC_H */
 
 /* This part must be outside protection */
 #include <trace/define_trace.h>
index 0818f62..971cd02 100644 (file)
@@ -1,5 +1,5 @@
 #if !defined(_TRACE_TEGRA_APB_DMA_H) || defined(TRACE_HEADER_MULTI_READ)
-#define _TRACE_TEGRA_APM_DMA_H
+#define _TRACE_TEGRA_APB_DMA_H
 
 #include <linux/tracepoint.h>
 #include <linux/dmaengine.h>
@@ -55,7 +55,7 @@ TRACE_EVENT(tegra_dma_isr,
        TP_printk("%s: irq %d\n",  __get_str(chan), __entry->irq)
 );
 
-#endif /*  _TRACE_TEGRADMA_H */
+#endif /* _TRACE_TEGRA_APB_DMA_H */
 
 /* This part must be outside protection */
 #include <trace/define_trace.h>
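
Note: the three hunks above fix mismatched guard macros (for example,
testing _TRACE_NAPI_H but defining _TRACE_NAPI_H_). Trace headers depend on
the guard actually being defined, because define_trace.h re-includes the
header a second time with TRACE_HEADER_MULTI_READ set; with a typoed guard
the body is also re-expanded on every ordinary include. The canonical shape
(a sketch, not any specific header):

/* trace/events/foo.h - guard name must match the test and the #endif */
#if !defined(_TRACE_FOO_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_FOO_H	/* a typo here (e.g. _TRACE_FOO_H_) leaves the guard
			   unset, so every plain #include re-expands the
			   TRACE_EVENT() bodies */

/* TRACE_EVENT(...) definitions go here */

#endif /* _TRACE_FOO_H */

/* This part must be outside protection */
#include <trace/define_trace.h>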
index ba1b460..237e36a 100644 (file)
@@ -1,8 +1,8 @@
 /* SPDX-License-Identifier: BSD-3-Clause */
 /*
- * Virtio-iommu definition v0.9
+ * Virtio-iommu definition v0.12
  *
- * Copyright (C) 2018 Arm Ltd.
+ * Copyright (C) 2019 Arm Ltd.
  */
 #ifndef _UAPI_LINUX_VIRTIO_IOMMU_H
 #define _UAPI_LINUX_VIRTIO_IOMMU_H
 
 /* Feature bits */
 #define VIRTIO_IOMMU_F_INPUT_RANGE             0
-#define VIRTIO_IOMMU_F_DOMAIN_BITS             1
+#define VIRTIO_IOMMU_F_DOMAIN_RANGE            1
 #define VIRTIO_IOMMU_F_MAP_UNMAP               2
 #define VIRTIO_IOMMU_F_BYPASS                  3
 #define VIRTIO_IOMMU_F_PROBE                   4
+#define VIRTIO_IOMMU_F_MMIO                    5
 
-struct virtio_iommu_range {
-       __u64                                   start;
-       __u64                                   end;
+struct virtio_iommu_range_64 {
+       __le64                                  start;
+       __le64                                  end;
+};
+
+struct virtio_iommu_range_32 {
+       __le32                                  start;
+       __le32                                  end;
 };
 
 struct virtio_iommu_config {
        /* Supported page sizes */
-       __u64                                   page_size_mask;
+       __le64                                  page_size_mask;
        /* Supported IOVA range */
-       struct virtio_iommu_range               input_range;
+       struct virtio_iommu_range_64            input_range;
        /* Max domain ID size */
-       __u8                                    domain_bits;
-       __u8                                    padding[3];
+       struct virtio_iommu_range_32            domain_range;
        /* Probe buffer size */
-       __u32                                   probe_size;
+       __le32                                  probe_size;
 };
 
 /* Request types */
@@ -49,6 +54,7 @@ struct virtio_iommu_config {
 #define VIRTIO_IOMMU_S_RANGE                   0x05
 #define VIRTIO_IOMMU_S_NOENT                   0x06
 #define VIRTIO_IOMMU_S_FAULT                   0x07
+#define VIRTIO_IOMMU_S_NOMEM                   0x08
 
 struct virtio_iommu_req_head {
        __u8                                    type;
@@ -78,12 +84,10 @@ struct virtio_iommu_req_detach {
 
 #define VIRTIO_IOMMU_MAP_F_READ                        (1 << 0)
 #define VIRTIO_IOMMU_MAP_F_WRITE               (1 << 1)
-#define VIRTIO_IOMMU_MAP_F_EXEC                        (1 << 2)
-#define VIRTIO_IOMMU_MAP_F_MMIO                        (1 << 3)
+#define VIRTIO_IOMMU_MAP_F_MMIO                        (1 << 2)
 
 #define VIRTIO_IOMMU_MAP_F_MASK                        (VIRTIO_IOMMU_MAP_F_READ |      \
                                                 VIRTIO_IOMMU_MAP_F_WRITE |     \
-                                                VIRTIO_IOMMU_MAP_F_EXEC |      \
                                                 VIRTIO_IOMMU_MAP_F_MMIO)
 
 struct virtio_iommu_req_map {
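
Note: besides the v0.12 feature changes, the uapi update retypes the config
fields from __u64/__u32 to __le64/__le32, making explicit that virtio config
space is little-endian and must be converted on big-endian hosts. A minimal
sketch of the conversion a driver is expected to perform (userspace stand-in
for le64_to_cpu):

#include <stdint.h>
#include <stdio.h>

/* userspace stand-in for the kernel's le64_to_cpu() */
static uint64_t my_le64_to_cpu(const uint8_t b[8])
{
	uint64_t v = 0;

	for (int i = 7; i >= 0; i--)
		v = (v << 8) | b[i];	/* byte 0 is least significant */
	return v;
}

int main(void)
{
	/* page_size_mask = 0x1000 as it appears on the wire */
	uint8_t wire[8] = { 0x00, 0x10, 0, 0, 0, 0, 0, 0 };

	printf("page_size_mask = 0x%llx\n",
	       (unsigned long long)my_le64_to_cpu(wire));
	return 0;
}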
index bfc0c17..2bd410f 100644 (file)
@@ -243,8 +243,9 @@ struct page *dma_alloc_contiguous(struct device *dev, size_t size, gfp_t gfp)
 
        /* CMA can be used only in the context which permits sleeping */
        if (cma && gfpflags_allow_blocking(gfp)) {
-               align = min_t(size_t, align, CONFIG_CMA_ALIGNMENT);
-               page = cma_alloc(cma, count, align, gfp & __GFP_NOWARN);
+               size_t cma_align = min_t(size_t, align, CONFIG_CMA_ALIGNMENT);
+
+               page = cma_alloc(cma, count, cma_align, gfp & __GFP_NOWARN);
        }
 
        /* Fallback allocation of normal pages */
@@ -266,7 +267,8 @@ struct page *dma_alloc_contiguous(struct device *dev, size_t size, gfp_t gfp)
  */
 void dma_free_contiguous(struct device *dev, struct page *page, size_t size)
 {
-       if (!cma_release(dev_get_cma_area(dev), page, size >> PAGE_SHIFT))
+       if (!cma_release(dev_get_cma_area(dev), page,
+                        PAGE_ALIGN(size) >> PAGE_SHIFT))
                __free_pages(page, get_order(size));
 }
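
Note: the dma_free_contiguous() fix matters because the allocation path
rounds the byte count up to whole pages while the old free path truncated
it down, so any sub-page tail leaked a CMA page on every alloc/free cycle.
The arithmetic, assuming 4 KiB pages:

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

int main(void)
{
	unsigned long size = PAGE_SIZE + 1;	/* e.g. 4097 bytes */

	printf("allocated pages: %lu\n",
	       (unsigned long)(PAGE_ALIGN(size) >> PAGE_SHIFT));	/* 2 */
	printf("old free count:  %lu\n", size >> PAGE_SHIFT);		/* 1: leaks one */
	printf("new free count:  %lu\n",
	       (unsigned long)(PAGE_ALIGN(size) >> PAGE_SHIFT));	/* 2 */
	return 0;
}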
 
index 1f628e7..b945239 100644 (file)
@@ -116,11 +116,16 @@ int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
        int ret;
 
        if (!dev_is_dma_coherent(dev)) {
+               unsigned long pfn;
+
                if (!IS_ENABLED(CONFIG_ARCH_HAS_DMA_COHERENT_TO_PFN))
                        return -ENXIO;
 
-               page = pfn_to_page(arch_dma_coherent_to_pfn(dev, cpu_addr,
-                               dma_addr));
+               /* If the PFN is not valid, we do not have a struct page */
+               pfn = arch_dma_coherent_to_pfn(dev, cpu_addr, dma_addr);
+               if (!pfn_valid(pfn))
+                       return -ENXIO;
+               page = pfn_to_page(pfn);
        } else {
                page = virt_to_page(cpu_addr);
        }
@@ -170,7 +175,11 @@ int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
        if (!dev_is_dma_coherent(dev)) {
                if (!IS_ENABLED(CONFIG_ARCH_HAS_DMA_COHERENT_TO_PFN))
                        return -ENXIO;
+
+               /* If the PFN is not valid, we do not have a struct page */
                pfn = arch_dma_coherent_to_pfn(dev, cpu_addr, dma_addr);
+               if (!pfn_valid(pfn))
+                       return -ENXIO;
        } else {
                pfn = page_to_pfn(virt_to_page(cpu_addr));
        }
index 4436158..5b4a5dc 100644 (file)
@@ -734,9 +734,10 @@ static void exit_notify(struct task_struct *tsk, int group_dead)
                autoreap = true;
        }
 
-       tsk->exit_state = autoreap ? EXIT_DEAD : EXIT_ZOMBIE;
-       if (tsk->exit_state == EXIT_DEAD)
+       if (autoreap) {
+               tsk->exit_state = EXIT_DEAD;
                list_add(&tsk->ptrace_entry, &dead);
+       }
 
        /* mt-exec, de_thread() is waiting for group leader */
        if (unlikely(tsk->signal->notify_count < 0))
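
Note: the exit_notify() change decides EXIT_DEAD from the local autoreap
flag instead of re-reading tsk->exit_state, which a concurrently reaping
parent may change as soon as it is published. The shape of the fix as a
generic pattern (hypothetical names, races not reproduced at runtime):

#include <stdio.h>

enum { EXIT_ZOMBIE = 1, EXIT_DEAD = 2 };

struct task { int exit_state; };

static void exit_notify(struct task *tsk, int autoreap)
{
	tsk->exit_state = autoreap ? EXIT_DEAD : EXIT_ZOMBIE;
	/*
	 * BAD: once exit_state is visible, another CPU may reap the task
	 * and overwrite it, so re-reading it here races:
	 *     if (tsk->exit_state == EXIT_DEAD) ...
	 * GOOD: branch on the local decision instead:
	 */
	if (autoreap)
		printf("add task to the local dead list\n");
}

int main(void)
{
	struct task t = { 0 };

	exit_notify(&t, 1);
	return 0;
}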
index 91b789d..349f5a6 100644 (file)
@@ -1885,6 +1885,7 @@ static void do_notify_pidfd(struct task_struct *task)
 {
        struct pid *pid;
 
+       WARN_ON(task->exit_state == 0);
        pid = task_pid(task);
        wake_up_all(&pid->wait_pidfd);
 }
index 69ebf3c..78af971 100644 (file)
@@ -137,6 +137,13 @@ int trace_graph_entry(struct ftrace_graph_ent *trace)
        if (trace_recursion_test(TRACE_GRAPH_NOTRACE_BIT))
                return 0;
 
+       /*
+        * Do not trace a function if it's filtered by set_graph_notrace.
+        * Make the index of ret stack negative to indicate that it should
+        * ignore further functions.  But it needs its own ret stack entry
+        * to recover the original index in order to continue tracing after
+        * returning from the function.
+        */
        if (ftrace_graph_notrace_addr(trace->func)) {
                trace_recursion_set(TRACE_GRAPH_NOTRACE_BIT);
                /*
@@ -155,16 +162,6 @@ int trace_graph_entry(struct ftrace_graph_ent *trace)
        if (ftrace_graph_ignore_irqs())
                return 0;
 
-       /*
-        * Do not trace a function if it's filtered by set_graph_notrace.
-        * Make the index of ret stack negative to indicate that it should
-        * ignore further functions.  But it needs its own ret stack entry
-        * to recover the original index in order to continue tracing after
-        * returning from the function.
-        */
-       if (ftrace_graph_notrace_addr(trace->func))
-               return 1;
-
        /*
         * Stop here if tracing_threshold is set. We only write function return
         * events to the ring buffer.
index 83a7b61..798275a 100644 (file)
@@ -21,7 +21,6 @@ static void balloon_page_enqueue_one(struct balloon_dev_info *b_dev_info,
         * memory corruption is possible and we should stop execution.
         */
        BUG_ON(!trylock_page(page));
-       list_del(&page->lru);
        balloon_page_insert(b_dev_info, page);
        unlock_page(page);
        __count_vm_event(BALLOON_INFLATE);
@@ -33,8 +32,8 @@ static void balloon_page_enqueue_one(struct balloon_dev_info *b_dev_info,
  * @b_dev_info: balloon device descriptor where we will insert a new page to
  * @pages: pages to enqueue - allocated using balloon_page_alloc.
  *
- * Driver must call it to properly enqueue a balloon pages before definitively
- * removing it from the guest system.
+ * Driver must call this function to properly enqueue balloon pages before
+ * definitively removing them from the guest system.
  *
  * Return: number of pages that were enqueued.
  */
@@ -47,6 +46,7 @@ size_t balloon_page_list_enqueue(struct balloon_dev_info *b_dev_info,
 
        spin_lock_irqsave(&b_dev_info->pages_lock, flags);
        list_for_each_entry_safe(page, tmp, pages, lru) {
+               list_del(&page->lru);
                balloon_page_enqueue_one(b_dev_info, page);
                n_pages++;
        }
@@ -63,12 +63,13 @@ EXPORT_SYMBOL_GPL(balloon_page_list_enqueue);
  * @n_req_pages: number of requested pages.
  *
  * Driver must call this function to properly de-allocate a previous enlisted
- * balloon pages before definetively releasing it back to the guest system.
+ * balloon pages before definitively releasing it back to the guest system.
  * This function tries to remove @n_req_pages from the ballooned pages and
  * return them to the caller in the @pages list.
  *
- * Note that this function may fail to dequeue some pages temporarily empty due
- * to compaction isolated pages.
+ * Note that this function may fail to dequeue some pages even if the balloon
+ * isn't empty - since the page list can be temporarily empty due to compaction
+ * of isolated pages.
  *
  * Return: number of pages that were added to the @pages list.
  */
@@ -112,12 +113,13 @@ EXPORT_SYMBOL_GPL(balloon_page_list_dequeue);
 
 /*
  * balloon_page_alloc - allocates a new page for insertion into the balloon
- *                       page list.
+ *                     page list.
+ *
+ * Driver must call this function to properly allocate a new balloon page.
+ * Driver must call balloon_page_enqueue before definitively removing the page
+ * from the guest system.
  *
- * Driver must call it to properly allocate a new enlisted balloon page.
- * Driver must call balloon_page_enqueue before definitively removing it from
- * the guest system.  This function returns the page address for the recently
- * allocated page or NULL in the case we fail to allocate a new page this turn.
+ * Return: struct page for the allocated page or NULL on allocation failure.
  */
 struct page *balloon_page_alloc(void)
 {
@@ -128,15 +130,17 @@ struct page *balloon_page_alloc(void)
 EXPORT_SYMBOL_GPL(balloon_page_alloc);
 
 /*
- * balloon_page_enqueue - allocates a new page and inserts it into the balloon
- *                       page list.
- * @b_dev_info: balloon device descriptor where we will insert a new page to
+ * balloon_page_enqueue - inserts a new page into the balloon page list.
+ *
+ * @b_dev_info: balloon device descriptor where we will insert a new page
  * @page: new page to enqueue - allocated using balloon_page_alloc.
  *
- * Driver must call it to properly enqueue a new allocated balloon page
- * before definitively removing it from the guest system.
- * This function returns the page address for the recently enqueued page or
- * NULL in the case we fail to allocate a new page this turn.
+ * Drivers must call this function to properly enqueue a new allocated balloon
+ * page before definitively removing the page from the guest system.
+ *
+ * Drivers must not call balloon_page_enqueue on pages that have been pushed to
+ * a list with balloon_page_push before removing them with balloon_page_pop. To
+ * enqueue a list of pages, use balloon_page_list_enqueue instead.
  */
 void balloon_page_enqueue(struct balloon_dev_info *b_dev_info,
                          struct page *page)
@@ -151,14 +155,23 @@ EXPORT_SYMBOL_GPL(balloon_page_enqueue);
 
 /*
  * balloon_page_dequeue - removes a page from balloon's page list and returns
- *                       the its address to allow the driver release the page.
+ *                       its address to allow the driver to release the page.
  * @b_dev_info: balloon device decriptor where we will grab a page from.
  *
- * Driver must call it to properly de-allocate a previous enlisted balloon page
- * before definetively releasing it back to the guest system.
- * This function returns the page address for the recently dequeued page or
- * NULL in the case we find balloon's page list temporarily empty due to
- * compaction isolated pages.
+ * Driver must call this function to properly dequeue a previously enqueued page
+ * before definitively releasing it back to the guest system.
+ *
+ * Caller must perform its own accounting to ensure that this
+ * function is called only if some pages are actually enqueued.
+ *
+ * Note that this function may fail to dequeue some pages even if there are
+ * some enqueued pages - since the page list can be temporarily empty due to
+ * the compaction of isolated pages.
+ *
+ * TODO: remove the caller accounting requirements, and allow caller to wait
+ * until all pages can be dequeued.
+ *
+ * Return: struct page for the dequeued page, or NULL if no page was dequeued.
  */
 struct page *balloon_page_dequeue(struct balloon_dev_info *b_dev_info)
 {
@@ -171,9 +184,9 @@ struct page *balloon_page_dequeue(struct balloon_dev_info *b_dev_info)
        if (n_pages != 1) {
                /*
                 * If we are unable to dequeue a balloon page because the page
-                * list is empty and there is no isolated pages, then something
+                * list is empty and there are no isolated pages, then something
                 * went out of track and some balloon pages are lost.
-                * BUG() here, otherwise the balloon driver may get stuck into
+                * BUG() here, otherwise the balloon driver may get stuck in
                 * an infinite loop while attempting to release all its pages.
                 */
                spin_lock_irqsave(&b_dev_info->pages_lock, flags);
@@ -224,8 +237,8 @@ int balloon_page_migrate(struct address_space *mapping,
 
        /*
         * We can not easily support the no copy case here so ignore it as it
-        * is unlikely to be use with ballon pages. See include/linux/hmm.h for
-        * user of the MIGRATE_SYNC_NO_COPY mode.
+        * is unlikely to be used with balloon pages. See include/linux/hmm.h
+        * for a user of the MIGRATE_SYNC_NO_COPY mode.
         */
        if (mode == MIGRATE_SYNC_NO_COPY)
                return -EINVAL;
index e1eedef..16b6731 100644 (file)
--- a/mm/hmm.c
+++ b/mm/hmm.c
@@ -946,7 +946,7 @@ EXPORT_SYMBOL(hmm_range_unregister);
  * @range: range
  * Return: -EINVAL if invalid argument, -ENOMEM out of memory, -EPERM invalid
  *          permission (for instance asking for write and range is read only),
- *          -EAGAIN if you need to retry, -EFAULT invalid (ie either no valid
+ *          -EBUSY if you need to retry, -EFAULT invalid (ie either no valid
  *          vma or it is illegal to access that range), number of valid pages
  *          in range->pfns[] (from range start address).
  *
@@ -967,7 +967,7 @@ long hmm_range_snapshot(struct hmm_range *range)
        do {
                /* If range is no longer valid force retry. */
                if (!range->valid)
-                       return -EAGAIN;
+                       return -EBUSY;
 
                vma = find_vma(hmm->mm, start);
                if (vma == NULL || (vma->vm_flags & device_vma))
@@ -1062,10 +1062,8 @@ long hmm_range_fault(struct hmm_range *range, bool block)
 
        do {
                /* If range is no longer valid force retry. */
-               if (!range->valid) {
-                       up_read(&hmm->mm->mmap_sem);
-                       return -EAGAIN;
-               }
+               if (!range->valid)
+                       return -EBUSY;
 
                vma = find_vma(hmm->mm, start);
                if (vma == NULL || (vma->vm_flags & device_vma))
index e6c030e..8834563 100644 (file)
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1432,7 +1432,9 @@ static inline bool slab_free_freelist_hook(struct kmem_cache *s,
        void *old_tail = *tail ? *tail : *head;
        int rsize;
 
-       if (slab_want_init_on_free(s))
+       if (slab_want_init_on_free(s)) {
+               void *p = NULL;
+
                do {
                        object = next;
                        next = get_freepointer(s, object);
@@ -1445,8 +1447,10 @@ static inline bool slab_free_freelist_hook(struct kmem_cache *s,
                                                           : 0;
                        memset((char *)object + s->inuse, 0,
                               s->size - s->inuse - rsize);
-                       set_freepointer(s, object, next);
+                       set_freepointer(s, object, p);
+                       p = object;
                } while (object != old_tail);
+       }
 
 /*
  * Compiler cannot detect this function can be removed if slab_free_hook()
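
Note: the slub hunk fixes init_on_free: the memset() wipes each object
including the freelist pointer stored inside it, so the loop now relinks
the chain by hand (set_freepointer(s, object, p); p = object), rebuilding
the list in reverse as it zeroes. A standalone model of that relinking:

#include <stdio.h>
#include <string.h>

struct obj {
	struct obj *next;	/* freelist pointer lives inside the object */
	char payload[24];
};

int main(void)
{
	struct obj o[3] = { { &o[1] }, { &o[2] }, { NULL } };
	struct obj *object = &o[0], *next, *p = NULL;

	while (object) {
		next = object->next;			/* save before the wipe */
		memset(object, 0, sizeof(*object));	/* zeroing destroys ->next */
		object->next = p;			/* relink by hand, reversed */
		p = object;
		object = next;
	}
	for (struct obj *i = p; i; i = i->next)
		printf("freelist entry %ld\n", (long)(i - o));	/* 2, 1, 0 */
	return 0;
}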
index 12dd9b3..703857a 100644 (file)
@@ -1873,6 +1873,7 @@ static int snd_pcm_drain(struct snd_pcm_substream *substream,
                if (!to_check)
                        break; /* all drained */
                init_waitqueue_entry(&wait, current);
+               set_current_state(TASK_INTERRUPTIBLE);
                add_wait_queue(&to_check->sleep, &wait);
                snd_pcm_stream_unlock_irq(substream);
                if (runtime->no_period_wakeup)
@@ -1885,7 +1886,7 @@ static int snd_pcm_drain(struct snd_pcm_substream *substream,
                        }
                        tout = msecs_to_jiffies(tout * 1000);
                }
-               tout = schedule_timeout_interruptible(tout);
+               tout = schedule_timeout(tout);
 
                snd_pcm_stream_lock_irq(substream);
                group = snd_pcm_stream_group_ref(substream);
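
Note: the snd_pcm_drain() fix is the classic lost-wakeup ordering bug: the
task must mark itself sleeping (set_current_state) before it is visible on
the wait queue and before the lock is dropped; deferring that to
schedule_timeout_interruptible() leaves a window where the drain wakeup
fires and is missed. The userspace analogue of the safe shape, using a
condition variable (predicate re-checked under the lock before sleeping):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cv = PTHREAD_COND_INITIALIZER;
static int drained;

static void *producer(void *arg)
{
	pthread_mutex_lock(&lock);
	drained = 1;			/* state change... */
	pthread_cond_signal(&cv);	/* ...and wakeup, both under the lock */
	pthread_mutex_unlock(&lock);
	return arg;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, producer, NULL);
	pthread_mutex_lock(&lock);
	while (!drained)		/* re-check condition before sleeping */
		pthread_cond_wait(&cv, &lock);
	pthread_mutex_unlock(&lock);
	pthread_join(t, NULL);
	puts("drained");
	return 0;
}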
index 1192c75..3c2db38 100644 (file)
@@ -136,10 +136,12 @@ int snd_hdac_i915_init(struct hdac_bus *bus)
        if (!acomp)
                return -ENODEV;
        if (!acomp->ops) {
-               request_module("i915");
-               /* 60s timeout */
-               wait_for_completion_timeout(&bind_complete,
-                                           msecs_to_jiffies(60 * 1000));
+               if (!IS_ENABLED(CONFIG_MODULES) ||
+                   !request_module("i915")) {
+                       /* 60s timeout */
+                       wait_for_completion_timeout(&bind_complete,
+                                                  msecs_to_jiffies(60 * 1000));
+               }
        }
        if (!acomp->ops) {
                dev_info(bus->dev, "couldn't bind with audio component\n");
index 71d5f54..4c12cc5 100644 (file)
@@ -72,7 +72,7 @@ int snd_usb_pipe_sanity_check(struct usb_device *dev, unsigned int pipe)
        struct usb_host_endpoint *ep;
 
        ep = usb_pipe_endpoint(dev, pipe);
-       if (usb_pipetype(pipe) != pipetypes[usb_endpoint_type(&ep->desc)])
+       if (!ep || usb_pipetype(pipe) != pipetypes[usb_endpoint_type(&ep->desc)])
                return -EINVAL;
        return 0;
 }
index 71231ad..47315fe 100755 (executable)
@@ -262,7 +262,7 @@ test_mc_aware()
 
        stop_traffic
 
-       log_test "UC performace under MC overload"
+       log_test "UC performance under MC overload"
 
        echo "UC-only throughput  $(humanize $ucth1)"
        echo "UC+MC throughput    $(humanize $ucth2)"
@@ -316,7 +316,7 @@ test_uc_aware()
 
        stop_traffic
 
-       log_test "MC performace under UC overload"
+       log_test "MC performance under UC overload"
        echo "    ingress UC throughput $(humanize ${uc_ir})"
        echo "    egress UC throughput  $(humanize ${uc_er})"
        echo "    sent $attempts BC ARPs, got $passes responses"
index 0a76314..8b944cf 100755 (executable)
@@ -28,7 +28,7 @@
 # override by exporting to your environment prior running this script.
 # For instance this script assumes you do not have xfs loaded upon boot.
 # If this is false, export DEFAULT_KMOD_FS="ext4" prior to running this
-# script if the filesyste module you don't have loaded upon bootup
+# script if the filesystem module you don't have loaded upon bootup
 # is ext4 instead. Refer to allow_user_defaults() for a list of user
 # override variables possible.
 #
@@ -263,7 +263,7 @@ config_get_test_result()
 config_reset()
 {
        if ! echo -n "1" >"$DIR"/reset; then
-               echo "$0: reset shuld have worked" >&2
+               echo "$0: reset should have worked" >&2
                exit 1
        fi
 }
@@ -488,7 +488,7 @@ usage()
        echo Example uses:
        echo
        echo "${TEST_NAME}.sh           -- executes all tests"
-       echo "${TEST_NAME}.sh -t 0008   -- Executes test ID 0008 number of times is recomended"
+       echo "${TEST_NAME}.sh -t 0008   -- Executes test ID 0008 number of times is recommended"
        echo "${TEST_NAME}.sh -w 0008   -- Watch test ID 0008 run until an error occurs"
        echo "${TEST_NAME}.sh -s 0008   -- Run test ID 0008 once"
        echo "${TEST_NAME}.sh -c 0008 3 -- Run test ID 0008 three times"
index 3019544..edcfeac 100644 (file)
@@ -13,6 +13,14 @@ function log() {
        echo "$1" > /dev/kmsg
 }
 
+# skip(msg) - testing can't proceed
+#      msg - explanation
+function skip() {
+       log "SKIP: $1"
+       echo "SKIP: $1" >&2
+       exit 4
+}
+
 # die(msg) - game over, man
 #      msg - dying words
 function die() {
@@ -43,6 +51,12 @@ function loop_until() {
        done
 }
 
+function assert_mod() {
+       local mod="$1"
+
+       modprobe --dry-run "$mod" &>/dev/null
+}
+
 function is_livepatch_mod() {
        local mod="$1"
 
@@ -75,6 +89,9 @@ function __load_mod() {
 function load_mod() {
        local mod="$1"; shift
 
+       assert_mod "$mod" ||
+               skip "unable to load module ${mod}, verify CONFIG_TEST_LIVEPATCH=m and run self-tests as root"
+
        is_livepatch_mod "$mod" &&
                die "use load_lp() to load the livepatch module $mod"
 
@@ -88,6 +105,9 @@ function load_mod() {
 function load_lp_nowait() {
        local mod="$1"; shift
 
+       assert_mod "$mod" ||
+               skip "unable to load module ${mod}, verify CONFIG_TEST_LIVEPATCH=m and run self-tests as root"
+
        is_livepatch_mod "$mod" ||
                die "module $mod is not a livepatch"
 
index 7eaa8a3..b632965 100644 (file)
@@ -339,13 +339,9 @@ static int test_pidfd_send_signal_syscall_support(void)
 
        ret = sys_pidfd_send_signal(pidfd, 0, NULL, 0);
        if (ret < 0) {
-               /*
-                * pidfd_send_signal() will currently return ENOSYS when
-                * CONFIG_PROC_FS is not set.
-                */
                if (errno == ENOSYS)
                        ksft_exit_skip(
-                               "%s test: pidfd_send_signal() syscall not supported (Ensure that CONFIG_PROC_FS=y is set)\n",
+                               "%s test: pidfd_send_signal() syscall not supported\n",
                                test_name);
 
                ksft_exit_fail_msg("%s test: Failed to send signal\n",
index 4602326..a4f4d4c 100644 (file)
@@ -451,7 +451,7 @@ static int test_vsys_x(void)
                printf("[OK]\tExecuting the vsyscall page failed: #PF(0x%lx)\n",
                       segv_err);
        } else {
-               printf("[FAILT]\tExecution failed with the wrong error: #PF(0x%lx)\n",
+               printf("[FAIL]\tExecution failed with the wrong error: #PF(0x%lx)\n",
                       segv_err);
                return 1;
        }