iommu/tegra-smmu: Expand mutex protection range
author	Nicolin Chen <nicoleotsuka@gmail.com>
Wed, 25 Nov 2020 10:10:10 +0000 (02:10 -0800)
committer	Will Deacon <will@kernel.org>
Wed, 25 Nov 2020 11:04:41 +0000 (11:04 +0000)
This is used to protect against a potential race condition on use_count,
since probes of client drivers, which call attach_dev(), may run
concurrently.
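
Illustrative sketch (not part of the patch): a minimal user-space model of
why the mutex has to cover the whole check-then-increment of use_count,
not just the ASID bitmap. The pthread harness and the names below are
assumptions for illustration only; in the driver the one-time setup stands
for the dma_map_page() of the page directory done on the first attach.

  /*
   * Without the lock around the check *and* the increment, two
   * concurrent attach paths can both observe use_count == 0 and run
   * the one-time setup twice.  Holding the mutex across the whole
   * sequence makes it atomic.
   */
  #include <pthread.h>
  #include <stdio.h>

  static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
  static unsigned int use_count;
  static unsigned int setup_runs;	/* should end up as exactly 1 */

  static void as_prepare(void)
  {
  	pthread_mutex_lock(&lock);

  	if (use_count > 0) {
  		use_count++;
  		pthread_mutex_unlock(&lock);
  		return;
  	}

  	setup_runs++;		/* stands in for dma_map_page() etc. */
  	use_count++;

  	pthread_mutex_unlock(&lock);
  }

  static void *attach(void *arg)
  {
  	as_prepare();
  	return NULL;
  }

  int main(void)
  {
  	pthread_t a, b;

  	pthread_create(&a, NULL, attach, NULL);
  	pthread_create(&b, NULL, attach, NULL);
  	pthread_join(a, NULL);
  	pthread_join(b, NULL);

  	printf("setup ran %u time(s), use_count=%u\n", setup_runs, use_count);
  	return 0;
  }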

Signed-off-by: Nicolin Chen <nicoleotsuka@gmail.com>
Tested-by: Dmitry Osipenko <digetx@gmail.com>
Reviewed-by: Dmitry Osipenko <digetx@gmail.com>
Acked-by: Thierry Reding <treding@nvidia.com>
Link: https://lore.kernel.org/r/20201125101013.14953-3-nicoleotsuka@gmail.com
Signed-off-by: Will Deacon <will@kernel.org>
drivers/iommu/tegra-smmu.c

index ec4c9da..6a3ecc3 100644
@@ -256,26 +256,19 @@ static int tegra_smmu_alloc_asid(struct tegra_smmu *smmu, unsigned int *idp)
 {
        unsigned long id;
 
-       mutex_lock(&smmu->lock);
-
        id = find_first_zero_bit(smmu->asids, smmu->soc->num_asids);
-       if (id >= smmu->soc->num_asids) {
-               mutex_unlock(&smmu->lock);
+       if (id >= smmu->soc->num_asids)
                return -ENOSPC;
-       }
 
        set_bit(id, smmu->asids);
        *idp = id;
 
-       mutex_unlock(&smmu->lock);
        return 0;
 }
 
 static void tegra_smmu_free_asid(struct tegra_smmu *smmu, unsigned int id)
 {
-       mutex_lock(&smmu->lock);
        clear_bit(id, smmu->asids);
-       mutex_unlock(&smmu->lock);
 }
 
 static bool tegra_smmu_capable(enum iommu_cap cap)
@@ -420,17 +413,21 @@ static int tegra_smmu_as_prepare(struct tegra_smmu *smmu,
                                 struct tegra_smmu_as *as)
 {
        u32 value;
-       int err;
+       int err = 0;
+
+       mutex_lock(&smmu->lock);
 
        if (as->use_count > 0) {
                as->use_count++;
-               return 0;
+               goto unlock;
        }
 
        as->pd_dma = dma_map_page(smmu->dev, as->pd, 0, SMMU_SIZE_PD,
                                  DMA_TO_DEVICE);
-       if (dma_mapping_error(smmu->dev, as->pd_dma))
-               return -ENOMEM;
+       if (dma_mapping_error(smmu->dev, as->pd_dma)) {
+               err = -ENOMEM;
+               goto unlock;
+       }
 
        /* We can't handle 64-bit DMA addresses */
        if (!smmu_dma_addr_valid(smmu, as->pd_dma)) {
@@ -453,24 +450,35 @@ static int tegra_smmu_as_prepare(struct tegra_smmu *smmu,
        as->smmu = smmu;
        as->use_count++;
 
+       mutex_unlock(&smmu->lock);
+
        return 0;
 
 err_unmap:
        dma_unmap_page(smmu->dev, as->pd_dma, SMMU_SIZE_PD, DMA_TO_DEVICE);
+unlock:
+       mutex_unlock(&smmu->lock);
+
        return err;
 }
 
 static void tegra_smmu_as_unprepare(struct tegra_smmu *smmu,
                                    struct tegra_smmu_as *as)
 {
-       if (--as->use_count > 0)
+       mutex_lock(&smmu->lock);
+
+       if (--as->use_count > 0) {
+               mutex_unlock(&smmu->lock);
                return;
+       }
 
        tegra_smmu_free_asid(smmu, as->id);
 
        dma_unmap_page(smmu->dev, as->pd_dma, SMMU_SIZE_PD, DMA_TO_DEVICE);
 
        as->smmu = NULL;
+
+       mutex_unlock(&smmu->lock);
 }
 
 static int tegra_smmu_attach_dev(struct iommu_domain *domain,