Merge branches 'iommu/fixes', 'arm/mediatek', 'arm/smmu', 'arm/exynos', 'unisoc'...
[linux-2.6-microblaze.git] / drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
index 8594b4a..6f8ffc8 100644
@@ -245,8 +245,6 @@ static int arm_smmu_cmdq_build_cmd(u64 *cmd, struct arm_smmu_cmdq_ent *ent)
                break;
        case CMDQ_OP_PREFETCH_CFG:
                cmd[0] |= FIELD_PREP(CMDQ_PREFETCH_0_SID, ent->prefetch.sid);
-               cmd[1] |= FIELD_PREP(CMDQ_PREFETCH_1_SIZE, ent->prefetch.size);
-               cmd[1] |= ent->prefetch.addr & CMDQ_PREFETCH_1_ADDR_MASK;
                break;
        case CMDQ_OP_CFGI_CD:
                cmd[0] |= FIELD_PREP(CMDQ_CFGI_0_SSID, ent->cfgi.ssid);
@@ -909,8 +907,8 @@ static void arm_smmu_sync_cd(struct arm_smmu_domain *smmu_domain,
 
        spin_lock_irqsave(&smmu_domain->devices_lock, flags);
        list_for_each_entry(master, &smmu_domain->devices, domain_head) {
-               for (i = 0; i < master->num_sids; i++) {
-                       cmd.cfgi.sid = master->sids[i];
+               for (i = 0; i < master->num_streams; i++) {
+                       cmd.cfgi.sid = master->streams[i].id;
                        arm_smmu_cmdq_batch_add(smmu, &cmds, &cmd);
                }
        }
@@ -1355,6 +1353,29 @@ static int arm_smmu_init_l2_strtab(struct arm_smmu_device *smmu, u32 sid)
        return 0;
 }
 
+__maybe_unused
+static struct arm_smmu_master *
+arm_smmu_find_master(struct arm_smmu_device *smmu, u32 sid)
+{
+       struct rb_node *node;
+       struct arm_smmu_stream *stream;
+
+       lockdep_assert_held(&smmu->streams_mutex);
+
+       node = smmu->streams.rb_node;
+       while (node) {
+               stream = rb_entry(node, struct arm_smmu_stream, node);
+               if (stream->id < sid)
+                       node = node->rb_right;
+               else if (stream->id > sid)
+                       node = node->rb_left;
+               else
+                       return stream->master;
+       }
+
+       return NULL;
+}
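
arm_smmu_find_master() lets the driver map a StreamID back to its master device by walking the rb-tree keyed on stream->id; it is tagged __maybe_unused here, presumably because its first consumer (event/fault reporting) arrives in a later patch. A minimal caller sketch, assuming a made-up helper name arm_smmu_sid_to_dev() and taking the streams_mutex that the lockdep assertion above requires:

/* Hypothetical caller, not part of this patch. */
static struct device *arm_smmu_sid_to_dev(struct arm_smmu_device *smmu, u32 sid)
{
        struct arm_smmu_master *master;
        struct device *dev = NULL;

        mutex_lock(&smmu->streams_mutex);
        master = arm_smmu_find_master(smmu, sid);
        if (master)
                dev = master->dev;      /* e.g. to report a fault against it */
        mutex_unlock(&smmu->streams_mutex);

        return dev;
}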
+
 /* IRQ and event handlers */
 static irqreturn_t arm_smmu_evtq_thread(int irq, void *dev)
 {
@@ -1588,8 +1609,8 @@ static int arm_smmu_atc_inv_master(struct arm_smmu_master *master)
 
        arm_smmu_atc_inv_to_cmd(0, 0, 0, &cmd);
 
-       for (i = 0; i < master->num_sids; i++) {
-               cmd.atc.sid = master->sids[i];
+       for (i = 0; i < master->num_streams; i++) {
+               cmd.atc.sid = master->streams[i].id;
                arm_smmu_cmdq_issue_cmd(master->smmu, &cmd);
        }
 
@@ -1632,8 +1653,8 @@ int arm_smmu_atc_inv_domain(struct arm_smmu_domain *smmu_domain, int ssid,
                if (!master->ats_enabled)
                        continue;
 
-               for (i = 0; i < master->num_sids; i++) {
-                       cmd.atc.sid = master->sids[i];
+               for (i = 0; i < master->num_streams; i++) {
+                       cmd.atc.sid = master->streams[i].id;
                        arm_smmu_cmdq_batch_add(smmu_domain->smmu, &cmds, &cmd);
                }
        }
@@ -2017,7 +2038,7 @@ static int arm_smmu_domain_finalise(struct iommu_domain *domain,
                .iommu_dev      = smmu->dev,
        };
 
-       if (smmu_domain->non_strict)
+       if (!iommu_get_dma_strict(domain))
                pgtbl_cfg.quirks |= IO_PGTABLE_QUIRK_NON_STRICT;
 
        pgtbl_ops = alloc_io_pgtable_ops(fmt, &pgtbl_cfg, smmu_domain);
@@ -2065,13 +2086,13 @@ static void arm_smmu_install_ste_for_dev(struct arm_smmu_master *master)
        int i, j;
        struct arm_smmu_device *smmu = master->smmu;
 
-       for (i = 0; i < master->num_sids; ++i) {
-               u32 sid = master->sids[i];
+       for (i = 0; i < master->num_streams; ++i) {
+               u32 sid = master->streams[i].id;
                __le64 *step = arm_smmu_get_step_for_sid(smmu, sid);
 
                /* Bridged PCI devices may end up with duplicated IDs */
                for (j = 0; j < i; j++)
-                       if (master->sids[j] == sid)
+                       if (master->streams[j].id == sid)
                                break;
                if (j < i)
                        continue;
@@ -2305,6 +2326,9 @@ static void arm_smmu_iotlb_sync(struct iommu_domain *domain,
 {
        struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
 
+       if (!gather->pgsize)
+               return;
+
        arm_smmu_tlb_inv_range_domain(gather->start,
                                      gather->end - gather->start + 1,
                                      gather->pgsize, true, smmu_domain);
@@ -2345,11 +2369,101 @@ static bool arm_smmu_sid_in_range(struct arm_smmu_device *smmu, u32 sid)
        return sid < limit;
 }
 
+static int arm_smmu_insert_master(struct arm_smmu_device *smmu,
+                                 struct arm_smmu_master *master)
+{
+       int i;
+       int ret = 0;
+       struct arm_smmu_stream *new_stream, *cur_stream;
+       struct rb_node **new_node, *parent_node = NULL;
+       struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(master->dev);
+
+       master->streams = kcalloc(fwspec->num_ids, sizeof(*master->streams),
+                                 GFP_KERNEL);
+       if (!master->streams)
+               return -ENOMEM;
+       master->num_streams = fwspec->num_ids;
+
+       mutex_lock(&smmu->streams_mutex);
+       for (i = 0; i < fwspec->num_ids; i++) {
+               u32 sid = fwspec->ids[i];
+
+               new_stream = &master->streams[i];
+               new_stream->id = sid;
+               new_stream->master = master;
+
+               /*
+                * Check the SIDs are in range of the SMMU and our stream table
+                */
+               if (!arm_smmu_sid_in_range(smmu, sid)) {
+                       ret = -ERANGE;
+                       break;
+               }
+
+               /* Ensure l2 strtab is initialised */
+               if (smmu->features & ARM_SMMU_FEAT_2_LVL_STRTAB) {
+                       ret = arm_smmu_init_l2_strtab(smmu, sid);
+                       if (ret)
+                               break;
+               }
+
+               /* Insert into SID tree */
+               new_node = &(smmu->streams.rb_node);
+               while (*new_node) {
+                       cur_stream = rb_entry(*new_node, struct arm_smmu_stream,
+                                             node);
+                       parent_node = *new_node;
+                       if (cur_stream->id > new_stream->id) {
+                               new_node = &((*new_node)->rb_left);
+                       } else if (cur_stream->id < new_stream->id) {
+                               new_node = &((*new_node)->rb_right);
+                       } else {
+                               dev_warn(master->dev,
+                                        "stream %u already in tree\n",
+                                        cur_stream->id);
+                               ret = -EINVAL;
+                               break;
+                       }
+               }
+               if (ret)
+                       break;
+
+               rb_link_node(&new_stream->node, parent_node, new_node);
+               rb_insert_color(&new_stream->node, &smmu->streams);
+       }
+
+       if (ret) {
+               for (i--; i >= 0; i--)
+                       rb_erase(&master->streams[i].node, &smmu->streams);
+               kfree(master->streams);
+       }
+       mutex_unlock(&smmu->streams_mutex);
+
+       return ret;
+}
+
+static void arm_smmu_remove_master(struct arm_smmu_master *master)
+{
+       int i;
+       struct arm_smmu_device *smmu = master->smmu;
+       struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(master->dev);
+
+       if (!smmu || !master->streams)
+               return;
+
+       mutex_lock(&smmu->streams_mutex);
+       for (i = 0; i < fwspec->num_ids; i++)
+               rb_erase(&master->streams[i].node, &smmu->streams);
+       mutex_unlock(&smmu->streams_mutex);
+
+       kfree(master->streams);
+}
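
arm_smmu_insert_master() registers every StreamID from the device's fwspec in a per-SMMU rb-tree (under streams_mutex), absorbing the SID range check and lazy L2 stream-table initialisation that previously lived in arm_smmu_probe_device(); arm_smmu_remove_master() undoes the registration on release. Roughly, the companion header (arm-smmu-v3.h) is assumed to grow supporting definitions along these lines; the layout below is an abridged sketch, not a quote:

struct arm_smmu_stream {
        u32                             id;
        struct arm_smmu_master          *master;        /* owner of this SID */
        struct rb_node                  node;           /* in smmu->streams */
};

struct arm_smmu_master {
        /* ... existing fields ... */
        struct arm_smmu_stream          *streams;       /* one per fwspec ID */
        unsigned int                    num_streams;    /* replaces num_sids */
};

struct arm_smmu_device {
        /* ... existing fields ... */
        struct rb_root                  streams;        /* SID -> master lookup */
        struct mutex                    streams_mutex;
};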
+
 static struct iommu_ops arm_smmu_ops;
 
 static struct iommu_device *arm_smmu_probe_device(struct device *dev)
 {
-       int i, ret;
+       int ret;
        struct arm_smmu_device *smmu;
        struct arm_smmu_master *master;
        struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
@@ -2370,29 +2484,15 @@ static struct iommu_device *arm_smmu_probe_device(struct device *dev)
 
        master->dev = dev;
        master->smmu = smmu;
-       master->sids = fwspec->ids;
-       master->num_sids = fwspec->num_ids;
        INIT_LIST_HEAD(&master->bonds);
        dev_iommu_priv_set(dev, master);
 
-       /* Check the SIDs are in range of the SMMU and our stream table */
-       for (i = 0; i < master->num_sids; i++) {
-               u32 sid = master->sids[i];
-
-               if (!arm_smmu_sid_in_range(smmu, sid)) {
-                       ret = -ERANGE;
-                       goto err_free_master;
-               }
-
-               /* Ensure l2 strtab is initialised */
-               if (smmu->features & ARM_SMMU_FEAT_2_LVL_STRTAB) {
-                       ret = arm_smmu_init_l2_strtab(smmu, sid);
-                       if (ret)
-                               goto err_free_master;
-               }
-       }
+       ret = arm_smmu_insert_master(smmu, master);
+       if (ret)
+               goto err_free_master;
 
-       master->ssid_bits = min(smmu->ssid_bits, fwspec->num_pasid_bits);
+       device_property_read_u32(dev, "pasid-num-bits", &master->ssid_bits);
+       master->ssid_bits = min(smmu->ssid_bits, master->ssid_bits);
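
The PASID width is now read from the generic "pasid-num-bits" firmware property rather than the fwspec field that is being retired. Note the failure semantics relied on here: device_property_read_u32() leaves the output untouched when the property is absent, and master comes from kzalloc(), so a device without the property keeps ssid_bits == 0, i.e. no SSID/PASID support. A condensed sketch of that logic, using an invented helper name:

static void example_init_ssid_bits(struct device *dev,
                                   struct arm_smmu_device *smmu,
                                   struct arm_smmu_master *master)
{
        master->ssid_bits = 0;  /* default when "pasid-num-bits" is missing */
        device_property_read_u32(dev, "pasid-num-bits", &master->ssid_bits);

        /* never advertise more SSID bits than the SMMU itself supports */
        master->ssid_bits = min(smmu->ssid_bits, master->ssid_bits);
}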
 
        /*
         * Note that PASID must be enabled before, and disabled after ATS:
@@ -2428,6 +2528,7 @@ static void arm_smmu_release_device(struct device *dev)
        WARN_ON(arm_smmu_master_sva_enabled(master));
        arm_smmu_detach_dev(master);
        arm_smmu_disable_pasid(master);
+       arm_smmu_remove_master(master);
        kfree(master);
        iommu_fwspec_free(dev);
 }
@@ -2449,76 +2550,18 @@ static struct iommu_group *arm_smmu_device_group(struct device *dev)
        return group;
 }
 
-static int arm_smmu_domain_get_attr(struct iommu_domain *domain,
-                                   enum iommu_attr attr, void *data)
+static int arm_smmu_enable_nesting(struct iommu_domain *domain)
 {
        struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
-
-       switch (domain->type) {
-       case IOMMU_DOMAIN_UNMANAGED:
-               switch (attr) {
-               case DOMAIN_ATTR_NESTING:
-                       *(int *)data = (smmu_domain->stage == ARM_SMMU_DOMAIN_NESTED);
-                       return 0;
-               default:
-                       return -ENODEV;
-               }
-               break;
-       case IOMMU_DOMAIN_DMA:
-               switch (attr) {
-               case DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE:
-                       *(int *)data = smmu_domain->non_strict;
-                       return 0;
-               default:
-                       return -ENODEV;
-               }
-               break;
-       default:
-               return -EINVAL;
-       }
-}
-
-static int arm_smmu_domain_set_attr(struct iommu_domain *domain,
-                                   enum iommu_attr attr, void *data)
-{
        int ret = 0;
-       struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
 
        mutex_lock(&smmu_domain->init_mutex);
-
-       switch (domain->type) {
-       case IOMMU_DOMAIN_UNMANAGED:
-               switch (attr) {
-               case DOMAIN_ATTR_NESTING:
-                       if (smmu_domain->smmu) {
-                               ret = -EPERM;
-                               goto out_unlock;
-                       }
-
-                       if (*(int *)data)
-                               smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED;
-                       else
-                               smmu_domain->stage = ARM_SMMU_DOMAIN_S1;
-                       break;
-               default:
-                       ret = -ENODEV;
-               }
-               break;
-       case IOMMU_DOMAIN_DMA:
-               switch(attr) {
-               case DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE:
-                       smmu_domain->non_strict = *(int *)data;
-                       break;
-               default:
-                       ret = -ENODEV;
-               }
-               break;
-       default:
-               ret = -EINVAL;
-       }
-
-out_unlock:
+       if (smmu_domain->smmu)
+               ret = -EPERM;
+       else
+               smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED;
        mutex_unlock(&smmu_domain->init_mutex);
+
        return ret;
 }
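
The DOMAIN_ATTR_NESTING get/set plumbing collapses into a dedicated enable_nesting() op; callers are expected to go through the new iommu_enable_nesting() core helper, and must do so before the domain is attached (once smmu_domain->smmu is set the driver returns -EPERM). A hypothetical caller sketch, with example_alloc_nested() being an invented name:

static struct iommu_domain *example_alloc_nested(struct bus_type *bus,
                                                 struct device *dev)
{
        struct iommu_domain *domain = iommu_domain_alloc(bus);

        if (!domain)
                return NULL;

        /* request nested (stage-1 over stage-2) before any attach */
        if (iommu_enable_nesting(domain))
                dev_info(dev, "nested translation not available\n");

        return domain;
}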
 
@@ -2619,8 +2662,7 @@ static struct iommu_ops arm_smmu_ops = {
        .probe_device           = arm_smmu_probe_device,
        .release_device         = arm_smmu_release_device,
        .device_group           = arm_smmu_device_group,
-       .domain_get_attr        = arm_smmu_domain_get_attr,
-       .domain_set_attr        = arm_smmu_domain_set_attr,
+       .enable_nesting         = arm_smmu_enable_nesting,
        .of_xlate               = arm_smmu_of_xlate,
        .get_resv_regions       = arm_smmu_get_resv_regions,
        .put_resv_regions       = generic_iommu_put_resv_regions,
@@ -2851,6 +2893,9 @@ static int arm_smmu_init_structures(struct arm_smmu_device *smmu)
 {
        int ret;
 
+       mutex_init(&smmu->streams_mutex);
+       smmu->streams = RB_ROOT;
+
        ret = arm_smmu_init_queues(smmu);
        if (ret)
                return ret;