// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES
 */
#include <linux/iommu.h>
#include <uapi/linux/iommufd.h>

#include "../iommu-priv.h"
#include "iommufd_private.h"

void iommufd_hwpt_paging_destroy(struct iommufd_object *obj)
{
	struct iommufd_hwpt_paging *hwpt_paging =
		container_of(obj, struct iommufd_hwpt_paging, common.obj);

	if (!list_empty(&hwpt_paging->hwpt_item)) {
		mutex_lock(&hwpt_paging->ioas->mutex);
		list_del(&hwpt_paging->hwpt_item);
		mutex_unlock(&hwpt_paging->ioas->mutex);

		iopt_table_remove_domain(&hwpt_paging->ioas->iopt,
					 hwpt_paging->common.domain);
	}

	if (hwpt_paging->common.domain)
		iommu_domain_free(hwpt_paging->common.domain);

	refcount_dec(&hwpt_paging->ioas->obj.users);
}

void iommufd_hwpt_paging_abort(struct iommufd_object *obj)
{
	struct iommufd_hwpt_paging *hwpt_paging =
		container_of(obj, struct iommufd_hwpt_paging, common.obj);

	/* The ioas->mutex must be held until finalize is called. */
	lockdep_assert_held(&hwpt_paging->ioas->mutex);

	if (!list_empty(&hwpt_paging->hwpt_item)) {
		list_del_init(&hwpt_paging->hwpt_item);
		iopt_table_remove_domain(&hwpt_paging->ioas->iopt,
					 hwpt_paging->common.domain);
	}
	iommufd_hwpt_paging_destroy(obj);
}

void iommufd_hwpt_nested_destroy(struct iommufd_object *obj)
{
	struct iommufd_hwpt_nested *hwpt_nested =
		container_of(obj, struct iommufd_hwpt_nested, common.obj);

	if (hwpt_nested->common.domain)
		iommu_domain_free(hwpt_nested->common.domain);

	refcount_dec(&hwpt_nested->parent->common.obj.users);
}

void iommufd_hwpt_nested_abort(struct iommufd_object *obj)
{
	iommufd_hwpt_nested_destroy(obj);
}

static int
iommufd_hwpt_paging_enforce_cc(struct iommufd_hwpt_paging *hwpt_paging)
{
	struct iommu_domain *paging_domain = hwpt_paging->common.domain;

	if (hwpt_paging->enforce_cache_coherency)
		return 0;

	if (paging_domain->ops->enforce_cache_coherency)
		hwpt_paging->enforce_cache_coherency =
			paging_domain->ops->enforce_cache_coherency(
				paging_domain);
	if (!hwpt_paging->enforce_cache_coherency)
		return -EINVAL;
	return 0;
}

/**
 * iommufd_hwpt_paging_alloc() - Get a PAGING iommu_domain for a device
 * @ictx: iommufd context
 * @ioas: IOAS to associate the domain with
 * @idev: Device to get an iommu_domain for
 * @flags: Flags from userspace
 * @immediate_attach: True if idev should be attached to the hwpt
 * @user_data: The user provided driver specific data describing the domain to
 *             create
 *
 * Allocate a new iommu_domain and return it as a hw_pagetable. The HWPT
 * will be linked to the given ioas and upon return the underlying iommu_domain
 * is fully populated.
 *
 * The caller must hold the ioas->mutex until after
 * iommufd_object_abort_and_destroy() or iommufd_object_finalize() is called on
 * the returned hwpt.
 */
struct iommufd_hwpt_paging *
iommufd_hwpt_paging_alloc(struct iommufd_ctx *ictx, struct iommufd_ioas *ioas,
			  struct iommufd_device *idev, u32 flags,
			  bool immediate_attach,
			  const struct iommu_user_data *user_data)
{
	const u32 valid_flags = IOMMU_HWPT_ALLOC_NEST_PARENT |
				IOMMU_HWPT_ALLOC_DIRTY_TRACKING;
	const struct iommu_ops *ops = dev_iommu_ops(idev->dev);
	struct iommufd_hwpt_paging *hwpt_paging;
	struct iommufd_hw_pagetable *hwpt;
	int rc;

	lockdep_assert_held(&ioas->mutex);

	if ((flags || user_data) && !ops->domain_alloc_user)
		return ERR_PTR(-EOPNOTSUPP);
	if (flags & ~valid_flags)
		return ERR_PTR(-EOPNOTSUPP);

	hwpt_paging = __iommufd_object_alloc(
		ictx, hwpt_paging, IOMMUFD_OBJ_HWPT_PAGING, common.obj);
	if (IS_ERR(hwpt_paging))
		return ERR_CAST(hwpt_paging);
	hwpt = &hwpt_paging->common;

	INIT_LIST_HEAD(&hwpt_paging->hwpt_item);
	/* Pairs with iommufd_hw_pagetable_destroy() */
	refcount_inc(&ioas->obj.users);
	hwpt_paging->ioas = ioas;
	hwpt_paging->nest_parent = flags & IOMMU_HWPT_ALLOC_NEST_PARENT;

	if (ops->domain_alloc_user) {
		hwpt->domain = ops->domain_alloc_user(idev->dev, flags, NULL,
						      user_data);
		if (IS_ERR(hwpt->domain)) {
			rc = PTR_ERR(hwpt->domain);
			hwpt->domain = NULL;
			goto out_abort;
		}
	} else {
		hwpt->domain = iommu_domain_alloc(idev->dev->bus);
		if (!hwpt->domain) {
			rc = -ENOMEM;
			goto out_abort;
		}
	}

	/*
	 * Set the coherency mode before we do iopt_table_add_domain() as some
	 * iommus have a per-PTE bit that controls it and need to decide before
	 * doing any maps. It is an iommu driver bug to report
	 * IOMMU_CAP_ENFORCE_CACHE_COHERENCY but fail enforce_cache_coherency
	 * on a new domain.
	 *
	 * The cache coherency mode must be configured here and unchanged later.
	 * Note that a HWPT (non-CC) created for a device (non-CC) can be later
	 * reused by another device (either non-CC or CC). However, a HWPT (CC)
	 * created for a device (CC) cannot be reused by another device (non-CC)
	 * but only by devices (CC). Instead user space in this case would need
	 * to allocate a separate HWPT (non-CC).
	 */
	if (idev->enforce_cache_coherency) {
		rc = iommufd_hwpt_paging_enforce_cc(hwpt_paging);
		if (WARN_ON(rc))
			goto out_abort;
	}

	/*
	 * immediate_attach exists only to accommodate iommu drivers that cannot
	 * directly allocate a domain. These drivers do not finish creating the
	 * domain until attach is completed. Thus we must have this call
	 * sequence. Once those drivers are fixed this should be removed.
	 */
	if (immediate_attach) {
		rc = iommufd_hw_pagetable_attach(hwpt, idev);
		if (rc)
			goto out_abort;
	}

	rc = iopt_table_add_domain(&ioas->iopt, hwpt->domain);
	if (rc)
		goto out_detach;
	list_add_tail(&hwpt_paging->hwpt_item, &ioas->hwpt_list);
	return hwpt_paging;

out_detach:
	if (immediate_attach)
		iommufd_hw_pagetable_detach(idev);
out_abort:
	iommufd_object_abort_and_destroy(ictx, &hwpt->obj);
	return ERR_PTR(rc);
}

/**
 * iommufd_hwpt_nested_alloc() - Get a NESTED iommu_domain for a device
 * @ictx: iommufd context
 * @parent: Parent PAGING-type hwpt to associate the domain with
 * @idev: Device to get an iommu_domain for
 * @flags: Flags from userspace
 * @user_data: user_data pointer. Must be valid
 *
 * Allocate a new iommu_domain (must be IOMMU_DOMAIN_NESTED) and return it as
 * a NESTED hw_pagetable. The given parent PAGING-type hwpt must be capable of
 * being a parent.
 */
static struct iommufd_hwpt_nested *
iommufd_hwpt_nested_alloc(struct iommufd_ctx *ictx,
			  struct iommufd_hwpt_paging *parent,
			  struct iommufd_device *idev, u32 flags,
			  const struct iommu_user_data *user_data)
{
	const struct iommu_ops *ops = dev_iommu_ops(idev->dev);
	struct iommufd_hwpt_nested *hwpt_nested;
	struct iommufd_hw_pagetable *hwpt;
	int rc;

	if (flags || !user_data->len || !ops->domain_alloc_user)
		return ERR_PTR(-EOPNOTSUPP);
	if (parent->auto_domain || !parent->nest_parent)
		return ERR_PTR(-EINVAL);

	hwpt_nested = __iommufd_object_alloc(
		ictx, hwpt_nested, IOMMUFD_OBJ_HWPT_NESTED, common.obj);
	if (IS_ERR(hwpt_nested))
		return ERR_CAST(hwpt_nested);
	hwpt = &hwpt_nested->common;

	refcount_inc(&parent->common.obj.users);
	hwpt_nested->parent = parent;

	hwpt->domain = ops->domain_alloc_user(idev->dev, flags,
					      parent->common.domain, user_data);
	if (IS_ERR(hwpt->domain)) {
		rc = PTR_ERR(hwpt->domain);
		hwpt->domain = NULL;
		goto out_abort;
	}

	if (WARN_ON_ONCE(hwpt->domain->type != IOMMU_DOMAIN_NESTED)) {
		rc = -EINVAL;
		goto out_abort;
	}
	return hwpt_nested;

out_abort:
	iommufd_object_abort_and_destroy(ictx, &hwpt->obj);
	return ERR_PTR(rc);
}
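
/*
 * Illustrative userspace sketch (not built as part of this file): allocating
 * a NESTED HWPT via IOMMU_HWPT_ALLOC on top of a PAGING HWPT that was created
 * with IOMMU_HWPT_ALLOC_NEST_PARENT. The driver-specific translation data
 * (data_type and its layout) is defined by each IOMMU driver's uapi and is
 * left abstract here; alloc_nested_hwpt() is a hypothetical helper.
 */
#if 0
#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/iommufd.h>

static int alloc_nested_hwpt(int iommufd, __u32 dev_id, __u32 parent_hwpt_id,
			     __u32 data_type, void *data, __u32 data_len,
			     __u32 *out_hwpt_id)
{
	struct iommu_hwpt_alloc cmd = {
		.size = sizeof(cmd),
		.dev_id = dev_id,
		.pt_id = parent_hwpt_id, /* a PAGING HWPT yields a NESTED HWPT */
		.data_type = data_type,	 /* driver-specific IOMMU_HWPT_DATA_* */
		.data_len = data_len,
		.data_uptr = (uintptr_t)data,
	};
	int rc;

	rc = ioctl(iommufd, IOMMU_HWPT_ALLOC, &cmd);
	if (rc)
		return rc;
	*out_hwpt_id = cmd.out_hwpt_id;
	return 0;
}
#endif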

int iommufd_hwpt_alloc(struct iommufd_ucmd *ucmd)
{
	struct iommu_hwpt_alloc *cmd = ucmd->cmd;
	const struct iommu_user_data user_data = {
		.type = cmd->data_type,
		.uptr = u64_to_user_ptr(cmd->data_uptr),
		.len = cmd->data_len,
	};
	struct iommufd_hw_pagetable *hwpt;
	struct iommufd_ioas *ioas = NULL;
	struct iommufd_object *pt_obj;
	struct iommufd_device *idev;
	int rc;

	if (cmd->__reserved)
		return -EOPNOTSUPP;
	if (cmd->data_type == IOMMU_HWPT_DATA_NONE && cmd->data_len)
		return -EINVAL;

	idev = iommufd_get_device(ucmd, cmd->dev_id);
	if (IS_ERR(idev))
		return PTR_ERR(idev);

	pt_obj = iommufd_get_object(ucmd->ictx, cmd->pt_id, IOMMUFD_OBJ_ANY);
	if (IS_ERR(pt_obj)) {
		rc = -EINVAL;
		goto out_put_idev;
	}

	if (pt_obj->type == IOMMUFD_OBJ_IOAS) {
		struct iommufd_hwpt_paging *hwpt_paging;

		ioas = container_of(pt_obj, struct iommufd_ioas, obj);
		mutex_lock(&ioas->mutex);
		hwpt_paging = iommufd_hwpt_paging_alloc(
			ucmd->ictx, ioas, idev, cmd->flags, false,
			user_data.len ? &user_data : NULL);
		if (IS_ERR(hwpt_paging)) {
			rc = PTR_ERR(hwpt_paging);
			goto out_unlock;
		}
		hwpt = &hwpt_paging->common;
	} else if (pt_obj->type == IOMMUFD_OBJ_HWPT_PAGING) {
		struct iommufd_hwpt_nested *hwpt_nested;

		hwpt_nested = iommufd_hwpt_nested_alloc(
			ucmd->ictx,
			container_of(pt_obj, struct iommufd_hwpt_paging,
				     common.obj),
			idev, cmd->flags, &user_data);
		if (IS_ERR(hwpt_nested)) {
			rc = PTR_ERR(hwpt_nested);
			goto out_unlock;
		}
		hwpt = &hwpt_nested->common;
	} else {
		rc = -EINVAL;
		goto out_put_pt;
	}

	cmd->out_hwpt_id = hwpt->obj.id;
	rc = iommufd_ucmd_respond(ucmd, sizeof(*cmd));
	if (rc)
		goto out_hwpt;
	iommufd_object_finalize(ucmd->ictx, &hwpt->obj);
	goto out_unlock;

out_hwpt:
	iommufd_object_abort_and_destroy(ucmd->ictx, &hwpt->obj);
out_unlock:
	if (ioas)
		mutex_unlock(&ioas->mutex);
out_put_pt:
	iommufd_put_object(pt_obj);
out_put_idev:
	iommufd_put_object(&idev->obj);
	return rc;
}
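
/*
 * Illustrative userspace sketch (not built as part of this file): the common
 * IOMMU_HWPT_ALLOC call that reaches iommufd_hwpt_paging_alloc() above, i.e.
 * pt_id names an IOAS and no driver-specific data is passed. The fd and the
 * dev_id/ioas_id values are assumed to come from earlier iommufd setup.
 */
#if 0
#include <sys/ioctl.h>
#include <linux/iommufd.h>

static int alloc_paging_hwpt(int iommufd, __u32 dev_id, __u32 ioas_id,
			     __u32 flags, __u32 *out_hwpt_id)
{
	struct iommu_hwpt_alloc cmd = {
		.size = sizeof(cmd),
		.flags = flags,	  /* e.g. IOMMU_HWPT_ALLOC_DIRTY_TRACKING */
		.dev_id = dev_id,
		.pt_id = ioas_id, /* an IOAS yields a PAGING HWPT */
		.data_type = IOMMU_HWPT_DATA_NONE,
	};
	int rc;

	rc = ioctl(iommufd, IOMMU_HWPT_ALLOC, &cmd);
	if (rc)
		return rc;
	*out_hwpt_id = cmd.out_hwpt_id;
	return 0;
}
#endif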

int iommufd_hwpt_set_dirty_tracking(struct iommufd_ucmd *ucmd)
{
	struct iommu_hwpt_set_dirty_tracking *cmd = ucmd->cmd;
	struct iommufd_hwpt_paging *hwpt_paging;
	struct iommufd_ioas *ioas;
	int rc = -EOPNOTSUPP;
	bool enable;

	if (cmd->flags & ~IOMMU_HWPT_DIRTY_TRACKING_ENABLE)
		return rc;

	hwpt_paging = iommufd_get_hwpt_paging(ucmd, cmd->hwpt_id);
	if (IS_ERR(hwpt_paging))
		return PTR_ERR(hwpt_paging);

	ioas = hwpt_paging->ioas;
	enable = cmd->flags & IOMMU_HWPT_DIRTY_TRACKING_ENABLE;

	rc = iopt_set_dirty_tracking(&ioas->iopt, hwpt_paging->common.domain,
				     enable);

	iommufd_put_object(&hwpt_paging->common.obj);
	return rc;
}
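
/*
 * Illustrative userspace sketch (not built as part of this file): toggling
 * dirty tracking through IOMMU_HWPT_SET_DIRTY_TRACKING. The HWPT is assumed
 * to have been allocated with IOMMU_HWPT_ALLOC_DIRTY_TRACKING.
 */
#if 0
#include <stdbool.h>
#include <sys/ioctl.h>
#include <linux/iommufd.h>

static int set_dirty_tracking(int iommufd, __u32 hwpt_id, bool enable)
{
	struct iommu_hwpt_set_dirty_tracking cmd = {
		.size = sizeof(cmd),
		.flags = enable ? IOMMU_HWPT_DIRTY_TRACKING_ENABLE : 0,
		.hwpt_id = hwpt_id,
	};

	return ioctl(iommufd, IOMMU_HWPT_SET_DIRTY_TRACKING, &cmd);
}
#endif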

int iommufd_hwpt_get_dirty_bitmap(struct iommufd_ucmd *ucmd)
{
	struct iommu_hwpt_get_dirty_bitmap *cmd = ucmd->cmd;
	struct iommufd_hwpt_paging *hwpt_paging;
	struct iommufd_ioas *ioas;
	int rc = -EOPNOTSUPP;

	if ((cmd->flags & ~(IOMMU_HWPT_GET_DIRTY_BITMAP_NO_CLEAR)) ||
	    cmd->__reserved)
		return -EOPNOTSUPP;

	hwpt_paging = iommufd_get_hwpt_paging(ucmd, cmd->hwpt_id);
	if (IS_ERR(hwpt_paging))
		return PTR_ERR(hwpt_paging);

	ioas = hwpt_paging->ioas;
	rc = iopt_read_and_clear_dirty_data(
		&ioas->iopt, hwpt_paging->common.domain, cmd->flags, cmd);

	iommufd_put_object(&hwpt_paging->common.obj);
	return rc;
}
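
/*
 * Illustrative userspace sketch (not built as part of this file): fetching
 * the dirty bitmap for an IOVA range with IOMMU_HWPT_GET_DIRTY_BITMAP. The
 * caller provides one bit per page_size unit of the range; passing
 * IOMMU_HWPT_GET_DIRTY_BITMAP_NO_CLEAR in flags reads without resetting the
 * dirty state. read_dirty_bitmap() is a hypothetical helper.
 */
#if 0
#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/iommufd.h>

static int read_dirty_bitmap(int iommufd, __u32 hwpt_id, __u32 flags,
			     __u64 iova, __u64 length, __u64 page_size,
			     void *bitmap)
{
	struct iommu_hwpt_get_dirty_bitmap cmd = {
		.size = sizeof(cmd),
		.hwpt_id = hwpt_id,
		.flags = flags,
		.iova = iova,
		.length = length,
		.page_size = page_size,
		.data = (uintptr_t)bitmap,
	};

	return ioctl(iommufd, IOMMU_HWPT_GET_DIRTY_BITMAP, &cmd);
}
#endif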