/*
 * Copyright 2022 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include "amdgpu.h"
#include "amdgpu_xcp.h"
#include "amdgpu_drv.h"

#include <drm/drm_drv.h>
#include "../amdxcp/amdgpu_xcp_drv.h"

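/*
 * Resolve the requested partition state (prepare_suspend, suspend,
 * prepare_resume, resume) to the matching per-IP callback and invoke it
 * with the IP block's instance mask.
 */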
static int __amdgpu_xcp_run(struct amdgpu_xcp_mgr *xcp_mgr,
			    struct amdgpu_xcp_ip *xcp_ip, int xcp_state)
{
	int (*run_func)(void *handle, uint32_t inst_mask) = NULL;
	int ret = 0;

	if (!xcp_ip || !xcp_ip->valid || !xcp_ip->ip_funcs)
		return 0;

	switch (xcp_state) {
	case AMDGPU_XCP_PREPARE_SUSPEND:
		run_func = xcp_ip->ip_funcs->prepare_suspend;
		break;
	case AMDGPU_XCP_SUSPEND:
		run_func = xcp_ip->ip_funcs->suspend;
		break;
	case AMDGPU_XCP_PREPARE_RESUME:
		run_func = xcp_ip->ip_funcs->prepare_resume;
		break;
	case AMDGPU_XCP_RESUME:
		run_func = xcp_ip->ip_funcs->resume;
		break;
	}
	if (run_func)
		ret = run_func(xcp_mgr->adev, xcp_ip->inst_mask);
	return ret;
}

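/* Run one state transition over every IP block of the given partition. */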
static int amdgpu_xcp_run_transition(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id,
				     int state)
{
	struct amdgpu_xcp_ip *xcp_ip;
	struct amdgpu_xcp *xcp;
	int i, ret;

	if (xcp_id >= MAX_XCP || !xcp_mgr->xcp[xcp_id].valid)
		return -EINVAL;

	xcp = &xcp_mgr->xcp[xcp_id];
	for (i = 0; i < AMDGPU_XCP_MAX_BLOCKS; ++i) {
		xcp_ip = &xcp->ip[i];
		ret = __amdgpu_xcp_run(xcp_mgr, xcp_ip, state);
		if (ret)
			break;
	}

	return ret;
}

int amdgpu_xcp_prepare_suspend(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id)
{
	return amdgpu_xcp_run_transition(xcp_mgr, xcp_id,
					 AMDGPU_XCP_PREPARE_SUSPEND);
}

int amdgpu_xcp_suspend(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id)
{
	return amdgpu_xcp_run_transition(xcp_mgr, xcp_id, AMDGPU_XCP_SUSPEND);
}

int amdgpu_xcp_prepare_resume(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id)
{
	return amdgpu_xcp_run_transition(xcp_mgr, xcp_id,
					 AMDGPU_XCP_PREPARE_RESUME);
}

int amdgpu_xcp_resume(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id)
{
	return amdgpu_xcp_run_transition(xcp_mgr, xcp_id, AMDGPU_XCP_RESUME);
}

static void __amdgpu_xcp_add_block(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id,
				   struct amdgpu_xcp_ip *ip)
{
	struct amdgpu_xcp *xcp;

	if (!ip)
		return;

	xcp = &xcp_mgr->xcp[xcp_id];
	xcp->ip[ip->ip_id] = *ip;
	xcp->ip[ip->ip_id].valid = true;
	xcp->valid = true;
}

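/*
 * Build the per-partition IP block information for num_xcps partitions,
 * assign each partition its memory node, then refresh the partition
 * scheduler lists.
 */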
int amdgpu_xcp_init(struct amdgpu_xcp_mgr *xcp_mgr, int num_xcps, int mode)
{
	struct amdgpu_device *adev = xcp_mgr->adev;
	struct amdgpu_xcp_ip ip;
	uint8_t mem_id;
	int i, j, ret;

	if (!num_xcps || num_xcps > MAX_XCP)
		return -EINVAL;

	xcp_mgr->mode = mode;
	for (i = 0; i < MAX_XCP; ++i)
		xcp_mgr->xcp[i].valid = false;

	/* This is needed for figuring out memory id of xcp */
	xcp_mgr->num_xcp_per_mem_partition = num_xcps / xcp_mgr->adev->gmc.num_mem_partitions;

	for (i = 0; i < num_xcps; ++i) {
		for (j = AMDGPU_XCP_GFXHUB; j < AMDGPU_XCP_MAX_BLOCKS; ++j) {
			ret = xcp_mgr->funcs->get_ip_details(xcp_mgr, i, j,
							     &ip);
			if (ret)
				continue;
			__amdgpu_xcp_add_block(xcp_mgr, i, &ip);
		}

		xcp_mgr->xcp[i].id = i;

		if (xcp_mgr->funcs->get_xcp_mem_id) {
			ret = xcp_mgr->funcs->get_xcp_mem_id(
				xcp_mgr, &xcp_mgr->xcp[i], &mem_id);
			if (ret)
				continue;
			xcp_mgr->xcp[i].mem_id = mem_id;
		}
	}

	xcp_mgr->num_xcps = num_xcps;
	amdgpu_xcp_update_partition_sched_list(adev);
	return 0;
}

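/*
 * Switch the partition mode under xcp_lock. The cached mode is parked in
 * AMDGPU_XCP_MODE_TRANS while the backend callback runs; on failure the
 * previous (or freshly queried) mode is restored.
 */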
static int __amdgpu_xcp_switch_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr,
					      int mode)
{
	int ret, curr_mode, num_xcps = 0;

	if (!xcp_mgr->funcs || !xcp_mgr->funcs->switch_partition_mode)
		return 0;

	mutex_lock(&xcp_mgr->xcp_lock);

	curr_mode = xcp_mgr->mode;
	/* State set to transient mode */
	xcp_mgr->mode = AMDGPU_XCP_MODE_TRANS;

	ret = xcp_mgr->funcs->switch_partition_mode(xcp_mgr, mode, &num_xcps);
	if (ret) {
		/* Failed, get whatever mode it's at now */
		if (xcp_mgr->funcs->query_partition_mode)
			xcp_mgr->mode = amdgpu_xcp_query_partition_mode(
				xcp_mgr, AMDGPU_XCP_FL_LOCKED);
		else
			xcp_mgr->mode = curr_mode;
	}

	mutex_unlock(&xcp_mgr->xcp_lock);

	return ret;
}

int amdgpu_xcp_switch_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr, int mode)
{
	if (!xcp_mgr || mode == AMDGPU_XCP_MODE_NONE)
		return -EINVAL;
	if (xcp_mgr->mode == mode)
		return 0;

	return __amdgpu_xcp_switch_partition_mode(xcp_mgr, mode);
}

int amdgpu_xcp_restore_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr)
{
	if (!xcp_mgr || xcp_mgr->mode == AMDGPU_XCP_MODE_NONE)
		return 0;

	return __amdgpu_xcp_switch_partition_mode(xcp_mgr, xcp_mgr->mode);
}

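/*
 * Query the current mode from the backend and warn when it does not match
 * the cached mode. Callers already holding xcp_lock pass AMDGPU_XCP_FL_LOCKED.
 */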
int amdgpu_xcp_query_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr, u32 flags)
{
	int mode;

	if (xcp_mgr->mode == AMDGPU_XCP_MODE_NONE)
		return xcp_mgr->mode;

	if (!xcp_mgr->funcs || !xcp_mgr->funcs->query_partition_mode)
		return xcp_mgr->mode;

	if (!(flags & AMDGPU_XCP_FL_LOCKED))
		mutex_lock(&xcp_mgr->xcp_lock);
	mode = xcp_mgr->funcs->query_partition_mode(xcp_mgr);
	if (xcp_mgr->mode != AMDGPU_XCP_MODE_TRANS && mode != xcp_mgr->mode)
		dev_WARN(xcp_mgr->adev->dev,
			 "Cached partition mode %d not matching with device mode %d",
			 xcp_mgr->mode, mode);

	if (!(flags & AMDGPU_XCP_FL_LOCKED))
		mutex_unlock(&xcp_mgr->xcp_lock);

	return mode;
}

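/*
 * Partition 0 shares the primary DRM device; allocate separate DRM nodes
 * for the remaining partitions and redirect their IOCTLs to the primary.
 */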
static int amdgpu_xcp_dev_alloc(struct amdgpu_device *adev)
{
	struct drm_device *p_ddev;
	struct drm_device *ddev;
	int i, ret;

	ddev = adev_to_drm(adev);

	/* xcp #0 shares drm device setting with adev */
	adev->xcp_mgr->xcp->ddev = ddev;

	for (i = 1; i < MAX_XCP; i++) {
		ret = amdgpu_xcp_drm_dev_alloc(&p_ddev);
		if (ret == -ENOSPC) {
			dev_warn(adev->dev,
				 "Skip xcp node #%d when out of drm node resource.", i);
			return 0;
		} else if (ret) {
			return ret;
		}

		/* Redirect all IOCTLs to the primary device */
		adev->xcp_mgr->xcp[i].rdev = p_ddev->render->dev;
		adev->xcp_mgr->xcp[i].pdev = p_ddev->primary->dev;
		adev->xcp_mgr->xcp[i].driver = (struct drm_driver *)p_ddev->driver;
		adev->xcp_mgr->xcp[i].vma_offset_manager = p_ddev->vma_offset_manager;
		p_ddev->render->dev = ddev;
		p_ddev->primary->dev = ddev;
		p_ddev->vma_offset_manager = ddev->vma_offset_manager;
		p_ddev->driver = &amdgpu_partition_driver;
		adev->xcp_mgr->xcp[i].ddev = p_ddev;
	}

	return 0;
}

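/*
 * Allocate the partition manager, initialize the partitions when a mode is
 * already selected, and create the per-partition DRM nodes.
 */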
int amdgpu_xcp_mgr_init(struct amdgpu_device *adev, int init_mode,
			int init_num_xcps,
			struct amdgpu_xcp_mgr_funcs *xcp_funcs)
{
	struct amdgpu_xcp_mgr *xcp_mgr;

	if (!xcp_funcs || !xcp_funcs->switch_partition_mode ||
	    !xcp_funcs->get_ip_details)
		return -EINVAL;

	xcp_mgr = kzalloc(sizeof(*xcp_mgr), GFP_KERNEL);
	if (!xcp_mgr)
		return -ENOMEM;

	xcp_mgr->adev = adev;
	xcp_mgr->funcs = xcp_funcs;
	xcp_mgr->mode = init_mode;
	mutex_init(&xcp_mgr->xcp_lock);

	if (init_mode != AMDGPU_XCP_MODE_NONE)
		amdgpu_xcp_init(xcp_mgr, init_num_xcps, init_mode);

	adev->xcp_mgr = xcp_mgr;

	return amdgpu_xcp_dev_alloc(adev);
}

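/*
 * Return a bitmask of partitions that contain the given instance of an IP
 * block, or -ENXIO if none does.
 */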
int amdgpu_xcp_get_partition(struct amdgpu_xcp_mgr *xcp_mgr,
			     enum AMDGPU_XCP_IP_BLOCK ip, int instance)
{
	struct amdgpu_xcp *xcp;
	int i, id_mask = 0;

	if (ip >= AMDGPU_XCP_MAX_BLOCKS)
		return -EINVAL;

	for (i = 0; i < xcp_mgr->num_xcps; ++i) {
		xcp = &xcp_mgr->xcp[i];
		if ((xcp->valid) && (xcp->ip[ip].valid) &&
		    (xcp->ip[ip].inst_mask & BIT(instance)))
			id_mask |= BIT(i);
	}

	if (!id_mask)
		id_mask = -ENXIO;
	return id_mask;
}

int amdgpu_xcp_get_inst_details(struct amdgpu_xcp *xcp,
				enum AMDGPU_XCP_IP_BLOCK ip,
				uint32_t *inst_mask)
{
	if (!xcp->valid || !inst_mask || !(xcp->ip[ip].valid))
		return -EINVAL;

	*inst_mask = xcp->ip[ip].inst_mask;
	return 0;
}

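/* Register the DRM nodes that were allocated for the secondary partitions. */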
int amdgpu_xcp_dev_register(struct amdgpu_device *adev,
			    const struct pci_device_id *ent)
{
	int i, ret;

	if (!adev->xcp_mgr)
		return 0;

	for (i = 1; i < MAX_XCP; i++) {
		if (!adev->xcp_mgr->xcp[i].ddev)
			break;

		ret = drm_dev_register(adev->xcp_mgr->xcp[i].ddev, ent->driver_data);
		if (ret)
			return ret;
	}

	return 0;
}

void amdgpu_xcp_dev_unplug(struct amdgpu_device *adev)
{
	struct drm_device *p_ddev;
	int i;

	if (!adev->xcp_mgr)
		return;

	for (i = 1; i < MAX_XCP; i++) {
		if (!adev->xcp_mgr->xcp[i].ddev)
			break;

		p_ddev = adev->xcp_mgr->xcp[i].ddev;
		drm_dev_unplug(p_ddev);
		p_ddev->render->dev = adev->xcp_mgr->xcp[i].rdev;
		p_ddev->primary->dev = adev->xcp_mgr->xcp[i].pdev;
		p_ddev->driver = adev->xcp_mgr->xcp[i].driver;
		p_ddev->vma_offset_manager = adev->xcp_mgr->xcp[i].vma_offset_manager;
	}
}

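/*
 * Associate an opening DRM client with the partition owning its render node
 * and propagate that partition's memory node to the client VM.
 */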
int amdgpu_xcp_open_device(struct amdgpu_device *adev,
			   struct amdgpu_fpriv *fpriv,
			   struct drm_file *file_priv)
{
	int i;

	if (!adev->xcp_mgr)
		return 0;

	fpriv->xcp_id = AMDGPU_XCP_NO_PARTITION;
	for (i = 0; i < MAX_XCP; ++i) {
		if (!adev->xcp_mgr->xcp[i].ddev)
			break;

		if (file_priv->minor == adev->xcp_mgr->xcp[i].ddev->render) {
			if (!adev->xcp_mgr->xcp[i].valid) {
				dev_err(adev->dev, "renderD%d partition %d not valid!",
					file_priv->minor->index, i);
				return -ENOENT;
			}
			dev_dbg(adev->dev, "renderD%d partition %d opened!",
				file_priv->minor->index, i);
			fpriv->xcp_id = i;
			break;
		}
	}

	fpriv->vm.mem_id = fpriv->xcp_id == AMDGPU_XCP_NO_PARTITION ? -1 :
				adev->xcp_mgr->xcp[fpriv->xcp_id].mem_id;
	return 0;
}

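/* Drop the reference the context entity holds on its ring's partition. */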
void amdgpu_xcp_release_sched(struct amdgpu_device *adev,
			      struct amdgpu_ctx_entity *entity)
{
	struct drm_gpu_scheduler *sched;
	struct amdgpu_ring *ring;

	if (!adev->xcp_mgr)
		return;

	sched = entity->entity.rq->sched;
	if (sched->ready) {
		ring = to_amdgpu_ring(entity->entity.rq->sched);
		atomic_dec(&adev->xcp_mgr->xcp[ring->xcp_id].ref_cnt);
	}
}