return set_resource_bit;
}
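
+/*
+ * Enable the kernel compute queues through MES: the legacy compute
+ * rings are handed to the MES scheduler instead of being mapped with
+ * KIQ MAP_QUEUES packets.
+ */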
+static int amdgpu_gfx_mes_enable_kcq(struct amdgpu_device *adev, int xcc_id)
+{
+ struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id];
+ struct amdgpu_ring *kiq_ring = &kiq->ring;
+ uint64_t queue_mask = ~0ULL;
+ int r, i, j;
+
+ amdgpu_device_flush_hdp(adev, NULL);
+
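+ /* SET_RESOURCES goes through the KIQ ring only when unified MES is not in use. */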
+ if (!adev->enable_uni_mes) {
+ spin_lock(&kiq->ring_lock);
+ r = amdgpu_ring_alloc(kiq_ring, kiq->pmf->set_resources_size);
+ if (r) {
+ dev_err(adev->dev, "Failed to lock KIQ (%d).\n", r);
+ spin_unlock(&kiq->ring_lock);
+ return r;
+ }
+
+ kiq->pmf->kiq_set_resources(kiq_ring, queue_mask);
+ r = amdgpu_ring_test_helper(kiq_ring);
+ spin_unlock(&kiq->ring_lock);
+ if (r)
+ dev_err(adev->dev, "KIQ failed to set resources\n");
+ }
+
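+ /* Hand every legacy compute queue of this XCC over to the MES scheduler. */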
+ for (i = 0; i < adev->gfx.num_compute_rings; i++) {
+ j = i + xcc_id * adev->gfx.num_compute_rings;
+ r = amdgpu_mes_map_legacy_queue(adev,
+ &adev->gfx.compute_ring[j]);
+ if (r) {
+ dev_err(adev->dev, "failed to map compute queue\n");
+ return r;
+ }
+ }
+
+ return 0;
+}
+
int amdgpu_gfx_enable_kcq(struct amdgpu_device *adev, int xcc_id)
{
struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id];
struct amdgpu_ring *kiq_ring = &kiq->ring;
uint64_t queue_mask = 0;
int r, i, j;
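
+ /* With MES enabled, hand KCQ mapping off to the MES helper above. */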
+ if (adev->enable_mes)
+ return amdgpu_gfx_mes_enable_kcq(adev, xcc_id);
+
if (!kiq->pmf || !kiq->pmf->kiq_map_queues || !kiq->pmf->kiq_set_resources)
return -EINVAL;
amdgpu_device_flush_hdp(adev, NULL);
- if (adev->enable_mes)
- queue_mask = ~0ULL;
-
DRM_INFO("kiq ring mec %d pipe %d q %d\n", kiq_ring->me, kiq_ring->pipe,
kiq_ring->queue);

spin_lock(&kiq->ring_lock);
r = amdgpu_ring_alloc(kiq_ring, kiq->pmf->map_queues_size *
adev->gfx.num_compute_rings +
kiq->pmf->set_resources_size);
if (r) {
DRM_ERROR("Failed to lock KIQ (%d).\n", r);
spin_unlock(&kiq->ring_lock);
return r;
}
kiq->pmf->kiq_set_resources(kiq_ring, queue_mask);
-
- if (!adev->enable_mes) {
- for (i = 0; i < adev->gfx.num_compute_rings; i++) {
- j = i + xcc_id * adev->gfx.num_compute_rings;
- kiq->pmf->kiq_map_queues(kiq_ring,
- &adev->gfx.compute_ring[j]);
- }
+ for (i = 0; i < adev->gfx.num_compute_rings; i++) {
+ j = i + xcc_id * adev->gfx.num_compute_rings;
+ kiq->pmf->kiq_map_queues(kiq_ring,
+ &adev->gfx.compute_ring[j]);
}
r = amdgpu_ring_test_helper(kiq_ring);
if (r)
DRM_ERROR("KCQ enable failed\n");
- if (adev->enable_mes || adev->enable_uni_mes) {
- for (i = 0; i < adev->gfx.num_compute_rings; i++) {
- j = i + xcc_id * adev->gfx.num_compute_rings;
- r = amdgpu_mes_map_legacy_queue(adev,
- &adev->gfx.compute_ring[j]);
- if (r) {
- DRM_ERROR("failed to map compute queue\n");
- return r;
- }
- }
-
- return 0;
- }
-
return r;
}
amdgpu_device_flush_hdp(adev, NULL);
- if (adev->enable_mes || adev->enable_uni_mes) {
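+ /* With MES, the legacy gfx rings are likewise mapped through MES. */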
+ if (adev->enable_mes) {
for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
j = i + xcc_id * adev->gfx.num_gfx_rings;
r = amdgpu_mes_map_legacy_queue(adev,
&adev->gfx.gfx_ring[j]);
if (r)
goto failure;
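+ /* Finish bring-up by initializing the MES hw itself. */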
+ r = mes_v12_0_hw_init(adev);
+ if (r)
+ goto failure;
+
return r;
failure:
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
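+ /* If the MES ring is already up, skip re-init but still run the epilogue below. */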
if (adev->mes.ring.sched.ready)
- return 0;
+ goto out;
if (!adev->enable_mes_kiq || adev->enable_uni_mes) {
if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {
goto failure;
}
+out:
/*
* Disable KIQ ring usage from the driver once MES is enabled.
* MES uses KIQ ring exclusively so driver cannot access KIQ ring
* with MES enabled.
*/
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
/* it's only intended for use in the mes_self_test case, not for s0ix and reset */
- if (!amdgpu_in_reset(adev) && !adev->in_s0ix && !adev->in_suspend &&
- !adev->enable_uni_mes)
+ if (!amdgpu_in_reset(adev) && !adev->in_s0ix && !adev->in_suspend)
amdgpu_mes_self_test(adev);
return 0;