goto cleanup;
}
- r = amdgpu_bo_pin(new_abo, amdgpu_display_framebuffer_domains(adev), &base);
+ r = amdgpu_bo_pin(new_abo, amdgpu_display_supported_domains(adev), &base);
if (unlikely(r != 0)) {
DRM_ERROR("failed to pin new abo buffer before flip\n");
goto unreserve;
.create_handle = drm_gem_fb_create_handle,
};
-uint32_t amdgpu_display_framebuffer_domains(struct amdgpu_device *adev)
+uint32_t amdgpu_display_supported_domains(struct amdgpu_device *adev)
{
uint32_t domain = AMDGPU_GEM_DOMAIN_VRAM;
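For readers following the rename: the helper answers "which memory domains can display scan out from", so VRAM is always included and GTT is only added on hardware that can scan out of system memory. A minimal sketch of that shape, with a hypothetical function name and a deliberately simplified APU-only condition (the in-tree check is keyed to specific ASIC generations and DC support, not this single flag):

uint32_t example_display_supported_domains(struct amdgpu_device *adev)
{
	uint32_t domain = AMDGPU_GEM_DOMAIN_VRAM;	/* scanout from VRAM always works */

	/* Illustrative condition only: some APUs can also scan out of GTT,
	 * since they use system memory; the real helper gates this on the
	 * ASIC type and DC support rather than the bare APU flag. */
	if (adev->flags & AMD_IS_APU)
		domain |= AMDGPU_GEM_DOMAIN_GTT;

	return domain;
}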
#ifndef __AMDGPU_DISPLAY_H__
#define __AMDGPU_DISPLAY_H__
-uint32_t amdgpu_display_framebuffer_domains(struct amdgpu_device *adev);
+uint32_t amdgpu_display_supported_domains(struct amdgpu_device *adev);
struct drm_framebuffer *
amdgpu_display_user_framebuffer_create(struct drm_device *dev,
struct drm_file *file_priv,
/* need to align pitch with crtc limits */
mode_cmd->pitches[0] = amdgpu_align_pitch(adev, mode_cmd->width, cpp,
fb_tiled);
- domain = amdgpu_display_framebuffer_domains(adev);
+ domain = amdgpu_display_supported_domains(adev);
height = ALIGN(mode_cmd->height, 8);
size = mode_cmd->pitches[0] * height;
struct amdgpu_bo *bo = gem_to_amdgpu_bo(dma_buf->priv);
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
struct ttm_operation_ctx ctx = { true, false };
- u32 domain = amdgpu_display_framebuffer_domains(adev);
+ u32 domain = amdgpu_display_supported_domains(adev);
int ret;
bool reads = (direction == DMA_BIDIRECTIONAL ||
direction == DMA_FROM_DEVICE);
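The two values computed above are typically combined as an early-out: if the CPU access involves no reads, or GTT is not among the supported display domains, there is nothing to migrate before the CPU touches the buffer. A minimal sketch of that gate (not the exact in-tree flow that follows it):

	/* Sketch: bail out early when no migration is needed. */
	if (!reads || !(domain & AMDGPU_GEM_DOMAIN_GTT))
		return 0;

	/* ...otherwise the buffer is moved into GTT before CPU access;
	 * that path is omitted here. */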
return r;
if (plane->type != DRM_PLANE_TYPE_CURSOR)
- domain = amdgpu_display_framebuffer_domains(adev);
+ domain = amdgpu_display_supported_domains(adev);
else
domain = AMDGPU_GEM_DOMAIN_VRAM;
r = amdgpu_bo_pin(rbo, domain, &afb->address);
-
amdgpu_bo_unreserve(rbo);
if (unlikely(r != 0)) {