{
struct intel_vgpu *vgpu = s->vgpu;
struct intel_gvt *gvt = vgpu->gvt;
+ u32 ctx_sr_ctl;
if (offset + 4 > gvt->device_info.mmio_size) {
gvt_vgpu_err("%s access to (%x) outside of MMIO range\n",
if (!intel_gvt_mmio_is_cmd_access(gvt, offset)) {
gvt_vgpu_err("%s access to non-render register (%x)\n",
cmd, offset);
- return 0;
+ return -EBADRQC;
}
if (is_shadowed_mmio(offset)) {
patch_value(s, cmd_ptr(s, index), VGT_PVINFO_PAGE);
}
+ /* TODO
+ * Right now we only scan the LRI command on KBL and in an inhibit
+ * context. That is good enough to support initializing MMIO by LRI
+ * command in a vGPU inhibit context on KBL.
+ */
+ if (IS_KABYLAKE(s->vgpu->gvt->dev_priv) &&
+ intel_gvt_mmio_is_in_ctx(gvt, offset) &&
+ !strncmp(cmd, "lri", 3)) {
+ intel_gvt_hypervisor_read_gpa(s->vgpu,
+ s->workload->ring_context_gpa + 12, &ctx_sr_ctl, 4);
+ /* check inhibit context */
+ if (ctx_sr_ctl & 1) {
+ u32 data = cmd_val(s, index + 1);
+
+ if (intel_gvt_mmio_has_mode_mask(s->vgpu->gvt, offset))
+ intel_vgpu_mask_mmio_write(vgpu,
+ offset, &data, 4);
+ else
+ vgpu_vreg(vgpu, offset) = data;
+ }
+ }
+
/* TODO: Update the global mask if this MMIO is a masked-MMIO */
intel_gvt_mmio_set_cmd_accessed(gvt, offset);
return 0;
}
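For reference, ring_context_gpa + 12 in the LRI scan above is assumed to address the value dword of CTX_CONTEXT_CONTROL in the execlist ring-context image (NOOP, then LRI header, then register/value pairs), so the `ctx_sr_ctl & 1` test reads the restore-inhibit bit. A minimal sketch of that assumed layout:

/* Assumed layout; names mirror the execlist ring-context structure. */
struct execlist_mmio_pair {
	u32 addr;
	u32 val;
};
struct execlist_ring_context {
	u32 nop1;                           /* byte offset 0 */
	u32 lri_cmd_1;                      /* byte offset 4 */
	struct execlist_mmio_pair ctx_ctrl; /* addr at +8, val at +12 */
	/* ... */
};
#define CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT (1 << 0) /* the "& 1" bit */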
+ static int mi_noop_index;
+
static struct cmd_info cmd_info[] = {
{"MI_NOOP", OP_MI_NOOP, F_LEN_CONST, R_ALL, D_ALL, 0, 1, NULL},
cmd = cmd_val(s, 0);
- info = get_cmd_info(s->vgpu->gvt, cmd, s->ring_id);
+ /* fastpath for MI_NOOP */
+ if (cmd == MI_NOOP)
+ info = &cmd_info[mi_noop_index];
+ else
+ info = get_cmd_info(s->vgpu->gvt, cmd, s->ring_id);
+
if (info == NULL) {
gvt_vgpu_err("unknown cmd 0x%x, opcode=0x%x, addr_type=%s, ring %d, workload=%p\n",
cmd, get_opcode(cmd, s->ring_id),
kfree(e);
return -EEXIST;
}
+ if (cmd_info[i].opcode == OP_MI_NOOP)
+ mi_noop_index = i;
INIT_HLIST_NODE(&e->hlist);
add_cmd_entry(gvt, e);
~(TRANS_DDI_BPC_MASK | TRANS_DDI_MODE_SELECT_MASK |
TRANS_DDI_PORT_MASK);
vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) |=
- (TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DP_SST |
+ (TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DVI |
(PORT_B << TRANS_DDI_PORT_SHIFT) |
TRANS_DDI_FUNC_ENABLE);
if (IS_BROADWELL(dev_priv)) {
~(TRANS_DDI_BPC_MASK | TRANS_DDI_MODE_SELECT_MASK |
TRANS_DDI_PORT_MASK);
vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) |=
- (TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DP_SST |
+ (TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DVI |
(PORT_C << TRANS_DDI_PORT_SHIFT) |
TRANS_DDI_FUNC_ENABLE);
if (IS_BROADWELL(dev_priv)) {
~(TRANS_DDI_BPC_MASK | TRANS_DDI_MODE_SELECT_MASK |
TRANS_DDI_PORT_MASK);
vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) |=
- (TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DP_SST |
+ (TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DVI |
(PORT_D << TRANS_DDI_PORT_SHIFT) |
TRANS_DDI_FUNC_ENABLE);
if (IS_BROADWELL(dev_priv)) {
/**
* intel_vgpu_init_display- initialize vGPU virtual display emulation
* @vgpu: a vGPU
+ * @resolution: resolution index for intel_vgpu_edid
*
* This function is used to initialize the vGPU virtual display emulation
*
}
/**
+ * Check whether a 2MB huge GTT page can be used
+ * @vgpu: target vgpu
+ * @entry: target pfn's gtt entry
+ *
* Return 1 if 2MB huge gtt shadowing is possible, 0 if the conditions
* are not met, negative if an error is found.
*/
vgpu_free_mm(mm);
return ERR_PTR(-ENOMEM);
}
+ mm->ggtt_mm.last_partial_off = -1UL;
return mm;
}
invalidate_ppgtt_mm(mm);
} else {
vfree(mm->ggtt_mm.virtual_ggtt);
+ mm->ggtt_mm.last_partial_off = -1UL;
}
vgpu_free_mm(mm);
/**
* intel_vgpu_pin_mm - increase the pin count of a vGPU mm object
- * @vgpu: a vGPU
+ * @mm: target vgpu mm
*
* This function is called when a user wants to use a vGPU mm object. If this
* mm object hasn't been shadowed yet, the shadow will be populated at this
memcpy((void *)&e.val64 + (off & (info->gtt_entry_size - 1)), p_data,
bytes);
+ /* If the ggtt entry size is 8 bytes and it is split into two
+ * 4-byte writes, we assume the two writes are consecutive.
+ * Otherwise, we abort and report an error.
+ */
+ if (bytes < info->gtt_entry_size) {
+ if (ggtt_mm->ggtt_mm.last_partial_off == -1UL) {
+ /* the first partial write */
+ ggtt_mm->ggtt_mm.last_partial_off = off;
+ ggtt_mm->ggtt_mm.last_partial_data = e.val64;
+ return 0;
+ } else if ((g_gtt_index ==
+ (ggtt_mm->ggtt_mm.last_partial_off >>
+ info->gtt_entry_size_shift)) &&
+ (off != ggtt_mm->ggtt_mm.last_partial_off)) {
+ /* the second partial write */
+
+ int last_off = ggtt_mm->ggtt_mm.last_partial_off &
+ (info->gtt_entry_size - 1);
+
+ memcpy((void *)&e.val64 + last_off,
+ (void *)&ggtt_mm->ggtt_mm.last_partial_data +
+ last_off, bytes);
+
+ ggtt_mm->ggtt_mm.last_partial_off = -1UL;
+ } else {
+ int last_offset;
+
+ gvt_vgpu_err("failed to populate guest ggtt entry: abnormal ggtt entry write sequence, last_partial_off=%lx, offset=%x, bytes=%d, ggtt entry size=%d\n",
+ ggtt_mm->ggtt_mm.last_partial_off, off,
+ bytes, info->gtt_entry_size);
+
+ /* set the host ggtt entry to the scratch page and clear
+ * the virtual ggtt entry as not present for the last
+ * partial write offset
+ */
+ last_offset = ggtt_mm->ggtt_mm.last_partial_off &
+ (~(info->gtt_entry_size - 1));
+
+ ggtt_get_host_entry(ggtt_mm, &m, last_offset);
+ ggtt_invalidate_pte(vgpu, &m);
+ ops->set_pfn(&m, gvt->gtt.scratch_mfn);
+ ops->clear_present(&m);
+ ggtt_set_host_entry(ggtt_mm, &m, last_offset);
+ ggtt_invalidate(gvt->dev_priv);
+
+ ggtt_get_guest_entry(ggtt_mm, &e, last_offset);
+ ops->clear_present(&e);
+ ggtt_set_guest_entry(ggtt_mm, &e, last_offset);
+
+ ggtt_mm->ggtt_mm.last_partial_off = off;
+ ggtt_mm->ggtt_mm.last_partial_data = e.val64;
+
+ return 0;
+ }
+ }
+
if (ops->test_present(&e)) {
gfn = ops->get_pfn(&e);
m = e;
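The partial-write path above caches the first 4-byte half of an entry and stitches the second half onto it. A self-contained sketch of the composition, assuming an 8-byte entry written as two consecutive 4-byte guest accesses:

#include <stdint.h>
#include <string.h>

/*
 * Compose one 64-bit GGTT entry from two 4-byte partial writes;
 * off_first/off_second are the byte offsets of each write within
 * the entry (0 and 4 for the usual low-then-high sequence).
 */
static uint64_t compose_ggtt_entry(uint32_t first, uint32_t second,
				   unsigned int off_first,
				   unsigned int off_second)
{
	uint64_t entry = 0;

	memcpy((unsigned char *)&entry + off_first, &first, 4);   /* cached half */
	memcpy((unsigned char *)&entry + off_second, &second, 4); /* current write */
	return entry;
}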
/**
* intel_vgpu_find_ppgtt_mm - find a PPGTT mm object
* @vgpu: a vGPU
- * @page_table_level: PPGTT page table level
- * @root_entry: PPGTT page table root pointers
+ * @pdps: pdp root array
*
* This function is used to find a PPGTT mm object from mm object pool
*
.emulate_mmio_write = intel_vgpu_emulate_mmio_write,
.vgpu_create = intel_gvt_create_vgpu,
.vgpu_destroy = intel_gvt_destroy_vgpu,
+ .vgpu_release = intel_gvt_release_vgpu,
.vgpu_reset = intel_gvt_reset_vgpu,
.vgpu_activate = intel_gvt_activate_vgpu,
.vgpu_deactivate = intel_gvt_deactivate_vgpu,
/**
* intel_gvt_init_host - Load MPT modules and detect if we're running in host
- * @gvt: intel gvt device
*
* This function is called at the driver loading stage. If it fails to find a
* loadable MPT module or detects that we're running in a VM, then GVT-g
/**
* intel_gvt_clean_device - clean a GVT device
- * @gvt: intel gvt device
+ * @dev_priv: i915 private
*
* This function is called at the driver unloading stage, to free the
* resources owned by a GVT device.
if (WARN_ON(!gvt))
return;
+ intel_gvt_destroy_idle_vgpu(gvt->idle_vgpu);
+ intel_gvt_hypervisor_host_exit(&dev_priv->drm.pdev->dev, gvt);
+ intel_gvt_cleanup_vgpu_type_groups(gvt);
+ intel_gvt_clean_vgpu_types(gvt);
+
intel_gvt_debugfs_clean(gvt);
clean_service_thread(gvt);
intel_gvt_clean_cmd_parser(gvt);
intel_gvt_clean_workload_scheduler(gvt);
intel_gvt_clean_gtt(gvt);
intel_gvt_clean_irq(gvt);
- intel_gvt_clean_mmio_info(gvt);
intel_gvt_free_firmware(gvt);
-
- intel_gvt_hypervisor_host_exit(&dev_priv->drm.pdev->dev, gvt);
- intel_gvt_cleanup_vgpu_type_groups(gvt);
- intel_gvt_clean_vgpu_types(gvt);
-
+ intel_gvt_clean_mmio_info(gvt);
idr_destroy(&gvt->vgpu_idr);
- intel_gvt_destroy_idle_vgpu(gvt->idle_vgpu);
-
kfree(dev_priv->gvt);
dev_priv->gvt = NULL;
}
{
write_vreg(vgpu, offset, p_data, bytes);
- if (vgpu_vreg(vgpu, offset) & HSW_PWR_WELL_CTL_REQ(HSW_DISP_PW_GLOBAL))
+ if (vgpu_vreg(vgpu, offset) &
+ HSW_PWR_WELL_CTL_REQ(HSW_PW_CTL_IDX_GLOBAL))
vgpu_vreg(vgpu, offset) |=
- HSW_PWR_WELL_CTL_STATE(HSW_DISP_PW_GLOBAL);
+ HSW_PWR_WELL_CTL_STATE(HSW_PW_CTL_IDX_GLOBAL);
else
vgpu_vreg(vgpu, offset) &=
- ~HSW_PWR_WELL_CTL_STATE(HSW_DISP_PW_GLOBAL);
+ ~HSW_PWR_WELL_CTL_STATE(HSW_PW_CTL_IDX_GLOBAL);
return 0;
}
MMIO_F(PCH_GMBUS0, 4 * 4, 0, 0, 0, D_ALL, gmbus_mmio_read,
gmbus_mmio_write);
- MMIO_F(PCH_GPIOA, 6 * 4, F_UNALIGN, 0, 0, D_ALL, NULL, NULL);
+ MMIO_F(PCH_GPIO_BASE, 6 * 4, F_UNALIGN, 0, 0, D_ALL, NULL, NULL);
MMIO_F(_MMIO(0xe4f00), 0x28, 0, 0, 0, D_ALL, NULL, NULL);
MMIO_F(_MMIO(_PCH_DPB_AUX_CH_CTL), 6 * 4, 0, 0, 0, D_PRE_SKL, NULL,
MMIO_D(GEN6_RC6p_THRESHOLD, D_ALL);
MMIO_D(GEN6_RC6pp_THRESHOLD, D_ALL);
MMIO_D(GEN6_PMINTRMSK, D_ALL);
- /*
- * Use an arbitrary power well controlled by the PWR_WELL_CTL
- * register.
- */
- MMIO_DH(HSW_PWR_WELL_CTL_BIOS(HSW_DISP_PW_GLOBAL), D_BDW, NULL,
- power_well_ctl_mmio_write);
- MMIO_DH(HSW_PWR_WELL_CTL_DRIVER(HSW_DISP_PW_GLOBAL), D_BDW, NULL,
- power_well_ctl_mmio_write);
- MMIO_DH(HSW_PWR_WELL_CTL_KVMR, D_BDW, NULL, power_well_ctl_mmio_write);
- MMIO_DH(HSW_PWR_WELL_CTL_DEBUG(HSW_DISP_PW_GLOBAL), D_BDW, NULL,
- power_well_ctl_mmio_write);
+ MMIO_DH(HSW_PWR_WELL_CTL1, D_BDW, NULL, power_well_ctl_mmio_write);
+ MMIO_DH(HSW_PWR_WELL_CTL2, D_BDW, NULL, power_well_ctl_mmio_write);
+ MMIO_DH(HSW_PWR_WELL_CTL3, D_BDW, NULL, power_well_ctl_mmio_write);
+ MMIO_DH(HSW_PWR_WELL_CTL4, D_BDW, NULL, power_well_ctl_mmio_write);
MMIO_DH(HSW_PWR_WELL_CTL5, D_BDW, NULL, power_well_ctl_mmio_write);
MMIO_DH(HSW_PWR_WELL_CTL6, D_BDW, NULL, power_well_ctl_mmio_write);
MMIO_F(_MMIO(_DPD_AUX_CH_CTL), 6 * 4, 0, 0, 0, D_SKL_PLUS, NULL,
dp_aux_ch_ctl_mmio_write);
- /*
- * Use an arbitrary power well controlled by the PWR_WELL_CTL
- * register.
- */
- MMIO_D(HSW_PWR_WELL_CTL_BIOS(SKL_DISP_PW_MISC_IO), D_SKL_PLUS);
- MMIO_DH(HSW_PWR_WELL_CTL_DRIVER(SKL_DISP_PW_MISC_IO), D_SKL_PLUS, NULL,
- skl_power_well_ctl_write);
+ MMIO_D(HSW_PWR_WELL_CTL1, D_SKL_PLUS);
+ MMIO_DH(HSW_PWR_WELL_CTL2, D_SKL_PLUS, NULL, skl_power_well_ctl_write);
MMIO_D(_MMIO(0xa210), D_SKL_PLUS);
MMIO_D(GEN9_MEDIA_PG_IDLE_HYSTERESIS, D_SKL_PLUS);
return 0;
}
+/**
+ * intel_vgpu_mask_mmio_write - write a masked register
+ * @vgpu: a vGPU
+ * @offset: access offset
+ * @p_data: write data buffer
+ * @bytes: access data length
+ *
+ * Returns:
+ * Zero on success, negative error code if failed.
+ */
+int intel_vgpu_mask_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
+ void *p_data, unsigned int bytes)
+{
+ u32 mask, old_vreg;
+
+ old_vreg = vgpu_vreg(vgpu, offset);
+ write_vreg(vgpu, offset, p_data, bytes);
+ mask = vgpu_vreg(vgpu, offset) >> 16;
+ vgpu_vreg(vgpu, offset) = (old_vreg & ~mask) |
+ (vgpu_vreg(vgpu, offset) & mask);
+
+ return 0;
+}
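The upper 16 bits of the value written to such a register select which of its low 16 bits actually change; unselected bits keep their previous vreg value. A worked example with made-up numbers:

/*
 * old vreg         = 0x0000ffff
 * guest writes val = 0x00ff00aa  ->  mask = val >> 16 = 0x00ff
 * new vreg = (0x0000ffff & ~0x00ff) | (0x00ff00aa & 0x00ff)
 *          = 0x0000ff00 | 0x000000aa
 *          = 0x0000ffaa
 */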
+
/**
* intel_gvt_in_force_nonpriv_whitelist - check whether an MMIO is in the
* force-nonpriv whitelist
* @offset: register offset
* @pdata: data buffer
* @bytes: data length
+ * @is_read: read or write
*
* Returns:
* Zero on success, negative error code if failed.
#include <linux/mdev.h>
#include <linux/debugfs.h>
+#include <linux/nospec.h>
+
#include "i915_drv.h"
#include "gvt.h"
/* Setup DMA mapping. */
*dma_addr = dma_map_page(dev, page, 0, size, PCI_DMA_BIDIRECTIONAL);
- ret = dma_mapping_error(dev, *dma_addr);
- if (ret) {
+ if (dma_mapping_error(dev, *dma_addr)) {
gvt_vgpu_err("DMA mapping failed for pfn 0x%lx, ret %d\n",
page_to_pfn(page), ret);
gvt_unpin_guest_page(vgpu, gfn, size);
+ return -ENOMEM;
}
- return ret;
+ return 0;
}
static void gvt_dma_unmap_page(struct intel_vgpu *vgpu, unsigned long gfn,
if (atomic_cmpxchg(&vgpu->vdev.released, 0, 1))
return;
- intel_gvt_ops->vgpu_deactivate(vgpu);
+ intel_gvt_ops->vgpu_release(vgpu);
ret = vfio_unregister_notifier(mdev_dev(vgpu->vdev.mdev), VFIO_IOMMU_NOTIFY,
&vgpu->vdev.iommu_notifier);
} else if (cmd == VFIO_DEVICE_GET_REGION_INFO) {
struct vfio_region_info info;
struct vfio_info_cap caps = { .buf = NULL, .size = 0 };
- int i, ret;
+ unsigned int i;
+ int ret;
struct vfio_region_info_cap_sparse_mmap *sparse = NULL;
size_t size;
int nr_areas = 1;
if (info.index >= VFIO_PCI_NUM_REGIONS +
vgpu->vdev.num_regions)
return -EINVAL;
+ info.index =
+ array_index_nospec(info.index,
+ VFIO_PCI_NUM_REGIONS +
+ vgpu->vdev.num_regions);
i = info.index - VFIO_PCI_NUM_REGIONS;
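array_index_nospec() (from <linux/nospec.h>, included above) clamps the index under speculative execution so the preceding bounds check cannot be bypassed by a mispredicted branch (Spectre v1). The general pattern, with illustrative names:

/* Validate first, then clamp the index for the speculative window. */
if (idx >= nr_elems)
	return -EINVAL;
idx = array_index_nospec(idx, nr_elems); /* in bounds even speculatively */
val = table[idx];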
&sparse->header, sizeof(*sparse) +
(sparse->nr_areas *
sizeof(*sparse->areas)));
- kfree(sparse);
- if (ret)
+ if (ret) {
+ kfree(sparse);
return ret;
+ }
break;
default:
+ kfree(sparse);
return -EINVAL;
}
}
sizeof(info), caps.buf,
caps.size)) {
kfree(caps.buf);
+ kfree(sparse);
return -EFAULT;
}
info.cap_offset = sizeof(info);
kfree(caps.buf);
}
+ kfree(sparse);
return copy_to_user((void __user *)arg, &info, minsz) ?
-EFAULT : 0;
} else if (cmd == VFIO_DEVICE_GET_IRQ_INFO) {
kvmgt_protect_table_init(info);
gvt_cache_init(vgpu);
- mutex_init(&vgpu->dmabuf_lock);
init_completion(&vgpu->vblank_done);
info->track_node.track_write = kvmgt_page_track_write;
return pfn;
}
- int kvmgt_dma_map_guest_page(unsigned long handle, unsigned long gfn,
+ static int kvmgt_dma_map_guest_page(unsigned long handle, unsigned long gfn,
unsigned long size, dma_addr_t *dma_addr)
{
struct kvmgt_guest_info *info;
__gvt_cache_remove_entry(entry->vgpu, entry);
}
- void kvmgt_dma_unmap_guest_page(unsigned long handle, dma_addr_t dma_addr)
+ static void kvmgt_dma_unmap_guest_page(unsigned long handle, dma_addr_t dma_addr)
{
struct kvmgt_guest_info *info;
struct gvt_dma *entry;
#include "gvt.h"
#include "trace.h"
- /**
- * Defined in Intel Open Source PRM.
- * Ref: https://01.org/linuxgraphics/documentation/hardware-specification-prms
- */
- #define TRVATTL3PTRDW(i) _MMIO(0x4de0 + (i)*4)
- #define TRNULLDETCT _MMIO(0x4de8)
- #define TRINVTILEDETCT _MMIO(0x4dec)
- #define TRVADR _MMIO(0x4df0)
- #define TRTTE _MMIO(0x4df4)
- #define RING_EXCC(base) _MMIO((base) + 0x28)
- #define RING_GFX_MODE(base) _MMIO((base) + 0x29c)
- #define VF_GUARDBAND _MMIO(0x83a4)
-
#define GEN9_MOCS_SIZE 64
/* Raw offset is appended to each line for convenience. */
for (mmio = gvt->engine_mmio_list.mmio;
i915_mmio_reg_valid(mmio->reg); mmio++) {
- if (mmio->in_context)
+ if (mmio->in_context) {
gvt->engine_mmio_list.ctx_mmio_count[mmio->ring_id]++;
+ intel_gvt_mmio_set_in_ctx(gvt, mmio->reg.reg);
+ }
}
}
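intel_gvt_mmio_set_in_ctx() presumably records the register in GVT's per-MMIO attribute table, which is what lets the LRI scan at the top of this series test intel_gvt_mmio_is_in_ctx() with a single flag check. A hedged sketch of the assumed helper pair:

/* Assumed storage: one attribute byte per 4-byte MMIO offset; the
 * F_IN_CTX flag value is an illustration, not the real bit.
 */
#define F_IN_CTX (1 << 6)

static inline void intel_gvt_mmio_set_in_ctx(struct intel_gvt *gvt,
					     unsigned int offset)
{
	gvt->mmio.mmio_attribute[offset >> 2] |= F_IN_CTX;
}

static inline bool intel_gvt_mmio_is_in_ctx(struct intel_gvt *gvt,
					    unsigned int offset)
{
	return gvt->mmio.mmio_attribute[offset >> 2] & F_IN_CTX;
}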
unsigned long context_gpa, context_page_num;
int i;
- gvt_dbg_sched("ring id %d workload lrca %x", ring_id,
- workload->ctx_desc.lrca);
-
- context_page_num = gvt->dev_priv->engine[ring_id]->context_size;
-
- context_page_num = context_page_num >> PAGE_SHIFT;
-
- if (IS_BROADWELL(gvt->dev_priv) && ring_id == RCS)
- context_page_num = 19;
-
- i = 2;
-
- while (i < context_page_num) {
- context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
- (u32)((workload->ctx_desc.lrca + i) <<
- I915_GTT_PAGE_SHIFT));
- if (context_gpa == INTEL_GVT_INVALID_ADDR) {
- gvt_vgpu_err("Invalid guest context descriptor\n");
- return -EFAULT;
- }
-
- page = i915_gem_object_get_page(ctx_obj, LRC_HEADER_PAGES + i);
- dst = kmap(page);
- intel_gvt_hypervisor_read_gpa(vgpu, context_gpa, dst,
- I915_GTT_PAGE_SIZE);
- kunmap(page);
- i++;
- }
-
page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
shadow_ring_context = kmap(page);
sr_oa_regs(workload, (u32 *)shadow_ring_context, false);
kunmap(page);
+
+ if (IS_RESTORE_INHIBIT(shadow_ring_context->ctx_ctrl.val))
+ return 0;
+
+ gvt_dbg_sched("ring id %d workload lrca %x", ring_id,
+ workload->ctx_desc.lrca);
+
+ context_page_num = gvt->dev_priv->engine[ring_id]->context_size;
+
+ context_page_num = context_page_num >> PAGE_SHIFT;
+
+ if (IS_BROADWELL(gvt->dev_priv) && ring_id == RCS)
+ context_page_num = 19;
+
+ i = 2;
+ while (i < context_page_num) {
+ context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
+ (u32)((workload->ctx_desc.lrca + i) <<
+ I915_GTT_PAGE_SHIFT));
+ if (context_gpa == INTEL_GVT_INVALID_ADDR) {
+ gvt_vgpu_err("Invalid guest context descriptor\n");
+ return -EFAULT;
+ }
+
+ page = i915_gem_object_get_page(ctx_obj, LRC_HEADER_PAGES + i);
+ dst = kmap(page);
+ intel_gvt_hypervisor_read_gpa(vgpu, context_gpa, dst,
+ I915_GTT_PAGE_SIZE);
+ kunmap(page);
+ i++;
+ }
return 0;
}
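IS_RESTORE_INHIBIT() gates the bulk copy: for a restore-inhibited context the hardware does not load register state from the context image, so shadowing the remaining context pages is unnecessary. Its definition is assumed to be the usual flag test on CTX_CONTEXT_CONTROL:

/* Assumed definition; the flag is bit 0 of the ctx_ctrl value. */
#define IS_RESTORE_INHIBIT(a)	\
	(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT == \
	 ((a) & CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT))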
kunmap(page);
}
-static void clean_workloads(struct intel_vgpu *vgpu, unsigned long engine_mask)
+void intel_vgpu_clean_workloads(struct intel_vgpu *vgpu,
+ unsigned long engine_mask)
{
struct intel_vgpu_submission *s = &vgpu->submission;
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
* cleaned up during the resetting process later, so doing
* the workload cleanup here doesn't have any impact.
*/
- clean_workloads(vgpu, ENGINE_MASK(ring_id));
+ intel_vgpu_clean_workloads(vgpu, ENGINE_MASK(ring_id));
}
workload->complete(workload);
if (!s->active)
return;
- clean_workloads(vgpu, engine_mask);
+ intel_vgpu_clean_workloads(vgpu, engine_mask);
s->ops->reset(vgpu, engine_mask);
}
/**
* intel_vgpu_select_submission_ops - select virtual submission interface
* @vgpu: a vGPU
+ * @engine_mask: either ALL_ENGINES or target engine mask
* @interface: expected vGPU virtual submission interface
*
* This function is called when guest configures submission interface.
/**
* intel_vgpu_destroy_workload - destroy a vGPU workload
- * @vgpu: a vGPU
+ * @workload: workload to destroy
*
* This function is called when destroying a vGPU workload.
*
/**
* intel_vgpu_create_workload - create a vGPU workload
* @vgpu: a vGPU
+ * @ring_id: ring index
* @desc: a guest context descriptor
*
* This function is called when creating a vGPU workload.
}
}
+ bool is_ccs_modifier(u64 modifier)
+ {
+ return modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
+ modifier == I915_FORMAT_MOD_Yf_TILED_CCS;
+ }
+
static int
intel_fill_fb_info(struct drm_i915_private *dev_priv,
struct drm_framebuffer *fb)
return ret;
}
- if ((fb->modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
- fb->modifier == I915_FORMAT_MOD_Yf_TILED_CCS) && i == 1) {
+ if (is_ccs_modifier(fb->modifier) && i == 1) {
int hsub = fb->format->hsub;
int vsub = fb->format->vsub;
int tile_width, tile_height;
* CCS AUX surface doesn't have its own x/y offsets, we must make sure
* they match with the main surface x/y offsets.
*/
- if (fb->modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
- fb->modifier == I915_FORMAT_MOD_Yf_TILED_CCS) {
+ if (is_ccs_modifier(fb->modifier)) {
while (!skl_check_main_ccs_coordinates(plane_state, x, y, offset)) {
if (offset == 0)
break;
ret = skl_check_nv12_aux_surface(plane_state);
if (ret)
return ret;
- } else if (fb->modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
- fb->modifier == I915_FORMAT_MOD_Yf_TILED_CCS) {
+ } else if (is_ccs_modifier(fb->modifier)) {
ret = skl_check_ccs_aux_surface(plane_state);
if (ret)
return ret;
case I915_FORMAT_MOD_Y_TILED:
return PLANE_CTL_TILED_Y;
case I915_FORMAT_MOD_Y_TILED_CCS:
- return PLANE_CTL_TILED_Y | PLANE_CTL_DECOMPRESSION_ENABLE;
+ return PLANE_CTL_TILED_Y | PLANE_CTL_RENDER_DECOMPRESSION_ENABLE;
case I915_FORMAT_MOD_Yf_TILED:
return PLANE_CTL_TILED_YF;
case I915_FORMAT_MOD_Yf_TILED_CCS:
- return PLANE_CTL_TILED_YF | PLANE_CTL_DECOMPRESSION_ENABLE;
+ return PLANE_CTL_TILED_YF | PLANE_CTL_RENDER_DECOMPRESSION_ENABLE;
default:
MISSING_CASE(fb_modifier);
}
mutex_lock(&dev_priv->pcu_lock);
WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0));
mutex_unlock(&dev_priv->pcu_lock);
- /* wait for pcode to finish disabling IPS, which may take up to 42ms */
+ /*
+ * Wait for PCODE to finish disabling IPS. The BSpec specified
+ * 42ms timeout value leads to occasional timeouts so use 100ms
+ * instead.
+ */
if (intel_wait_for_register(dev_priv,
IPS_CTL, IPS_ENABLE, 0,
- 42))
+ 100))
DRM_ERROR("Timed out waiting for IPS disable\n");
} else {
I915_WRITE(IPS_CTL, 0);
fb->modifier = I915_FORMAT_MOD_X_TILED;
break;
case PLANE_CTL_TILED_Y:
- if (val & PLANE_CTL_DECOMPRESSION_ENABLE)
+ if (val & PLANE_CTL_RENDER_DECOMPRESSION_ENABLE)
fb->modifier = I915_FORMAT_MOD_Y_TILED_CCS;
else
fb->modifier = I915_FORMAT_MOD_Y_TILED;
break;
case PLANE_CTL_TILED_YF:
- if (val & PLANE_CTL_DECOMPRESSION_ENABLE)
+ if (val & PLANE_CTL_RENDER_DECOMPRESSION_ENABLE)
fb->modifier = I915_FORMAT_MOD_Yf_TILED_CCS;
else
fb->modifier = I915_FORMAT_MOD_Yf_TILED;
I915_STATE_WARN(crtc->active, "CRTC for pipe %c enabled\n",
pipe_name(crtc->pipe));
- I915_STATE_WARN(I915_READ(HSW_PWR_WELL_CTL_DRIVER(HSW_DISP_PW_GLOBAL)),
+ I915_STATE_WARN(I915_READ(HSW_PWR_WELL_CTL2),
"Display power well on\n");
I915_STATE_WARN(I915_READ(SPLL_CTL) & SPLL_PLL_ENABLE, "SPLL enabled\n");
I915_STATE_WARN(I915_READ(WRPLL_CTL(0)) & WRPLL_PLL_ENABLE, "WRPLL1 enabled\n");
return intel_cursor_size_ok(plane_state) && IS_ALIGNED(width, 64);
}
- static int i845_check_cursor(struct intel_plane *plane,
- struct intel_crtc_state *crtc_state,
+ static int i845_check_cursor(struct intel_crtc_state *crtc_state,
struct intel_plane_state *plane_state)
{
const struct drm_framebuffer *fb = plane_state->base.fb;
return true;
}
- static int i9xx_check_cursor(struct intel_plane *plane,
- struct intel_crtc_state *crtc_state,
+ static int i9xx_check_cursor(struct intel_crtc_state *crtc_state,
struct intel_plane_state *plane_state)
{
+ struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
const struct drm_framebuffer *fb = plane_state->base.fb;
enum pipe pipe = plane->pipe;
* down.
*/
INIT_WORK(&state->commit_work, intel_atomic_cleanup_work);
- schedule_work(&state->commit_work);
+ queue_work(system_highpri_wq, &state->commit_work);
}
static void intel_atomic_commit_work(struct work_struct *work)
.atomic_duplicate_state = intel_crtc_duplicate_state,
.atomic_destroy_state = intel_crtc_destroy_state,
.set_crc_source = intel_crtc_set_crc_source,
+ .verify_crc_source = intel_crtc_verify_crc_source,
+ .get_crc_sources = intel_crtc_get_crc_sources,
};
struct wait_rps_boost {
INTEL_INFO(dev_priv)->cursor_needs_physical) {
struct drm_i915_gem_object *obj = intel_fb_obj(fb);
const int align = intel_cursor_alignment(dev_priv);
+ int err;
- return i915_gem_object_attach_phys(obj, align);
+ err = i915_gem_object_attach_phys(obj, align);
+ if (err)
+ return err;
}
vma = intel_pin_and_fence_fb_obj(fb,
}
static int
- intel_check_primary_plane(struct intel_plane *plane,
- struct intel_crtc_state *crtc_state,
+ intel_check_primary_plane(struct intel_crtc_state *crtc_state,
struct intel_plane_state *state)
{
+ struct intel_plane *plane = to_intel_plane(state->base.plane);
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
struct drm_crtc *crtc = state->base.crtc;
int min_scale = DRM_PLANE_HELPER_NO_SCALING;
case DRM_FORMAT_XBGR8888:
case DRM_FORMAT_ARGB8888:
case DRM_FORMAT_ABGR8888:
- if (modifier == I915_FORMAT_MOD_Yf_TILED_CCS ||
- modifier == I915_FORMAT_MOD_Y_TILED_CCS)
+ if (is_ccs_modifier(modifier))
return true;
/* fall through */
case DRM_FORMAT_RGB565:
bool skl_plane_has_planar(struct drm_i915_private *dev_priv,
enum pipe pipe, enum plane_id plane_id)
{
- if (plane_id == PLANE_PRIMARY) {
- if (IS_SKYLAKE(dev_priv) || IS_BROXTON(dev_priv))
- return false;
- else if ((INTEL_GEN(dev_priv) == 9 && pipe == PIPE_C) &&
- !IS_GEMINILAKE(dev_priv))
- return false;
- } else if (plane_id >= PLANE_SPRITE0) {
- if (plane_id == PLANE_CURSOR)
- return false;
- if (IS_GEMINILAKE(dev_priv) || INTEL_GEN(dev_priv) == 10) {
- if (plane_id != PLANE_SPRITE0)
- return false;
- } else {
- if (plane_id != PLANE_SPRITE0 || pipe == PIPE_C ||
- IS_SKYLAKE(dev_priv) || IS_BROXTON(dev_priv))
- return false;
- }
- }
+ /*
+ * FIXME: ICL requires two hardware planes for scanning out NV12
+ * framebuffers. Do not advertise support until this is implemented.
+ */
+ if (INTEL_GEN(dev_priv) >= 11)
+ return false;
+
+ if (IS_SKYLAKE(dev_priv) || IS_BROXTON(dev_priv))
+ return false;
+
+ if (INTEL_GEN(dev_priv) == 9 && !IS_GEMINILAKE(dev_priv) && pipe == PIPE_C)
+ return false;
+
+ if (plane_id != PLANE_PRIMARY && plane_id != PLANE_SPRITE0)
+ return false;
+
return true;
}
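Evaluating the flattened predicate above for a few concrete cases shows the intended NV12 plane matrix:

/*
 * KBL (gen9, not GLK), pipe A, PLANE_PRIMARY -> true
 * GLK,                 pipe C, PLANE_SPRITE0 -> true  (GLK exempts pipe C)
 * SKL or BXT,          any,    any           -> false
 * gen11+,              any,    any           -> false (two-plane NV12 TODO)
 * any,                 any,    PLANE_CURSOR  -> false
 */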
intel_pps_init(dev_priv);
+ if (INTEL_INFO(dev_priv)->num_pipes == 0)
+ return;
+
/*
* intel_edp_init_connector() depends on this completing first, to
* prevent the registration of both eDP and LVDS and the incorrect
break;
case DRM_FORMAT_NV12:
if (INTEL_GEN(dev_priv) < 9 || IS_SKYLAKE(dev_priv) ||
- IS_BROXTON(dev_priv)) {
+ IS_BROXTON(dev_priv) || INTEL_GEN(dev_priv) >= 11) {
DRM_DEBUG_KMS("unsupported pixel format: %s\n",
drm_get_format_name(mode_cmd->pixel_format,
&format_name));
* potential runtime errors at plane configuration time.
*/
if (IS_GEN9(dev_priv) && i == 0 && fb->width > 3840 &&
- (fb->modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
- fb->modifier == I915_FORMAT_MOD_Yf_TILED_CCS))
+ is_ccs_modifier(fb->modifier))
stride_alignment *= 4;
if (fb->pitches[i] & (stride_alignment - 1)) {
DRM_DEBUG_DRIVER("FDI PLL freq=%d\n", dev_priv->fdi_pll_freq);
}
+ static int intel_initial_commit(struct drm_device *dev)
+ {
+ struct drm_atomic_state *state = NULL;
+ struct drm_modeset_acquire_ctx ctx;
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *crtc_state;
+ int ret = 0;
+
+ state = drm_atomic_state_alloc(dev);
+ if (!state)
+ return -ENOMEM;
+
+ drm_modeset_acquire_init(&ctx, 0);
+
+ retry:
+ state->acquire_ctx = &ctx;
+
+ drm_for_each_crtc(crtc, dev) {
+ crtc_state = drm_atomic_get_crtc_state(state, crtc);
+ if (IS_ERR(crtc_state)) {
+ ret = PTR_ERR(crtc_state);
+ goto out;
+ }
+
+ if (crtc_state->active) {
+ ret = drm_atomic_add_affected_planes(state, crtc);
+ if (ret)
+ goto out;
+ }
+ }
+
+ ret = drm_atomic_commit(state);
+
+ out:
+ if (ret == -EDEADLK) {
+ drm_atomic_state_clear(state);
+ drm_modeset_backoff(&ctx);
+ goto retry;
+ }
+
+ drm_atomic_state_put(state);
+
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
+
+ return ret;
+ }
+
int intel_modeset_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
struct i915_ggtt *ggtt = &dev_priv->ggtt;
enum pipe pipe;
struct intel_crtc *crtc;
+ int ret;
dev_priv->modeset_wq = alloc_ordered_workqueue("i915_modeset", 0);
intel_init_pm(dev_priv);
- if (INTEL_INFO(dev_priv)->num_pipes == 0)
- return 0;
-
/*
* There may be no VBT; and if the BIOS enabled SSC we can
* just keep using it to avoid unnecessary flicker. Whereas if the
INTEL_INFO(dev_priv)->num_pipes > 1 ? "s" : "");
for_each_pipe(dev_priv, pipe) {
- int ret;
-
ret = intel_crtc_init(dev_priv, pipe);
if (ret) {
drm_mode_config_cleanup(dev);
if (!HAS_GMCH_DISPLAY(dev_priv))
sanitize_watermarks(dev);
+ /*
+ * Force all active planes to recompute their states, so that on
+ * mode_setcrtc after probe all the intel_plane_state variables
+ * are already calculated and there are no assert_plane warnings
+ * during bootup.
+ */
+ ret = intel_initial_commit(dev);
+ if (ret)
+ DRM_DEBUG_KMS("Initial commit in probe failed.\n");
+
return 0;
}
struct intel_encoder *encoder;
int i;
+ intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
+
intel_early_display_was(dev_priv);
intel_modeset_readout_hw_state(dev);
if (WARN_ON(put_domains))
modeset_put_power_domains(dev_priv, put_domains);
}
- intel_display_set_init_power(dev_priv, false);
- intel_power_domains_verify_state(dev_priv);
+ intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
intel_fbc_init_pipe_state(dev_priv);
}
flush_work(&dev_priv->atomic_helper.free_work);
WARN_ON(!llist_empty(&dev_priv->atomic_helper.free_list));
- intel_disable_gt_powersave(dev_priv);
-
/*
* Interrupts and polling as the first thing to avoid creating havoc.
* Too much stuff here (turning of connectors, ...) would
intel_cleanup_overlay(dev_priv);
- intel_cleanup_gt_powersave(dev_priv);
-
intel_teardown_gmbus(dev_priv);
destroy_workqueue(dev_priv->modeset_wq);
return NULL;
if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
- error->power_well_driver =
- I915_READ(HSW_PWR_WELL_CTL_DRIVER(HSW_DISP_PW_GLOBAL));
+ error->power_well_driver = I915_READ(HSW_PWR_WELL_CTL2);
for_each_pipe(dev_priv, i) {
error->pipe[i].power_domain_on =
#ifndef _INTEL_DISPLAY_H_
#define _INTEL_DISPLAY_H_
+ enum i915_gpio {
+ GPIOA,
+ GPIOB,
+ GPIOC,
+ GPIOD,
+ GPIOE,
+ GPIOF,
+ GPIOG,
+ GPIOH,
+ __GPIOI_UNUSED,
+ GPIOJ,
+ GPIOK,
+ GPIOL,
+ GPIOM,
+ };
+
enum pipe {
INVALID_PIPE = -1,
#define port_name(p) ((p) + 'A')
+/*
+ * Port identifiers referenced from other drivers.
+ * Expected to remain stable over time.
+ */
+static inline const char *port_identifier(enum port port)
+{
+ switch (port) {
+ case PORT_A:
+ return "Port A";
+ case PORT_B:
+ return "Port B";
+ case PORT_C:
+ return "Port C";
+ case PORT_D:
+ return "Port D";
+ case PORT_E:
+ return "Port E";
+ case PORT_F:
+ return "Port F";
+ default:
+ return "<invalid>";
+ }
+}
+
enum tc_port {
PORT_TC_NONE = -1,
I915_MAX_TC_PORTS
};
+ enum tc_port_type {
+ TC_PORT_UNKNOWN = 0,
+ TC_PORT_TYPEC,
+ TC_PORT_TBT,
+ TC_PORT_LEGACY,
+ };
+
enum dpio_channel {
DPIO_CH0,
DPIO_CH1
#define for_each_power_domain_well(__dev_priv, __power_well, __domain_mask) \
for_each_power_well(__dev_priv, __power_well) \
- for_each_if((__power_well)->domains & (__domain_mask))
+ for_each_if((__power_well)->desc->domains & (__domain_mask))
#define for_each_power_domain_well_rev(__dev_priv, __power_well, __domain_mask) \
for_each_power_well_rev(__dev_priv, __power_well) \
- for_each_if((__power_well)->domains & (__domain_mask))
+ for_each_if((__power_well)->desc->domains & (__domain_mask))
#define for_each_new_intel_plane_in_state(__state, plane, new_plane_state, __i) \
for ((__i) = 0; \
struct intel_link_m_n *m_n,
bool reduce_m_n);
+ bool is_ccs_modifier(u64 modifier);
#endif
#include <drm/drm_dp_mst_helper.h>
#include <drm/drm_rect.h>
#include <drm/drm_atomic.h>
+#include <media/cec-notifier.h>
/**
* __wait_for - magic wait macro
void (*disable_plane)(struct intel_plane *plane,
struct intel_crtc *crtc);
bool (*get_hw_state)(struct intel_plane *plane, enum pipe *pipe);
- int (*check_plane)(struct intel_plane *plane,
- struct intel_crtc_state *crtc_state,
- struct intel_plane_state *state);
+ int (*check_plane)(struct intel_crtc_state *crtc_state,
+ struct intel_plane_state *plane_state);
};
struct intel_watermark_params {
bool has_audio;
bool rgb_quant_range_selectable;
struct intel_connector *attached_connector;
+ struct cec_notifier *cec_notifier;
};
struct intel_dp_mst_encoder;
bool release_cl2_override;
uint8_t max_lanes;
enum intel_display_power_domain ddi_io_power_domain;
+ enum tc_port_type tc_type;
void (*write_infoframe)(struct drm_encoder *encoder,
const struct intel_crtc_state *crtc_state,
return &dp_to_dig_port(intel_dp)->lspcon;
}
+ static inline struct drm_i915_private *
+ dp_to_i915(struct intel_dp *intel_dp)
+ {
+ return to_i915(dp_to_dig_port(intel_dp)->base.base.dev);
+ }
+
static inline struct intel_digital_port *
hdmi_to_dig_port(struct intel_hdmi *intel_hdmi)
{
unsigned int frontbuffer_bits);
void intel_edp_drrs_flush(struct drm_i915_private *dev_priv,
unsigned int frontbuffer_bits);
+ void icl_program_mg_dp_mode(struct intel_dp *intel_dp);
+ void icl_enable_phy_clock_gating(struct intel_digital_port *dig_port);
+ void icl_disable_phy_clock_gating(struct intel_digital_port *dig_port);
void
intel_dp_program_link_training_pattern(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state);
void intel_psr_disable(struct intel_dp *intel_dp,
const struct intel_crtc_state *old_crtc_state);
+ int intel_psr_set_debugfs_mode(struct drm_i915_private *dev_priv,
+ struct drm_modeset_acquire_ctx *ctx,
+ u64 value);
void intel_psr_invalidate(struct drm_i915_private *dev_priv,
unsigned frontbuffer_bits,
enum fb_op_origin origin);
void intel_psr_init(struct drm_i915_private *dev_priv);
void intel_psr_compute_config(struct intel_dp *intel_dp,
struct intel_crtc_state *crtc_state);
- void intel_psr_irq_control(struct drm_i915_private *dev_priv, bool debug);
+ void intel_psr_irq_control(struct drm_i915_private *dev_priv, u32 debug);
void intel_psr_irq_handler(struct drm_i915_private *dev_priv, u32 psr_iir);
void intel_psr_short_pulse(struct intel_dp *intel_dp);
- int intel_psr_wait_for_idle(const struct intel_crtc_state *new_crtc_state);
+ int intel_psr_wait_for_idle(const struct intel_crtc_state *new_crtc_state,
+ u32 *out_value);
/* intel_runtime_pm.c */
int intel_power_domains_init(struct drm_i915_private *);
- void intel_power_domains_fini(struct drm_i915_private *);
+ void intel_power_domains_cleanup(struct drm_i915_private *dev_priv);
void intel_power_domains_init_hw(struct drm_i915_private *dev_priv, bool resume);
- void intel_power_domains_suspend(struct drm_i915_private *dev_priv);
- void intel_power_domains_verify_state(struct drm_i915_private *dev_priv);
+ void intel_power_domains_fini_hw(struct drm_i915_private *dev_priv);
+ void intel_power_domains_enable(struct drm_i915_private *dev_priv);
+ void intel_power_domains_disable(struct drm_i915_private *dev_priv);
+
+ enum i915_drm_suspend_mode {
+ I915_DRM_SUSPEND_IDLE,
+ I915_DRM_SUSPEND_MEM,
+ I915_DRM_SUSPEND_HIBERNATE,
+ };
+
+ void intel_power_domains_suspend(struct drm_i915_private *dev_priv,
+ enum i915_drm_suspend_mode);
+ void intel_power_domains_resume(struct drm_i915_private *dev_priv);
void bxt_display_core_init(struct drm_i915_private *dev_priv, bool resume);
void bxt_display_core_uninit(struct drm_i915_private *dev_priv);
void intel_runtime_pm_enable(struct drm_i915_private *dev_priv);
+ void intel_runtime_pm_disable(struct drm_i915_private *dev_priv);
const char *
intel_display_power_domain_str(enum intel_display_power_domain domain);
void intel_runtime_pm_get_noresume(struct drm_i915_private *dev_priv);
void intel_runtime_pm_put(struct drm_i915_private *dev_priv);
- void intel_display_set_init_power(struct drm_i915_private *dev, bool enable);
-
void chv_phy_powergate_lanes(struct intel_encoder *encoder,
bool override, unsigned int mask);
bool chv_phy_powergate_ch(struct drm_i915_private *dev_priv, enum dpio_phy phy,
/* intel_pipe_crc.c */
#ifdef CONFIG_DEBUG_FS
-int intel_crtc_set_crc_source(struct drm_crtc *crtc, const char *source_name,
- size_t *values_cnt);
+int intel_crtc_set_crc_source(struct drm_crtc *crtc, const char *source_name);
+int intel_crtc_verify_crc_source(struct drm_crtc *crtc,
+ const char *source_name, size_t *values_cnt);
+const char *const *intel_crtc_get_crc_sources(struct drm_crtc *crtc,
+ size_t *count);
void intel_crtc_disable_pipe_crc(struct intel_crtc *crtc);
void intel_crtc_enable_pipe_crc(struct intel_crtc *crtc);
#else
#define intel_crtc_set_crc_source NULL
+#define intel_crtc_verify_crc_source NULL
+#define intel_crtc_get_crc_sources NULL
static inline void intel_crtc_disable_pipe_crc(struct intel_crtc *crtc)
{
}
connected = true;
}
+ cec_notifier_set_phys_addr_from_edid(intel_hdmi->cec_notifier, edid);
+
return connected;
}
static enum drm_connector_status
intel_hdmi_detect(struct drm_connector *connector, bool force)
{
- enum drm_connector_status status;
+ enum drm_connector_status status = connector_status_disconnected;
struct drm_i915_private *dev_priv = to_i915(connector->dev);
struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
+ struct intel_encoder *encoder = &hdmi_to_dig_port(intel_hdmi)->base;
DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
connector->base.id, connector->name);
intel_display_power_get(dev_priv, POWER_DOMAIN_GMBUS);
+ if (IS_ICELAKE(dev_priv) &&
+ !intel_digital_port_connected(encoder))
+ goto out;
+
intel_hdmi_unset_edid(connector);
if (intel_hdmi_set_edid(connector))
status = connector_status_connected;
- else
- status = connector_status_disconnected;
+ out:
intel_display_power_put(dev_priv, POWER_DOMAIN_GMBUS);
+ if (status != connector_status_connected)
+ cec_notifier_phys_addr_invalidate(intel_hdmi->cec_notifier);
+
return status;
}
static void intel_hdmi_destroy(struct drm_connector *connector)
{
+ if (intel_attached_hdmi(connector)->cec_notifier)
+ cec_notifier_put(intel_attached_hdmi(connector)->cec_notifier);
kfree(to_intel_connector(connector)->detect_edid);
drm_connector_cleanup(connector);
kfree(connector);
u32 temp = I915_READ(PEG_BAND_GAP_DATA);
I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
}
+
+ intel_hdmi->cec_notifier = cec_notifier_get_conn(dev->dev,
+ port_identifier(port));
+ if (!intel_hdmi->cec_notifier)
+ DRM_DEBUG_KMS("CEC notifier get failed\n");
}
void intel_hdmi_init(struct drm_i915_private *dev_priv,