/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include <linux/kernel.h>

#include "i915_drv.h"
#include "intel_display_types.h"
#include "intel_hotplug.h"

/**
 * DOC: Hotplug
 *
 * Simply put, hotplug occurs when a display is connected to or disconnected
 * from the system. However, there may be adapters and docking stations and
 * Display Port short pulses and MST devices involved, complicating matters.
 *
 * Hotplug in i915 is handled at many different levels of abstraction.
 *
 * The platform dependent interrupt handling code in i915_irq.c enables,
 * disables, and does preliminary handling of the interrupts. The interrupt
 * handlers gather the hotplug detect (HPD) information from relevant registers
 * into a platform independent mask of hotplug pins that have fired.
 *
 * The platform independent interrupt handler intel_hpd_irq_handler() in
 * intel_hotplug.c does hotplug irq storm detection and mitigation, and passes
 * further processing to appropriate bottom halves (Display Port specific and
 * platform independent).
 *
 * The Display Port work function i915_digport_work_func() calls into
 * intel_dp_hpd_pulse() via hooks, which handles DP short pulses and DP MST long
 * pulses, with failures and non-MST long pulses triggering regular hotplug
 * processing on the connector.
 *
 * The regular hotplug work function i915_hotplug_work_func() calls connector
 * detect hooks, and, if the connector status changes, triggers sending of a
 * hotplug uevent to userspace via drm_kms_helper_hotplug_event().
 *
 * Finally, userspace is responsible for triggering a modeset upon receiving
 * the hotplug uevent, disabling or enabling the crtc as needed.
 *
 * The hotplug interrupt storm detection and mitigation code keeps track of the
 * number of interrupts per hotplug pin over a period of time, and if the number
 * of interrupts exceeds a certain threshold, the interrupt is disabled for a
 * while before being re-enabled. The intention is to mitigate issues arising
 * from broken hardware triggering massive numbers of interrupts and grinding
 * the system to a halt.
 *
 * The current implementation expects that a hotplug interrupt storm will not
 * be seen when a DisplayPort sink is connected. Hence, on platforms whose DP
 * callback is handled by i915_digport_work_func(), re-enabling of HPD is not
 * performed (it was never expected to be disabled in the first place). This
 * behavior is specific to DP sinks handled by that routine; any other display,
 * such as HDMI or DVI, enabled on the same port gets the proper handling,
 * since it uses i915_hotplug_work_func(), where the re-enabling logic lives.
 */

/**
 * intel_hpd_pin_default - return default pin associated with certain port.
 * @dev_priv: private driver data pointer
 * @port: the hpd port to get associated pin
 *
 * It is only valid and used by digital port encoders.
 *
 * Returns the pin that is associated with @port, or HPD_NONE if no pin is
 * hard associated with that @port.
 */
enum hpd_pin intel_hpd_pin_default(struct drm_i915_private *dev_priv,
                                   enum port port)
{
        enum phy phy = intel_port_to_phy(dev_priv, port);

        switch (phy) {
        case PHY_F:
                return IS_CNL_WITH_PORT_F(dev_priv) ? HPD_PORT_E : HPD_PORT_F;
        case PHY_A ... PHY_E:
        case PHY_G ... PHY_I:
                return HPD_PORT_A + phy - PHY_A;
        default:
                return HPD_NONE;
        }
}

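/* Detection period, re-enable delay and retry delay, all in milliseconds. */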
#define HPD_STORM_DETECT_PERIOD		1000
#define HPD_STORM_REENABLE_DELAY	(2 * 60 * 1000)
#define HPD_RETRY_DELAY			1000

static enum hpd_pin
intel_connector_hpd_pin(struct intel_connector *connector)
{
        struct intel_encoder *encoder = intel_attached_encoder(connector);

        /*
         * MST connectors get their encoder attached dynamically
         * so we need to make sure we have an encoder here. But since
         * MST encoders have their hpd_pin set to HPD_NONE we don't
         * have to special case them beyond that.
         */
        return encoder ? encoder->hpd_pin : HPD_NONE;
}

/**
 * intel_hpd_irq_storm_detect - gather stats and detect HPD IRQ storm on a pin
 * @dev_priv: private driver data pointer
 * @pin: the pin to gather stats on
 * @long_hpd: whether the HPD IRQ was long or short
 *
 * Gather stats about HPD IRQs from the specified @pin, and detect IRQ
 * storms. Only the pin specific stats and state are changed, the caller is
 * responsible for further action.
 *
 * The number of IRQs that are allowed within @HPD_STORM_DETECT_PERIOD is
 * stored in @dev_priv->hotplug.hpd_storm_threshold which defaults to
 * @HPD_STORM_DEFAULT_THRESHOLD. Long IRQs count as +10 to this threshold, and
 * short IRQs count as +1. If this threshold is exceeded, it's considered an
 * IRQ storm and the IRQ state is set to @HPD_MARK_DISABLED.
 *
 * By default, most systems will only count long IRQs towards
 * &dev_priv->hotplug.hpd_storm_threshold. However, some older systems also
 * suffer from short IRQ storms and must also track these. Because short IRQ
 * storms are naturally caused by sideband interactions with DP MST devices,
 * short IRQ detection is only enabled for systems without DP MST support.
 * Systems which are new enough to support DP MST are far less likely to
 * suffer from IRQ storms at all, so this is fine.
 *
 * The HPD threshold can be controlled through i915_hpd_storm_ctl in debugfs,
 * and should only be adjusted for automated hotplug testing.
 *
 * Return true if an IRQ storm was detected on @pin.
 */
static bool intel_hpd_irq_storm_detect(struct drm_i915_private *dev_priv,
                                       enum hpd_pin pin, bool long_hpd)
{
        struct i915_hotplug *hpd = &dev_priv->hotplug;
        unsigned long start = hpd->stats[pin].last_jiffies;
        unsigned long end = start + msecs_to_jiffies(HPD_STORM_DETECT_PERIOD);
        const int increment = long_hpd ? 10 : 1;
        const int threshold = hpd->hpd_storm_threshold;
        bool storm = false;

        if (!threshold ||
            (!long_hpd && !dev_priv->hotplug.hpd_short_storm_enabled))
                return false;

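        /* Start a fresh detection window if the previous one has expired. */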
        if (!time_in_range(jiffies, start, end)) {
                hpd->stats[pin].last_jiffies = jiffies;
                hpd->stats[pin].count = 0;
        }

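        /* Long pulses weigh 10x as much as short pulses toward the threshold. */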
        hpd->stats[pin].count += increment;
        if (hpd->stats[pin].count > threshold) {
                hpd->stats[pin].state = HPD_MARK_DISABLED;
                drm_dbg_kms(&dev_priv->drm,
                            "HPD interrupt storm detected on PIN %d\n", pin);
                storm = true;
        } else {
                drm_dbg_kms(&dev_priv->drm,
                            "Received HPD interrupt on PIN %d - cnt: %d\n",
                            pin,
                            hpd->stats[pin].count);
        }

        return storm;
}

static void
intel_hpd_irq_storm_switch_to_polling(struct drm_i915_private *dev_priv)
{
        struct drm_device *dev = &dev_priv->drm;
        struct drm_connector_list_iter conn_iter;
        struct intel_connector *connector;
        bool hpd_disabled = false;

        lockdep_assert_held(&dev_priv->irq_lock);

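        /*
         * Switch every connector whose HPD pin was flagged for a storm from
         * hotplug detection to polling.
         */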
        drm_connector_list_iter_begin(dev, &conn_iter);
        for_each_intel_connector_iter(connector, &conn_iter) {
                enum hpd_pin pin;

                if (connector->base.polled != DRM_CONNECTOR_POLL_HPD)
                        continue;

                pin = intel_connector_hpd_pin(connector);
                if (pin == HPD_NONE ||
                    dev_priv->hotplug.stats[pin].state != HPD_MARK_DISABLED)
                        continue;

                drm_info(&dev_priv->drm,
                         "HPD interrupt storm detected on connector %s: "
                         "switching from hotplug detection to polling\n",
                         connector->base.name);

                dev_priv->hotplug.stats[pin].state = HPD_DISABLED;
                connector->base.polled = DRM_CONNECTOR_POLL_CONNECT |
                        DRM_CONNECTOR_POLL_DISCONNECT;
                hpd_disabled = true;
        }
        drm_connector_list_iter_end(&conn_iter);

        /* Enable polling and queue hotplug re-enabling. */
        if (hpd_disabled) {
                drm_kms_helper_poll_enable(dev);
                mod_delayed_work(system_wq, &dev_priv->hotplug.reenable_work,
                                 msecs_to_jiffies(HPD_STORM_REENABLE_DELAY));
        }
}

static void intel_hpd_irq_storm_reenable_work(struct work_struct *work)
{
        struct drm_i915_private *dev_priv =
                container_of(work, typeof(*dev_priv),
                             hotplug.reenable_work.work);
        struct drm_device *dev = &dev_priv->drm;
        struct drm_connector_list_iter conn_iter;
        struct intel_connector *connector;
        intel_wakeref_t wakeref;
        enum hpd_pin pin;

        wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);

        spin_lock_irq(&dev_priv->irq_lock);

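        /*
         * Restore the original polling mode of connectors whose HPD pin was
         * disabled due to an interrupt storm.
         */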
        drm_connector_list_iter_begin(dev, &conn_iter);
        for_each_intel_connector_iter(connector, &conn_iter) {
                pin = intel_connector_hpd_pin(connector);
                if (pin == HPD_NONE ||
                    dev_priv->hotplug.stats[pin].state != HPD_DISABLED)
                        continue;

                if (connector->base.polled != connector->polled)
                        drm_dbg(&dev_priv->drm,
                                "Reenabling HPD on connector %s\n",
                                connector->base.name);
                connector->base.polled = connector->polled;
        }
        drm_connector_list_iter_end(&conn_iter);

        for_each_hpd_pin(pin) {
                if (dev_priv->hotplug.stats[pin].state == HPD_DISABLED)
                        dev_priv->hotplug.stats[pin].state = HPD_ENABLED;
        }

        if (dev_priv->display_irqs_enabled && dev_priv->display.hpd_irq_setup)
                dev_priv->display.hpd_irq_setup(dev_priv);

        spin_unlock_irq(&dev_priv->irq_lock);

        intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
}

enum intel_hotplug_state
intel_encoder_hotplug(struct intel_encoder *encoder,
                      struct intel_connector *connector,
                      bool irq_received)
{
        struct drm_device *dev = connector->base.dev;
        enum drm_connector_status old_status;

        drm_WARN_ON(dev, !mutex_is_locked(&dev->mode_config.mutex));
        old_status = connector->base.status;

        connector->base.status =
                drm_helper_probe_detect(&connector->base, NULL, false);

        if (old_status == connector->base.status)
                return INTEL_HOTPLUG_UNCHANGED;

        drm_dbg_kms(&to_i915(dev)->drm,
                    "[CONNECTOR:%d:%s] status updated from %s to %s\n",
                    connector->base.base.id,
                    connector->base.name,
                    drm_get_connector_status_name(old_status),
                    drm_get_connector_status_name(connector->base.status));

        return INTEL_HOTPLUG_CHANGED;
}

static bool intel_encoder_has_hpd_pulse(struct intel_encoder *encoder)
{
        return intel_encoder_is_dig_port(encoder) &&
                enc_to_dig_port(encoder)->hpd_pulse != NULL;
}

static void i915_digport_work_func(struct work_struct *work)
{
        struct drm_i915_private *dev_priv =
                container_of(work, struct drm_i915_private, hotplug.dig_port_work);
        u32 long_port_mask, short_port_mask;
        struct intel_encoder *encoder;
        u32 old_bits = 0;

        spin_lock_irq(&dev_priv->irq_lock);
        long_port_mask = dev_priv->hotplug.long_port_mask;
        dev_priv->hotplug.long_port_mask = 0;
        short_port_mask = dev_priv->hotplug.short_port_mask;
        dev_priv->hotplug.short_port_mask = 0;
        spin_unlock_irq(&dev_priv->irq_lock);

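        /*
         * With the port masks snapshotted and the lock dropped, let each
         * affected digital port handle its pulse via ->hpd_pulse().
         */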
        for_each_intel_encoder(&dev_priv->drm, encoder) {
                struct intel_digital_port *dig_port;
                enum port port = encoder->port;
                bool long_hpd, short_hpd;
                enum irqreturn ret;

                if (!intel_encoder_has_hpd_pulse(encoder))
                        continue;

                long_hpd = long_port_mask & BIT(port);
                short_hpd = short_port_mask & BIT(port);

                if (!long_hpd && !short_hpd)
                        continue;

                dig_port = enc_to_dig_port(encoder);

                ret = dig_port->hpd_pulse(dig_port, long_hpd);
                if (ret == IRQ_NONE) {
                        /* fall back to old school hpd */
                        old_bits |= BIT(encoder->hpd_pin);
                }
        }

        if (old_bits) {
                spin_lock_irq(&dev_priv->irq_lock);
                dev_priv->hotplug.event_bits |= old_bits;
                spin_unlock_irq(&dev_priv->irq_lock);
                queue_delayed_work(system_wq, &dev_priv->hotplug.hotplug_work, 0);
        }
}

/*
 * Handle hotplug events outside the interrupt handler proper.
 */
static void i915_hotplug_work_func(struct work_struct *work)
{
        struct drm_i915_private *dev_priv =
                container_of(work, struct drm_i915_private,
                             hotplug.hotplug_work.work);
        struct drm_device *dev = &dev_priv->drm;
        struct drm_connector_list_iter conn_iter;
        struct intel_connector *connector;
        u32 changed = 0, retry = 0;
        u32 hpd_event_bits;
        u32 hpd_retry_bits;

        mutex_lock(&dev->mode_config.mutex);
        drm_dbg_kms(&dev_priv->drm, "running encoder hotplug functions\n");

        spin_lock_irq(&dev_priv->irq_lock);

        hpd_event_bits = dev_priv->hotplug.event_bits;
        dev_priv->hotplug.event_bits = 0;
        hpd_retry_bits = dev_priv->hotplug.retry_bits;
        dev_priv->hotplug.retry_bits = 0;

        /* Enable polling for connectors which had HPD IRQ storms */
        intel_hpd_irq_storm_switch_to_polling(dev_priv);

        spin_unlock_irq(&dev_priv->irq_lock);

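        /*
         * Let each connector with a pending event or retry run its encoder's
         * ->hotplug() hook and note whether its status changed.
         */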
        drm_connector_list_iter_begin(dev, &conn_iter);
        for_each_intel_connector_iter(connector, &conn_iter) {
                enum hpd_pin pin;
                u32 hpd_bit;

                pin = intel_connector_hpd_pin(connector);
                if (pin == HPD_NONE)
                        continue;

                hpd_bit = BIT(pin);
                if ((hpd_event_bits | hpd_retry_bits) & hpd_bit) {
                        struct intel_encoder *encoder =
                                intel_attached_encoder(connector);

                        drm_dbg_kms(&dev_priv->drm,
                                    "Connector %s (pin %i) received hotplug event.\n",
                                    connector->base.name, pin);

                        switch (encoder->hotplug(encoder, connector,
                                                 hpd_event_bits & hpd_bit)) {
                        case INTEL_HOTPLUG_UNCHANGED:
                                break;
                        case INTEL_HOTPLUG_CHANGED:
                                changed |= hpd_bit;
                                break;
                        case INTEL_HOTPLUG_RETRY:
                                retry |= hpd_bit;
                                break;
                        }
                }
        }
        drm_connector_list_iter_end(&conn_iter);
        mutex_unlock(&dev->mode_config.mutex);

        if (changed)
                drm_kms_helper_hotplug_event(dev);

        /* Remove shared HPD pins that have changed */
        retry &= ~changed;
        if (retry) {
                spin_lock_irq(&dev_priv->irq_lock);
                dev_priv->hotplug.retry_bits |= retry;
                spin_unlock_irq(&dev_priv->irq_lock);

                mod_delayed_work(system_wq, &dev_priv->hotplug.hotplug_work,
                                 msecs_to_jiffies(HPD_RETRY_DELAY));
        }
}

/**
 * intel_hpd_irq_handler - main hotplug irq handler
 * @dev_priv: drm_i915_private
 * @pin_mask: a mask of hpd pins that have triggered the irq
 * @long_mask: a mask of hpd pins that may be long hpd pulses
 *
 * This is the main hotplug irq handler for all platforms. The platform specific
 * irq handlers call the platform specific hotplug irq handlers, which read and
 * decode the appropriate registers into bitmasks about hpd pins that have
 * triggered (@pin_mask), and which of those pins may be long pulses
 * (@long_mask). The @long_mask is ignored if the port corresponding to the pin
 * is not a digital port.
 *
 * Here, we do hotplug irq storm detection and mitigation, and pass further
 * processing to appropriate bottom halves.
 */
void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
                           u32 pin_mask, u32 long_mask)
{
        struct intel_encoder *encoder;
        bool storm_detected = false;
        bool queue_dig = false, queue_hp = false;
        u32 long_hpd_pulse_mask = 0;
        u32 short_hpd_pulse_mask = 0;
        enum hpd_pin pin;

        if (!pin_mask)
                return;

        spin_lock(&dev_priv->irq_lock);

        /*
         * Determine whether ->hpd_pulse() exists for each pin, and
         * whether we have a short or a long pulse. This is needed
         * as each pin may have up to two encoders (HDMI and DP) and
         * only one of them (DP) will have ->hpd_pulse().
         */
        for_each_intel_encoder(&dev_priv->drm, encoder) {
                bool has_hpd_pulse = intel_encoder_has_hpd_pulse(encoder);
                enum port port = encoder->port;
                bool long_hpd;

                pin = encoder->hpd_pin;
                if (!(BIT(pin) & pin_mask))
                        continue;

                if (!has_hpd_pulse)
                        continue;

                long_hpd = long_mask & BIT(pin);

                drm_dbg(&dev_priv->drm,
                        "digital hpd on [ENCODER:%d:%s] - %s\n",
                        encoder->base.base.id, encoder->base.name,
                        long_hpd ? "long" : "short");
                queue_dig = true;

                if (long_hpd) {
                        long_hpd_pulse_mask |= BIT(pin);
                        dev_priv->hotplug.long_port_mask |= BIT(port);
                } else {
                        short_hpd_pulse_mask |= BIT(pin);
                        dev_priv->hotplug.short_port_mask |= BIT(port);
                }
        }

        /* Now process each pin just once */
        for_each_hpd_pin(pin) {
                bool long_hpd;

                if (!(BIT(pin) & pin_mask))
                        continue;

                if (dev_priv->hotplug.stats[pin].state == HPD_DISABLED) {
                        /*
                         * On GMCH platforms the interrupt mask bits only
                         * prevent irq generation, not the setting of the
                         * hotplug bits itself. So only WARN about unexpected
                         * interrupts on saner platforms.
                         */
                        drm_WARN_ONCE(&dev_priv->drm, !HAS_GMCH(dev_priv),
                                      "Received HPD interrupt on pin %d although disabled\n",
                                      pin);
                        continue;
                }

                if (dev_priv->hotplug.stats[pin].state != HPD_ENABLED)
                        continue;

                /*
                 * Delegate to ->hpd_pulse() if one of the encoders for this
                 * pin has it, otherwise let the hotplug_work deal with this
                 * pin directly.
                 */
                if (((short_hpd_pulse_mask | long_hpd_pulse_mask) & BIT(pin))) {
                        long_hpd = long_hpd_pulse_mask & BIT(pin);
                } else {
                        dev_priv->hotplug.event_bits |= BIT(pin);
                        long_hpd = true;
                        queue_hp = true;
                }

                if (intel_hpd_irq_storm_detect(dev_priv, pin, long_hpd)) {
                        dev_priv->hotplug.event_bits &= ~BIT(pin);
                        storm_detected = true;
                        queue_hp = true;
                }
        }

        /*
         * Disable any IRQs that storms were detected on. Polling enablement
         * happens later in our hotplug work.
         */
        if (storm_detected && dev_priv->display_irqs_enabled)
                dev_priv->display.hpd_irq_setup(dev_priv);
        spin_unlock(&dev_priv->irq_lock);

        /*
         * Our hotplug handler can grab modeset locks (by calling down into the
         * fb helpers). Hence it must not be run on our own dev-priv->wq work
         * queue, as otherwise the flush_work in the pageflip code will
         * deadlock.
         */
        if (queue_dig)
                queue_work(dev_priv->hotplug.dp_wq, &dev_priv->hotplug.dig_port_work);
        if (queue_hp)
                queue_delayed_work(system_wq, &dev_priv->hotplug.hotplug_work, 0);
}

/**
 * intel_hpd_init - initializes and enables hpd support
 * @dev_priv: i915 device instance
 *
 * This function enables the hotplug support. It requires that interrupts have
 * already been enabled with intel_irq_init_hw(). From this point on hotplug and
 * poll requests can run concurrently with other code, so locking rules must be
 * obeyed.
 *
 * This is a separate step from interrupt enabling to simplify the locking rules
 * in the driver load and resume code.
 *
 * Also see: intel_hpd_poll_init(), which enables connector polling.
 */
void intel_hpd_init(struct drm_i915_private *dev_priv)
{
        int i;

        for_each_hpd_pin(i) {
                dev_priv->hotplug.stats[i].count = 0;
                dev_priv->hotplug.stats[i].state = HPD_ENABLED;
        }

        WRITE_ONCE(dev_priv->hotplug.poll_enabled, false);
        schedule_work(&dev_priv->hotplug.poll_init_work);

        /*
         * Interrupt setup is already guaranteed to be single-threaded, this is
         * just to make the assert_spin_locked checks happy.
         */
        if (dev_priv->display_irqs_enabled && dev_priv->display.hpd_irq_setup) {
                spin_lock_irq(&dev_priv->irq_lock);
                if (dev_priv->display_irqs_enabled)
                        dev_priv->display.hpd_irq_setup(dev_priv);
                spin_unlock_irq(&dev_priv->irq_lock);
        }
}

static void i915_hpd_poll_init_work(struct work_struct *work)
{
        struct drm_i915_private *dev_priv =
                container_of(work, struct drm_i915_private,
                             hotplug.poll_init_work);
        struct drm_device *dev = &dev_priv->drm;
        struct drm_connector_list_iter conn_iter;
        struct intel_connector *connector;
        bool enabled;

        mutex_lock(&dev->mode_config.mutex);

        enabled = READ_ONCE(dev_priv->hotplug.poll_enabled);

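        /*
         * While polling is forced on, connectors that normally rely purely on
         * HPD are polled as well.
         */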
        drm_connector_list_iter_begin(dev, &conn_iter);
        for_each_intel_connector_iter(connector, &conn_iter) {
                enum hpd_pin pin;

                pin = intel_connector_hpd_pin(connector);
                if (pin == HPD_NONE)
                        continue;

                connector->base.polled = connector->polled;

                if (enabled && connector->base.polled == DRM_CONNECTOR_POLL_HPD)
                        connector->base.polled = DRM_CONNECTOR_POLL_CONNECT |
                                DRM_CONNECTOR_POLL_DISCONNECT;
        }
        drm_connector_list_iter_end(&conn_iter);

        if (enabled)
                drm_kms_helper_poll_enable(dev);

        mutex_unlock(&dev->mode_config.mutex);

        /*
         * We might have missed hotplug events that happened while we were
         * in the middle of disabling polling.
         */
        if (!enabled)
                drm_helper_hpd_irq_event(dev);
}

/**
 * intel_hpd_poll_init - enables/disables polling for connectors with hpd
 * @dev_priv: i915 device instance
 *
 * This function enables polling for all connectors, regardless of whether or
 * not they support hotplug detection. Under certain conditions HPD may not be
 * functional. On most Intel GPUs, this happens when we enter runtime suspend.
 * On Valleyview and Cherryview systems, this also happens when we shut off all
 * of the power wells.
 *
 * Since this function can get called in contexts where we're already holding
 * dev->mode_config.mutex, we do the actual hotplug enabling in a separate
 * worker.
 *
 * Also see: intel_hpd_init(), which restores hpd handling.
 */
void intel_hpd_poll_init(struct drm_i915_private *dev_priv)
{
        WRITE_ONCE(dev_priv->hotplug.poll_enabled, true);

        /*
         * We might already be holding dev->mode_config.mutex, so do this in a
         * separate worker. There is also no issue if we race here, since we
         * always reschedule this worker anyway.
         */
        schedule_work(&dev_priv->hotplug.poll_init_work);
}

void intel_hpd_init_work(struct drm_i915_private *dev_priv)
{
        INIT_DELAYED_WORK(&dev_priv->hotplug.hotplug_work,
                          i915_hotplug_work_func);
        INIT_WORK(&dev_priv->hotplug.dig_port_work, i915_digport_work_func);
        INIT_WORK(&dev_priv->hotplug.poll_init_work, i915_hpd_poll_init_work);
        INIT_DELAYED_WORK(&dev_priv->hotplug.reenable_work,
                          intel_hpd_irq_storm_reenable_work);
}

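/*
 * Drop all pending hotplug state and make sure none of the hotplug work items
 * are still running.
 */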
void intel_hpd_cancel_work(struct drm_i915_private *dev_priv)
{
        spin_lock_irq(&dev_priv->irq_lock);

        dev_priv->hotplug.long_port_mask = 0;
        dev_priv->hotplug.short_port_mask = 0;
        dev_priv->hotplug.event_bits = 0;
        dev_priv->hotplug.retry_bits = 0;

        spin_unlock_irq(&dev_priv->irq_lock);

        cancel_work_sync(&dev_priv->hotplug.dig_port_work);
        cancel_delayed_work_sync(&dev_priv->hotplug.hotplug_work);
        cancel_work_sync(&dev_priv->hotplug.poll_init_work);
        cancel_delayed_work_sync(&dev_priv->hotplug.reenable_work);
}

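/*
 * Mark @pin as disabled so that intel_hpd_irq_handler() ignores further events
 * on it. Returns true if the pin was enabled before this call.
 */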
bool intel_hpd_disable(struct drm_i915_private *dev_priv, enum hpd_pin pin)
{
        bool ret = false;

        if (pin == HPD_NONE)
                return false;

        spin_lock_irq(&dev_priv->irq_lock);
        if (dev_priv->hotplug.stats[pin].state == HPD_ENABLED) {
                dev_priv->hotplug.stats[pin].state = HPD_DISABLED;
                ret = true;
        }
        spin_unlock_irq(&dev_priv->irq_lock);

        return ret;
}

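/* Re-enable hotplug event processing for @pin. */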
void intel_hpd_enable(struct drm_i915_private *dev_priv, enum hpd_pin pin)
{
        if (pin == HPD_NONE)
                return;

        spin_lock_irq(&dev_priv->irq_lock);
        dev_priv->hotplug.stats[pin].state = HPD_ENABLED;
        spin_unlock_irq(&dev_priv->irq_lock);
}