Merge v6.5-rc1 into drm-misc-fixes
author     Maxime Ripard <mripard@kernel.org>
           Tue, 11 Jul 2023 07:23:20 +0000 (09:23 +0200)
committer  Maxime Ripard <mripard@kernel.org>
           Tue, 11 Jul 2023 07:23:20 +0000 (09:23 +0200)
Boris needs 6.5-rc1 in drm-misc-fixes to prevent a conflict.

Signed-off-by: Maxime Ripard <mripard@kernel.org>
drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
drivers/gpu/drm/bridge/ti-sn65dsi86.c
drivers/gpu/drm/drm_fbdev_dma.c
drivers/gpu/drm/nouveau/dispnv50/disp.c
drivers/gpu/drm/panel/panel-simple.c
drivers/gpu/drm/scheduler/sched_entity.c
drivers/gpu/drm/scheduler/sched_fence.c
drivers/gpu/drm/scheduler/sched_main.c
include/drm/gpu_scheduler.h

@@@ -533,7 -533,7 +533,7 @@@ static struct i2c_adapter *dw_hdmi_i2c_
        adap->owner = THIS_MODULE;
        adap->dev.parent = hdmi->dev;
        adap->algo = &dw_hdmi_algorithm;
-       strlcpy(adap->name, "DesignWare HDMI", sizeof(adap->name));
+       strscpy(adap->name, "DesignWare HDMI", sizeof(adap->name));
        i2c_set_adapdata(adap, hdmi);
  
        ret = i2c_add_adapter(adap);
@@@ -1426,9 -1426,9 +1426,9 @@@ void dw_hdmi_set_high_tmds_clock_ratio(
        /* Control for TMDS Bit Period/TMDS Clock-Period Ratio */
        if (dw_hdmi_support_scdc(hdmi, display)) {
                if (mtmdsclock > HDMI14_MAX_TMDSCLK)
 -                      drm_scdc_set_high_tmds_clock_ratio(&hdmi->connector, 1);
 +                      drm_scdc_set_high_tmds_clock_ratio(hdmi->curr_conn, 1);
                else
 -                      drm_scdc_set_high_tmds_clock_ratio(&hdmi->connector, 0);
 +                      drm_scdc_set_high_tmds_clock_ratio(hdmi->curr_conn, 0);
        }
  }
  EXPORT_SYMBOL_GPL(dw_hdmi_set_high_tmds_clock_ratio);
@@@ -2116,7 -2116,7 +2116,7 @@@ static void hdmi_av_composer(struct dw_
                                min_t(u8, bytes, SCDC_MIN_SOURCE_VERSION));
  
                        /* Enabled Scrambling in the Sink */
 -                      drm_scdc_set_scrambling(&hdmi->connector, 1);
 +                      drm_scdc_set_scrambling(hdmi->curr_conn, 1);
  
                        /*
                         * To activate the scrambler feature, you must ensure
                        hdmi_writeb(hdmi, 0, HDMI_FC_SCRAMBLER_CTRL);
                        hdmi_writeb(hdmi, (u8)~HDMI_MC_SWRSTZ_TMDSSWRST_REQ,
                                    HDMI_MC_SWRSTZ);
 -                      drm_scdc_set_scrambling(&hdmi->connector, 0);
 +                      drm_scdc_set_scrambling(hdmi->curr_conn, 0);
                }
        }
  
@@@ -3553,7 -3553,6 +3553,7 @@@ struct dw_hdmi *dw_hdmi_probe(struct pl
        hdmi->bridge.ops = DRM_BRIDGE_OP_DETECT | DRM_BRIDGE_OP_EDID
                         | DRM_BRIDGE_OP_HPD;
        hdmi->bridge.interlace_allowed = true;
 +      hdmi->bridge.ddc = hdmi->ddc;
  #ifdef CONFIG_OF
        hdmi->bridge.of_node = pdev->dev.of_node;
  #endif
   * @pwm_refclk_freq: Cache for the reference clock input to the PWM.
   */
  struct ti_sn65dsi86 {
 -      struct auxiliary_device         bridge_aux;
 -      struct auxiliary_device         gpio_aux;
 -      struct auxiliary_device         aux_aux;
 -      struct auxiliary_device         pwm_aux;
 +      struct auxiliary_device         *bridge_aux;
 +      struct auxiliary_device         *gpio_aux;
 +      struct auxiliary_device         *aux_aux;
 +      struct auxiliary_device         *pwm_aux;
  
        struct device                   *dev;
        struct regmap                   *regmap;
@@@ -468,34 -468,27 +468,34 @@@ static void ti_sn65dsi86_delete_aux(voi
        auxiliary_device_delete(data);
  }
  
 -/*
 - * AUX bus docs say that a non-NULL release is mandatory, but it makes no
 - * sense for the model used here where all of the aux devices are allocated
 - * in the single shared structure. We'll use this noop as a workaround.
 - */
 -static void ti_sn65dsi86_noop(struct device *dev) {}
 +static void ti_sn65dsi86_aux_device_release(struct device *dev)
 +{
 +      struct auxiliary_device *aux = container_of(dev, struct auxiliary_device, dev);
 +
 +      kfree(aux);
 +}
  
  static int ti_sn65dsi86_add_aux_device(struct ti_sn65dsi86 *pdata,
 -                                     struct auxiliary_device *aux,
 +                                     struct auxiliary_device **aux_out,
                                       const char *name)
  {
        struct device *dev = pdata->dev;
 +      struct auxiliary_device *aux;
        int ret;
  
 +      aux = kzalloc(sizeof(*aux), GFP_KERNEL);
 +      if (!aux)
 +              return -ENOMEM;
 +
        aux->name = name;
        aux->dev.parent = dev;
 -      aux->dev.release = ti_sn65dsi86_noop;
 +      aux->dev.release = ti_sn65dsi86_aux_device_release;
        device_set_of_node_from_dev(&aux->dev, dev);
        ret = auxiliary_device_init(aux);
 -      if (ret)
 +      if (ret) {
 +              kfree(aux);
                return ret;
 +      }
        ret = devm_add_action_or_reset(dev, ti_sn65dsi86_uninit_aux, aux);
        if (ret)
                return ret;
        if (ret)
                return ret;
        ret = devm_add_action_or_reset(dev, ti_sn65dsi86_delete_aux, aux);
 +      if (!ret)
 +              *aux_out = aux;
  
        return ret;
  }
@@@ -631,6 -622,24 +631,24 @@@ exit
        return len;
  }
  
+ static int ti_sn_aux_wait_hpd_asserted(struct drm_dp_aux *aux, unsigned long wait_us)
+ {
+       /*
+        * The HPD in this chip is a bit useless (See comment in
+        * ti_sn65dsi86_enable_comms) so if our driver is expected to wait
+        * for HPD, we just assume it's asserted after the wait_us delay.
+        *
+        * In case we are asked to wait forever (wait_us=0) take conservative
+        * 500ms delay.
+        */
+       if (wait_us == 0)
+       wait_us = 500000;
+
+       usleep_range(wait_us, wait_us + 1000);
+
+       return 0;
+ }
+
  static int ti_sn_aux_probe(struct auxiliary_device *adev,
                           const struct auxiliary_device_id *id)
  {
        pdata->aux.name = "ti-sn65dsi86-aux";
        pdata->aux.dev = &adev->dev;
        pdata->aux.transfer = ti_sn_aux_transfer;
+       pdata->aux.wait_hpd_asserted = ti_sn_aux_wait_hpd_asserted;
        drm_dp_aux_init(&pdata->aux);
  
        ret = devm_of_dp_aux_populate_ep_devices(&pdata->aux);
@@@ -1964,7 -1974,7 +1983,7 @@@ static struct i2c_driver ti_sn65dsi86_d
                .of_match_table = ti_sn65dsi86_match_table,
                .pm = &ti_sn65dsi86_pm_ops,
        },
-       .probe_new = ti_sn65dsi86_probe,
+       .probe = ti_sn65dsi86_probe,
        .id_table = ti_sn65dsi86_id,
  };
  
@@@ -1,5 -1,7 +1,7 @@@
  // SPDX-License-Identifier: MIT
  
+ #include <linux/fb.h>
+
  #include <drm/drm_crtc_helper.h>
  #include <drm/drm_drv.h>
  #include <drm/drm_fb_helper.h>
@@@ -64,14 -66,11 +66,11 @@@ static const struct fb_ops drm_fbdev_dm
        .owner = THIS_MODULE,
        .fb_open = drm_fbdev_dma_fb_open,
        .fb_release = drm_fbdev_dma_fb_release,
-       .fb_read = drm_fb_helper_sys_read,
-       .fb_write = drm_fb_helper_sys_write,
+       __FB_DEFAULT_SYS_OPS_RDWR,
        DRM_FB_HELPER_DEFAULT_OPS,
-       .fb_fillrect = drm_fb_helper_sys_fillrect,
-       .fb_copyarea = drm_fb_helper_sys_copyarea,
-       .fb_imageblit = drm_fb_helper_sys_imageblit,
-       .fb_destroy = drm_fbdev_dma_fb_destroy,
+       __FB_DEFAULT_SYS_OPS_DRAW,
        .fb_mmap = drm_fbdev_dma_fb_mmap,
+       .fb_destroy = drm_fbdev_dma_fb_destroy,
  };
  
  /*
@@@ -218,7 -217,7 +217,7 @@@ static const struct drm_client_funcs dr
   * drm_fbdev_dma_setup() - Setup fbdev emulation for GEM DMA helpers
   * @dev: DRM device
   * @preferred_bpp: Preferred bits per pixel for the device.
 - *                 @dev->mode_config.preferred_depth is used if this is zero.
 + *                 32 is used if this is zero.
   *
   * This function sets up fbdev emulation for GEM DMA drivers that support
   * dumb buffers with a virtual address and that can be mmap'ed.
@@@ -64,6 -64,7 +64,7 @@@
  #include "nouveau_connector.h"
  #include "nouveau_encoder.h"
  #include "nouveau_fence.h"
+ #include "nv50_display.h"
  
  #include <subdev/bios/dp.h>
  
@@@ -909,19 -910,15 +910,19 @@@ nv50_msto_prepare(struct drm_atomic_sta
        struct nouveau_drm *drm = nouveau_drm(msto->encoder.dev);
        struct nv50_mstc *mstc = msto->mstc;
        struct nv50_mstm *mstm = mstc->mstm;
 -      struct drm_dp_mst_atomic_payload *payload;
 +      struct drm_dp_mst_topology_state *old_mst_state;
 +      struct drm_dp_mst_atomic_payload *payload, *old_payload;
  
        NV_ATOMIC(drm, "%s: msto prepare\n", msto->encoder.name);
  
 +      old_mst_state = drm_atomic_get_old_mst_topology_state(state, mgr);
 +
        payload = drm_atomic_get_mst_payload_state(mst_state, mstc->port);
 +      old_payload = drm_atomic_get_mst_payload_state(old_mst_state, mstc->port);
  
        // TODO: Figure out if we want to do a better job of handling VCPI allocation failures here?
        if (msto->disabled) {
 -              drm_dp_remove_payload(mgr, mst_state, payload, payload);
 +              drm_dp_remove_payload(mgr, mst_state, old_payload, payload);
  
                nvif_outp_dp_mst_vcpi(&mstm->outp->outp, msto->head->base.index, 0, 0, 0, 0);
        } else {
@@@ -1362,22 -1359,26 +1363,26 @@@ nv50_mstm_service(struct nouveau_drm *d
        u8 esi[8] = {};
  
        while (handled) {
+               u8 ack[8] = {};
+
                rc = drm_dp_dpcd_read(aux, DP_SINK_COUNT_ESI, esi, 8);
                if (rc != 8) {
                        ret = false;
                        break;
                }
  
-               drm_dp_mst_hpd_irq(&mstm->mgr, esi, &handled);
+               drm_dp_mst_hpd_irq_handle_event(&mstm->mgr, esi, ack, &handled);
                if (!handled)
                        break;
  
-               rc = drm_dp_dpcd_write(aux, DP_SINK_COUNT_ESI + 1, &esi[1],
-                                      3);
-               if (rc != 3) {
+               rc = drm_dp_dpcd_writeb(aux, DP_SINK_COUNT_ESI + 1, ack[1]);
+               if (rc != 1) {
                        ret = false;
                        break;
                }
+
+               drm_dp_mst_hpd_irq_send_new_request(&mstm->mgr);
        }
  
        if (!ret)
@@@ -759,8 -759,8 +759,8 @@@ static const struct panel_desc ampire_a
        .num_modes = 1,
        .bpc = 8,
        .size = {
-               .width = 105,
-               .height = 67,
+               .width = 99,
+               .height = 58,
        },
        .bus_format = MEDIA_BUS_FMT_RGB888_1X24,
  };
@@@ -778,6 -778,36 +778,36 @@@ static const struct drm_display_mode am
        .flags = DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC,
  };
  
+ static const struct display_timing ampire_am_800480l1tmqw_t00h_timing = {
+       .pixelclock = { 29930000, 33260000, 36590000 },
+       .hactive = { 800, 800, 800 },
+       .hfront_porch = { 1, 40, 168 },
+       .hback_porch = { 88, 88, 88 },
+       .hsync_len = { 1, 128, 128 },
+       .vactive = { 480, 480, 480 },
+       .vfront_porch = { 1, 35, 37 },
+       .vback_porch = { 8, 8, 8 },
+       .vsync_len = { 1, 2, 2 },
+       .flags = DISPLAY_FLAGS_HSYNC_LOW | DISPLAY_FLAGS_VSYNC_LOW |
+                DISPLAY_FLAGS_DE_HIGH | DISPLAY_FLAGS_PIXDATA_POSEDGE |
+                DISPLAY_FLAGS_SYNC_POSEDGE,
+ };
+
+ static const struct panel_desc ampire_am_800480l1tmqw_t00h = {
+       .timings = &ampire_am_800480l1tmqw_t00h_timing,
+       .num_timings = 1,
+       .bpc = 8,
+       .size = {
+               .width = 111,
+               .height = 67,
+       },
+       .bus_format = MEDIA_BUS_FMT_RGB888_1X24,
+       .bus_flags = DRM_BUS_FLAG_DE_HIGH |
+                    DRM_BUS_FLAG_PIXDATA_SAMPLE_NEGEDGE |
+                    DRM_BUS_FLAG_SYNC_SAMPLE_NEGEDGE,
+       .connector_type = DRM_MODE_CONNECTOR_DPI,
+ };
+
  static const struct panel_desc ampire_am800480r3tmqwa1h = {
        .modes = &ampire_am800480r3tmqwa1h_mode,
        .num_modes = 1,
@@@ -1211,6 -1241,37 +1241,37 @@@ static const struct panel_desc bananapi
        },
  };
  
+ static const struct display_timing boe_ev121wxm_n10_1850_timing = {
+       .pixelclock = { 69922000, 71000000, 72293000 },
+       .hactive = { 1280, 1280, 1280 },
+       .hfront_porch = { 48, 48, 48 },
+       .hback_porch = { 80, 80, 80 },
+       .hsync_len = { 32, 32, 32 },
+       .vactive = { 800, 800, 800 },
+       .vfront_porch = { 3, 3, 3 },
+       .vback_porch = { 14, 14, 14 },
+       .vsync_len = { 6, 6, 6 },
+ };
+
+ static const struct panel_desc boe_ev121wxm_n10_1850 = {
+       .timings = &boe_ev121wxm_n10_1850_timing,
+       .num_timings = 1,
+       .bpc = 8,
+       .size = {
+               .width = 261,
+               .height = 163,
+       },
+       .delay = {
+               .prepare = 9,
+               .enable = 300,
+               .unprepare = 300,
+               .disable = 560,
+       },
+       .bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
+       .bus_flags = DRM_BUS_FLAG_DE_HIGH,
+       .connector_type = DRM_MODE_CONNECTOR_LVDS,
+ };
+
  static const struct drm_display_mode boe_hv070wsa_mode = {
        .clock = 42105,
        .hdisplay = 1024,
@@@ -2117,7 -2178,6 +2178,7 @@@ static const struct panel_desc innolux_
                .height = 54,
        },
        .bus_format = MEDIA_BUS_FMT_RGB888_1X24,
 +      .connector_type = DRM_MODE_CONNECTOR_DPI,
        .bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE,
  };
  
@@@ -2143,6 -2203,38 +2204,38 @@@ static const struct panel_desc innolux_
        .bus_format = MEDIA_BUS_FMT_RGB888_1X24,
  };
  
+ static const struct display_timing innolux_g070ace_l01_timing = {
+       .pixelclock = { 25200000, 35000000, 35700000 },
+       .hactive = { 800, 800, 800 },
+       .hfront_porch = { 30, 32, 87 },
+       .hback_porch = { 30, 32, 87 },
+       .hsync_len = { 1, 1, 1 },
+       .vactive = { 480, 480, 480 },
+       .vfront_porch = { 3, 3, 3 },
+       .vback_porch = { 13, 13, 13 },
+       .vsync_len = { 1, 1, 4 },
+       .flags = DISPLAY_FLAGS_DE_HIGH,
+ };
+
+ static const struct panel_desc innolux_g070ace_l01 = {
+       .timings = &innolux_g070ace_l01_timing,
+       .num_timings = 1,
+       .bpc = 8,
+       .size = {
+               .width = 152,
+               .height = 91,
+       },
+       .delay = {
+               .prepare = 10,
+               .enable = 50,
+               .disable = 50,
+               .unprepare = 500,
+       },
+       .bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
+       .bus_flags = DRM_BUS_FLAG_DE_HIGH,
+       .connector_type = DRM_MODE_CONNECTOR_LVDS,
+ };
+
  static const struct display_timing innolux_g070y2_l01_timing = {
        .pixelclock = { 28000000, 29500000, 32000000 },
        .hactive = { 800, 800, 800 },
@@@ -3110,7 -3202,6 +3203,7 @@@ static const struct drm_display_mode po
        .vsync_start = 480 + 49,
        .vsync_end = 480 + 49 + 2,
        .vtotal = 480 + 49 + 2 + 22,
 +      .flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC,
  };
  
  static const struct panel_desc powertip_ph800480t013_idf02  = {
@@@ -3190,6 -3281,32 +3283,32 @@@ static const struct panel_desc qishengl
        .connector_type = DRM_MODE_CONNECTOR_DPI,
  };
  
+ static const struct display_timing rocktech_rk043fn48h_timing = {
+       .pixelclock = { 6000000, 9000000, 12000000 },
+       .hactive = { 480, 480, 480 },
+       .hback_porch = { 8, 43, 43 },
+       .hfront_porch = { 2, 8, 8 },
+       .hsync_len = { 1, 1, 1 },
+       .vactive = { 272, 272, 272 },
+       .vback_porch = { 2, 12, 12 },
+       .vfront_porch = { 1, 4, 4 },
+       .vsync_len = { 1, 10, 10 },
+       .flags = DISPLAY_FLAGS_VSYNC_LOW | DISPLAY_FLAGS_HSYNC_LOW |
+                DISPLAY_FLAGS_DE_HIGH | DISPLAY_FLAGS_PIXDATA_POSEDGE,
+ };
+
+ static const struct panel_desc rocktech_rk043fn48h = {
+       .timings = &rocktech_rk043fn48h_timing,
+       .num_timings = 1,
+       .bpc = 8,
+       .size = {
+               .width = 95,
+               .height = 54,
+       },
+       .bus_format = MEDIA_BUS_FMT_RGB888_1X24,
+       .connector_type = DRM_MODE_CONNECTOR_DPI,
+ };
+
  static const struct display_timing rocktech_rk070er9427_timing = {
        .pixelclock = { 26400000, 33300000, 46800000 },
        .hactive = { 800, 800, 800 },
@@@ -3932,6 -4049,9 +4051,9 @@@ static const struct of_device_id platfo
        }, {
                .compatible = "ampire,am-480272h3tmqw-t01h",
                .data = &ampire_am_480272h3tmqw_t01h,
+       }, {
+               .compatible = "ampire,am-800480l1tmqw-t00h",
+               .data = &ampire_am_800480l1tmqw_t00h,
        }, {
                .compatible = "ampire,am800480r3tmqwa1h",
                .data = &ampire_am800480r3tmqwa1h,
        }, {
                .compatible = "bananapi,s070wv20-ct16",
                .data = &bananapi_s070wv20_ct16,
+       }, {
+               .compatible = "boe,ev121wxm-n10-1850",
+               .data = &boe_ev121wxm_n10_1850,
        }, {
                .compatible = "boe,hv070wsa-100",
                .data = &boe_hv070wsa
        }, {
                .compatible = "innolux,at070tn92",
                .data = &innolux_at070tn92,
+       }, {
+               .compatible = "innolux,g070ace-l01",
+               .data = &innolux_g070ace_l01,
        }, {
                .compatible = "innolux,g070y2-l01",
                .data = &innolux_g070y2_l01,
        }, {
                .compatible = "qishenglong,gopher2b-lcd",
                .data = &qishenglong_gopher2b_lcd,
+       }, {
+               .compatible = "rocktech,rk043fn48h",
+               .data = &rocktech_rk043fn48h,
        }, {
                .compatible = "rocktech,rk070er9427",
                .data = &rocktech_rk070er9427,
@@@ -72,7 -72,7 +72,7 @@@ int drm_sched_entity_init(struct drm_sc
        entity->num_sched_list = num_sched_list;
        entity->priority = priority;
        entity->sched_list = num_sched_list > 1 ? sched_list : NULL;
-       entity->last_scheduled = NULL;
+       RCU_INIT_POINTER(entity->last_scheduled, NULL);
        RB_CLEAR_NODE(&entity->rb_tree_node);
  
        if(num_sched_list)
@@@ -140,11 -140,32 +140,32 @@@ bool drm_sched_entity_is_ready(struct d
        return true;
  }
  
+ /**
+  * drm_sched_entity_error - return error of last scheduled job
+  * @entity: scheduler entity to check
+  *
+  * Opportunistically return the error of the last scheduled job. Result can
+  * change any time when new jobs are pushed to the hw.
+  */
+ int drm_sched_entity_error(struct drm_sched_entity *entity)
+ {
+       struct dma_fence *fence;
+       int r;
+
+       rcu_read_lock();
+       fence = rcu_dereference(entity->last_scheduled);
+       r = fence ? fence->error : 0;
+       rcu_read_unlock();
+
+       return r;
+ }
+ EXPORT_SYMBOL(drm_sched_entity_error);
+
  static void drm_sched_entity_kill_jobs_work(struct work_struct *wrk)
  {
        struct drm_sched_job *job = container_of(wrk, typeof(*job), work);
  
-       drm_sched_fence_finished(job->s_fence);
+       drm_sched_fence_finished(job->s_fence, -ESRCH);
        WARN_ON(job->s_fence->parent);
        job->sched->ops->free_job(job);
  }
@@@ -155,32 -176,16 +176,32 @@@ static void drm_sched_entity_kill_jobs_
  {
        struct drm_sched_job *job = container_of(cb, struct drm_sched_job,
                                                 finish_cb);
 -      int r;
 +      unsigned long index;
  
        dma_fence_put(f);
  
        /* Wait for all dependencies to avoid data corruptions */
 -      while (!xa_empty(&job->dependencies)) {
 -              f = xa_erase(&job->dependencies, job->last_dependency++);
 -              r = dma_fence_add_callback(f, &job->finish_cb,
 -                                         drm_sched_entity_kill_jobs_cb);
 -              if (!r)
 +      xa_for_each(&job->dependencies, index, f) {
 +              struct drm_sched_fence *s_fence = to_drm_sched_fence(f);
 +
 +              if (s_fence && f == &s_fence->scheduled) {
 +                      /* The dependencies array had a reference on the scheduled
 +                       * fence, and the finished fence refcount might have
 +                       * dropped to zero. Use dma_fence_get_rcu() so we get
 +                       * a NULL fence in that case.
 +                       */
 +                      f = dma_fence_get_rcu(&s_fence->finished);
 +
 +                      /* Now that we have a reference on the finished fence,
 +                       * we can release the reference the dependencies array
 +                       * had on the scheduled fence.
 +                       */
 +                      dma_fence_put(&s_fence->scheduled);
 +              }
 +
 +              xa_erase(&job->dependencies, index);
 +              if (f && !dma_fence_add_callback(f, &job->finish_cb,
 +                                               drm_sched_entity_kill_jobs_cb))
                        return;
  
                dma_fence_put(f);
@@@ -207,12 -212,12 +228,12 @@@ static void drm_sched_entity_kill(struc
        /* Make sure this entity is not used by the scheduler at the moment */
        wait_for_completion(&entity->entity_idle);
  
-       prev = dma_fence_get(entity->last_scheduled);
+       /* The entity is guaranteed to not be used by the scheduler */
+       prev = rcu_dereference_check(entity->last_scheduled, true);
+       dma_fence_get(prev);
        while ((job = to_drm_sched_job(spsc_queue_pop(&entity->job_queue)))) {
                struct drm_sched_fence *s_fence = job->s_fence;
  
-               dma_fence_set_error(&s_fence->finished, -ESRCH);
                dma_fence_get(&s_fence->finished);
                if (!prev || dma_fence_add_callback(prev, &job->finish_cb,
                                           drm_sched_entity_kill_jobs_cb))
@@@ -296,8 -301,8 +317,8 @@@ void drm_sched_entity_fini(struct drm_s
                entity->dependency = NULL;
        }
  
-       dma_fence_put(entity->last_scheduled);
-       entity->last_scheduled = NULL;
+       dma_fence_put(rcu_dereference_check(entity->last_scheduled, true));
+       RCU_INIT_POINTER(entity->last_scheduled, NULL);
  }
  EXPORT_SYMBOL(drm_sched_entity_fini);
  
@@@ -337,7 -342,7 +358,7 @@@ static void drm_sched_entity_wakeup(str
                container_of(cb, struct drm_sched_entity, cb);
  
        drm_sched_entity_clear_dep(f, cb);
-       drm_sched_wakeup(entity->rq->sched);
+       drm_sched_wakeup_if_can_queue(entity->rq->sched);
  }
  
  /**
@@@ -379,7 -384,7 +400,7 @@@ static bool drm_sched_entity_add_depend
        }
  
        s_fence = to_drm_sched_fence(fence);
-       if (s_fence && s_fence->sched == sched &&
+       if (!fence->error && s_fence && s_fence->sched == sched &&
            !test_bit(DRM_SCHED_FENCE_DONT_PIPELINE, &fence->flags)) {
  
                /*
@@@ -410,17 -415,8 +431,17 @@@ static struct dma_fence 
  drm_sched_job_dependency(struct drm_sched_job *job,
                         struct drm_sched_entity *entity)
  {
 -      if (!xa_empty(&job->dependencies))
 -              return xa_erase(&job->dependencies, job->last_dependency++);
 +      struct dma_fence *f;
 +
 +      /* We keep the fence around, so we can iterate over all dependencies
 +       * in drm_sched_entity_kill_jobs_cb() to ensure all deps are signaled
 +       * before killing the job.
 +       */
 +      f = xa_load(&job->dependencies, job->last_dependency);
 +      if (f) {
 +              job->last_dependency++;
 +              return dma_fence_get(f);
 +      }
  
        if (job->sched->ops->prepare_job)
                return job->sched->ops->prepare_job(job, entity);
@@@ -448,9 -444,9 +469,9 @@@ struct drm_sched_job *drm_sched_entity_
        if (entity->guilty && atomic_read(entity->guilty))
                dma_fence_set_error(&sched_job->s_fence->finished, -ECANCELED);
  
-       dma_fence_put(entity->last_scheduled);
-       entity->last_scheduled = dma_fence_get(&sched_job->s_fence->finished);
+       dma_fence_put(rcu_dereference_check(entity->last_scheduled, true));
+       rcu_assign_pointer(entity->last_scheduled,
+                          dma_fence_get(&sched_job->s_fence->finished));
  
        /*
         * If the queue is empty we allow drm_sched_entity_select_rq() to
                        drm_sched_rq_update_fifo(entity, next->submit_ts);
        }
  
+       /* Jobs and entities might have different lifecycles. Since we're
+        * removing the job from the entities queue, set the jobs entity pointer
+        * to NULL to prevent any future access of the entity through this job.
+        */
+       sched_job->entity = NULL;
        return sched_job;
  }
  
@@@ -498,7 -500,7 +525,7 @@@ void drm_sched_entity_select_rq(struct 
         */
        smp_rmb();
  
-       fence = entity->last_scheduled;
+       fence = rcu_dereference_check(entity->last_scheduled, true);
  
        /* stay on the same engine if the previous job hasn't finished */
        if (fence && !dma_fence_is_signaled(fence))
@@@ -563,7 -565,7 +590,7 @@@ void drm_sched_entity_push_job(struct d
                if (drm_sched_policy == DRM_SCHED_POLICY_FIFO)
                        drm_sched_rq_update_fifo(entity, submit_ts);
  
-               drm_sched_wakeup(entity->rq->sched);
+               drm_sched_wakeup_if_can_queue(entity->rq->sched);
        }
  }
  EXPORT_SYMBOL(drm_sched_entity_push_job);
@@@ -48,37 -48,15 +48,39 @@@ static void __exit drm_sched_fence_slab
        kmem_cache_destroy(sched_fence_slab);
  }
  
 -void drm_sched_fence_scheduled(struct drm_sched_fence *fence)
 +static void drm_sched_fence_set_parent(struct drm_sched_fence *s_fence,
 +                                     struct dma_fence *fence)
  {
 +      /*
 +       * smp_store_release() to ensure another thread racing us
 +       * in drm_sched_fence_set_deadline_finished() sees the
 +       * fence's parent set before test_bit()
 +       */
 +      smp_store_release(&s_fence->parent, dma_fence_get(fence));
 +      if (test_bit(DRM_SCHED_FENCE_FLAG_HAS_DEADLINE_BIT,
 +                   &s_fence->finished.flags))
 +              dma_fence_set_deadline(fence, s_fence->deadline);
 +}
 +
 +void drm_sched_fence_scheduled(struct drm_sched_fence *fence,
 +                             struct dma_fence *parent)
 +{
 +      /* Set the parent before signaling the scheduled fence, such that,
 +       * any waiter expecting the parent to be filled after the job has
 +       * been scheduled (which is the case for drivers delegating waits
 +       * to some firmware) doesn't have to busy wait for parent to show
 +       * up.
 +       */
 +      if (!IS_ERR_OR_NULL(parent))
 +              drm_sched_fence_set_parent(fence, parent);
 +
        dma_fence_signal(&fence->scheduled);
  }
  
- void drm_sched_fence_finished(struct drm_sched_fence *fence)
+ void drm_sched_fence_finished(struct drm_sched_fence *fence, int result)
  {
+       if (result)
+               dma_fence_set_error(&fence->finished, result);
        dma_fence_signal(&fence->finished);
  }
  
@@@ -203,6 -181,20 +205,6 @@@ struct drm_sched_fence *to_drm_sched_fe
  }
  EXPORT_SYMBOL(to_drm_sched_fence);
  
 -void drm_sched_fence_set_parent(struct drm_sched_fence *s_fence,
 -                              struct dma_fence *fence)
 -{
 -      /*
 -       * smp_store_release() to ensure another thread racing us
 -       * in drm_sched_fence_set_deadline_finished() sees the
 -       * fence's parent set before test_bit()
 -       */
 -      smp_store_release(&s_fence->parent, dma_fence_get(fence));
 -      if (test_bit(DRM_SCHED_FENCE_FLAG_HAS_DEADLINE_BIT,
 -                   &s_fence->finished.flags))
 -              dma_fence_set_deadline(fence, s_fence->deadline);
 -}
 -
  struct drm_sched_fence *drm_sched_fence_alloc(struct drm_sched_entity *entity,
                                              void *owner)
  {
   *    the hardware.
   *
   * The jobs in a entity are always scheduled in the order that they were pushed.
+  *
+  * Note that once a job was taken from the entities queue and pushed to the
+  * hardware, i.e. the pending queue, the entity must not be referenced anymore
+  * through the jobs entity pointer.
   */
  
  #include <linux/kthread.h>
@@@ -258,7 -262,7 +262,7 @@@ drm_sched_rq_select_entity_fifo(struct 
   *
   * Finish the job's fence and wake up the worker thread.
   */
- static void drm_sched_job_done(struct drm_sched_job *s_job)
+ static void drm_sched_job_done(struct drm_sched_job *s_job, int result)
  {
        struct drm_sched_fence *s_fence = s_job->s_fence;
        struct drm_gpu_scheduler *sched = s_fence->sched;
        trace_drm_sched_process_job(s_fence);
  
        dma_fence_get(&s_fence->finished);
-       drm_sched_fence_finished(s_fence);
+       drm_sched_fence_finished(s_fence, result);
        dma_fence_put(&s_fence->finished);
        wake_up_interruptible(&sched->wake_up_worker);
  }
@@@ -283,7 -287,7 +287,7 @@@ static void drm_sched_job_done_cb(struc
  {
        struct drm_sched_job *s_job = container_of(cb, struct drm_sched_job, cb);
  
-       drm_sched_job_done(s_job);
+       drm_sched_job_done(s_job, f->error);
  }
  
  /**
@@@ -534,12 -538,12 +538,12 @@@ void drm_sched_start(struct drm_gpu_sch
                        r = dma_fence_add_callback(fence, &s_job->cb,
                                                   drm_sched_job_done_cb);
                        if (r == -ENOENT)
-                               drm_sched_job_done(s_job);
+                               drm_sched_job_done(s_job, fence->error);
                        else if (r)
                                DRM_DEV_ERROR(sched->dev, "fence add callback failed (%d)\n",
                                          r);
                } else
-                       drm_sched_job_done(s_job);
+                       drm_sched_job_done(s_job, -ECANCELED);
        }
  
        if (full_recovery) {
@@@ -844,27 -848,26 +848,26 @@@ void drm_sched_job_cleanup(struct drm_s
  EXPORT_SYMBOL(drm_sched_job_cleanup);
  
  /**
-  * drm_sched_ready - is the scheduler ready
-  *
+  * drm_sched_can_queue -- Can we queue more to the hardware?
   * @sched: scheduler instance
   *
   * Return true if we can push more jobs to the hw, otherwise false.
   */
- static bool drm_sched_ready(struct drm_gpu_scheduler *sched)
+ static bool drm_sched_can_queue(struct drm_gpu_scheduler *sched)
  {
        return atomic_read(&sched->hw_rq_count) <
                sched->hw_submission_limit;
  }
  
  /**
-  * drm_sched_wakeup - Wake up the scheduler when it is ready
-  *
+  * drm_sched_wakeup_if_can_queue - Wake up the scheduler
   * @sched: scheduler instance
   *
+  * Wake up the scheduler if we can queue jobs.
   */
- void drm_sched_wakeup(struct drm_gpu_scheduler *sched)
+ void drm_sched_wakeup_if_can_queue(struct drm_gpu_scheduler *sched)
  {
-       if (drm_sched_ready(sched))
+       if (drm_sched_can_queue(sched))
                wake_up_interruptible(&sched->wake_up_worker);
  }
  
@@@ -881,7 -884,7 +884,7 @@@ drm_sched_select_entity(struct drm_gpu_
        struct drm_sched_entity *entity;
        int i;
  
-       if (!drm_sched_ready(sched))
+       if (!drm_sched_can_queue(sched))
                return NULL;
  
        /* Kernel run queue has higher priority than normal run queue*/
@@@ -1040,24 -1043,23 +1043,22 @@@ static int drm_sched_main(void *param
                trace_drm_run_job(sched_job, entity);
                fence = sched->ops->run_job(sched_job);
                complete_all(&entity->entity_idle);
 -              drm_sched_fence_scheduled(s_fence);
 +              drm_sched_fence_scheduled(s_fence, fence);
  
                if (!IS_ERR_OR_NULL(fence)) {
 -                      drm_sched_fence_set_parent(s_fence, fence);
                        /* Drop for original kref_init of the fence */
                        dma_fence_put(fence);
  
                        r = dma_fence_add_callback(fence, &sched_job->cb,
                                                   drm_sched_job_done_cb);
                        if (r == -ENOENT)
-                               drm_sched_job_done(sched_job);
+                               drm_sched_job_done(sched_job, fence->error);
                        else if (r)
                                DRM_DEV_ERROR(sched->dev, "fence add callback failed (%d)\n",
                                          r);
                } else {
-                       if (IS_ERR(fence))
-                               dma_fence_set_error(&s_fence->finished, PTR_ERR(fence));
-                       drm_sched_job_done(sched_job);
+                       drm_sched_job_done(sched_job, IS_ERR(fence) ?
+                                          PTR_ERR(fence) : 0);
                }
  
                wake_up(&sched->job_scheduled);
@@@ -201,7 -201,7 +201,7 @@@ struct drm_sched_entity 
         * by the scheduler thread, can be accessed locklessly from
         * drm_sched_job_arm() iff the queue is empty.
         */
-       struct dma_fence                *last_scheduled;
+       struct dma_fence __rcu          *last_scheduled;
  
        /**
         * @last_user: last group leader pushing a job into the entity.
@@@ -549,7 -549,7 +549,7 @@@ void drm_sched_entity_modify_sched(stru
                                     unsigned int num_sched_list);
  
  void drm_sched_job_cleanup(struct drm_sched_job *job);
- void drm_sched_wakeup(struct drm_gpu_scheduler *sched);
+ void drm_sched_wakeup_if_can_queue(struct drm_gpu_scheduler *sched);
  void drm_sched_stop(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad);
  void drm_sched_start(struct drm_gpu_scheduler *sched, bool full_recovery);
  void drm_sched_resubmit_jobs(struct drm_gpu_scheduler *sched);
@@@ -581,16 -581,18 +581,17 @@@ void drm_sched_entity_push_job(struct d
  void drm_sched_entity_set_priority(struct drm_sched_entity *entity,
                                   enum drm_sched_priority priority);
  bool drm_sched_entity_is_ready(struct drm_sched_entity *entity);
+ int drm_sched_entity_error(struct drm_sched_entity *entity);
  
 -void drm_sched_fence_set_parent(struct drm_sched_fence *s_fence,
 -                              struct dma_fence *fence);
  struct drm_sched_fence *drm_sched_fence_alloc(
        struct drm_sched_entity *s_entity, void *owner);
  void drm_sched_fence_init(struct drm_sched_fence *fence,
                          struct drm_sched_entity *entity);
  void drm_sched_fence_free(struct drm_sched_fence *fence);
  
 -void drm_sched_fence_scheduled(struct drm_sched_fence *fence);
 +void drm_sched_fence_scheduled(struct drm_sched_fence *fence,
 +                             struct dma_fence *parent);
- void drm_sched_fence_finished(struct drm_sched_fence *fence);
+ void drm_sched_fence_finished(struct drm_sched_fence *fence, int result);
  
  unsigned long drm_sched_suspend_timeout(struct drm_gpu_scheduler *sched);
  void drm_sched_resume_timeout(struct drm_gpu_scheduler *sched,