goto out;
        }
 
+       /* assume EQ code doesn't need to check eqe index */
+       hdev->event_queue.check_eqe_index = false;
+
        /* Read FW application security bits again */
-       if (hdev->asic_prop.fw_cpu_boot_dev_sts0_valid)
+       if (hdev->asic_prop.fw_cpu_boot_dev_sts0_valid) {
                hdev->asic_prop.fw_app_cpu_boot_dev_sts0 =
                                                RREG32(sts_boot_dev_sts0_reg);
+               if (hdev->asic_prop.fw_app_cpu_boot_dev_sts0 &
+                               CPU_BOOT_DEV_STS0_EQ_INDEX_EN)
+                       hdev->event_queue.check_eqe_index = true;
+       }
 
        if (hdev->asic_prop.fw_cpu_boot_dev_sts1_valid)
                hdev->asic_prop.fw_app_cpu_boot_dev_sts1 =
 
  * @kernel_address: holds the queue's kernel virtual address
  * @bus_address: holds the queue's DMA address
  * @ci: ci inside the queue
+ * @prev_eqe_index: the index of the previous event queue entry. The index of
+ *                  the current entry must be +1 of the previous one.
+ * @check_eqe_index: do we need to check the index of the current entry vs. the
+ *                   previous one. This is for backward compatibility with older
+ *                   firmware versions
  */
 struct hl_eq {
        struct hl_device        *hdev;
        void                    *kernel_address;
        dma_addr_t              bus_address;
        u32                     ci;
+       u32                     prev_eqe_index;
+       bool                    check_eqe_index;
 };
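
For illustration, a standalone userspace sketch (plain C, not driver code; the helper name eqe_index_is_next is hypothetical) of the continuity rule these two new fields express: prev_eqe_index is a free-running 32-bit counter while the hardware index is only 16 bits wide, so both sides are masked with EQ_CTL_INDEX_MASK (defined later in this patch) before comparing, which keeps the check valid across the wrap from 0xFFFF back to 0.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define EQ_CTL_INDEX_MASK	0x0000FFFF

/* Hypothetical helper: true when cur_eqe_index is exactly one ahead of the
 * previously seen index, taking the 16-bit wrap of the hardware counter
 * into account.
 */
static bool eqe_index_is_next(uint32_t prev_eqe_index, uint16_t cur_eqe_index)
{
	return ((prev_eqe_index + 1) & EQ_CTL_INDEX_MASK) == cur_eqe_index;
}

int main(void)
{
	printf("%d\n", eqe_index_is_next(0xFFFF, 0x0000)); /* 1: wrap is fine */
	printf("%d\n", eqe_index_is_next(0x0010, 0x0010)); /* 0: stale entry  */
	return 0;
}
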
 
 
 
        struct hl_eq_entry *eq_entry;
        struct hl_eq_entry *eq_base;
        struct hl_eqe_work *handle_eqe_work;
+       bool entry_ready;
+       u32 cur_eqe;
+       u16 cur_eqe_index;
 
        eq_base = eq->kernel_address;
 
        while (1) {
-               bool entry_ready =
-                       ((le32_to_cpu(eq_base[eq->ci].hdr.ctl) &
-                               EQ_CTL_READY_MASK) >> EQ_CTL_READY_SHIFT);
+               cur_eqe = le32_to_cpu(eq_base[eq->ci].hdr.ctl);
+               entry_ready = !!FIELD_GET(EQ_CTL_READY_MASK, cur_eqe);
 
                if (!entry_ready)
                        break;
 
+               cur_eqe_index = FIELD_GET(EQ_CTL_INDEX_MASK, cur_eqe);
+               if ((hdev->event_queue.check_eqe_index) &&
+                               (((eq->prev_eqe_index + 1) & EQ_CTL_INDEX_MASK)
+                                                       != cur_eqe_index)) {
+                       dev_dbg(hdev->dev,
+                               "EQE 0x%x in queue is ready but index does not match %d!=%d",
+                               cur_eqe,
+                               ((eq->prev_eqe_index + 1) & EQ_CTL_INDEX_MASK),
+                               cur_eqe_index);
+                       break;
+               }
+
+               eq->prev_eqe_index++;
+
                eq_entry = &eq_base[eq->ci];
 
                /*
        q->hdev = hdev;
        q->kernel_address = p;
        q->ci = 0;
+       q->prev_eqe_index = 0;
 
        return 0;
 }
 void hl_eq_reset(struct hl_device *hdev, struct hl_eq *q)
 {
        q->ci = 0;
+       q->prev_eqe_index = 0;
 
        /*
         * It's not enough to just reset the PI/CI because the H/W may have
 
 #define EQ_CTL_EVENT_TYPE_SHIFT                16
 #define EQ_CTL_EVENT_TYPE_MASK         0x03FF0000
 
+#define EQ_CTL_INDEX_SHIFT             0
+#define EQ_CTL_INDEX_MASK              0x0000FFFF
+
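
As a reference for the ctl layout these masks describe, a standalone sketch (plain C with explicit shift-and-mask; the driver itself uses FIELD_GET() from <linux/bitfield.h>) that decodes the event type and index fields of a 32-bit ctl word. The example ctl value is made up for illustration.

#include <stdint.h>
#include <stdio.h>

#define EQ_CTL_EVENT_TYPE_SHIFT		16
#define EQ_CTL_EVENT_TYPE_MASK		0x03FF0000
#define EQ_CTL_INDEX_SHIFT		0
#define EQ_CTL_INDEX_MASK		0x0000FFFF

int main(void)
{
	/* Made-up ctl word: event type 0x42 in bits 25:16, index 0x1234 in
	 * bits 15:0.
	 */
	uint32_t ctl = (0x42u << EQ_CTL_EVENT_TYPE_SHIFT) | 0x1234u;

	printf("type=0x%x index=0x%x\n",
	       (ctl & EQ_CTL_EVENT_TYPE_MASK) >> EQ_CTL_EVENT_TYPE_SHIFT,
	       (ctl & EQ_CTL_INDEX_MASK) >> EQ_CTL_INDEX_SHIFT);
	return 0;
}
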
 enum pq_init_status {
        PQ_INIT_STATUS_NA = 0,
        PQ_INIT_STATUS_READY_FOR_CP,
 
 *                                     privileged entity. FW sets this status
  *                                     bit for host. If this bit is set then
  *                                     GIC can not be accessed from host.
- *                                     Initialized in: armcpd
+ *                                     Initialized in: linux
+ *
+ * CPU_BOOT_DEV_STS0_EQ_INDEX_EN       Event Queue (EQ) index is a running
+ *                                     index for each new event sent to host.
+ *                                     The host uses this index to verify
+ *                                     that the pending event in the queue
+ *                                     is a new event which was not served
+ *                                     before.
+ *                                     Initialized in: linux
  *
  * CPU_BOOT_DEV_STS0_ENABLED           Device status register enabled.
  *                                     This is a main indication that the
 #define CPU_BOOT_DEV_STS0_FW_IATU_CONF_EN              (1 << 17)
 #define CPU_BOOT_DEV_STS0_DYN_PLL_EN                   (1 << 19)
 #define CPU_BOOT_DEV_STS0_GIC_PRIVILEGED_EN            (1 << 20)
+#define CPU_BOOT_DEV_STS0_EQ_INDEX_EN                  (1 << 21)
 #define CPU_BOOT_DEV_STS0_ENABLED                      (1 << 31)
 #define CPU_BOOT_DEV_STS1_ENABLED                      (1 << 31)
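
Finally, a hedged sketch of how the host side gates the EQE index check on this new capability bit (mirroring the firmware_if.c hunk at the top of this patch); should_check_eqe_index is a hypothetical standalone helper, not a driver function.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define CPU_BOOT_DEV_STS0_EQ_INDEX_EN	(1u << 21)

/* Hypothetical helper: the index check is enabled only when the security
 * status register is valid and the firmware advertises the EQ index
 * capability, so older firmware keeps the previous behaviour.
 */
static bool should_check_eqe_index(bool sts0_valid, uint32_t app_dev_sts0)
{
	return sts0_valid && (app_dev_sts0 & CPU_BOOT_DEV_STS0_EQ_INDEX_EN);
}

int main(void)
{
	printf("%d\n", should_check_eqe_index(true, 0));		/* 0: old FW */
	printf("%d\n", should_check_eqe_index(true, 1u << 21));	/* 1: new FW */
	return 0;
}
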