habanalabs: add uapi to flush inbound HBM transactions
authorOhad Sharabi <osharabi@habana.ai>
Sun, 18 Dec 2022 07:42:34 +0000 (09:42 +0200)
committerOded Gabbay <ogabbay@kernel.org>
Thu, 26 Jan 2023 09:52:10 +0000 (11:52 +0200)
When doing p2p with a NIC device, the NIC needs to make sure all the
writes to the HBM (through the PCI bar of the Gaudi device) were
flushed.

It can be done by either the NIC or the host reading through the PCI
bar.

To support the host side, we supply a simple uapi to perform this flush
through the driver, because users can't create such a transaction
by themselves (the PCI bar isn't exposed to normal users).

Signed-off-by: Ohad Sharabi <osharabi@habana.ai>
Reviewed-by: Oded Gabbay <ogabbay@kernel.org>
Signed-off-by: Oded Gabbay <ogabbay@kernel.org>
drivers/accel/habanalabs/common/command_submission.c
drivers/accel/habanalabs/common/habanalabs.h
drivers/accel/habanalabs/gaudi/gaudi.c
drivers/accel/habanalabs/gaudi2/gaudi2.c
drivers/accel/habanalabs/include/gaudi/asic_reg/gaudi_regs.h
include/uapi/drm/habanalabs_accel.h

index f6ee103..bb9584d 100644 (file)
@@ -13,7 +13,8 @@
 
 #define HL_CS_FLAGS_TYPE_MASK  (HL_CS_FLAGS_SIGNAL | HL_CS_FLAGS_WAIT | \
                        HL_CS_FLAGS_COLLECTIVE_WAIT | HL_CS_FLAGS_RESERVE_SIGNALS_ONLY | \
-                       HL_CS_FLAGS_UNRESERVE_SIGNALS_ONLY | HL_CS_FLAGS_ENGINE_CORE_COMMAND)
+                       HL_CS_FLAGS_UNRESERVE_SIGNALS_ONLY | HL_CS_FLAGS_ENGINE_CORE_COMMAND | \
+                       HL_CS_FLAGS_FLUSH_PCI_HBW_WRITES)
 
 
 #define MAX_TS_ITER_NUM 10
@@ -1295,6 +1296,8 @@ static enum hl_cs_type hl_cs_get_cs_type(u32 cs_type_flags)
                return CS_UNRESERVE_SIGNALS;
        else if (cs_type_flags & HL_CS_FLAGS_ENGINE_CORE_COMMAND)
                return CS_TYPE_ENGINE_CORE;
+       else if (cs_type_flags & HL_CS_FLAGS_FLUSH_PCI_HBW_WRITES)
+               return CS_TYPE_FLUSH_PCI_HBW_WRITES;
        else
                return CS_TYPE_DEFAULT;
 }
@@ -2443,6 +2446,21 @@ static int cs_ioctl_engine_cores(struct hl_fpriv *hpriv, u64 engine_cores,
        return rc;
 }
 
+static int cs_ioctl_flush_pci_hbw_writes(struct hl_fpriv *hpriv)
+{
+       struct hl_device *hdev = hpriv->hdev;
+       struct asic_fixed_properties *prop = &hdev->asic_prop;
+
+       if (!prop->hbw_flush_reg) {
+               dev_dbg(hdev->dev, "HBW flush is not supported\n");
+               return -EOPNOTSUPP;
+       }
+
+       RREG32(prop->hbw_flush_reg);
+
+       return 0;
+}
+
 int hl_cs_ioctl(struct hl_fpriv *hpriv, void *data)
 {
        union hl_cs_args *args = data;
@@ -2499,6 +2517,9 @@ int hl_cs_ioctl(struct hl_fpriv *hpriv, void *data)
                rc = cs_ioctl_engine_cores(hpriv, args->in.engine_cores,
                                args->in.num_engine_cores, args->in.core_command);
                break;
+       case CS_TYPE_FLUSH_PCI_HBW_WRITES:
+               rc = cs_ioctl_flush_pci_hbw_writes(hpriv);
+               break;
        default:
                rc = cs_ioctl_default(hpriv, chunks, num_chunks, &cs_seq,
                                                args->in.cs_flags,
index 7b6f100..95bbc00 100644 (file)
@@ -375,7 +375,8 @@ enum hl_cs_type {
        CS_TYPE_COLLECTIVE_WAIT,
        CS_RESERVE_SIGNALS,
        CS_UNRESERVE_SIGNALS,
-       CS_TYPE_ENGINE_CORE
+       CS_TYPE_ENGINE_CORE,
+       CS_TYPE_FLUSH_PCI_HBW_WRITES,
 };
 
 /*
@@ -644,6 +645,8 @@ struct hl_hints_range {
  *                                      (i.e. the DRAM supports multiple page sizes), otherwise
  *                                      it will shall  be equal to dram_page_size.
  * @num_engine_cores: number of engine cpu cores
+ * @hbw_flush_reg: register to read to generate HBW flush. value of 0 means HBW flush is
+ *                 not supported.
  * @collective_first_sob: first sync object available for collective use
  * @collective_first_mon: first monitor available for collective use
  * @sync_stream_first_sob: first sync object available for sync stream use
@@ -764,6 +767,7 @@ struct asic_fixed_properties {
        u32                             xbar_edge_enabled_mask;
        u32                             device_mem_alloc_default_page_size;
        u32                             num_engine_cores;
+       u32                             hbw_flush_reg;
        u16                             collective_first_sob;
        u16                             collective_first_mon;
        u16                             sync_stream_first_sob;
index 1b701a8..13f9e6c 100644 (file)
@@ -701,6 +701,8 @@ static int gaudi_set_fixed_properties(struct hl_device *hdev)
 
        prop->dma_mask = 48;
 
+       prop->hbw_flush_reg = mmPCIE_WRAP_RR_ELBI_RD_SEC_REG_CTRL;
+
        return 0;
 }
 
index 7df1a68..4529a64 100644 (file)
@@ -2071,6 +2071,8 @@ static int gaudi2_set_fixed_properties(struct hl_device *hdev)
 
        prop->dma_mask = 64;
 
+       prop->hbw_flush_reg = mmPCIE_WRAP_SPECIAL_GLBL_SPARE_0;
+
        return 0;
 }
 
index 1a65766..23ee869 100644 (file)
 #define mmPSOC_TPC_PLL_NR                                            0xC73100
 #define mmIF_W_PLL_NR                                                0x488100
 
+#define mmPCIE_WRAP_RR_ELBI_RD_SEC_REG_CTRL                          0xC01208
+
 #endif /* ASIC_REG_GAUDI_REGS_H_ */
index 90e6287..331567e 100644 (file)
@@ -1478,6 +1478,14 @@ struct hl_cs_chunk {
  */
 #define HL_CS_FLAGS_ENGINE_CORE_COMMAND                0x4000
 
+/*
+ * The flush of HBW PCI writes is merged into the existing CS ioctls,
+ * and is used to flush all HBW PCI writes.
+ * This is a blocking operation, and for this reason the user shall not use
+ * the return sequence number (which will be invalid anyway).
+ */
+#define HL_CS_FLAGS_FLUSH_PCI_HBW_WRITES       0x8000
+
 #define HL_CS_STATUS_SUCCESS           0
 
 #define HL_MAX_JOBS_PER_CS             512