u32 size, u64 val);
static int gaudi_memset_registers(struct hl_device *hdev, u64 reg_base,
u32 num_regs, u32 val);
-static int gaudi_schedule_register_memset(struct hl_device *hdev,
- u32 hw_queue_id, u64 reg_base, u32 num_regs, u32 val);
static int gaudi_run_tpc_kernel(struct hl_device *hdev, u64 tpc_kernel,
u32 tpc_id);
static int gaudi_mmu_clear_pgt_range(struct hl_device *hdev);
u32 size, bool eb);
static u32 gaudi_gen_wait_cb(struct hl_device *hdev,
struct hl_gen_wait_properties *prop);
-
static inline enum hl_collective_mode
get_collective_mode(struct hl_device *hdev, u32 queue_id)
{
prop->hard_reset_done_by_fw = false;
prop->gic_interrupts_enable = true;
+ prop->server_type = HL_SERVER_TYPE_UNKNOWN;
+
return 0;
}
struct gaudi_hw_sob_group *hw_sob_group =
container_of(ref, struct gaudi_hw_sob_group, kref);
struct hl_device *hdev = hw_sob_group->hdev;
- u64 base_addr;
- int rc;
+ int i;
- base_addr = CFG_BASE + mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_SOB_OBJ_0 +
- hw_sob_group->base_sob_id * 4;
- rc = gaudi_schedule_register_memset(hdev, hw_sob_group->queue_id,
- base_addr, NUMBER_OF_SOBS_IN_GRP, 0);
- if (rc)
- dev_err(hdev->dev,
- "failed resetting sob group - sob base %u, count %u",
- hw_sob_group->base_sob_id, NUMBER_OF_SOBS_IN_GRP);
+ for (i = 0 ; i < NUMBER_OF_SOBS_IN_GRP ; i++)
+ WREG32((mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_SOB_OBJ_0 +
+ (hw_sob_group->base_sob_id * 4) + (i * 4)), 0);
kref_init(&hw_sob_group->kref);
}
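
Since each SOB is a 32-bit register and consecutive SOBs sit 4 bytes apart, resetting a group reduces to one zero-write per SOB. A minimal standalone sketch of the address arithmetic, assuming illustrative stand-ins (SOB_OBJ_BASE, SOBS_IN_GRP) for the real mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_SOB_OBJ_0 and NUMBER_OF_SOBS_IN_GRP constants:

    #include <stdint.h>
    #include <stdio.h>

    #define SOB_OBJ_BASE 0x1000u /* stand-in for mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_SOB_OBJ_0 */
    #define SOBS_IN_GRP  8       /* stand-in for NUMBER_OF_SOBS_IN_GRP */

    /* consecutive SOBs sit 4 bytes apart - same formula as gaudi_get_sob_addr() */
    static uint32_t sob_addr(uint32_t sob_id)
    {
        return SOB_OBJ_BASE + sob_id * 4;
    }

    int main(void)
    {
        uint32_t base_sob_id = 16;
        int i;

        /* a group reset is one 32-bit write of 0 per SOB */
        for (i = 0; i < SOBS_IN_GRP; i++)
            printf("WREG32(0x%x, 0)\n", sob_addr(base_sob_id + i));

        return 0;
    }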
queue_id = job->hw_queue_id;
prop = &hdev->kernel_queues[queue_id].sync_stream_prop;
+ if (job->cs->encaps_signals) {
+ /* use the encaps signal handle stored earlier in the flow
+ * and set the SOB information from the encaps
+ * signals handle
+ */
+ hl_hw_queue_encaps_sig_set_sob_info(hdev, job->cs, job,
+ cs_cmpl);
+
+ dev_dbg(hdev->dev, "collective wait: Sequence %llu found, sob_id: %u, wait for sob_val: %u\n",
+ job->cs->sequence,
+ cs_cmpl->hw_sob->sob_id,
+ cs_cmpl->sob_val);
+ }
+
/* Add to wait CBs using slave monitor */
wait_prop.data = (void *) job->user_cb;
wait_prop.sob_base = cs_cmpl->hw_sob->sob_id;
wait_prop.size = cb_size;
dev_dbg(hdev->dev,
- "Generate slave wait CB, sob %d, val:0x%x, mon %d, q %d\n",
+ "Generate slave wait CB, sob %d, val:%x, mon %d, q %d\n",
cs_cmpl->hw_sob->sob_id, cs_cmpl->sob_val,
prop->collective_slave_mon_id, queue_id);
prop->collective_sob_id, cb_size, false);
}
-static void gaudi_collective_wait_init_cs(struct hl_cs *cs)
+static int gaudi_collective_wait_init_cs(struct hl_cs *cs)
{
struct hl_cs_compl *signal_cs_cmpl =
container_of(cs->signal_fence, struct hl_cs_compl, base_fence);
gaudi = hdev->asic_specific;
cprop = &gaudi->collective_props;
- /* copy the SOB id and value of the signal CS */
- cs_cmpl->hw_sob = signal_cs_cmpl->hw_sob;
- cs_cmpl->sob_val = signal_cs_cmpl->sob_val;
+ /* In the encaps signals case, the SOB info will be retrieved from
+ * the handle in gaudi_collective_slave_init_job.
+ */
+ if (!cs->encaps_signals) {
+ /* copy the SOB id and value of the signal CS */
+ cs_cmpl->hw_sob = signal_cs_cmpl->hw_sob;
+ cs_cmpl->sob_val = signal_cs_cmpl->sob_val;
+ }
+
+ /* Check again whether the signal cs has already completed.
+ * If so, don't send any wait cs, since the hw_sob
+ * could already be in reset. If the signal has not completed,
+ * take a refcount on the hw_sob to prevent the sob from being
+ * reset while the wait cs has not yet been submitted.
+ * Note that this check is protected by two locks:
+ * the hw queue lock and the completion object lock.
+ * The same completion object lock also protects
+ * the hw_sob reset handler function.
+ * The hw queue lock prevents the hw_sob refcount value,
+ * which is changed by the signal/wait flows, from going out of sync.
+ */
+ spin_lock(&signal_cs_cmpl->lock);
+
+ if (completion_done(&cs->signal_fence->completion)) {
+ spin_unlock(&signal_cs_cmpl->lock);
+ return -EINVAL;
+ }
+ /* Increment kref since all slave queues are now waiting on it */
+ kref_get(&cs_cmpl->hw_sob->kref);
+
+ spin_unlock(&signal_cs_cmpl->lock);
/* Calculate the stream from collective master queue (1st job) */
job = list_first_entry(&cs->job_list, struct hl_cs_job, cs_node);
cprop->curr_sob_group_idx[stream], stream);
}
- /* Increment kref since all slave queues are now waiting on it */
- kref_get(&cs_cmpl->hw_sob->kref);
- /*
- * Must put the signal fence after the SOB refcnt increment so
- * the SOB refcnt won't turn 0 and reset the SOB before the
- * wait CS was submitted.
- */
mb();
hl_fence_put(cs->signal_fence);
cs->signal_fence = NULL;
+
+ return 0;
}
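
The ordering enforced above is subtle: the wait side may take a SOB reference only while the signal fence is still pending, and both that check and the SOB reset handler serialize on the completion object's lock. A standalone toy model of the pattern, with a pthread mutex standing in for the spinlock and a plain counter for the kref (all names hypothetical):

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    struct toy_sob { int refcount; };    /* stands in for hw_sob + its kref */

    struct toy_signal_cmpl {
        pthread_mutex_t lock;            /* stands in for signal_cs_cmpl->lock */
        bool completed;                  /* stands in for the signal fence completion */
        struct toy_sob *sob;
    };

    /* wait side: take a reference only while the signal is still pending */
    static int wait_side_grab_sob(struct toy_signal_cmpl *s)
    {
        pthread_mutex_lock(&s->lock);
        if (s->completed) {
            /* SOB may already be reset - don't submit a wait CS */
            pthread_mutex_unlock(&s->lock);
            return -1;
        }
        s->sob->refcount++;              /* mirrors kref_get(&cs_cmpl->hw_sob->kref) */
        pthread_mutex_unlock(&s->lock);
        return 0;
    }

    /* signal side: completion and the SOB reset serialize on the same lock */
    static void signal_side_complete(struct toy_signal_cmpl *s)
    {
        pthread_mutex_lock(&s->lock);
        s->completed = true;
        if (--s->sob->refcount == 0)
            printf("sob reset\n");       /* mirrors the hw_sob reset handler */
        pthread_mutex_unlock(&s->lock);
    }

    int main(void)
    {
        struct toy_sob sob = { .refcount = 1 };
        struct toy_signal_cmpl s = {
            .lock = PTHREAD_MUTEX_INITIALIZER, .sob = &sob,
        };

        if (wait_side_grab_sob(&s) == 0) /* succeeds: signal still pending */
            printf("wait CS may be submitted\n");
        signal_side_complete(&s);        /* refcount 2 -> 1, no reset yet */
        if (wait_side_grab_sob(&s) != 0) /* fails: signal already completed */
            printf("wait CS skipped\n");
        return 0;
    }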
static int gaudi_collective_wait_create_job(struct hl_device *hdev,
struct hl_ctx *ctx, struct hl_cs *cs,
- enum hl_collective_mode mode, u32 queue_id, u32 wait_queue_id)
+ enum hl_collective_mode mode, u32 queue_id, u32 wait_queue_id,
+ u32 encaps_signal_offset)
{
struct hw_queue_properties *hw_queue_prop;
struct hl_cs_counters_atomic *cntr;
job->user_cb_size = cb_size;
job->hw_queue_id = queue_id;
+ /* since the collective wait cs is guaranteed to have only a single
+ * chunk, we can use that chunk to set the encapsulated signal offset
+ * in the jobs.
+ */
+ if (cs->encaps_signals)
+ job->encaps_sig_wait_offset = encaps_signal_offset;
+
/*
* No need for parsing, the user CB is the patched CB.
* We call hl_cb_destroy() for two reasons - we don't need
}
static int gaudi_collective_wait_create_jobs(struct hl_device *hdev,
- struct hl_ctx *ctx, struct hl_cs *cs, u32 wait_queue_id,
- u32 collective_engine_id)
+ struct hl_ctx *ctx, struct hl_cs *cs,
+ u32 wait_queue_id, u32 collective_engine_id,
+ u32 encaps_signal_offset)
{
struct gaudi_device *gaudi = hdev->asic_specific;
struct hw_queue_properties *hw_queue_prop;
if (i == 0) {
queue_id = wait_queue_id;
rc = gaudi_collective_wait_create_job(hdev, ctx, cs,
- HL_COLLECTIVE_MASTER, queue_id, wait_queue_id);
+ HL_COLLECTIVE_MASTER, queue_id,
+ wait_queue_id, encaps_signal_offset);
} else {
if (nic_idx < NIC_NUMBER_OF_ENGINES) {
if (gaudi->hw_cap_initialized &
}
rc = gaudi_collective_wait_create_job(hdev, ctx, cs,
- HL_COLLECTIVE_SLAVE, queue_id, wait_queue_id);
+ HL_COLLECTIVE_SLAVE, queue_id,
+ wait_queue_id, encaps_signal_offset);
}
if (rc)
return rc;
}
+ /* Scrub both SRAM and DRAM */
+ rc = hdev->asic_funcs->scrub_device_mem(hdev, 0, 0);
+ if (rc)
+ goto disable_pci_access;
+
rc = gaudi_fetch_psoc_frequency(hdev);
if (rc) {
dev_err(hdev->dev, "Failed to fetch psoc frequency\n");
goto disable_pci_access;
}
+ /* We only support a single ASID for the user, so as an optimization,
+ * initialize the ASID just once, during device initialization, with the fixed value of 1
+ */
+ gaudi_mmu_prepare(hdev, 1);
+
return 0;
disable_pci_access:
hdev->supports_sync_stream = true;
hdev->supports_coresight = true;
hdev->supports_staged_submission = true;
+ hdev->supports_wait_for_multi_cs = true;
- gaudi_set_pci_memory_regions(hdev);
+ hdev->asic_funcs->set_pci_memory_regions(hdev);
return 0;
"Doing HBM scrubbing for 0x%09llx - 0x%09llx\n",
cur_addr, cur_addr + chunk_size);
- WREG32(mmDMA0_CORE_SRC_BASE_LO + dma_offset, 0);
- WREG32(mmDMA0_CORE_SRC_BASE_HI + dma_offset, 0);
+ WREG32(mmDMA0_CORE_SRC_BASE_LO + dma_offset, 0xdeadbeaf);
+ WREG32(mmDMA0_CORE_SRC_BASE_HI + dma_offset, 0xdeadbeaf);
WREG32(mmDMA0_CORE_DST_BASE_LO + dma_offset,
lower_32_bits(cur_addr));
WREG32(mmDMA0_CORE_DST_BASE_HI + dma_offset,
return rc;
}
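
The scrub loop drives the DMA engine in memset mode: the fill pattern (0xdeadbeaf) sits in the source-base registers while the destination walks the region chunk by chunk. A standalone sketch of the chunking arithmetic, with a made-up chunk size and region bounds:

    #include <stdint.h>
    #include <stdio.h>

    /* hypothetical chunk size for illustration only */
    #define SCRUB_CHUNK (1ull << 30) /* e.g. scrub 1 GB per DMA job */

    static void scrub_region(uint64_t start, uint64_t end)
    {
        uint64_t cur = start;

        while (cur < end) {
            uint64_t chunk = end - cur < SCRUB_CHUNK ? end - cur : SCRUB_CHUNK;

            /* one DMA memset job: pattern in SRC_BASE, destination walks the region */
            printf("memset 0x%09llx - 0x%09llx with 0xdeadbeaf\n",
                   (unsigned long long)cur,
                   (unsigned long long)(cur + chunk));
            cur += chunk;
        }
    }

    int main(void)
    {
        scrub_region(0, 3ull << 30); /* e.g. a 3 GB HBM slice */
        return 0;
    }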
-static int gaudi_schedule_register_memset(struct hl_device *hdev,
- u32 hw_queue_id, u64 reg_base, u32 num_regs, u32 val)
-{
- struct hl_ctx *ctx;
- struct hl_pending_cb *pending_cb;
- struct packet_msg_long *pkt;
- u32 cb_size, ctl;
- struct hl_cb *cb;
- int i, rc;
-
- mutex_lock(&hdev->fpriv_list_lock);
- ctx = hdev->compute_ctx;
-
- /* If no compute context available or context is going down
- * memset registers directly
- */
- if (!ctx || kref_read(&ctx->refcount) == 0) {
- rc = gaudi_memset_registers(hdev, reg_base, num_regs, val);
- mutex_unlock(&hdev->fpriv_list_lock);
- return rc;
- }
-
- mutex_unlock(&hdev->fpriv_list_lock);
-
- cb_size = (sizeof(*pkt) * num_regs) +
- sizeof(struct packet_msg_prot) * 2;
-
- if (cb_size > SZ_2M) {
- dev_err(hdev->dev, "CB size must be smaller than %uMB", SZ_2M);
- return -ENOMEM;
- }
-
- pending_cb = kzalloc(sizeof(*pending_cb), GFP_KERNEL);
- if (!pending_cb)
- return -ENOMEM;
-
- cb = hl_cb_kernel_create(hdev, cb_size, false);
- if (!cb) {
- kfree(pending_cb);
- return -EFAULT;
- }
-
- pkt = cb->kernel_address;
-
- ctl = FIELD_PREP(GAUDI_PKT_LONG_CTL_OP_MASK, 0); /* write the value */
- ctl |= FIELD_PREP(GAUDI_PKT_CTL_OPCODE_MASK, PACKET_MSG_LONG);
- ctl |= FIELD_PREP(GAUDI_PKT_CTL_EB_MASK, 1);
- ctl |= FIELD_PREP(GAUDI_PKT_CTL_RB_MASK, 1);
- ctl |= FIELD_PREP(GAUDI_PKT_CTL_MB_MASK, 1);
-
- for (i = 0; i < num_regs ; i++, pkt++) {
- pkt->ctl = cpu_to_le32(ctl);
- pkt->value = cpu_to_le32(val);
- pkt->addr = cpu_to_le64(reg_base + (i * 4));
- }
-
- hl_cb_destroy(hdev, &hdev->kernel_cb_mgr, cb->id << PAGE_SHIFT);
-
- pending_cb->cb = cb;
- pending_cb->cb_size = cb_size;
- /* The queue ID MUST be an external queue ID. Otherwise, we will
- * have undefined behavior
- */
- pending_cb->hw_queue_id = hw_queue_id;
-
- spin_lock(&ctx->pending_cb_lock);
- list_add_tail(&pending_cb->cb_node, &ctx->pending_cb_list);
- spin_unlock(&ctx->pending_cb_lock);
-
- return 0;
-}
-
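
For context on what is being removed: the deleted path packed the MSG_LONG control word field by field with FIELD_PREP(). A standalone model of that mask/shift packing, assuming toy masks rather than the real GAUDI_PKT_* values:

    #include <stdint.h>
    #include <stdio.h>

    /* TOY_* masks are illustrative, not the real GAUDI_PKT_* values */
    #define TOY_OPCODE_MASK 0x00ff0000u
    #define TOY_EB_MASK     0x00000001u

    static uint32_t field_prep(uint32_t mask, uint32_t val)
    {
        /* place val at the mask's lowest set bit, as the kernel's FIELD_PREP() does */
        return (val << __builtin_ctz(mask)) & mask;
    }

    int main(void)
    {
        uint32_t ctl = 0;

        ctl |= field_prep(TOY_OPCODE_MASK, 0x3); /* opcode field of the packet */
        ctl |= field_prep(TOY_EB_MASK, 1);       /* event-barrier enable bit */
        printf("ctl = 0x%08x\n", ctl);           /* prints ctl = 0x00030001 */
        return 0;
    }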
static int gaudi_restore_sm_registers(struct hl_device *hdev)
{
u64 base_addr;
asid);
}
+ gaudi_mmu_prepare_reg(hdev, mmPSOC_GLOBAL_CONF_TRACE_ARUSER, asid);
+ gaudi_mmu_prepare_reg(hdev, mmPSOC_GLOBAL_CONF_TRACE_AWUSER, asid);
+
hdev->asic_funcs->set_clock_gating(hdev);
mutex_unlock(&gaudi->clk_gate_mutex);
dma_offset = gaudi_dma_assignment[GAUDI_PCI_DMA_1] * DMA_CORE_OFFSET;
- WREG32_OR(mmDMA0_CORE_PROT + dma_offset, BIT(DMA0_CORE_PROT_VAL_SHIFT));
+ WREG32(mmDMA0_CORE_PROT + dma_offset,
+ BIT(DMA0_CORE_PROT_ERR_VAL_SHIFT) | BIT(DMA0_CORE_PROT_VAL_SHIFT));
rc = hl_hw_queue_send_cb_no_cmpl(hdev, GAUDI_QUEUE_ID_DMA_0_0,
job->job_cb_size, cb->bus_address);
}
free_fence_ptr:
- WREG32_AND(mmDMA0_CORE_PROT + dma_offset,
- ~BIT(DMA0_CORE_PROT_VAL_SHIFT));
+ WREG32(mmDMA0_CORE_PROT + dma_offset, BIT(DMA0_CORE_PROT_ERR_VAL_SHIFT));
hdev->asic_funcs->asic_dma_pool_free(hdev, (void *) fence_ptr,
fence_dma_addr);
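
Replacing the read-modify-write pair (WREG32_OR/WREG32_AND) with absolute writes makes the register's final state explicit: ERR_VAL and VAL both set while the job runs, only ERR_VAL afterwards. A minimal sketch of the bit composition, assuming illustrative shift values rather than the real DMA0_CORE_PROT_* shifts:

    #include <stdint.h>
    #include <stdio.h>

    #define BIT(n) (1u << (n))

    /* illustrative shifts; the real DMA0_CORE_PROT_* shifts come from the ASIC headers */
    #define PROT_VAL_SHIFT     0
    #define PROT_ERR_VAL_SHIFT 1

    int main(void)
    {
        /* while the job runs: both bits set, written as one absolute value */
        uint32_t prot = BIT(PROT_ERR_VAL_SHIFT) | BIT(PROT_VAL_SHIFT);
        printf("prot while job runs: 0x%x\n", prot); /* 0x3 */

        /* after the job: only the ERR_VAL bit remains set */
        prot = BIT(PROT_ERR_VAL_SHIFT);
        printf("prot restored:      0x%x\n", prot);  /* 0x2 */
        return 0;
    }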
u8 cause;
int rc;
+ if (event_type >= GAUDI_EVENT_SIZE) {
+ dev_err(hdev->dev, "Event type %u exceeds maximum of %u\n",
+ event_type, GAUDI_EVENT_SIZE - 1);
+ return;
+ }
+
gaudi->events_stat[event_type]++;
gaudi->events_stat_aggregate[event_type]++;
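
Bounding event_type before it indexes the stats arrays keeps a malformed firmware event from walking off the tables. The same guard in standalone form, with a toy table size standing in for GAUDI_EVENT_SIZE:

    #include <stdio.h>

    #define TOY_EVENT_SIZE 64 /* stand-in for GAUDI_EVENT_SIZE */

    static unsigned int events_stat[TOY_EVENT_SIZE];

    /* reject out-of-range ids before they index the table */
    static int count_event(unsigned int event_type)
    {
        if (event_type >= TOY_EVENT_SIZE) {
            fprintf(stderr, "Event type %u exceeds maximum of %u\n",
                    event_type, TOY_EVENT_SIZE - 1);
            return -1;
        }
        events_stat[event_type]++;
        return 0;
    }

    int main(void)
    {
        count_event(3);    /* in range, counted */
        count_event(4096); /* out of range, rejected */
        return 0;
    }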
if (ctx->asid == HL_KERNEL_ASID_ID)
return 0;
- gaudi_mmu_prepare(ctx->hdev, ctx->asid);
return gaudi_internal_cb_pool_init(ctx->hdev, ctx);
}
sizeof(struct packet_msg_prot) * 2;
}
+static u32 gaudi_get_sob_addr(struct hl_device *hdev, u32 sob_id)
+{
+ return mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_SOB_OBJ_0 + (sob_id * 4);
+}
+
static u32 gaudi_gen_signal_cb(struct hl_device *hdev, void *data, u16 sob_id,
u32 size, bool eb)
{
static void gaudi_reset_sob(struct hl_device *hdev, void *data)
{
struct hl_hw_sob *hw_sob = (struct hl_hw_sob *) data;
- int rc;
dev_dbg(hdev->dev, "reset SOB, q_idx: %d, sob_id: %d\n", hw_sob->q_idx,
hw_sob->sob_id);
- rc = gaudi_schedule_register_memset(hdev, hw_sob->q_idx,
- CFG_BASE + mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_SOB_OBJ_0 +
- hw_sob->sob_id * 4, 1, 0);
- if (rc)
- dev_err(hdev->dev, "failed resetting sob %u", hw_sob->sob_id);
+ WREG32(mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_SOB_OBJ_0 +
+ hw_sob->sob_id * 4, 0);
kref_init(&hw_sob->kref);
}
.map_pll_idx_to_fw_idx = gaudi_map_pll_idx_to_fw_idx,
.init_firmware_loader = gaudi_init_firmware_loader,
.init_cpu_scrambler_dram = gaudi_init_scrambler_hbm,
- .state_dump_init = gaudi_state_dump_init
+ .state_dump_init = gaudi_state_dump_init,
+ .get_sob_addr = gaudi_get_sob_addr,
+ .set_pci_memory_regions = gaudi_set_pci_memory_regions
};
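
Routing set_pci_memory_regions and get_sob_addr through the asic_funcs table keeps common code ASIC-agnostic. A minimal sketch of the ops-table pattern this relies on, with hypothetical toy_* names:

    #include <stdio.h>

    struct toy_device;

    /* per-ASIC function table, like struct hl_asic_funcs */
    struct toy_asic_funcs {
        unsigned int (*get_sob_addr)(struct toy_device *dev, unsigned int sob_id);
    };

    struct toy_device {
        const struct toy_asic_funcs *asic_funcs;
    };

    static unsigned int toy_gaudi_get_sob_addr(struct toy_device *dev, unsigned int sob_id)
    {
        (void)dev;
        return 0x1000 + sob_id * 4; /* base offset is made up for the demo */
    }

    static const struct toy_asic_funcs toy_gaudi_funcs = {
        .get_sob_addr = toy_gaudi_get_sob_addr,
    };

    int main(void)
    {
        struct toy_device dev = { .asic_funcs = &toy_gaudi_funcs };

        /* common code only ever calls through the table */
        printf("sob 3 at 0x%x\n", dev.asic_funcs->get_sob_addr(&dev, 3));
        return 0;
    }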
/**