int nvkm_falcon_dma_wr(struct nvkm_falcon *, const u8 *img, u64 dma_addr, u32 dma_base,
                       enum nvkm_falcon_mem mem_type, u32 mem_base, int len, bool sec);
bool nvkm_falcon_riscv_active(struct nvkm_falcon *);
+void nvkm_falcon_intr_retrigger(struct nvkm_falcon *);
int gm200_flcn_reset_wait_mem_scrubbing(struct nvkm_falcon *);
int gm200_flcn_disable(struct nvkm_falcon *);
bool tu102_flcn_riscv_active(struct nvkm_falcon *);
+void ga100_flcn_intr_retrigger(struct nvkm_falcon *);
+
int ga102_flcn_select(struct nvkm_falcon *);
int ga102_flcn_reset_prep(struct nvkm_falcon *);
int ga102_flcn_reset_wait_mem_scrubbing(struct nvkm_falcon *);
int (*enable)(struct nvkm_falcon *);
int (*select)(struct nvkm_falcon *);
u32 addr2;
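+ /* RISC-V IRQ-mask register offset, applied relative to addr2 (see r535_gsp_intr()). */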
+ u32 riscv_irqmask;
bool reset_pmc;
int (*reset_eng)(struct nvkm_falcon *);
int (*reset_prep)(struct nvkm_falcon *);
} cmdq, msgq;
bool (*riscv_active)(struct nvkm_falcon *);
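+ /* Optional hook: re-send a pending top-level interrupt; see nvkm_falcon_intr_retrigger(). */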
+ void (*intr_retrigger)(struct nvkm_falcon *);
struct {
u32 *data;
void *priv;
} ntfy[16];
int ntfy_nr;
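+ /* Deferred handling of messages signalled by the msgq interrupt. */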
+ struct work_struct work;
} msgq;
bool running;
} device;
} internal;
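+ /* Interrupt vectors reported by GSP-RM, keyed by nvkm subdev type and
+ * instance; ~0 marks a vector that was not assigned.
+ */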
+ struct {
+ enum nvkm_subdev_type type;
+ int inst;
+ u32 stall;
+ u32 nonstall;
+ } intr[32];
+ int intr_nr;
+
const struct nvkm_gsp_rm {
void *(*rpc_get)(struct nvkm_gsp *, u32 fn, u32 argc);
void *(*rpc_push)(struct nvkm_gsp *, void *argv, bool wait, u32 repc);
return ret;
}
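+/* Look up the stall/nonstall interrupt vector GSP-RM assigned to a subdev;
+ * returns the vector, or a negative errno (see the implementations in base.c).
+ */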
+int nvkm_gsp_intr_stall(struct nvkm_gsp *, enum nvkm_subdev_type, int);
+int nvkm_gsp_intr_nonstall(struct nvkm_gsp *, enum nvkm_subdev_type, int);
+
int gv100_gsp_new(struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_gsp **);
int tu102_gsp_new(struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_gsp **);
int tu116_gsp_new(struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_gsp **);
--- /dev/null
+#ifndef __src_common_sdk_nvidia_inc_ctrl_ctrl2080_ctrl2080internal_h__
+#define __src_common_sdk_nvidia_inc_ctrl_ctrl2080_ctrl2080internal_h__
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.54.03 */
+
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2020-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#define NV2080_CTRL_CMD_INTERNAL_INTR_GET_KERNEL_TABLE (0x20800a5c) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_INTR_GET_KERNEL_TABLE_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_INTERNAL_INTR_MAX_TABLE_SIZE 128
+
+typedef enum NV2080_INTR_CATEGORY {
+ NV2080_INTR_CATEGORY_DEFAULT = 0,
+ NV2080_INTR_CATEGORY_ESCHED_DRIVEN_ENGINE = 1,
+ NV2080_INTR_CATEGORY_ESCHED_DRIVEN_ENGINE_NOTIFICATION = 2,
+ NV2080_INTR_CATEGORY_RUNLIST = 3,
+ NV2080_INTR_CATEGORY_RUNLIST_NOTIFICATION = 4,
+ NV2080_INTR_CATEGORY_UVM_OWNED = 5,
+ NV2080_INTR_CATEGORY_UVM_SHARED = 6,
+ NV2080_INTR_CATEGORY_ENUM_COUNT = 7,
+} NV2080_INTR_CATEGORY;
+
+typedef struct NV2080_INTR_CATEGORY_SUBTREE_MAP {
+ NvU8 subtreeStart;
+ NvU8 subtreeEnd;
+} NV2080_INTR_CATEGORY_SUBTREE_MAP;
+
+typedef struct NV2080_CTRL_INTERNAL_INTR_GET_KERNEL_TABLE_ENTRY {
+ NvU16 engineIdx;
+ NvU32 pmcIntrMask;
+ NvU32 vectorStall;
+ NvU32 vectorNonStall;
+} NV2080_CTRL_INTERNAL_INTR_GET_KERNEL_TABLE_ENTRY;
+
+typedef struct NV2080_CTRL_INTERNAL_INTR_GET_KERNEL_TABLE_PARAMS {
+ NvU32 tableLen;
+ NV2080_CTRL_INTERNAL_INTR_GET_KERNEL_TABLE_ENTRY table[NV2080_CTRL_INTERNAL_INTR_MAX_TABLE_SIZE];
+ NV2080_INTR_CATEGORY_SUBTREE_MAP subtreeMap[NV2080_INTR_CATEGORY_ENUM_COUNT];
+} NV2080_CTRL_INTERNAL_INTR_GET_KERNEL_TABLE_PARAMS;
+
+#endif
--- /dev/null
+#ifndef __src_nvidia_inc_kernel_gpu_intr_engine_idx_h__
+#define __src_nvidia_inc_kernel_gpu_intr_engine_idx_h__
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.54.03 */
+
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#define MC_ENGINE_IDX_GSP 49
+
+#endif
.gsp = { 0x00000001, ad102_gsp_new },
.pci = { 0x00000001, gp100_pci_new },
.timer = { 0x00000001, gk20a_timer_new },
+ .vfn = { 0x00000001, ga100_vfn_new },
.sec2 = { 0x00000001, ga102_sec2_new },
};
.gsp = { 0x00000001, ad102_gsp_new },
.pci = { 0x00000001, gp100_pci_new },
.timer = { 0x00000001, gk20a_timer_new },
+ .vfn = { 0x00000001, ga100_vfn_new },
.sec2 = { 0x00000001, ga102_sec2_new },
};
.gsp = { 0x00000001, ad102_gsp_new },
.pci = { 0x00000001, gp100_pci_new },
.timer = { 0x00000001, gk20a_timer_new },
+ .vfn = { 0x00000001, ga100_vfn_new },
.sec2 = { 0x00000001, ga102_sec2_new },
};
.gsp = { 0x00000001, ad102_gsp_new },
.pci = { 0x00000001, gp100_pci_new },
.timer = { 0x00000001, gk20a_timer_new },
+ .vfn = { 0x00000001, ga100_vfn_new },
.sec2 = { 0x00000001, ga102_sec2_new },
};
.gsp = { 0x00000001, ad102_gsp_new },
.pci = { 0x00000001, gp100_pci_new },
.timer = { 0x00000001, gk20a_timer_new },
+ .vfn = { 0x00000001, ga100_vfn_new },
.sec2 = { 0x00000001, ga102_sec2_new },
};
#include <subdev/timer.h>
#include <subdev/top.h>
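+/* Re-send any pending falcon interrupt, where the implementation provides a
+ * retrigger hook; a no-op otherwise.
+ */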
+void
+nvkm_falcon_intr_retrigger(struct nvkm_falcon *falcon)
+{
+ if (falcon->func->intr_retrigger)
+ falcon->func->intr_retrigger(falcon);
+}
+
bool
nvkm_falcon_riscv_active(struct nvkm_falcon *falcon)
{
*/
#include "priv.h"
+void
+ga100_flcn_intr_retrigger(struct nvkm_falcon *falcon)
+{
+ nvkm_falcon_wr32(falcon, 0x3e8, 0x00000001);
+}
+
int
ga100_flcn_fw_signature(struct nvkm_falcon_fw *fw, u32 *src_base_src)
{
*/
#include "priv.h"
+int
+nvkm_gsp_intr_nonstall(struct nvkm_gsp *gsp, enum nvkm_subdev_type type, int inst)
+{
+ for (int i = 0; i < gsp->intr_nr; i++) {
+ if (gsp->intr[i].type == type && gsp->intr[i].inst == inst) {
+ if (gsp->intr[i].nonstall != ~0)
+ return gsp->intr[i].nonstall;
+
+ return -EINVAL;
+ }
+ }
+
+ return -ENOENT;
+}
+
+int
+nvkm_gsp_intr_stall(struct nvkm_gsp *gsp, enum nvkm_subdev_type type, int inst)
+{
+ for (int i = 0; i < gsp->intr_nr; i++) {
+ if (gsp->intr[i].type == type && gsp->intr[i].inst == inst) {
+ if (gsp->intr[i].stall != ~0)
+ return gsp->intr[i].stall;
+
+ return -EINVAL;
+ }
+ }
+
+ return -ENOENT;
+}
+
static int
nvkm_gsp_fini(struct nvkm_subdev *subdev, bool suspend)
{
*/
#include "priv.h"
+static const struct nvkm_falcon_func
+ga100_gsp_flcn = {
+ .disable = gm200_flcn_disable,
+ .enable = gm200_flcn_enable,
+ .addr2 = 0x1000,
+ .riscv_irqmask = 0x2b4,
+ .reset_eng = gp102_flcn_reset_eng,
+ .reset_wait_mem_scrubbing = gm200_flcn_reset_wait_mem_scrubbing,
+ .bind_inst = gm200_flcn_bind_inst,
+ .bind_stat = gm200_flcn_bind_stat,
+ .bind_intr = true,
+ .imem_pio = &gm200_flcn_imem_pio,
+ .dmem_pio = &gm200_flcn_dmem_pio,
+ .riscv_active = tu102_flcn_riscv_active,
+ .intr_retrigger = ga100_flcn_intr_retrigger,
+};
+
static const struct nvkm_gsp_func
ga100_gsp_r535_54_03 = {
- .flcn = &tu102_gsp_flcn,
+ .flcn = &ga100_gsp_flcn,
.fwsec = &tu102_gsp_fwsec,
.sig_section = ".fwsignature_ga100",
.enable = gm200_flcn_enable,
.select = ga102_flcn_select,
.addr2 = 0x1000,
+ .riscv_irqmask = 0x528,
.reset_eng = gp102_flcn_reset_eng,
.reset_prep = ga102_flcn_reset_prep,
.reset_wait_mem_scrubbing = ga102_flcn_reset_wait_mem_scrubbing,
.imem_dma = &ga102_flcn_dma,
.dmem_dma = &ga102_flcn_dma,
.riscv_active = ga102_flcn_riscv_active,
+ .intr_retrigger = ga100_flcn_intr_retrigger,
};
static const struct nvkm_gsp_func
#include <core/pci.h>
#include <subdev/timer.h>
+#include <subdev/vfn.h>
#include <engine/sec2.h>
#include <nvfw/fw.h>
#include <nvrm/535.54.03/common/sdk/nvidia/inc/class/cl0080.h>
#include <nvrm/535.54.03/common/sdk/nvidia/inc/class/cl2080.h>
#include <nvrm/535.54.03/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080gpu.h>
+#include <nvrm/535.54.03/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080internal.h>
#include <nvrm/535.54.03/common/shared/msgq/inc/msgq/msgq_priv.h>
#include <nvrm/535.54.03/common/uproc/os/common/include/libos_init_args.h>
#include <nvrm/535.54.03/nvidia/arch/nvalloc/common/inc/gsp/gsp_fw_sr_meta.h>
#include <nvrm/535.54.03/nvidia/inc/kernel/gpu/gsp/gsp_fw_heap.h>
#include <nvrm/535.54.03/nvidia/inc/kernel/gpu/gsp/gsp_init_args.h>
#include <nvrm/535.54.03/nvidia/inc/kernel/gpu/gsp/gsp_static_config.h>
+#include <nvrm/535.54.03/nvidia/inc/kernel/gpu/intr/engine_idx.h>
#include <nvrm/535.54.03/nvidia/kernel/inc/vgpu/rpc_global_enums.h>
#include <linux/acpi.h>
.device_dtor = r535_gsp_device_dtor,
};
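+/* Drain the GSP-RM message queue from process context, under the same
+ * mutex that serialises command submission.
+ */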
+static void
+r535_gsp_msgq_work(struct work_struct *work)
+{
+ struct nvkm_gsp *gsp = container_of(work, typeof(*gsp), msgq.work);
+
+ mutex_lock(&gsp->cmdq.mutex);
+ if (*gsp->msgq.rptr != *gsp->msgq.wptr)
+ r535_gsp_msg_recv(gsp, 0, 0);
+ mutex_unlock(&gsp->cmdq.mutex);
+}
+
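+/* Top-level GSP interrupt handler.  Mask the status against the enabled
+ * sources, hand the message-queue bit (0x00000040) off to the workqueue,
+ * and log/ack anything unexpected before asking the falcon to retrigger.
+ */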
+static irqreturn_t
+r535_gsp_intr(struct nvkm_inth *inth)
+{
+ struct nvkm_gsp *gsp = container_of(inth, typeof(*gsp), subdev.inth);
+ struct nvkm_subdev *subdev = &gsp->subdev;
+ u32 intr = nvkm_falcon_rd32(&gsp->falcon, 0x0008);
+ u32 inte = nvkm_falcon_rd32(&gsp->falcon, gsp->falcon.func->addr2 +
+ gsp->falcon.func->riscv_irqmask);
+ u32 stat = intr & inte;
+
+ if (!stat) {
+ nvkm_debug(subdev, "inte %08x %08x\n", intr, inte);
+ return IRQ_NONE;
+ }
+
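+ /* Message-queue interrupt: ack it here, drain the queue from process context. */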
+ if (stat & 0x00000040) {
+ nvkm_falcon_wr32(&gsp->falcon, 0x004, 0x00000040);
+ schedule_work(&gsp->msgq.work);
+ stat &= ~0x00000040;
+ }
+
+ if (stat) {
+ nvkm_error(subdev, "intr %08x\n", stat);
+ nvkm_falcon_wr32(&gsp->falcon, 0x014, stat);
+ nvkm_falcon_wr32(&gsp->falcon, 0x004, stat);
+ }
+
+ nvkm_falcon_intr_retrigger(&gsp->falcon);
+ return IRQ_HANDLED;
+}
+
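+/* Ask GSP-RM which interrupt vectors the kernel is expected to service,
+ * and record those nouveau can route (currently only the GSP engine itself).
+ */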
+static int
+r535_gsp_intr_get_table(struct nvkm_gsp *gsp)
+{
+ NV2080_CTRL_INTERNAL_INTR_GET_KERNEL_TABLE_PARAMS *ctrl;
+ int ret = 0;
+
+ ctrl = nvkm_gsp_rm_ctrl_get(&gsp->internal.device.subdevice,
+ NV2080_CTRL_CMD_INTERNAL_INTR_GET_KERNEL_TABLE, sizeof(*ctrl));
+ if (IS_ERR(ctrl))
+ return PTR_ERR(ctrl);
+
+ ctrl = nvkm_gsp_rm_ctrl_push(&gsp->internal.device.subdevice, ctrl, sizeof(*ctrl));
+ if (WARN_ON(IS_ERR(ctrl)))
+ return PTR_ERR(ctrl);
+
+ for (unsigned i = 0; i < ctrl->tableLen; i++) {
+ enum nvkm_subdev_type type;
+ int inst;
+
+ nvkm_debug(&gsp->subdev,
+ "%2d: engineIdx %3d pmcIntrMask %08x stall %08x nonStall %08x\n", i,
+ ctrl->table[i].engineIdx, ctrl->table[i].pmcIntrMask,
+ ctrl->table[i].vectorStall, ctrl->table[i].vectorNonStall);
+
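+ /* Translate RM engine indices into nvkm subdev identities; engines nouveau doesn't handle are skipped. */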
+ switch (ctrl->table[i].engineIdx) {
+ case MC_ENGINE_IDX_GSP:
+ type = NVKM_SUBDEV_GSP;
+ inst = 0;
+ break;
+ default:
+ continue;
+ }
+
+ if (WARN_ON(gsp->intr_nr == ARRAY_SIZE(gsp->intr))) {
+ ret = -ENOSPC;
+ break;
+ }
+
+ gsp->intr[gsp->intr_nr].type = type;
+ gsp->intr[gsp->intr_nr].inst = inst;
+ gsp->intr[gsp->intr_nr].stall = ctrl->table[i].vectorStall;
+ gsp->intr[gsp->intr_nr].nonstall = ctrl->table[i].vectorNonStall;
+ gsp->intr_nr++;
+ }
+
+ nvkm_gsp_rm_ctrl_done(&gsp->internal.device.subdevice, ctrl);
+ return ret;
+}
+
static int
r535_gsp_rpc_get_gsp_static_info(struct nvkm_gsp *gsp)
{
static int
r535_gsp_postinit(struct nvkm_gsp *gsp)
{
+ struct nvkm_device *device = gsp->subdev.device;
int ret;
ret = r535_gsp_rpc_get_gsp_static_info(gsp);
if (WARN_ON(ret))
return ret;
+ INIT_WORK(&gsp->msgq.work, r535_gsp_msgq_work);
+
+ ret = r535_gsp_intr_get_table(gsp);
+ if (WARN_ON(ret))
+ return ret;
+
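+ /* Look up the stall vector RM assigned to the GSP itself and hook it into the VFN interrupt tree. */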
+ ret = nvkm_gsp_intr_stall(gsp, gsp->subdev.type, gsp->subdev.inst);
+ if (WARN_ON(ret < 0))
+ return ret;
+
+ ret = nvkm_inth_add(&device->vfn->intr, ret, NVKM_INTR_PRIO_NORMAL, &gsp->subdev,
+ r535_gsp_intr, &gsp->subdev.inth);
+ if (WARN_ON(ret))
+ return ret;
+
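+ /* Unblock the vector, then clear any msgq interrupt left pending from
+ * before the handler was installed; 0x110000 is the GSP falcon's base,
+ * so 0x110004 appears to mirror the +0x004 ack in r535_gsp_intr() (an
+ * assumption; the raw write is undocumented here).
+ */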
+ nvkm_inth_allow(&gsp->subdev.inth);
+ nvkm_wr32(device, 0x110004, 0x00000040);
return ret;
}
.disable = gm200_flcn_disable,
.enable = gm200_flcn_enable,
.addr2 = 0x1000,
+ .riscv_irqmask = 0x2b4,
.reset_eng = gp102_flcn_reset_eng,
.reset_wait_mem_scrubbing = gm200_flcn_reset_wait_mem_scrubbing,
.bind_inst = gm200_flcn_bind_inst,