	return (nak_r + nak_g);
}
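+/* Byte offset of the dedicated register remap hole within the MMIO register BAR */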
+#define MMIO_REG_HOLE_OFFSET 0x1A000
+
+static void nbio_v7_9_set_reg_remap(struct amdgpu_device *adev)
+{
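+	/*
+	 * When not running as an SR-IOV VF and the system page size is 4K
+	 * or smaller, route HDP flush writes through the dedicated remap
+	 * hole in the register BAR.  Otherwise fall back to the VF0
+	 * HDP_MEM_COHERENCY_FLUSH_CNTL register: the dword offset from
+	 * SOC15_REG_OFFSET() is shifted left by 2 to get a byte offset,
+	 * and no bus address is exposed for remapping.
+	 */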
+	if (!amdgpu_sriov_vf(adev) && (PAGE_SIZE <= 4096)) {
+		adev->rmmio_remap.reg_offset = MMIO_REG_HOLE_OFFSET;
+		adev->rmmio_remap.bus_addr = adev->rmmio_base + MMIO_REG_HOLE_OFFSET;
+	} else {
+		adev->rmmio_remap.reg_offset =
+			SOC15_REG_OFFSET(
+				NBIO, 0,
+				regBIF_BX_DEV0_EPF0_VF0_HDP_MEM_COHERENCY_FLUSH_CNTL)
+			<< 2;
+		adev->rmmio_remap.bus_addr = 0;
+	}
+}
+
const struct amdgpu_nbio_funcs nbio_v7_9_funcs = {
	.get_hdp_flush_req_offset = nbio_v7_9_get_hdp_flush_req_offset,
	.get_hdp_flush_done_offset = nbio_v7_9_get_hdp_flush_done_offset,
	.get_memory_partition_mode = nbio_v7_9_get_memory_partition_mode,
	.init_registers = nbio_v7_9_init_registers,
	.get_pcie_replay_count = nbio_v7_9_get_pcie_replay_count,
+	.set_reg_remap = nbio_v7_9_set_reg_remap,
};
static void nbio_v7_9_query_ras_error_count(struct amdgpu_device *adev,