drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
1 /*
2  * Copyright 2016 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  */
23
24 #include <linux/firmware.h>
25 #include <linux/pci.h>
26
27 #include <drm/drm_cache.h>
28
29 #include "amdgpu.h"
30 #include "gmc_v9_0.h"
31 #include "amdgpu_atomfirmware.h"
32 #include "amdgpu_gem.h"
33
34 #include "hdp/hdp_4_0_offset.h"
35 #include "hdp/hdp_4_0_sh_mask.h"
36 #include "gc/gc_9_0_sh_mask.h"
37 #include "dce/dce_12_0_offset.h"
38 #include "dce/dce_12_0_sh_mask.h"
39 #include "vega10_enum.h"
40 #include "mmhub/mmhub_1_0_offset.h"
41 #include "athub/athub_1_0_offset.h"
42 #include "oss/osssys_4_0_offset.h"
43
44 #include "soc15.h"
45 #include "soc15_common.h"
46 #include "umc/umc_6_0_sh_mask.h"
47
48 #include "gfxhub_v1_0.h"
49 #include "mmhub_v1_0.h"
50 #include "athub_v1_0.h"
51 #include "gfxhub_v1_1.h"
52 #include "mmhub_v9_4.h"
53 #include "umc_v6_1.h"
54
55 #include "ivsrcid/vmc/irqsrcs_vmc_1_0.h"
56
57 #include "amdgpu_ras.h"
58
59 /* add these here since we already include dce12 headers and these are for DCN */
60 #define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION                                                          0x055d
61 #define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_BASE_IDX                                                 2
62 #define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_WIDTH__SHIFT                                        0x0
63 #define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_HEIGHT__SHIFT                                       0x10
64 #define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_WIDTH_MASK                                          0x00003FFFL
65 #define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_HEIGHT_MASK                                         0x3FFF0000L
66
67 /* XXX Move this macro to VEGA10 header file, which is like vid.h for VI.*/
68 #define AMDGPU_NUM_OF_VMIDS                     8
69
70 static const u32 golden_settings_vega10_hdp[] =
71 {
72         0xf64, 0x0fffffff, 0x00000000,
73         0xf65, 0x0fffffff, 0x00000000,
74         0xf66, 0x0fffffff, 0x00000000,
75         0xf67, 0x0fffffff, 0x00000000,
76         0xf68, 0x0fffffff, 0x00000000,
77         0xf6a, 0x0fffffff, 0x00000000,
78         0xf6b, 0x0fffffff, 0x00000000,
79         0xf6c, 0x0fffffff, 0x00000000,
80         0xf6d, 0x0fffffff, 0x00000000,
81         0xf6e, 0x0fffffff, 0x00000000,
82 };
83
84 static const struct soc15_reg_golden golden_settings_mmhub_1_0_0[] =
85 {
86         SOC15_REG_GOLDEN_VALUE(MMHUB, 0, mmDAGB1_WRCLI2, 0x00000007, 0xfe5fe0fa),
87         SOC15_REG_GOLDEN_VALUE(MMHUB, 0, mmMMEA1_DRAM_WR_CLI2GRP_MAP0, 0x00000030, 0x55555565)
88 };
89
90 static const struct soc15_reg_golden golden_settings_athub_1_0_0[] =
91 {
92         SOC15_REG_GOLDEN_VALUE(ATHUB, 0, mmRPB_ARB_CNTL, 0x0000ff00, 0x00000800),
93         SOC15_REG_GOLDEN_VALUE(ATHUB, 0, mmRPB_ARB_CNTL2, 0x00ff00ff, 0x00080008)
94 };
95
96 static const uint32_t ecc_umc_mcumc_ctrl_addrs[] = {
97         (0x000143c0 + 0x00000000),
98         (0x000143c0 + 0x00000800),
99         (0x000143c0 + 0x00001000),
100         (0x000143c0 + 0x00001800),
101         (0x000543c0 + 0x00000000),
102         (0x000543c0 + 0x00000800),
103         (0x000543c0 + 0x00001000),
104         (0x000543c0 + 0x00001800),
105         (0x000943c0 + 0x00000000),
106         (0x000943c0 + 0x00000800),
107         (0x000943c0 + 0x00001000),
108         (0x000943c0 + 0x00001800),
109         (0x000d43c0 + 0x00000000),
110         (0x000d43c0 + 0x00000800),
111         (0x000d43c0 + 0x00001000),
112         (0x000d43c0 + 0x00001800),
113         (0x001143c0 + 0x00000000),
114         (0x001143c0 + 0x00000800),
115         (0x001143c0 + 0x00001000),
116         (0x001143c0 + 0x00001800),
117         (0x001543c0 + 0x00000000),
118         (0x001543c0 + 0x00000800),
119         (0x001543c0 + 0x00001000),
120         (0x001543c0 + 0x00001800),
121         (0x001943c0 + 0x00000000),
122         (0x001943c0 + 0x00000800),
123         (0x001943c0 + 0x00001000),
124         (0x001943c0 + 0x00001800),
125         (0x001d43c0 + 0x00000000),
126         (0x001d43c0 + 0x00000800),
127         (0x001d43c0 + 0x00001000),
128         (0x001d43c0 + 0x00001800),
129 };
130
131 static const uint32_t ecc_umc_mcumc_ctrl_mask_addrs[] = {
132         (0x000143e0 + 0x00000000),
133         (0x000143e0 + 0x00000800),
134         (0x000143e0 + 0x00001000),
135         (0x000143e0 + 0x00001800),
136         (0x000543e0 + 0x00000000),
137         (0x000543e0 + 0x00000800),
138         (0x000543e0 + 0x00001000),
139         (0x000543e0 + 0x00001800),
140         (0x000943e0 + 0x00000000),
141         (0x000943e0 + 0x00000800),
142         (0x000943e0 + 0x00001000),
143         (0x000943e0 + 0x00001800),
144         (0x000d43e0 + 0x00000000),
145         (0x000d43e0 + 0x00000800),
146         (0x000d43e0 + 0x00001000),
147         (0x000d43e0 + 0x00001800),
148         (0x001143e0 + 0x00000000),
149         (0x001143e0 + 0x00000800),
150         (0x001143e0 + 0x00001000),
151         (0x001143e0 + 0x00001800),
152         (0x001543e0 + 0x00000000),
153         (0x001543e0 + 0x00000800),
154         (0x001543e0 + 0x00001000),
155         (0x001543e0 + 0x00001800),
156         (0x001943e0 + 0x00000000),
157         (0x001943e0 + 0x00000800),
158         (0x001943e0 + 0x00001000),
159         (0x001943e0 + 0x00001800),
160         (0x001d43e0 + 0x00000000),
161         (0x001d43e0 + 0x00000800),
162         (0x001d43e0 + 0x00001000),
163         (0x001d43e0 + 0x00001800),
164 };
165
166 static const uint32_t ecc_umc_mcumc_status_addrs[] = {
167         (0x000143c2 + 0x00000000),
168         (0x000143c2 + 0x00000800),
169         (0x000143c2 + 0x00001000),
170         (0x000143c2 + 0x00001800),
171         (0x000543c2 + 0x00000000),
172         (0x000543c2 + 0x00000800),
173         (0x000543c2 + 0x00001000),
174         (0x000543c2 + 0x00001800),
175         (0x000943c2 + 0x00000000),
176         (0x000943c2 + 0x00000800),
177         (0x000943c2 + 0x00001000),
178         (0x000943c2 + 0x00001800),
179         (0x000d43c2 + 0x00000000),
180         (0x000d43c2 + 0x00000800),
181         (0x000d43c2 + 0x00001000),
182         (0x000d43c2 + 0x00001800),
183         (0x001143c2 + 0x00000000),
184         (0x001143c2 + 0x00000800),
185         (0x001143c2 + 0x00001000),
186         (0x001143c2 + 0x00001800),
187         (0x001543c2 + 0x00000000),
188         (0x001543c2 + 0x00000800),
189         (0x001543c2 + 0x00001000),
190         (0x001543c2 + 0x00001800),
191         (0x001943c2 + 0x00000000),
192         (0x001943c2 + 0x00000800),
193         (0x001943c2 + 0x00001000),
194         (0x001943c2 + 0x00001800),
195         (0x001d43c2 + 0x00000000),
196         (0x001d43c2 + 0x00000800),
197         (0x001d43c2 + 0x00001000),
198         (0x001d43c2 + 0x00001800),
199 };
200
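/**
 * gmc_v9_0_ecc_interrupt_state - enable/disable UMC ECC interrupt reporting
 *
 * @adev: amdgpu_device pointer
 * @src: interrupt source
 * @type: interrupt type (unused)
 * @state: interrupt state to program
 *
 * Sets or clears the low seven bits in every UMC MCUMC control and
 * control-mask register listed above, depending on @state.
 */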
201 static int gmc_v9_0_ecc_interrupt_state(struct amdgpu_device *adev,
202                 struct amdgpu_irq_src *src,
203                 unsigned type,
204                 enum amdgpu_interrupt_state state)
205 {
206         u32 bits, i, tmp, reg;
207
208         bits = 0x7f;
209
210         switch (state) {
211         case AMDGPU_IRQ_STATE_DISABLE:
212                 for (i = 0; i < ARRAY_SIZE(ecc_umc_mcumc_ctrl_addrs); i++) {
213                         reg = ecc_umc_mcumc_ctrl_addrs[i];
214                         tmp = RREG32(reg);
215                         tmp &= ~bits;
216                         WREG32(reg, tmp);
217                 }
218                 for (i = 0; i < ARRAY_SIZE(ecc_umc_mcumc_ctrl_mask_addrs); i++) {
219                         reg = ecc_umc_mcumc_ctrl_mask_addrs[i];
220                         tmp = RREG32(reg);
221                         tmp &= ~bits;
222                         WREG32(reg, tmp);
223                 }
224                 break;
225         case AMDGPU_IRQ_STATE_ENABLE:
226                 for (i = 0; i < ARRAY_SIZE(ecc_umc_mcumc_ctrl_addrs); i++) {
227                         reg = ecc_umc_mcumc_ctrl_addrs[i];
228                         tmp = RREG32(reg);
229                         tmp |= bits;
230                         WREG32(reg, tmp);
231                 }
232                 for (i = 0; i < ARRAY_SIZE(ecc_umc_mcumc_ctrl_mask_addrs); i++) {
233                         reg = ecc_umc_mcumc_ctrl_mask_addrs[i];
234                         tmp = RREG32(reg);
235                         tmp |= bits;
236                         WREG32(reg, tmp);
237                 }
238                 break;
239         default:
240                 break;
241         }
242
243         return 0;
244 }
245
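/**
 * gmc_v9_0_process_ras_data_cb - RAS error data callback for the UMC
 *
 * @adev: amdgpu_device pointer
 * @err_data: error counts and addresses filled in by the UMC callbacks
 * @entry: interrupt vector entry that triggered the callback
 *
 * If the GFX block is not handling RAS itself, flags the SRAM ECC event
 * for KFD, queries the UMC error counts and addresses (which also clears
 * the error status) and requests a GPU reset when an uncorrectable error
 * is found.
 */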
246 static int gmc_v9_0_process_ras_data_cb(struct amdgpu_device *adev,
247                 struct ras_err_data *err_data,
248                 struct amdgpu_iv_entry *entry)
249 {
250         if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX)) {
251                 kgd2kfd_set_sram_ecc_flag(adev->kfd.dev);
252                 if (adev->umc.funcs->query_ras_error_count)
253                         adev->umc.funcs->query_ras_error_count(adev, err_data);
254                 /* umc query_ras_error_address is also responsible for clearing
255                  * error status
256                  */
257                 if (adev->umc.funcs->query_ras_error_address)
258                         adev->umc.funcs->query_ras_error_address(adev, err_data);
259
260                 /* only uncorrectable error needs gpu reset */
261                 if (err_data->ue_count)
262                         amdgpu_ras_reset_gpu(adev, 0);
263         }
264
265         return AMDGPU_RAS_SUCCESS;
266 }
267
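/**
 * gmc_v9_0_process_ecc_irq - dispatch an ECC interrupt to the RAS handler
 *
 * @adev: amdgpu_device pointer
 * @source: interrupt source
 * @entry: interrupt vector entry
 *
 * Forwards the interrupt to amdgpu_ras_interrupt_dispatch() using the
 * UMC RAS interface, if one has been registered.
 */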
268 static int gmc_v9_0_process_ecc_irq(struct amdgpu_device *adev,
269                 struct amdgpu_irq_src *source,
270                 struct amdgpu_iv_entry *entry)
271 {
272         struct ras_common_if *ras_if = adev->gmc.umc_ras_if;
273         struct ras_dispatch_if ih_data = {
274                 .entry = entry,
275         };
276
277         if (!ras_if)
278                 return 0;
279
280         ih_data.head = *ras_if;
281
282         amdgpu_ras_interrupt_dispatch(adev, &ih_data);
283         return 0;
284 }
285
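/**
 * gmc_v9_0_vm_fault_interrupt_state - enable/disable VM fault interrupts
 *
 * @adev: amdgpu_device pointer
 * @src: interrupt source
 * @type: interrupt type (unused)
 * @state: interrupt state to program
 *
 * Sets or clears the protection fault interrupt enable bits in the
 * VM_CONTEXT*_CNTL registers of every VM hub.
 */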
286 static int gmc_v9_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
287                                         struct amdgpu_irq_src *src,
288                                         unsigned type,
289                                         enum amdgpu_interrupt_state state)
290 {
291         struct amdgpu_vmhub *hub;
292         u32 tmp, reg, bits, i, j;
293
294         bits = VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
295                 VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
296                 VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
297                 VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
298                 VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
299                 VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
300                 VM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK;
301
302         switch (state) {
303         case AMDGPU_IRQ_STATE_DISABLE:
304                 for (j = 0; j < adev->num_vmhubs; j++) {
305                         hub = &adev->vmhub[j];
306                         for (i = 0; i < 16; i++) {
307                                 reg = hub->vm_context0_cntl + i;
308                                 tmp = RREG32(reg);
309                                 tmp &= ~bits;
310                                 WREG32(reg, tmp);
311                         }
312                 }
313                 break;
314         case AMDGPU_IRQ_STATE_ENABLE:
315                 for (j = 0; j < adev->num_vmhubs; j++) {
316                         hub = &adev->vmhub[j];
317                         for (i = 0; i < 16; i++) {
318                                 reg = hub->vm_context0_cntl + i;
319                                 tmp = RREG32(reg);
320                                 tmp |= bits;
321                                 WREG32(reg, tmp);
322                         }
                }
                break;
        default:
325                 break;
326         }
327
328         return 0;
329 }
330
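/**
 * gmc_v9_0_process_interrupt - handle a VM protection fault interrupt
 *
 * @adev: amdgpu_device pointer
 * @source: interrupt source
 * @entry: interrupt vector entry
 *
 * Decodes the faulting address and hub, filters retry faults that were
 * already seen, reads and clears the L2 protection fault status and
 * prints a rate-limited description of the fault.
 */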
331 static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev,
332                                 struct amdgpu_irq_src *source,
333                                 struct amdgpu_iv_entry *entry)
334 {
335         struct amdgpu_vmhub *hub;
336         bool retry_fault = !!(entry->src_data[1] & 0x80);
337         uint32_t status = 0;
338         u64 addr;
339         char hub_name[10];
340
341         addr = (u64)entry->src_data[0] << 12;
342         addr |= ((u64)entry->src_data[1] & 0xf) << 44;
343
344         if (retry_fault && amdgpu_gmc_filter_faults(adev, addr, entry->pasid,
345                                                     entry->timestamp))
346                 return 1; /* This also prevents sending it to KFD */
347
348         if (entry->client_id == SOC15_IH_CLIENTID_VMC) {
349                 snprintf(hub_name, sizeof(hub_name), "mmhub0");
350                 hub = &adev->vmhub[AMDGPU_MMHUB_0];
351         } else if (entry->client_id == SOC15_IH_CLIENTID_VMC1) {
352                 snprintf(hub_name, sizeof(hub_name), "mmhub1");
353                 hub = &adev->vmhub[AMDGPU_MMHUB_1];
354         } else {
355                 snprintf(hub_name, sizeof(hub_name), "gfxhub0");
356                 hub = &adev->vmhub[AMDGPU_GFXHUB_0];
357         }
358
359         /* If it's the first fault for this address, process it normally */
360         if (!amdgpu_sriov_vf(adev)) {
361                 /*
362                  * Issue a dummy read to wait for the status register to
363                  * be updated to avoid reading an incorrect value due to
364                  * the new fast GRBM interface.
365                  */
366                 if (entry->vmid_src == AMDGPU_GFXHUB_0)
367                         RREG32(hub->vm_l2_pro_fault_status);
368
369                 status = RREG32(hub->vm_l2_pro_fault_status);
370                 WREG32_P(hub->vm_l2_pro_fault_cntl, 1, ~1);
371         }
372
373         if (printk_ratelimit()) {
374                 struct amdgpu_task_info task_info;
375
376                 memset(&task_info, 0, sizeof(struct amdgpu_task_info));
377                 amdgpu_vm_get_task_info(adev, entry->pasid, &task_info);
378
379                 dev_err(adev->dev,
380                         "[%s] %s page fault (src_id:%u ring:%u vmid:%u "
381                         "pasid:%u, for process %s pid %d thread %s pid %d)\n",
382                         hub_name, retry_fault ? "retry" : "no-retry",
383                         entry->src_id, entry->ring_id, entry->vmid,
384                         entry->pasid, task_info.process_name, task_info.tgid,
385                         task_info.task_name, task_info.pid);
386                 dev_err(adev->dev, "  in page starting at address 0x%016llx from client %d\n",
387                         addr, entry->client_id);
388                 if (!amdgpu_sriov_vf(adev)) {
389                         dev_err(adev->dev,
390                                 "VM_L2_PROTECTION_FAULT_STATUS:0x%08X\n",
391                                 status);
392                         dev_err(adev->dev, "\t MORE_FAULTS: 0x%lx\n",
393                                 REG_GET_FIELD(status,
394                                 VM_L2_PROTECTION_FAULT_STATUS, MORE_FAULTS));
395                         dev_err(adev->dev, "\t WALKER_ERROR: 0x%lx\n",
396                                 REG_GET_FIELD(status,
397                                 VM_L2_PROTECTION_FAULT_STATUS, WALKER_ERROR));
398                         dev_err(adev->dev, "\t PERMISSION_FAULTS: 0x%lx\n",
399                                 REG_GET_FIELD(status,
400                                 VM_L2_PROTECTION_FAULT_STATUS, PERMISSION_FAULTS));
401                         dev_err(adev->dev, "\t MAPPING_ERROR: 0x%lx\n",
402                                 REG_GET_FIELD(status,
403                                 VM_L2_PROTECTION_FAULT_STATUS, MAPPING_ERROR));
404                         dev_err(adev->dev, "\t RW: 0x%lx\n",
405                                 REG_GET_FIELD(status,
406                                 VM_L2_PROTECTION_FAULT_STATUS, RW));
407
408                 }
409         }
410
411         return 0;
412 }
413
414 static const struct amdgpu_irq_src_funcs gmc_v9_0_irq_funcs = {
415         .set = gmc_v9_0_vm_fault_interrupt_state,
416         .process = gmc_v9_0_process_interrupt,
417 };
418
419
420 static const struct amdgpu_irq_src_funcs gmc_v9_0_ecc_funcs = {
421         .set = gmc_v9_0_ecc_interrupt_state,
422         .process = gmc_v9_0_process_ecc_irq,
423 };
424
425 static void gmc_v9_0_set_irq_funcs(struct amdgpu_device *adev)
426 {
427         adev->gmc.vm_fault.num_types = 1;
428         adev->gmc.vm_fault.funcs = &gmc_v9_0_irq_funcs;
429
430         adev->gmc.ecc_irq.num_types = 1;
431         adev->gmc.ecc_irq.funcs = &gmc_v9_0_ecc_funcs;
432 }
433
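/**
 * gmc_v9_0_get_invalidate_req - build a VM_INVALIDATE_ENG0_REQ value
 *
 * @vmid: the VMID to invalidate
 * @flush_type: the flush type to use
 *
 * Builds the VM_INVALIDATE_ENG0_REQ register value that invalidates the
 * L1 PTEs and the L2 PTEs and PDEs of @vmid using @flush_type.
 */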
434 static uint32_t gmc_v9_0_get_invalidate_req(unsigned int vmid,
435                                         uint32_t flush_type)
436 {
437         u32 req = 0;
438
439         req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ,
440                             PER_VMID_INVALIDATE_REQ, 1 << vmid);
441         req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, FLUSH_TYPE, flush_type);
442         req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PTES, 1);
443         req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE0, 1);
444         req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE1, 1);
445         req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE2, 1);
446         req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L1_PTES, 1);
447         req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ,
448                             CLEAR_PROTECTION_FAULT_STATUS_ADDR, 0);
449
450         return req;
451 }
452
453 /*
454  * GART
455  * VMID 0 is the physical GPU addresses as used by the kernel.
456  * VMIDs 1-15 are used for userspace clients and are handled
457  * by the amdgpu vm/hsa code.
458  */
459
/**
 * gmc_v9_0_flush_gpu_tlb - tlb flush with certain type
 *
 * @adev: amdgpu_device pointer
 * @vmid: vm instance to flush
 * @vmhub: which hub to flush
 * @flush_type: the flush type
 *
 * Flush the TLB for the requested page table using a certain type.
 */
469 static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
470                                         uint32_t vmhub, uint32_t flush_type)
471 {
472         const unsigned eng = 17;
473         u32 j, tmp;
474         struct amdgpu_vmhub *hub;
475
476         BUG_ON(vmhub >= adev->num_vmhubs);
477
478         hub = &adev->vmhub[vmhub];
479         tmp = gmc_v9_0_get_invalidate_req(vmid, flush_type);
480
481         /* This is necessary for a HW workaround under SRIOV as well
482          * as GFXOFF under bare metal
483          */
484         if (adev->gfx.kiq.ring.sched.ready &&
485                         (amdgpu_sriov_runtime(adev) || !amdgpu_sriov_vf(adev)) &&
486                         !adev->in_gpu_reset) {
487                 uint32_t req = hub->vm_inv_eng0_req + eng;
488                 uint32_t ack = hub->vm_inv_eng0_ack + eng;
489
490                 amdgpu_virt_kiq_reg_write_reg_wait(adev, req, ack, tmp,
491                                 1 << vmid);
492                 return;
493         }
494
495         spin_lock(&adev->gmc.invalidate_lock);
496         WREG32_NO_KIQ(hub->vm_inv_eng0_req + eng, tmp);
497
498         /*
499          * Issue a dummy read to wait for the ACK register to be cleared
500          * to avoid a false ACK due to the new fast GRBM interface.
501          */
502         if (vmhub == AMDGPU_GFXHUB_0)
503                 RREG32_NO_KIQ(hub->vm_inv_eng0_req + eng);
504
505         for (j = 0; j < adev->usec_timeout; j++) {
506                 tmp = RREG32_NO_KIQ(hub->vm_inv_eng0_ack + eng);
507                 if (tmp & (1 << vmid))
508                         break;
509                 udelay(1);
510         }
511         spin_unlock(&adev->gmc.invalidate_lock);
512         if (j < adev->usec_timeout)
513                 return;
514
515         DRM_ERROR("Timeout waiting for VM flush ACK!\n");
516 }
517
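/**
 * gmc_v9_0_emit_flush_gpu_tlb - emit a TLB flush on a ring
 *
 * @ring: ring to emit the flush on
 * @vmid: vm instance to flush
 * @pd_addr: new page directory base address
 *
 * Writes the page table base registers for @vmid and emits a register
 * write-and-wait on the ring's invalidation engine.  Returns @pd_addr.
 */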
518 static uint64_t gmc_v9_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
519                                             unsigned vmid, uint64_t pd_addr)
520 {
521         struct amdgpu_device *adev = ring->adev;
522         struct amdgpu_vmhub *hub = &adev->vmhub[ring->funcs->vmhub];
523         uint32_t req = gmc_v9_0_get_invalidate_req(vmid, 0);
524         unsigned eng = ring->vm_inv_eng;
525
526         amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_lo32 + (2 * vmid),
527                               lower_32_bits(pd_addr));
528
529         amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_hi32 + (2 * vmid),
530                               upper_32_bits(pd_addr));
531
532         amdgpu_ring_emit_reg_write_reg_wait(ring, hub->vm_inv_eng0_req + eng,
533                                             hub->vm_inv_eng0_ack + eng,
534                                             req, 1 << vmid);
535
536         return pd_addr;
537 }
538
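/**
 * gmc_v9_0_emit_pasid_mapping - emit the VMID to PASID mapping
 *
 * @ring: ring to emit the write on
 * @vmid: the VMID being mapped
 * @pasid: the PASID to map to @vmid
 *
 * Updates the IH VMID-to-PASID look-up table for the ring's hub.
 * There is no LUT for MMHUB1, so nothing is emitted in that case.
 */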
539 static void gmc_v9_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned vmid,
540                                         unsigned pasid)
541 {
542         struct amdgpu_device *adev = ring->adev;
543         uint32_t reg;
544
545         /* Do nothing because there's no lut register for mmhub1. */
546         if (ring->funcs->vmhub == AMDGPU_MMHUB_1)
547                 return;
548
549         if (ring->funcs->vmhub == AMDGPU_GFXHUB_0)
550                 reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT) + vmid;
551         else
552                 reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT_MM) + vmid;
553
554         amdgpu_ring_emit_wreg(ring, reg, pasid);
555 }
556
557 /*
558  * PTE format on VEGA 10:
559  * 63:59 reserved
560  * 58:57 mtype
561  * 56 F
562  * 55 L
563  * 54 P
564  * 53 SW
565  * 52 T
566  * 50:48 reserved
567  * 47:12 4k physical page base address
568  * 11:7 fragment
569  * 6 write
570  * 5 read
571  * 4 exe
572  * 3 Z
573  * 2 snooped
574  * 1 system
575  * 0 valid
576  *
577  * PDE format on VEGA 10:
578  * 63:59 block fragment size
579  * 58:55 reserved
580  * 54 P
581  * 53:48 reserved
582  * 47:6 physical base address of PD or PTE
583  * 5:3 reserved
584  * 2 C
585  * 1 system
586  * 0 valid
587  */
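/*
 * Illustrative example (not part of the original code): with the layout
 * above, a valid, readable, writeable and executable 4K VRAM page at the
 * hypothetical physical address 0x1234000 with MTYPE_NC would be encoded
 * roughly as
 *
 *   0x1234000 |                (bits 47:12, page base address)
 *   (1 << 6) | (1 << 5) |      (write, read)
 *   (1 << 4) | (1 << 0) |      (exe, valid)
 *   AMDGPU_PTE_MTYPE_VG10(MTYPE_NC)
 *
 * The flag portion of such an entry is what gmc_v9_0_get_vm_pte_flags()
 * below assembles from the AMDGPU_VM_PAGE_* flags.
 */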
588
589 static uint64_t gmc_v9_0_get_vm_pte_flags(struct amdgpu_device *adev,
590                                                 uint32_t flags)
591
592 {
593         uint64_t pte_flag = 0;
594
595         if (flags & AMDGPU_VM_PAGE_EXECUTABLE)
596                 pte_flag |= AMDGPU_PTE_EXECUTABLE;
597         if (flags & AMDGPU_VM_PAGE_READABLE)
598                 pte_flag |= AMDGPU_PTE_READABLE;
599         if (flags & AMDGPU_VM_PAGE_WRITEABLE)
600                 pte_flag |= AMDGPU_PTE_WRITEABLE;
601
602         switch (flags & AMDGPU_VM_MTYPE_MASK) {
603         case AMDGPU_VM_MTYPE_DEFAULT:
604                 pte_flag |= AMDGPU_PTE_MTYPE_VG10(MTYPE_NC);
605                 break;
606         case AMDGPU_VM_MTYPE_NC:
607                 pte_flag |= AMDGPU_PTE_MTYPE_VG10(MTYPE_NC);
608                 break;
609         case AMDGPU_VM_MTYPE_WC:
610                 pte_flag |= AMDGPU_PTE_MTYPE_VG10(MTYPE_WC);
611                 break;
612         case AMDGPU_VM_MTYPE_RW:
613                 pte_flag |= AMDGPU_PTE_MTYPE_VG10(MTYPE_RW);
614                 break;
615         case AMDGPU_VM_MTYPE_CC:
616                 pte_flag |= AMDGPU_PTE_MTYPE_VG10(MTYPE_CC);
617                 break;
618         case AMDGPU_VM_MTYPE_UC:
619                 pte_flag |= AMDGPU_PTE_MTYPE_VG10(MTYPE_UC);
620                 break;
621         default:
622                 pte_flag |= AMDGPU_PTE_MTYPE_VG10(MTYPE_NC);
623                 break;
624         }
625
626         if (flags & AMDGPU_VM_PAGE_PRT)
627                 pte_flag |= AMDGPU_PTE_PRT;
628
629         return pte_flag;
630 }
631
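/**
 * gmc_v9_0_get_vm_pde - adjust a PDE address and flags
 *
 * @adev: amdgpu_device pointer
 * @level: page directory level of the entry
 * @addr: address of the next level table or page, adjusted in place
 * @flags: PDE flags, adjusted in place
 *
 * Remaps VRAM addresses into the MC address space and, when further
 * translation is enabled, sets the block fragment size on PDB1 entries
 * and marks PDB0 entries for further translation (or strips the PTE bit
 * from huge-page PDB0 entries).
 */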
632 static void gmc_v9_0_get_vm_pde(struct amdgpu_device *adev, int level,
633                                 uint64_t *addr, uint64_t *flags)
634 {
635         if (!(*flags & AMDGPU_PDE_PTE) && !(*flags & AMDGPU_PTE_SYSTEM))
636                 *addr = adev->vm_manager.vram_base_offset + *addr -
637                         adev->gmc.vram_start;
638         BUG_ON(*addr & 0xFFFF00000000003FULL);
639
640         if (!adev->gmc.translate_further)
641                 return;
642
643         if (level == AMDGPU_VM_PDB1) {
644                 /* Set the block fragment size */
645                 if (!(*flags & AMDGPU_PDE_PTE))
646                         *flags |= AMDGPU_PDE_BFS(0x9);
647
648         } else if (level == AMDGPU_VM_PDB0) {
649                 if (*flags & AMDGPU_PDE_PTE)
650                         *flags &= ~AMDGPU_PDE_PTE;
651                 else
652                         *flags |= AMDGPU_PTE_TF;
653         }
654 }
655
656 static const struct amdgpu_gmc_funcs gmc_v9_0_gmc_funcs = {
657         .flush_gpu_tlb = gmc_v9_0_flush_gpu_tlb,
658         .emit_flush_gpu_tlb = gmc_v9_0_emit_flush_gpu_tlb,
659         .emit_pasid_mapping = gmc_v9_0_emit_pasid_mapping,
660         .get_vm_pte_flags = gmc_v9_0_get_vm_pte_flags,
661         .get_vm_pde = gmc_v9_0_get_vm_pde
662 };
663
664 static void gmc_v9_0_set_gmc_funcs(struct amdgpu_device *adev)
665 {
666         adev->gmc.gmc_funcs = &gmc_v9_0_gmc_funcs;
667 }
668
669 static void gmc_v9_0_set_umc_funcs(struct amdgpu_device *adev)
670 {
671         switch (adev->asic_type) {
672         case CHIP_VEGA20:
673                 adev->umc.max_ras_err_cnt_per_query = UMC_V6_1_TOTAL_CHANNEL_NUM;
674                 adev->umc.channel_inst_num = UMC_V6_1_CHANNEL_INSTANCE_NUM;
675                 adev->umc.umc_inst_num = UMC_V6_1_UMC_INSTANCE_NUM;
676                 adev->umc.channel_offs = UMC_V6_1_PER_CHANNEL_OFFSET;
677                 adev->umc.channel_idx_tbl = &umc_v6_1_channel_idx_tbl[0][0];
678                 adev->umc.funcs = &umc_v6_1_funcs;
679                 break;
680         default:
681                 break;
682         }
683 }
684
685 static void gmc_v9_0_set_mmhub_funcs(struct amdgpu_device *adev)
686 {
687         switch (adev->asic_type) {
688         case CHIP_VEGA20:
689                 adev->mmhub_funcs = &mmhub_v1_0_funcs;
690                 break;
691         default:
692                 break;
693         }
694 }
695
696 static int gmc_v9_0_early_init(void *handle)
697 {
698         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
699
700         gmc_v9_0_set_gmc_funcs(adev);
701         gmc_v9_0_set_irq_funcs(adev);
702         gmc_v9_0_set_umc_funcs(adev);
703         gmc_v9_0_set_mmhub_funcs(adev);
704
705         adev->gmc.shared_aperture_start = 0x2000000000000000ULL;
706         adev->gmc.shared_aperture_end =
707                 adev->gmc.shared_aperture_start + (4ULL << 30) - 1;
708         adev->gmc.private_aperture_start = 0x1000000000000000ULL;
709         adev->gmc.private_aperture_end =
710                 adev->gmc.private_aperture_start + (4ULL << 30) - 1;
711
712         return 0;
713 }
714
715 static bool gmc_v9_0_keep_stolen_memory(struct amdgpu_device *adev)
716 {
717
718         /*
719          * TODO:
720          * Currently there is a bug where some memory client outside
721          * of the driver writes to first 8M of VRAM on S3 resume,
         * this overwrites the GART, which by default gets placed in the first 8M,
         * and causes VM_FAULTS once GTT is accessed.
         * Keep the stolen memory reservation until this is resolved.
         * Also check the related code in gmc_v9_0_get_vbios_fb_size and gmc_v9_0_late_init.
726          */
727         switch (adev->asic_type) {
728         case CHIP_VEGA10:
729         case CHIP_RAVEN:
730         case CHIP_ARCTURUS:
731         case CHIP_RENOIR:
732                 return true;
733         case CHIP_VEGA12:
734         case CHIP_VEGA20:
735         default:
736                 return false;
737         }
738 }
739
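/**
 * gmc_v9_0_allocate_vm_inv_eng - assign a VM invalidation engine per ring
 *
 * @adev: amdgpu_device pointer
 *
 * Hands out one free invalidation engine from the per-hub bitmaps to
 * every ring.  Returns 0 on success or -EINVAL when a hub runs out of
 * free engines.
 */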
740 static int gmc_v9_0_allocate_vm_inv_eng(struct amdgpu_device *adev)
741 {
742         struct amdgpu_ring *ring;
743         unsigned vm_inv_engs[AMDGPU_MAX_VMHUBS] =
744                 {GFXHUB_FREE_VM_INV_ENGS_BITMAP, MMHUB_FREE_VM_INV_ENGS_BITMAP,
745                 GFXHUB_FREE_VM_INV_ENGS_BITMAP};
746         unsigned i;
747         unsigned vmhub, inv_eng;
748
749         for (i = 0; i < adev->num_rings; ++i) {
750                 ring = adev->rings[i];
751                 vmhub = ring->funcs->vmhub;
752
753                 inv_eng = ffs(vm_inv_engs[vmhub]);
754                 if (!inv_eng) {
755                         dev_err(adev->dev, "no VM inv eng for ring %s\n",
756                                 ring->name);
757                         return -EINVAL;
758                 }
759
760                 ring->vm_inv_eng = inv_eng - 1;
761                 vm_inv_engs[vmhub] &= ~(1 << ring->vm_inv_eng);
762
763                 dev_info(adev->dev, "ring %s uses VM inv eng %u on hub %u\n",
764                          ring->name, ring->vm_inv_eng, ring->funcs->vmhub);
765         }
766
767         return 0;
768 }
769
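/**
 * gmc_v9_0_ecc_late_init - late init for UMC and MMHUB RAS
 *
 * @handle: amdgpu_device pointer cast to void
 *
 * Allocates the UMC RAS interface, registers its sysfs/debugfs nodes and
 * interrupt handler, enables the ECC interrupt when RAS is supported and
 * finally runs the MMHUB RAS late init, if one is provided.
 */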
770 static int gmc_v9_0_ecc_late_init(void *handle)
771 {
772         int r;
773         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
774         struct ras_fs_if umc_fs_info = {
775                 .sysfs_name = "umc_err_count",
776                 .debugfs_name = "umc_err_inject",
777         };
778         struct ras_ih_if umc_ih_info = {
779                 .cb = gmc_v9_0_process_ras_data_cb,
780         };
781
782         if (!adev->gmc.umc_ras_if) {
783                 adev->gmc.umc_ras_if = kmalloc(sizeof(struct ras_common_if), GFP_KERNEL);
784                 if (!adev->gmc.umc_ras_if)
785                         return -ENOMEM;
786                 adev->gmc.umc_ras_if->block = AMDGPU_RAS_BLOCK__UMC;
787                 adev->gmc.umc_ras_if->type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
788                 adev->gmc.umc_ras_if->sub_block_index = 0;
789                 strcpy(adev->gmc.umc_ras_if->name, "umc");
790         }
791         umc_ih_info.head = umc_fs_info.head = *adev->gmc.umc_ras_if;
792
793         r = amdgpu_ras_late_init(adev, adev->gmc.umc_ras_if,
794                                  &umc_fs_info, &umc_ih_info);
795         if (r)
796                 goto free;
797
798         if (amdgpu_ras_is_supported(adev, adev->gmc.umc_ras_if->block)) {
799                 r = amdgpu_irq_get(adev, &adev->gmc.ecc_irq, 0);
800                 if (r)
801                         goto umc_late_fini;
802         } else
803                 kfree(adev->gmc.umc_ras_if);
804
805         if (adev->mmhub_funcs && adev->mmhub_funcs->ras_late_init) {
806                 r = adev->mmhub_funcs->ras_late_init(adev);
807                 if (r)
808                         return r;
809         }
810         return 0;
811 umc_late_fini:
812         amdgpu_ras_late_fini(adev, adev->gmc.umc_ras_if, &umc_ih_info);
813 free:
814         kfree(adev->gmc.umc_ras_if);
815         return r;
816 }
817
818 static int gmc_v9_0_late_init(void *handle)
819 {
820         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        int r;
822
823         if (!gmc_v9_0_keep_stolen_memory(adev))
824                 amdgpu_bo_late_init(adev);
825
826         r = gmc_v9_0_allocate_vm_inv_eng(adev);
827         if (r)
828                 return r;
829         /* Check if ecc is available */
830         if (!amdgpu_sriov_vf(adev)) {
831                 switch (adev->asic_type) {
832                 case CHIP_VEGA10:
833                 case CHIP_VEGA20:
834                         r = amdgpu_atomfirmware_mem_ecc_supported(adev);
835                         if (!r) {
836                                 DRM_INFO("ECC is not present.\n");
837                                 if (adev->df_funcs->enable_ecc_force_par_wr_rmw)
838                                         adev->df_funcs->enable_ecc_force_par_wr_rmw(adev, false);
839                         } else {
840                                 DRM_INFO("ECC is active.\n");
841                         }
842
843                         r = amdgpu_atomfirmware_sram_ecc_supported(adev);
844                         if (!r) {
845                                 DRM_INFO("SRAM ECC is not present.\n");
846                         } else {
847                                 DRM_INFO("SRAM ECC is active.\n");
848                         }
849                         break;
850                 default:
851                         break;
852                 }
853         }
854
855         r = gmc_v9_0_ecc_late_init(handle);
856         if (r)
857                 return r;
858
859         return amdgpu_irq_get(adev, &adev->gmc.vm_fault, 0);
860 }
861
862 static void gmc_v9_0_vram_gtt_location(struct amdgpu_device *adev,
863                                         struct amdgpu_gmc *mc)
864 {
865         u64 base = 0;
866
867         if (adev->asic_type == CHIP_ARCTURUS)
868                 base = mmhub_v9_4_get_fb_location(adev);
869         else if (!amdgpu_sriov_vf(adev))
870                 base = mmhub_v1_0_get_fb_location(adev);
871
872         /* add the xgmi offset of the physical node */
873         base += adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size;
874         amdgpu_gmc_vram_location(adev, mc, base);
875         amdgpu_gmc_gart_location(adev, mc);
876         amdgpu_gmc_agp_location(adev, mc);
877         /* base offset of vram pages */
878         adev->vm_manager.vram_base_offset = gfxhub_v1_0_get_mc_fb_offset(adev);
879
880         /* XXX: add the xgmi offset of the physical node? */
881         adev->vm_manager.vram_base_offset +=
882                 adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size;
883 }
884
885 /**
886  * gmc_v9_0_mc_init - initialize the memory controller driver params
887  *
888  * @adev: amdgpu_device pointer
889  *
890  * Look up the amount of vram, vram width, and decide how to place
891  * vram and gart within the GPU's physical address space.
892  * Returns 0 for success.
893  */
894 static int gmc_v9_0_mc_init(struct amdgpu_device *adev)
895 {
896         int chansize, numchan;
897         int r;
898
899         if (amdgpu_sriov_vf(adev)) {
                /* For Vega10 SR-IOV, vram_width can't be read from ATOM as it
                 * can be for RAVEN, and the DF related registers are not
                 * readable; hardcoding seems to be the only way to set the
                 * correct vram_width.
                 */
904                 adev->gmc.vram_width = 2048;
905         } else if (amdgpu_emu_mode != 1) {
906                 adev->gmc.vram_width = amdgpu_atomfirmware_get_vram_width(adev);
907         }
908
909         if (!adev->gmc.vram_width) {
910                 /* hbm memory channel size */
911                 if (adev->flags & AMD_IS_APU)
912                         chansize = 64;
913                 else
914                         chansize = 128;
915
916                 numchan = adev->df_funcs->get_hbm_channel_number(adev);
917                 adev->gmc.vram_width = numchan * chansize;
918         }
919
        /* the nbio block reports the VRAM size in MB; convert to bytes */
921         adev->gmc.mc_vram_size =
922                 adev->nbio.funcs->get_memsize(adev) * 1024ULL * 1024ULL;
923         adev->gmc.real_vram_size = adev->gmc.mc_vram_size;
924
925         if (!(adev->flags & AMD_IS_APU)) {
926                 r = amdgpu_device_resize_fb_bar(adev);
927                 if (r)
928                         return r;
929         }
930         adev->gmc.aper_base = pci_resource_start(adev->pdev, 0);
931         adev->gmc.aper_size = pci_resource_len(adev->pdev, 0);
932
933 #ifdef CONFIG_X86_64
934         if (adev->flags & AMD_IS_APU) {
935                 adev->gmc.aper_base = gfxhub_v1_0_get_mc_fb_offset(adev);
936                 adev->gmc.aper_size = adev->gmc.real_vram_size;
937         }
938 #endif
939         /* In case the PCI BAR is larger than the actual amount of vram */
940         adev->gmc.visible_vram_size = adev->gmc.aper_size;
941         if (adev->gmc.visible_vram_size > adev->gmc.real_vram_size)
942                 adev->gmc.visible_vram_size = adev->gmc.real_vram_size;
943
944         /* set the gart size */
945         if (amdgpu_gart_size == -1) {
946                 switch (adev->asic_type) {
947                 case CHIP_VEGA10:  /* all engines support GPUVM */
948                 case CHIP_VEGA12:  /* all engines support GPUVM */
949                 case CHIP_VEGA20:
950                 case CHIP_ARCTURUS:
951                 default:
952                         adev->gmc.gart_size = 512ULL << 20;
953                         break;
954                 case CHIP_RAVEN:   /* DCE SG support */
955                 case CHIP_RENOIR:
956                         adev->gmc.gart_size = 1024ULL << 20;
957                         break;
958                 }
959         } else {
960                 adev->gmc.gart_size = (u64)amdgpu_gart_size << 20;
961         }
962
963         gmc_v9_0_vram_gtt_location(adev, &adev->gmc);
964
965         return 0;
966 }
967
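/**
 * gmc_v9_0_gart_init - initialize the GART page table
 *
 * @adev: amdgpu_device pointer
 *
 * Allocates the common GART structure, sets the table size and the
 * default PTE flags (uncached MTYPE, executable), and allocates the
 * page table in VRAM.
 */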
968 static int gmc_v9_0_gart_init(struct amdgpu_device *adev)
969 {
970         int r;
971
972         if (adev->gart.bo) {
973                 WARN(1, "VEGA10 PCIE GART already initialized\n");
974                 return 0;
975         }
976         /* Initialize common gart structure */
977         r = amdgpu_gart_init(adev);
978         if (r)
979                 return r;
980         adev->gart.table_size = adev->gart.num_gpu_pages * 8;
981         adev->gart.gart_pte_flags = AMDGPU_PTE_MTYPE_VG10(MTYPE_UC) |
982                                  AMDGPU_PTE_EXECUTABLE;
983         return amdgpu_gart_table_vram_alloc(adev);
984 }
985
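/**
 * gmc_v9_0_get_vbios_fb_size - size of the pre-OS framebuffer to reserve
 *
 * @adev: amdgpu_device pointer
 *
 * Returns the number of bytes of stolen VRAM to keep reserved, based on
 * the VGA mode or the active display viewport, or 0 when the pre-OS
 * buffer would consume nearly all of VRAM.  While the stolen-memory
 * workaround is active a fixed 9 MB is reserved.
 */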
986 static unsigned gmc_v9_0_get_vbios_fb_size(struct amdgpu_device *adev)
987 {
988         u32 d1vga_control;
989         unsigned size;
990
991         /*
992          * TODO Remove once GART corruption is resolved
993          * Check related code in gmc_v9_0_sw_fini
         */
995         if (gmc_v9_0_keep_stolen_memory(adev))
996                 return 9 * 1024 * 1024;
997
998         d1vga_control = RREG32_SOC15(DCE, 0, mmD1VGA_CONTROL);
999         if (REG_GET_FIELD(d1vga_control, D1VGA_CONTROL, D1VGA_MODE_ENABLE)) {
1000                 size = 9 * 1024 * 1024; /* reserve 8MB for vga emulator and 1 MB for FB */
1001         } else {
1002                 u32 viewport;
1003
1004                 switch (adev->asic_type) {
1005                 case CHIP_RAVEN:
1006                 case CHIP_RENOIR:
1007                         viewport = RREG32_SOC15(DCE, 0, mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION);
1008                         size = (REG_GET_FIELD(viewport,
1009                                               HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_HEIGHT) *
1010                                 REG_GET_FIELD(viewport,
1011                                               HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_WIDTH) *
1012                                 4);
1013                         break;
1014                 case CHIP_VEGA10:
1015                 case CHIP_VEGA12:
1016                 case CHIP_VEGA20:
1017                 default:
1018                         viewport = RREG32_SOC15(DCE, 0, mmSCL0_VIEWPORT_SIZE);
1019                         size = (REG_GET_FIELD(viewport, SCL0_VIEWPORT_SIZE, VIEWPORT_HEIGHT) *
1020                                 REG_GET_FIELD(viewport, SCL0_VIEWPORT_SIZE, VIEWPORT_WIDTH) *
1021                                 4);
1022                         break;
1023                 }
1024         }
1025         /* return 0 if the pre-OS buffer uses up most of vram */
1026         if ((adev->gmc.real_vram_size - size) < (8 * 1024 * 1024))
1027                 return 0;
1028
1029         return size;
1030 }
1031
1032 static int gmc_v9_0_sw_init(void *handle)
1033 {
1034         int r;
1035         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1036
1037         gfxhub_v1_0_init(adev);
1038         if (adev->asic_type == CHIP_ARCTURUS)
1039                 mmhub_v9_4_init(adev);
1040         else
1041                 mmhub_v1_0_init(adev);
1042
1043         spin_lock_init(&adev->gmc.invalidate_lock);
1044
1045         adev->gmc.vram_type = amdgpu_atomfirmware_get_vram_type(adev);
1046         switch (adev->asic_type) {
1047         case CHIP_RAVEN:
1048                 adev->num_vmhubs = 2;
1049
1050                 if (adev->rev_id == 0x0 || adev->rev_id == 0x1) {
1051                         amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
1052                 } else {
1053                         /* vm_size is 128TB + 512GB for legacy 3-level page support */
1054                         amdgpu_vm_adjust_size(adev, 128 * 1024 + 512, 9, 2, 48);
1055                         adev->gmc.translate_further =
1056                                 adev->vm_manager.num_level > 1;
1057                 }
1058                 break;
1059         case CHIP_VEGA10:
1060         case CHIP_VEGA12:
1061         case CHIP_VEGA20:
1062         case CHIP_RENOIR:
1063                 adev->num_vmhubs = 2;
1064
                /*
                 * To fulfill 4-level page table support,
                 * the vm size is 256TB (48 bit), the maximum size of Vega10,
                 * with a block size of 512 (9 bit).
                 */
                /* SR-IOV restricts max_pfn below AMDGPU_GMC_HOLE */
1072                 if (amdgpu_sriov_vf(adev))
1073                         amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 47);
1074                 else
1075                         amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
1076                 break;
1077         case CHIP_ARCTURUS:
1078                 adev->num_vmhubs = 3;
1079
                /* Keep the vm size the same as Vega20 */
1081                 amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
1082                 break;
1083         default:
1084                 break;
1085         }
1086
        /* This interrupt is the VMC page fault. */
1088         r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VMC, VMC_1_0__SRCID__VM_FAULT,
1089                                 &adev->gmc.vm_fault);
1090         if (r)
1091                 return r;
1092
1093         if (adev->asic_type == CHIP_ARCTURUS) {
1094                 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VMC1, VMC_1_0__SRCID__VM_FAULT,
1095                                         &adev->gmc.vm_fault);
1096                 if (r)
1097                         return r;
1098         }
1099
1100         r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_UTCL2, UTCL2_1_0__SRCID__FAULT,
1101                                 &adev->gmc.vm_fault);
1102
1103         if (r)
1104                 return r;
1105
1106         /* interrupt sent to DF. */
1107         r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DF, 0,
1108                         &adev->gmc.ecc_irq);
1109         if (r)
1110                 return r;
1111
1112         /* Set the internal MC address mask
1113          * This is the max address of the GPU's
1114          * internal address space.
1115          */
1116         adev->gmc.mc_mask = 0xffffffffffffULL; /* 48 bit MC */
1117
1118         r = dma_set_mask_and_coherent(adev->dev, DMA_BIT_MASK(44));
1119         if (r) {
1120                 printk(KERN_WARNING "amdgpu: No suitable DMA available.\n");
1121                 return r;
1122         }
1123         adev->need_swiotlb = drm_need_swiotlb(44);
1124
1125         if (adev->gmc.xgmi.supported) {
1126                 r = gfxhub_v1_1_get_xgmi_info(adev);
1127                 if (r)
1128                         return r;
1129         }
1130
1131         r = gmc_v9_0_mc_init(adev);
1132         if (r)
1133                 return r;
1134
1135         adev->gmc.stolen_size = gmc_v9_0_get_vbios_fb_size(adev);
1136
1137         /* Memory manager */
1138         r = amdgpu_bo_init(adev);
1139         if (r)
1140                 return r;
1141
1142         r = gmc_v9_0_gart_init(adev);
1143         if (r)
1144                 return r;
1145
1146         /*
1147          * number of VMs
1148          * VMID 0 is reserved for System
1149          * amdgpu graphics/compute will use VMIDs 1-7
1150          * amdkfd will use VMIDs 8-15
1151          */
1152         adev->vm_manager.id_mgr[AMDGPU_GFXHUB_0].num_ids = AMDGPU_NUM_OF_VMIDS;
1153         adev->vm_manager.id_mgr[AMDGPU_MMHUB_0].num_ids = AMDGPU_NUM_OF_VMIDS;
1154         adev->vm_manager.id_mgr[AMDGPU_MMHUB_1].num_ids = AMDGPU_NUM_OF_VMIDS;
1155
1156         amdgpu_vm_manager_init(adev);
1157
1158         return 0;
1159 }
1160
1161 static int gmc_v9_0_sw_fini(void *handle)
1162 {
1163         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1164         void *stolen_vga_buf;
1165
1166         if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__UMC) &&
1167                         adev->gmc.umc_ras_if) {
1168                 struct ras_common_if *ras_if = adev->gmc.umc_ras_if;
1169                 struct ras_ih_if ih_info = {
1170                         .head = *ras_if,
1171                 };
1172
1173                 /* remove fs first */
1174                 amdgpu_ras_debugfs_remove(adev, ras_if);
1175                 amdgpu_ras_sysfs_remove(adev, ras_if);
1176                 /* remove the IH */
1177                 amdgpu_ras_interrupt_remove_handler(adev, &ih_info);
1178                 amdgpu_ras_feature_enable(adev, ras_if, 0);
1179                 kfree(ras_if);
1180         }
1181
1182         if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__MMHUB) &&
1183                         adev->gmc.mmhub_ras_if) {
1184                 struct ras_common_if *ras_if = adev->gmc.mmhub_ras_if;
1185
1186                 /* remove fs and disable ras feature */
1187                 amdgpu_ras_debugfs_remove(adev, ras_if);
1188                 amdgpu_ras_sysfs_remove(adev, ras_if);
1189                 amdgpu_ras_feature_enable(adev, ras_if, 0);
1190                 kfree(ras_if);
1191         }
1192
1193         amdgpu_gem_force_release(adev);
1194         amdgpu_vm_manager_fini(adev);
1195
1196         if (gmc_v9_0_keep_stolen_memory(adev))
1197                 amdgpu_bo_free_kernel(&adev->stolen_vga_memory, NULL, &stolen_vga_buf);
1198
1199         amdgpu_gart_table_vram_free(adev);
1200         amdgpu_bo_fini(adev);
1201         amdgpu_gart_fini(adev);
1202
1203         return 0;
1204 }
1205
1206 static void gmc_v9_0_init_golden_registers(struct amdgpu_device *adev)
1207 {
1208
1209         switch (adev->asic_type) {
1210         case CHIP_VEGA10:
1211                 if (amdgpu_sriov_vf(adev))
1212                         break;
1213                 /* fall through */
1214         case CHIP_VEGA20:
1215                 soc15_program_register_sequence(adev,
1216                                                 golden_settings_mmhub_1_0_0,
1217                                                 ARRAY_SIZE(golden_settings_mmhub_1_0_0));
1218                 soc15_program_register_sequence(adev,
1219                                                 golden_settings_athub_1_0_0,
1220                                                 ARRAY_SIZE(golden_settings_athub_1_0_0));
1221                 break;
1222         case CHIP_VEGA12:
1223                 break;
1224         case CHIP_RAVEN:
1225                 /* TODO for renoir */
1226                 soc15_program_register_sequence(adev,
1227                                                 golden_settings_athub_1_0_0,
1228                                                 ARRAY_SIZE(golden_settings_athub_1_0_0));
1229                 break;
1230         default:
1231                 break;
1232         }
1233 }
1234
1235 /**
1236  * gmc_v9_0_gart_enable - gart enable
1237  *
1238  * @adev: amdgpu_device pointer
1239  */
1240 static int gmc_v9_0_gart_enable(struct amdgpu_device *adev)
1241 {
1242         int r, i;
1243         bool value;
1244         u32 tmp;
1245
1246         amdgpu_device_program_register_sequence(adev,
1247                                                 golden_settings_vega10_hdp,
1248                                                 ARRAY_SIZE(golden_settings_vega10_hdp));
1249
1250         if (adev->gart.bo == NULL) {
1251                 dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
1252                 return -EINVAL;
1253         }
1254         r = amdgpu_gart_table_vram_pin(adev);
1255         if (r)
1256                 return r;
1257
1258         switch (adev->asic_type) {
1259         case CHIP_RAVEN:
1260                 /* TODO for renoir */
1261                 mmhub_v1_0_update_power_gating(adev, true);
1262                 break;
1263         default:
1264                 break;
1265         }
1266
1267         r = gfxhub_v1_0_gart_enable(adev);
1268         if (r)
1269                 return r;
1270
1271         if (adev->asic_type == CHIP_ARCTURUS)
1272                 r = mmhub_v9_4_gart_enable(adev);
1273         else
1274                 r = mmhub_v1_0_gart_enable(adev);
1275         if (r)
1276                 return r;
1277
1278         WREG32_FIELD15(HDP, 0, HDP_MISC_CNTL, FLUSH_INVALIDATE_CACHE, 1);
1279
1280         tmp = RREG32_SOC15(HDP, 0, mmHDP_HOST_PATH_CNTL);
1281         WREG32_SOC15(HDP, 0, mmHDP_HOST_PATH_CNTL, tmp);
1282
1283         WREG32_SOC15(HDP, 0, mmHDP_NONSURFACE_BASE, (adev->gmc.vram_start >> 8));
1284         WREG32_SOC15(HDP, 0, mmHDP_NONSURFACE_BASE_HI, (adev->gmc.vram_start >> 40));
1285
1286         /* After HDP is initialized, flush HDP.*/
1287         adev->nbio.funcs->hdp_flush(adev, NULL);
1288
1289         if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS)
1290                 value = false;
1291         else
1292                 value = true;
1293
1294         gfxhub_v1_0_set_fault_enable_default(adev, value);
1295         if (adev->asic_type == CHIP_ARCTURUS)
1296                 mmhub_v9_4_set_fault_enable_default(adev, value);
1297         else
1298                 mmhub_v1_0_set_fault_enable_default(adev, value);
1299
1300         for (i = 0; i < adev->num_vmhubs; ++i)
1301                 gmc_v9_0_flush_gpu_tlb(adev, 0, i, 0);
1302
1303         DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
1304                  (unsigned)(adev->gmc.gart_size >> 20),
1305                  (unsigned long long)amdgpu_bo_gpu_offset(adev->gart.bo));
1306         adev->gart.ready = true;
1307         return 0;
1308 }
1309
1310 static int gmc_v9_0_hw_init(void *handle)
1311 {
1312         int r;
1313         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1314
1315         /* The sequence of these two function calls matters.*/
1316         gmc_v9_0_init_golden_registers(adev);
1317
1318         if (adev->mode_info.num_crtc) {
1319                 /* Lockout access through VGA aperture*/
1320                 WREG32_FIELD15(DCE, 0, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 1);
1321
1322                 /* disable VGA render */
1323                 WREG32_FIELD15(DCE, 0, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0);
1324         }
1325
1326         r = gmc_v9_0_gart_enable(adev);
1327
1328         return r;
1329 }
1330
1331 /**
1332  * gmc_v9_0_gart_disable - gart disable
1333  *
1334  * @adev: amdgpu_device pointer
1335  *
 * This disables all VM page tables.
1337  */
1338 static void gmc_v9_0_gart_disable(struct amdgpu_device *adev)
1339 {
1340         gfxhub_v1_0_gart_disable(adev);
1341         if (adev->asic_type == CHIP_ARCTURUS)
1342                 mmhub_v9_4_gart_disable(adev);
1343         else
1344                 mmhub_v1_0_gart_disable(adev);
1345         amdgpu_gart_table_vram_unpin(adev);
1346 }
1347
1348 static int gmc_v9_0_hw_fini(void *handle)
1349 {
1350         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1351
1352         if (amdgpu_sriov_vf(adev)) {
1353                 /* full access mode, so don't touch any GMC register */
1354                 DRM_DEBUG("For SRIOV client, shouldn't do anything.\n");
1355                 return 0;
1356         }
1357
1358         amdgpu_irq_put(adev, &adev->gmc.ecc_irq, 0);
1359         amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);
1360         gmc_v9_0_gart_disable(adev);
1361
1362         return 0;
1363 }
1364
1365 static int gmc_v9_0_suspend(void *handle)
1366 {
1367         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1368
1369         return gmc_v9_0_hw_fini(adev);
1370 }
1371
1372 static int gmc_v9_0_resume(void *handle)
1373 {
1374         int r;
1375         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1376
1377         r = gmc_v9_0_hw_init(adev);
1378         if (r)
1379                 return r;
1380
1381         amdgpu_vmid_reset_all(adev);
1382
1383         return 0;
1384 }
1385
1386 static bool gmc_v9_0_is_idle(void *handle)
1387 {
1388         /* MC is always ready in GMC v9.*/
1389         return true;
1390 }
1391
1392 static int gmc_v9_0_wait_for_idle(void *handle)
1393 {
1394         /* There is no need to wait for MC idle in GMC v9.*/
1395         return 0;
1396 }
1397
1398 static int gmc_v9_0_soft_reset(void *handle)
1399 {
1400         /* XXX for emulation.*/
1401         return 0;
1402 }
1403
1404 static int gmc_v9_0_set_clockgating_state(void *handle,
1405                                         enum amd_clockgating_state state)
1406 {
1407         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1408
1409         if (adev->asic_type == CHIP_ARCTURUS)
1410                 mmhub_v9_4_set_clockgating(adev, state);
1411         else
1412                 mmhub_v1_0_set_clockgating(adev, state);
1413
1414         athub_v1_0_set_clockgating(adev, state);
1415
1416         return 0;
1417 }
1418
1419 static void gmc_v9_0_get_clockgating_state(void *handle, u32 *flags)
1420 {
1421         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1422
1423         if (adev->asic_type == CHIP_ARCTURUS)
1424                 mmhub_v9_4_get_clockgating(adev, flags);
1425         else
1426                 mmhub_v1_0_get_clockgating(adev, flags);
1427
1428         athub_v1_0_get_clockgating(adev, flags);
1429 }
1430
1431 static int gmc_v9_0_set_powergating_state(void *handle,
1432                                         enum amd_powergating_state state)
1433 {
1434         return 0;
1435 }
1436
1437 const struct amd_ip_funcs gmc_v9_0_ip_funcs = {
1438         .name = "gmc_v9_0",
1439         .early_init = gmc_v9_0_early_init,
1440         .late_init = gmc_v9_0_late_init,
1441         .sw_init = gmc_v9_0_sw_init,
1442         .sw_fini = gmc_v9_0_sw_fini,
1443         .hw_init = gmc_v9_0_hw_init,
1444         .hw_fini = gmc_v9_0_hw_fini,
1445         .suspend = gmc_v9_0_suspend,
1446         .resume = gmc_v9_0_resume,
1447         .is_idle = gmc_v9_0_is_idle,
1448         .wait_for_idle = gmc_v9_0_wait_for_idle,
1449         .soft_reset = gmc_v9_0_soft_reset,
1450         .set_clockgating_state = gmc_v9_0_set_clockgating_state,
1451         .set_powergating_state = gmc_v9_0_set_powergating_state,
1452         .get_clockgating_state = gmc_v9_0_get_clockgating_state,
1453 };
1454
1455 const struct amdgpu_ip_block_version gmc_v9_0_ip_block =
1456 {
1457         .type = AMD_IP_BLOCK_TYPE_GMC,
1458         .major = 9,
1459         .minor = 0,
1460         .rev = 0,
1461         .funcs = &gmc_v9_0_ip_funcs,
1462 };