drivers/gpu/drm/amd/amdkfd/kfd_device.c

/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/bsearch.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include "kfd_priv.h"
#include "kfd_device_queue_manager.h"
#include "kfd_pm4_headers_vi.h"
#include "cwsr_trap_handler.h"
#include "kfd_iommu.h"

#define MQD_SIZE_ALIGNED 768

/*
 * kfd_locked is used to lock the kfd driver during suspend or reset.
 * Once locked, the kfd driver will stop any further GPU execution.
 * Process creation (open) will return -EAGAIN.
 */
static atomic_t kfd_locked = ATOMIC_INIT(0);

#ifdef KFD_SUPPORT_IOMMU_V2
static const struct kfd_device_info kaveri_device_info = {
        .asic_family = CHIP_KAVERI,
        .max_pasid_bits = 16,
        /* max num of queues for KV. TODO: should be a dynamic value */
        .max_no_of_hqd  = 24,
        .doorbell_size  = 4,
        .ih_ring_entry_size = 4 * sizeof(uint32_t),
        .event_interrupt_class = &event_interrupt_class_cik,
        .num_of_watch_points = 4,
        .mqd_size_aligned = MQD_SIZE_ALIGNED,
        .supports_cwsr = false,
        .needs_iommu_device = true,
        .needs_pci_atomics = false,
        .num_sdma_engines = 2,
};

static const struct kfd_device_info carrizo_device_info = {
        .asic_family = CHIP_CARRIZO,
        .max_pasid_bits = 16,
        /* max num of queues for CZ. TODO: should be a dynamic value */
        .max_no_of_hqd  = 24,
        .doorbell_size  = 4,
        .ih_ring_entry_size = 4 * sizeof(uint32_t),
        .event_interrupt_class = &event_interrupt_class_cik,
        .num_of_watch_points = 4,
        .mqd_size_aligned = MQD_SIZE_ALIGNED,
        .supports_cwsr = true,
        .needs_iommu_device = true,
        .needs_pci_atomics = false,
        .num_sdma_engines = 2,
};

static const struct kfd_device_info raven_device_info = {
        .asic_family = CHIP_RAVEN,
        .max_pasid_bits = 16,
        .max_no_of_hqd  = 24,
        .doorbell_size  = 8,
        .ih_ring_entry_size = 8 * sizeof(uint32_t),
        .event_interrupt_class = &event_interrupt_class_v9,
        .num_of_watch_points = 4,
        .mqd_size_aligned = MQD_SIZE_ALIGNED,
        .supports_cwsr = true,
        .needs_iommu_device = true,
        .needs_pci_atomics = true,
        .num_sdma_engines = 1,
};
#endif

static const struct kfd_device_info hawaii_device_info = {
        .asic_family = CHIP_HAWAII,
        .max_pasid_bits = 16,
        /* max num of queues for Hawaii. TODO: should be a dynamic value */
        .max_no_of_hqd  = 24,
        .doorbell_size  = 4,
        .ih_ring_entry_size = 4 * sizeof(uint32_t),
        .event_interrupt_class = &event_interrupt_class_cik,
        .num_of_watch_points = 4,
        .mqd_size_aligned = MQD_SIZE_ALIGNED,
        .supports_cwsr = false,
        .needs_iommu_device = false,
        .needs_pci_atomics = false,
        .num_sdma_engines = 2,
};

static const struct kfd_device_info tonga_device_info = {
        .asic_family = CHIP_TONGA,
        .max_pasid_bits = 16,
        .max_no_of_hqd  = 24,
        .doorbell_size  = 4,
        .ih_ring_entry_size = 4 * sizeof(uint32_t),
        .event_interrupt_class = &event_interrupt_class_cik,
        .num_of_watch_points = 4,
        .mqd_size_aligned = MQD_SIZE_ALIGNED,
        .supports_cwsr = false,
        .needs_iommu_device = false,
        .needs_pci_atomics = true,
        .num_sdma_engines = 2,
};

static const struct kfd_device_info tonga_vf_device_info = {
        .asic_family = CHIP_TONGA,
        .max_pasid_bits = 16,
        .max_no_of_hqd  = 24,
        .doorbell_size  = 4,
        .ih_ring_entry_size = 4 * sizeof(uint32_t),
        .event_interrupt_class = &event_interrupt_class_cik,
        .num_of_watch_points = 4,
        .mqd_size_aligned = MQD_SIZE_ALIGNED,
        .supports_cwsr = false,
        .needs_iommu_device = false,
        .needs_pci_atomics = false,
        .num_sdma_engines = 2,
};

static const struct kfd_device_info fiji_device_info = {
        .asic_family = CHIP_FIJI,
        .max_pasid_bits = 16,
        .max_no_of_hqd  = 24,
        .doorbell_size  = 4,
        .ih_ring_entry_size = 4 * sizeof(uint32_t),
        .event_interrupt_class = &event_interrupt_class_cik,
        .num_of_watch_points = 4,
        .mqd_size_aligned = MQD_SIZE_ALIGNED,
        .supports_cwsr = true,
        .needs_iommu_device = false,
        .needs_pci_atomics = true,
        .num_sdma_engines = 2,
};

static const struct kfd_device_info fiji_vf_device_info = {
        .asic_family = CHIP_FIJI,
        .max_pasid_bits = 16,
        .max_no_of_hqd  = 24,
        .doorbell_size  = 4,
        .ih_ring_entry_size = 4 * sizeof(uint32_t),
        .event_interrupt_class = &event_interrupt_class_cik,
        .num_of_watch_points = 4,
        .mqd_size_aligned = MQD_SIZE_ALIGNED,
        .supports_cwsr = true,
        .needs_iommu_device = false,
        .needs_pci_atomics = false,
        .num_sdma_engines = 2,
};

static const struct kfd_device_info polaris10_device_info = {
        .asic_family = CHIP_POLARIS10,
        .max_pasid_bits = 16,
        .max_no_of_hqd  = 24,
        .doorbell_size  = 4,
        .ih_ring_entry_size = 4 * sizeof(uint32_t),
        .event_interrupt_class = &event_interrupt_class_cik,
        .num_of_watch_points = 4,
        .mqd_size_aligned = MQD_SIZE_ALIGNED,
        .supports_cwsr = true,
        .needs_iommu_device = false,
        .needs_pci_atomics = true,
        .num_sdma_engines = 2,
};

static const struct kfd_device_info polaris10_vf_device_info = {
        .asic_family = CHIP_POLARIS10,
        .max_pasid_bits = 16,
        .max_no_of_hqd  = 24,
        .doorbell_size  = 4,
        .ih_ring_entry_size = 4 * sizeof(uint32_t),
        .event_interrupt_class = &event_interrupt_class_cik,
        .num_of_watch_points = 4,
        .mqd_size_aligned = MQD_SIZE_ALIGNED,
        .supports_cwsr = true,
        .needs_iommu_device = false,
        .needs_pci_atomics = false,
        .num_sdma_engines = 2,
};

static const struct kfd_device_info polaris11_device_info = {
        .asic_family = CHIP_POLARIS11,
        .max_pasid_bits = 16,
        .max_no_of_hqd  = 24,
        .doorbell_size  = 4,
        .ih_ring_entry_size = 4 * sizeof(uint32_t),
        .event_interrupt_class = &event_interrupt_class_cik,
        .num_of_watch_points = 4,
        .mqd_size_aligned = MQD_SIZE_ALIGNED,
        .supports_cwsr = true,
        .needs_iommu_device = false,
        .needs_pci_atomics = true,
        .num_sdma_engines = 2,
};

static const struct kfd_device_info vega10_device_info = {
        .asic_family = CHIP_VEGA10,
        .max_pasid_bits = 16,
        .max_no_of_hqd  = 24,
        .doorbell_size  = 8,
        .ih_ring_entry_size = 8 * sizeof(uint32_t),
        .event_interrupt_class = &event_interrupt_class_v9,
        .num_of_watch_points = 4,
        .mqd_size_aligned = MQD_SIZE_ALIGNED,
        .supports_cwsr = true,
        .needs_iommu_device = false,
        .needs_pci_atomics = false,
        .num_sdma_engines = 2,
};

static const struct kfd_device_info vega10_vf_device_info = {
        .asic_family = CHIP_VEGA10,
        .max_pasid_bits = 16,
        .max_no_of_hqd  = 24,
        .doorbell_size  = 8,
        .ih_ring_entry_size = 8 * sizeof(uint32_t),
        .event_interrupt_class = &event_interrupt_class_v9,
        .num_of_watch_points = 4,
        .mqd_size_aligned = MQD_SIZE_ALIGNED,
        .supports_cwsr = true,
        .needs_iommu_device = false,
        .needs_pci_atomics = false,
        .num_sdma_engines = 2,
};

struct kfd_deviceid {
        unsigned short did;
        const struct kfd_device_info *device_info;
};

static const struct kfd_deviceid supported_devices[] = {
#ifdef KFD_SUPPORT_IOMMU_V2
        { 0x1304, &kaveri_device_info },        /* Kaveri */
        { 0x1305, &kaveri_device_info },        /* Kaveri */
        { 0x1306, &kaveri_device_info },        /* Kaveri */
        { 0x1307, &kaveri_device_info },        /* Kaveri */
        { 0x1309, &kaveri_device_info },        /* Kaveri */
        { 0x130A, &kaveri_device_info },        /* Kaveri */
        { 0x130B, &kaveri_device_info },        /* Kaveri */
        { 0x130C, &kaveri_device_info },        /* Kaveri */
        { 0x130D, &kaveri_device_info },        /* Kaveri */
        { 0x130E, &kaveri_device_info },        /* Kaveri */
        { 0x130F, &kaveri_device_info },        /* Kaveri */
        { 0x1310, &kaveri_device_info },        /* Kaveri */
        { 0x1311, &kaveri_device_info },        /* Kaveri */
        { 0x1312, &kaveri_device_info },        /* Kaveri */
        { 0x1313, &kaveri_device_info },        /* Kaveri */
        { 0x1315, &kaveri_device_info },        /* Kaveri */
        { 0x1316, &kaveri_device_info },        /* Kaveri */
        { 0x1317, &kaveri_device_info },        /* Kaveri */
        { 0x1318, &kaveri_device_info },        /* Kaveri */
        { 0x131B, &kaveri_device_info },        /* Kaveri */
        { 0x131C, &kaveri_device_info },        /* Kaveri */
        { 0x131D, &kaveri_device_info },        /* Kaveri */
        { 0x9870, &carrizo_device_info },       /* Carrizo */
        { 0x9874, &carrizo_device_info },       /* Carrizo */
        { 0x9875, &carrizo_device_info },       /* Carrizo */
        { 0x9876, &carrizo_device_info },       /* Carrizo */
        { 0x9877, &carrizo_device_info },       /* Carrizo */
        { 0x15DD, &raven_device_info },         /* Raven */
#endif
        { 0x67A0, &hawaii_device_info },        /* Hawaii */
        { 0x67A1, &hawaii_device_info },        /* Hawaii */
        { 0x67A2, &hawaii_device_info },        /* Hawaii */
        { 0x67A8, &hawaii_device_info },        /* Hawaii */
        { 0x67A9, &hawaii_device_info },        /* Hawaii */
        { 0x67AA, &hawaii_device_info },        /* Hawaii */
        { 0x67B0, &hawaii_device_info },        /* Hawaii */
        { 0x67B1, &hawaii_device_info },        /* Hawaii */
        { 0x67B8, &hawaii_device_info },        /* Hawaii */
        { 0x67B9, &hawaii_device_info },        /* Hawaii */
        { 0x67BA, &hawaii_device_info },        /* Hawaii */
        { 0x67BE, &hawaii_device_info },        /* Hawaii */
        { 0x6920, &tonga_device_info },         /* Tonga */
        { 0x6921, &tonga_device_info },         /* Tonga */
        { 0x6928, &tonga_device_info },         /* Tonga */
        { 0x6929, &tonga_device_info },         /* Tonga */
        { 0x692B, &tonga_device_info },         /* Tonga */
        { 0x692F, &tonga_vf_device_info },      /* Tonga vf */
        { 0x6938, &tonga_device_info },         /* Tonga */
        { 0x6939, &tonga_device_info },         /* Tonga */
        { 0x7300, &fiji_device_info },          /* Fiji */
        { 0x730F, &fiji_vf_device_info },       /* Fiji vf */
        { 0x67C0, &polaris10_device_info },     /* Polaris10 */
        { 0x67C1, &polaris10_device_info },     /* Polaris10 */
        { 0x67C2, &polaris10_device_info },     /* Polaris10 */
        { 0x67C4, &polaris10_device_info },     /* Polaris10 */
        { 0x67C7, &polaris10_device_info },     /* Polaris10 */
        { 0x67C8, &polaris10_device_info },     /* Polaris10 */
        { 0x67C9, &polaris10_device_info },     /* Polaris10 */
        { 0x67CA, &polaris10_device_info },     /* Polaris10 */
        { 0x67CC, &polaris10_device_info },     /* Polaris10 */
        { 0x67CF, &polaris10_device_info },     /* Polaris10 */
        { 0x67D0, &polaris10_vf_device_info },  /* Polaris10 vf */
        { 0x67DF, &polaris10_device_info },     /* Polaris10 */
        { 0x67E0, &polaris11_device_info },     /* Polaris11 */
        { 0x67E1, &polaris11_device_info },     /* Polaris11 */
        { 0x67E3, &polaris11_device_info },     /* Polaris11 */
        { 0x67E7, &polaris11_device_info },     /* Polaris11 */
        { 0x67E8, &polaris11_device_info },     /* Polaris11 */
        { 0x67E9, &polaris11_device_info },     /* Polaris11 */
        { 0x67EB, &polaris11_device_info },     /* Polaris11 */
        { 0x67EF, &polaris11_device_info },     /* Polaris11 */
        { 0x67FF, &polaris11_device_info },     /* Polaris11 */
        { 0x6860, &vega10_device_info },        /* Vega10 */
        { 0x6861, &vega10_device_info },        /* Vega10 */
        { 0x6862, &vega10_device_info },        /* Vega10 */
        { 0x6863, &vega10_device_info },        /* Vega10 */
        { 0x6864, &vega10_device_info },        /* Vega10 */
        { 0x6867, &vega10_device_info },        /* Vega10 */
        { 0x6868, &vega10_device_info },        /* Vega10 */
        { 0x686C, &vega10_vf_device_info },     /* Vega10 vf */
        { 0x687F, &vega10_device_info },        /* Vega10 */
};

static int kfd_gtt_sa_init(struct kfd_dev *kfd, unsigned int buf_size,
                                unsigned int chunk_size);
static void kfd_gtt_sa_fini(struct kfd_dev *kfd);

static int kfd_resume(struct kfd_dev *kfd);

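/* Return the kfd_device_info entry matching a PCI device ID, or NULL
 * (with a warning) if the device is not in supported_devices.
 */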
static const struct kfd_device_info *lookup_device_info(unsigned short did)
{
        size_t i;

        for (i = 0; i < ARRAY_SIZE(supported_devices); i++) {
                if (supported_devices[i].did == did) {
                        WARN_ON(!supported_devices[i].device_info);
                        return supported_devices[i].device_info;
                }
        }

        dev_warn(kfd_device, "DID %04x is missing in supported_devices\n",
                 did);

        return NULL;
}

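/* First half of device init, called from the KGD (graphics driver) probe
 * path: match the PCI device ID, check PCIe atomics support where the
 * ASIC requires it, and allocate the kfd_dev structure.
 */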
struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd,
        struct pci_dev *pdev, const struct kfd2kgd_calls *f2g)
{
        struct kfd_dev *kfd;
        int ret;
        const struct kfd_device_info *device_info =
                                        lookup_device_info(pdev->device);

        if (!device_info) {
                dev_err(kfd_device, "kgd2kfd_probe failed\n");
                return NULL;
        }

        /* Allow BIF to recode atomics to PCIe 3.0 AtomicOps.
         * 32 and 64-bit requests are possible and must be
         * supported.
         */
        ret = pci_enable_atomic_ops_to_root(pdev,
                        PCI_EXP_DEVCAP2_ATOMIC_COMP32 |
                        PCI_EXP_DEVCAP2_ATOMIC_COMP64);
        if (device_info->needs_pci_atomics && ret < 0) {
                dev_info(kfd_device,
                         "skipped device %x:%x, PCI rejects atomics\n",
                         pdev->vendor, pdev->device);
                return NULL;
        }

        kfd = kzalloc(sizeof(*kfd), GFP_KERNEL);
        if (!kfd)
                return NULL;

        kfd->kgd = kgd;
        kfd->device_info = device_info;
        kfd->pdev = pdev;
        kfd->init_complete = false;
        kfd->kfd2kgd = f2g;

        mutex_init(&kfd->doorbell_mutex);
        memset(&kfd->doorbell_available_index, 0,
                sizeof(kfd->doorbell_available_index));

        return kfd;
}

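/* Pick the CWSR (compute wave save/restore) trap handler binary for this
 * ASIC generation; each binary must fit in a single page.
 */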
static void kfd_cwsr_init(struct kfd_dev *kfd)
{
        if (cwsr_enable && kfd->device_info->supports_cwsr) {
                if (kfd->device_info->asic_family < CHIP_VEGA10) {
                        BUILD_BUG_ON(sizeof(cwsr_trap_gfx8_hex) > PAGE_SIZE);
                        kfd->cwsr_isa = cwsr_trap_gfx8_hex;
                        kfd->cwsr_isa_size = sizeof(cwsr_trap_gfx8_hex);
                } else {
                        BUILD_BUG_ON(sizeof(cwsr_trap_gfx9_hex) > PAGE_SIZE);
                        kfd->cwsr_isa = cwsr_trap_gfx9_hex;
                        kfd->cwsr_isa_size = sizeof(cwsr_trap_gfx9_hex);
                }

                kfd->cwsr_enabled = true;
        }
}

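/* Second half of device init: size and allocate the GTT memory used for
 * MQDs, runlist packets and kernel queues, then bring up the
 * sub-allocator, doorbells, topology, interrupts and device queue
 * manager. Returns true (init_complete) on success.
 */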
bool kgd2kfd_device_init(struct kfd_dev *kfd,
                         const struct kgd2kfd_shared_resources *gpu_resources)
{
        unsigned int size;

        kfd->shared_resources = *gpu_resources;

        kfd->vm_info.first_vmid_kfd = ffs(gpu_resources->compute_vmid_bitmap)-1;
        kfd->vm_info.last_vmid_kfd = fls(gpu_resources->compute_vmid_bitmap)-1;
        kfd->vm_info.vmid_num_kfd = kfd->vm_info.last_vmid_kfd
                        - kfd->vm_info.first_vmid_kfd + 1;

        /* Verify module parameters regarding mapped process number */
        if ((hws_max_conc_proc < 0)
                        || (hws_max_conc_proc > kfd->vm_info.vmid_num_kfd)) {
                dev_err(kfd_device,
                        "hws_max_conc_proc %d must be between 0 and %d, use %d instead\n",
                        hws_max_conc_proc, kfd->vm_info.vmid_num_kfd,
                        kfd->vm_info.vmid_num_kfd);
                kfd->max_proc_per_quantum = kfd->vm_info.vmid_num_kfd;
        } else {
                kfd->max_proc_per_quantum = hws_max_conc_proc;
        }

        /* calculate max size of mqds needed for queues */
        size = max_num_of_queues_per_device *
                        kfd->device_info->mqd_size_aligned;

        /*
         * calculate max size of runlist packet.
         * There can be only 2 packets at once
         */
        size += (KFD_MAX_NUM_OF_PROCESSES * sizeof(struct pm4_mes_map_process) +
                max_num_of_queues_per_device * sizeof(struct pm4_mes_map_queues)
                + sizeof(struct pm4_mes_runlist)) * 2;

        /* Add size of HIQ & DIQ */
        size += KFD_KERNEL_QUEUE_SIZE * 2;

        /* add another 512KB for all other allocations on gart (HPD, fences) */
        size += 512 * 1024;

        if (kfd->kfd2kgd->init_gtt_mem_allocation(
                        kfd->kgd, size, &kfd->gtt_mem,
                        &kfd->gtt_start_gpu_addr, &kfd->gtt_start_cpu_ptr)) {
                dev_err(kfd_device, "Could not allocate %d bytes\n", size);
                goto out;
        }

        dev_info(kfd_device, "Allocated %d bytes on gart\n", size);

        /* Initialize GTT sa with 512 byte chunk size */
        if (kfd_gtt_sa_init(kfd, size, 512) != 0) {
                dev_err(kfd_device, "Error initializing gtt sub-allocator\n");
                goto kfd_gtt_sa_init_error;
        }

        if (kfd_doorbell_init(kfd)) {
                dev_err(kfd_device,
                        "Error initializing doorbell aperture\n");
                goto kfd_doorbell_error;
        }

        if (kfd_topology_add_device(kfd)) {
                dev_err(kfd_device, "Error adding device to topology\n");
                goto kfd_topology_add_device_error;
        }

        if (kfd_interrupt_init(kfd)) {
                dev_err(kfd_device, "Error initializing interrupts\n");
                goto kfd_interrupt_error;
        }

        kfd->dqm = device_queue_manager_init(kfd);
        if (!kfd->dqm) {
                dev_err(kfd_device, "Error initializing queue manager\n");
                goto device_queue_manager_error;
        }

        if (kfd_iommu_device_init(kfd)) {
                dev_err(kfd_device, "Error initializing iommuv2\n");
                goto device_iommu_error;
        }

        kfd_cwsr_init(kfd);

        if (kfd_resume(kfd))
                goto kfd_resume_error;

        kfd->dbgmgr = NULL;

        kfd->init_complete = true;
        dev_info(kfd_device, "added device %x:%x\n", kfd->pdev->vendor,
                 kfd->pdev->device);

        pr_debug("Starting kfd with the following scheduling policy %d\n",
                kfd->dqm->sched_policy);

        goto out;

kfd_resume_error:
device_iommu_error:
        device_queue_manager_uninit(kfd->dqm);
device_queue_manager_error:
        kfd_interrupt_exit(kfd);
kfd_interrupt_error:
        kfd_topology_remove_device(kfd);
kfd_topology_add_device_error:
        kfd_doorbell_fini(kfd);
kfd_doorbell_error:
        kfd_gtt_sa_fini(kfd);
kfd_gtt_sa_init_error:
        kfd->kfd2kgd->free_gtt_mem(kfd->kgd, kfd->gtt_mem);
        dev_err(kfd_device,
                "device %x:%x NOT added due to errors\n",
                kfd->pdev->vendor, kfd->pdev->device);
out:
        return kfd->init_complete;
}

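/* Tear the device down in the reverse order of kgd2kfd_device_init() */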
void kgd2kfd_device_exit(struct kfd_dev *kfd)
{
        if (kfd->init_complete) {
                kgd2kfd_suspend(kfd);
                device_queue_manager_uninit(kfd->dqm);
                kfd_interrupt_exit(kfd);
                kfd_topology_remove_device(kfd);
                kfd_doorbell_fini(kfd);
                kfd_gtt_sa_fini(kfd);
                kfd->kfd2kgd->free_gtt_mem(kfd->kgd, kfd->gtt_mem);
        }

        kfree(kfd);
}

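/* Quiesce the device before a GPU reset: suspend processes and take the
 * DQM lock, which is released again in kgd2kfd_post_reset().
 */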
int kgd2kfd_pre_reset(struct kfd_dev *kfd)
{
        if (!kfd->init_complete)
                return 0;
        kgd2kfd_suspend(kfd);

        /* hold dqm->lock to prevent further execution */
        dqm_lock(kfd->dqm);

        kfd_signal_reset_event(kfd);
        return 0;
}

/*
 * FIXME: KFD cannot resume existing processes for now. All existing
 * processes are kept in an evicted state until they terminate.
 */

int kgd2kfd_post_reset(struct kfd_dev *kfd)
{
        int ret, count;

        if (!kfd->init_complete)
                return 0;

        dqm_unlock(kfd->dqm);

        ret = kfd_resume(kfd);
        if (ret)
                return ret;
        count = atomic_dec_return(&kfd_locked);
        WARN_ONCE(count != 0, "KFD reset ref. error");
        return 0;
}

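/* True while any device is suspended or in reset (kfd_locked elevated) */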
bool kfd_is_locked(void)
{
        return (atomic_read(&kfd_locked) > 0);
}

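/* Stop GPU execution on this device. The first device to suspend also
 * suspends all KFD processes.
 */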
void kgd2kfd_suspend(struct kfd_dev *kfd)
{
        if (!kfd->init_complete)
                return;

        /* When the first KFD device is suspended, suspend all KFD processes */
        if (atomic_inc_return(&kfd_locked) == 1)
                kfd_suspend_all_processes();

        kfd->dqm->ops.stop(kfd->dqm);

        kfd_iommu_suspend(kfd);
}

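/* Restart this device. The last device to resume also resumes all KFD
 * processes.
 */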
int kgd2kfd_resume(struct kfd_dev *kfd)
{
        int ret, count;

        if (!kfd->init_complete)
                return 0;

        ret = kfd_resume(kfd);
        if (ret)
                return ret;

        count = atomic_dec_return(&kfd_locked);
        WARN_ONCE(count < 0, "KFD suspend / resume ref. error");
        if (count == 0)
                ret = kfd_resume_all_processes();

        return ret;
}

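/* Common resume path: bring the IOMMU back up, then restart the device
 * queue manager.
 */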
static int kfd_resume(struct kfd_dev *kfd)
{
        int err = 0;

        err = kfd_iommu_resume(kfd);
        if (err) {
                dev_err(kfd_device,
                        "Failed to resume IOMMU for device %x:%x\n",
                        kfd->pdev->vendor, kfd->pdev->device);
                return err;
        }

        err = kfd->dqm->ops.start(kfd->dqm);
        if (err) {
                dev_err(kfd_device,
                        "Error starting queue manager for device %x:%x\n",
                        kfd->pdev->vendor, kfd->pdev->device);
                goto dqm_start_error;
        }

        return err;

dqm_start_error:
        kfd_iommu_suspend(kfd);
        return err;
}

/* This is called directly from KGD at ISR. */
void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry)
{
        uint32_t patched_ihre[KFD_MAX_RING_ENTRY_SIZE];
        bool is_patched = false;

        if (!kfd->init_complete)
                return;

        if (kfd->device_info->ih_ring_entry_size > sizeof(patched_ihre)) {
                dev_err_once(kfd_device, "Ring entry too small\n");
                return;
        }

        spin_lock(&kfd->interrupt_lock);

        if (kfd->interrupts_active
            && interrupt_is_wanted(kfd, ih_ring_entry,
                                   patched_ihre, &is_patched)
            && enqueue_ih_ring_entry(kfd,
                                     is_patched ? patched_ihre : ih_ring_entry))
                queue_work(kfd->ih_wq, &kfd->interrupt_work);

        spin_unlock(&kfd->interrupt_lock);
}

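/* Evict all queues of the KFD process identified by @mm */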
int kgd2kfd_quiesce_mm(struct mm_struct *mm)
{
        struct kfd_process *p;
        int r;

        /* Because we are called from arbitrary context (workqueue) as opposed
         * to process context, kfd_process could attempt to exit while we are
         * running so the lookup function increments the process ref count.
         */
        p = kfd_lookup_process_by_mm(mm);
        if (!p)
                return -ESRCH;

        r = kfd_process_evict_queues(p);

        kfd_unref_process(p);
        return r;
}

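/* Restore all queues of the KFD process identified by @mm */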
int kgd2kfd_resume_mm(struct mm_struct *mm)
{
        struct kfd_process *p;
        int r;

        /* Because we are called from arbitrary context (workqueue) as opposed
         * to process context, kfd_process could attempt to exit while we are
         * running so the lookup function increments the process ref count.
         */
        p = kfd_lookup_process_by_mm(mm);
        if (!p)
                return -ESRCH;

        r = kfd_process_restore_queues(p);

        kfd_unref_process(p);
        return r;
}

/**
 * kgd2kfd_schedule_evict_and_restore_process - Schedules work queue that will
 *   prepare for safe eviction of KFD BOs that belong to the specified
 *   process.
 *
 * @mm: mm_struct that identifies the specified KFD process
 * @fence: eviction fence attached to KFD process BOs
 *
 */
int kgd2kfd_schedule_evict_and_restore_process(struct mm_struct *mm,
                                               struct dma_fence *fence)
{
        struct kfd_process *p;
        unsigned long active_time;
        unsigned long delay_jiffies = msecs_to_jiffies(PROCESS_ACTIVE_TIME_MS);

        if (!fence)
                return -EINVAL;

        if (dma_fence_is_signaled(fence))
                return 0;

        p = kfd_lookup_process_by_mm(mm);
        if (!p)
                return -ENODEV;

        if (fence->seqno == p->last_eviction_seqno)
                goto out;

        p->last_eviction_seqno = fence->seqno;

        /* Avoid KFD process starvation. Wait for at least
         * PROCESS_ACTIVE_TIME_MS before evicting the process again
         */
        active_time = get_jiffies_64() - p->last_restore_timestamp;
        if (delay_jiffies > active_time)
                delay_jiffies -= active_time;
        else
                delay_jiffies = 0;

        /* During process initialization eviction_work.dwork is initialized
         * to kfd_evict_bo_worker
         */
        schedule_delayed_work(&p->eviction_work, delay_jiffies);
out:
        kfd_unref_process(p);
        return 0;
}

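/* Initialize the GTT sub-allocator: carve buf_size bytes of the GTT
 * buffer into chunk_size chunks tracked by a bitmap.
 */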
static int kfd_gtt_sa_init(struct kfd_dev *kfd, unsigned int buf_size,
                                unsigned int chunk_size)
{
        unsigned int num_of_longs;

        if (WARN_ON(buf_size < chunk_size))
                return -EINVAL;
        if (WARN_ON(buf_size == 0))
                return -EINVAL;
        if (WARN_ON(chunk_size == 0))
                return -EINVAL;

        kfd->gtt_sa_chunk_size = chunk_size;
        kfd->gtt_sa_num_of_chunks = buf_size / chunk_size;

        num_of_longs = (kfd->gtt_sa_num_of_chunks + BITS_PER_LONG - 1) /
                BITS_PER_LONG;

        kfd->gtt_sa_bitmap = kcalloc(num_of_longs, sizeof(long), GFP_KERNEL);

        if (!kfd->gtt_sa_bitmap)
                return -ENOMEM;

        pr_debug("gtt_sa_num_of_chunks = %d, gtt_sa_bitmap = %p\n",
                        kfd->gtt_sa_num_of_chunks, kfd->gtt_sa_bitmap);

        mutex_init(&kfd->gtt_sa_lock);

        return 0;
}

static void kfd_gtt_sa_fini(struct kfd_dev *kfd)
{
        mutex_destroy(&kfd->gtt_sa_lock);
        kfree(kfd->gtt_sa_bitmap);
}

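/* The two helpers below translate a chunk (bit) number into the GPU and
 * CPU addresses of that chunk within the GTT buffer.
 */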
static inline uint64_t kfd_gtt_sa_calc_gpu_addr(uint64_t start_addr,
                                                unsigned int bit_num,
                                                unsigned int chunk_size)
{
        return start_addr + bit_num * chunk_size;
}

static inline uint32_t *kfd_gtt_sa_calc_cpu_addr(void *start_addr,
                                                unsigned int bit_num,
                                                unsigned int chunk_size)
{
        return (uint32_t *) ((uintptr_t) start_addr + bit_num * chunk_size);
}

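/* Allocate a contiguous run of chunks large enough for @size bytes and
 * return it in @mem_obj, using a first-fit search over the chunk bitmap.
 */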
int kfd_gtt_sa_allocate(struct kfd_dev *kfd, unsigned int size,
                        struct kfd_mem_obj **mem_obj)
{
        unsigned int found, start_search, cur_size;

        if (size == 0)
                return -EINVAL;

        if (size > kfd->gtt_sa_num_of_chunks * kfd->gtt_sa_chunk_size)
                return -ENOMEM;

        *mem_obj = kzalloc(sizeof(struct kfd_mem_obj), GFP_KERNEL);
        if (!(*mem_obj))
                return -ENOMEM;

        pr_debug("Allocated mem_obj = %p for size = %d\n", *mem_obj, size);

        start_search = 0;

        mutex_lock(&kfd->gtt_sa_lock);

kfd_gtt_restart_search:
        /* Find the first chunk that is free */
        found = find_next_zero_bit(kfd->gtt_sa_bitmap,
                                        kfd->gtt_sa_num_of_chunks,
                                        start_search);

        pr_debug("Found = %d\n", found);

        /* If there wasn't any free chunk, bail out */
        if (found == kfd->gtt_sa_num_of_chunks)
                goto kfd_gtt_no_free_chunk;

        /* Update fields of mem_obj */
        (*mem_obj)->range_start = found;
        (*mem_obj)->range_end = found;
        (*mem_obj)->gpu_addr = kfd_gtt_sa_calc_gpu_addr(
                                        kfd->gtt_start_gpu_addr,
                                        found,
                                        kfd->gtt_sa_chunk_size);
        (*mem_obj)->cpu_ptr = kfd_gtt_sa_calc_cpu_addr(
                                        kfd->gtt_start_cpu_ptr,
                                        found,
                                        kfd->gtt_sa_chunk_size);

        pr_debug("gpu_addr = %p, cpu_addr = %p\n",
                        (uint64_t *) (*mem_obj)->gpu_addr, (*mem_obj)->cpu_ptr);

        /* If we need only one chunk, mark it as allocated and get out */
        if (size <= kfd->gtt_sa_chunk_size) {
                pr_debug("Single bit\n");
                set_bit(found, kfd->gtt_sa_bitmap);
                goto kfd_gtt_out;
        }

        /* Otherwise, try to see if we have enough contiguous chunks */
        cur_size = size - kfd->gtt_sa_chunk_size;
        do {
                (*mem_obj)->range_end =
                        find_next_zero_bit(kfd->gtt_sa_bitmap,
                                        kfd->gtt_sa_num_of_chunks, ++found);
                /*
                 * If the next free chunk is not contiguous, restart the
                 * search from the last free chunk we found (which wasn't
                 * contiguous with the previous ones)
                 */
                if ((*mem_obj)->range_end != found) {
                        start_search = found;
                        goto kfd_gtt_restart_search;
                }

                /*
                 * If we reached end of buffer, bail out with error
                 */
                if (found == kfd->gtt_sa_num_of_chunks)
                        goto kfd_gtt_no_free_chunk;

                /* Check if we don't need another chunk */
                if (cur_size <= kfd->gtt_sa_chunk_size)
                        cur_size = 0;
                else
                        cur_size -= kfd->gtt_sa_chunk_size;

        } while (cur_size > 0);

        pr_debug("range_start = %d, range_end = %d\n",
                (*mem_obj)->range_start, (*mem_obj)->range_end);

        /* Mark the chunks as allocated */
        for (found = (*mem_obj)->range_start;
                found <= (*mem_obj)->range_end;
                found++)
                set_bit(found, kfd->gtt_sa_bitmap);

kfd_gtt_out:
        mutex_unlock(&kfd->gtt_sa_lock);
        return 0;

kfd_gtt_no_free_chunk:
        pr_debug("Allocation failed with mem_obj = %p\n", *mem_obj);
        mutex_unlock(&kfd->gtt_sa_lock);
        /* Free the allocated object itself, not the caller's pointer */
        kfree(*mem_obj);
        return -ENOMEM;
}

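/* Release a sub-allocation by clearing its chunk range in the bitmap */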
int kfd_gtt_sa_free(struct kfd_dev *kfd, struct kfd_mem_obj *mem_obj)
{
        unsigned int bit;

        /* Act like kfree when trying to free a NULL object */
        if (!mem_obj)
                return 0;

        pr_debug("Free mem_obj = %p, range_start = %d, range_end = %d\n",
                        mem_obj, mem_obj->range_start, mem_obj->range_end);

        mutex_lock(&kfd->gtt_sa_lock);

        /* Mark the chunks as free */
        for (bit = mem_obj->range_start;
                bit <= mem_obj->range_end;
                bit++)
                clear_bit(bit, kfd->gtt_sa_bitmap);

        mutex_unlock(&kfd->gtt_sa_lock);

        kfree(mem_obj);
        return 0;
}

#if defined(CONFIG_DEBUG_FS)

/* This function will send a packet to the HIQ to hang the HWS,
 * which will trigger a GPU reset and bring the HWS back to a normal state
 */
int kfd_debugfs_hang_hws(struct kfd_dev *dev)
{
        int r = 0;

        if (dev->dqm->sched_policy != KFD_SCHED_POLICY_HWS) {
                pr_err("HWS is not enabled\n");
                return -EINVAL;
        }

        r = pm_debugfs_hang_hws(&dev->dqm->packets);
        if (!r)
                r = dqm_debugfs_execute_queues(dev->dqm);

        return r;
}

#endif