drivers/gpu/drm/i915/gvt/gvt.c — Intel GVT-g device model core
[linux-2.6-microblaze.git] / drivers / gpu / drm / i915 / gvt / gvt.c
1 /*
2  * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21  * SOFTWARE.
22  *
23  * Authors:
24  *    Kevin Tian <kevin.tian@intel.com>
25  *    Eddie Dong <eddie.dong@intel.com>
26  *
27  * Contributors:
28  *    Niu Bing <bing.niu@intel.com>
29  *    Zhi Wang <zhi.a.wang@intel.com>
30  *
31  */
32
33 #include <linux/types.h>
34 #include <linux/kthread.h>
35
36 #include "i915_drv.h"
37 #include "intel_gvt.h"
38 #include "gvt.h"
39 #include <linux/vfio.h>
40 #include <linux/mdev.h>
41
/* Global host-side GVT state: parent device, hypervisor backend and init flag. */
struct intel_gvt_host intel_gvt_host;

/* Printable names, indexed by the INTEL_GVT_HYPERVISOR_* type constants. */
static const char * const supported_hypervisors[] = {
	[INTEL_GVT_HYPERVISOR_XEN] = "XEN",
	[INTEL_GVT_HYPERVISOR_KVM] = "KVM",
};
48
49 static struct intel_vgpu_type *
50 intel_gvt_find_vgpu_type(struct intel_gvt *gvt, unsigned int type_group_id)
51 {
52         if (WARN_ON(type_group_id >= gvt->num_types))
53                 return NULL;
54         return &gvt->types[type_group_id];
55 }
56
57 static ssize_t available_instances_show(struct mdev_type *mtype,
58                                         struct mdev_type_attribute *attr,
59                                         char *buf)
60 {
61         struct intel_vgpu_type *type;
62         unsigned int num = 0;
63         void *gvt = kdev_to_i915(mtype_get_parent_dev(mtype))->gvt;
64
65         type = intel_gvt_find_vgpu_type(gvt, mtype_get_type_group_id(mtype));
66         if (!type)
67                 num = 0;
68         else
69                 num = type->avail_instance;
70
71         return sprintf(buf, "%u\n", num);
72 }
73
/* sysfs "device_api" attribute: all GVT vGPUs expose the vfio-pci device API. */
static ssize_t device_api_show(struct mdev_type *mtype,
			       struct mdev_type_attribute *attr, char *buf)
{
	return sprintf(buf, "%s\n", VFIO_DEVICE_API_PCI_STRING);
}
79
80 static ssize_t description_show(struct mdev_type *mtype,
81                                 struct mdev_type_attribute *attr, char *buf)
82 {
83         struct intel_vgpu_type *type;
84         void *gvt = kdev_to_i915(mtype_get_parent_dev(mtype))->gvt;
85
86         type = intel_gvt_find_vgpu_type(gvt, mtype_get_type_group_id(mtype));
87         if (!type)
88                 return 0;
89
90         return sprintf(buf, "low_gm_size: %dMB\nhigh_gm_size: %dMB\n"
91                        "fence: %d\nresolution: %s\n"
92                        "weight: %d\n",
93                        BYTES_TO_MB(type->low_gm_size),
94                        BYTES_TO_MB(type->high_gm_size),
95                        type->fence, vgpu_edid_str(type->resolution),
96                        type->weight);
97 }
98
/* Read-only sysfs attributes exposed for every mdev (vGPU) type. */
static MDEV_TYPE_ATTR_RO(available_instances);
static MDEV_TYPE_ATTR_RO(device_api);
static MDEV_TYPE_ATTR_RO(description);

static struct attribute *gvt_type_attrs[] = {
	&mdev_type_attr_available_instances.attr,
	&mdev_type_attr_device_api.attr,
	&mdev_type_attr_description.attr,
	NULL,	/* sysfs attribute arrays are NULL-terminated */
};

/*
 * One attribute group slot per possible vGPU type; slots start out NULL and
 * are populated by intel_gvt_init_vgpu_type_groups().
 */
static struct attribute_group *gvt_vgpu_type_groups[] = {
	[0 ... NR_MAX_INTEL_VGPU_TYPES - 1] = NULL,
};
113
/* Hand the static vGPU type group table back to the caller (wired up via intel_gvt_ops). */
static bool intel_get_gvt_attrs(struct attribute_group ***intel_vgpu_type_groups)
{
	*intel_vgpu_type_groups = gvt_vgpu_type_groups;
	return true;	/* cannot fail */
}
119
120 static int intel_gvt_init_vgpu_type_groups(struct intel_gvt *gvt)
121 {
122         int i, j;
123         struct intel_vgpu_type *type;
124         struct attribute_group *group;
125
126         for (i = 0; i < gvt->num_types; i++) {
127                 type = &gvt->types[i];
128
129                 group = kzalloc(sizeof(struct attribute_group), GFP_KERNEL);
130                 if (WARN_ON(!group))
131                         goto unwind;
132
133                 group->name = type->name;
134                 group->attrs = gvt_type_attrs;
135                 gvt_vgpu_type_groups[i] = group;
136         }
137
138         return 0;
139
140 unwind:
141         for (j = 0; j < i; j++) {
142                 group = gvt_vgpu_type_groups[j];
143                 kfree(group);
144         }
145
146         return -ENOMEM;
147 }
148
149 static void intel_gvt_cleanup_vgpu_type_groups(struct intel_gvt *gvt)
150 {
151         int i;
152         struct attribute_group *group;
153
154         for (i = 0; i < gvt->num_types; i++) {
155                 group = gvt_vgpu_type_groups[i];
156                 gvt_vgpu_type_groups[i] = NULL;
157                 kfree(group);
158         }
159 }
160
/*
 * Callback table handed to the hypervisor backend via
 * intel_gvt_hypervisor_host_init(); lets the backend drive config-space and
 * MMIO emulation, vGPU lifecycle, plane/dmabuf queries and page-track writes.
 */
static const struct intel_gvt_ops intel_gvt_ops = {
	.emulate_cfg_read = intel_vgpu_emulate_cfg_read,
	.emulate_cfg_write = intel_vgpu_emulate_cfg_write,
	.emulate_mmio_read = intel_vgpu_emulate_mmio_read,
	.emulate_mmio_write = intel_vgpu_emulate_mmio_write,
	.vgpu_create = intel_gvt_create_vgpu,
	.vgpu_destroy = intel_gvt_destroy_vgpu,
	.vgpu_release = intel_gvt_release_vgpu,
	.vgpu_reset = intel_gvt_reset_vgpu,
	.vgpu_activate = intel_gvt_activate_vgpu,
	.vgpu_deactivate = intel_gvt_deactivate_vgpu,
	.gvt_find_vgpu_type = intel_gvt_find_vgpu_type,
	.get_gvt_attrs = intel_get_gvt_attrs,
	.vgpu_query_plane = intel_vgpu_query_plane,
	.vgpu_get_dmabuf = intel_vgpu_get_dmabuf,
	.write_protect_handler = intel_vgpu_page_track_handler,
	.emulate_hotplug = intel_vgpu_emulate_hotplug,
};
179
/*
 * Populate the fixed parameters of the virtual device presented to guests.
 * All values are constants of the GVT device model except msi_cap_offset,
 * which is read from the real PCI device.
 */
static void init_device_info(struct intel_gvt *gvt)
{
	struct intel_gvt_device_info *info = &gvt->device_info;
	struct pci_dev *pdev = to_pci_dev(gvt->gt->i915->drm.dev);

	info->max_support_vgpus = 8;
	info->cfg_space_size = PCI_CFG_SPACE_EXP_SIZE;	/* full PCIe config space */
	info->mmio_size = 2 * 1024 * 1024;
	info->mmio_bar = 0;
	info->gtt_start_offset = 8 * 1024 * 1024;	/* NOTE(review): offset of GTT within BAR0 — confirm against hw spec */
	info->gtt_entry_size = 8;
	info->gtt_entry_size_shift = 3;	/* log2(gtt_entry_size) */
	info->gmadr_bytes_in_cmd = 8;
	info->max_surface_size = 36 * 1024 * 1024;
	info->msi_cap_offset = pdev->msi_cap;	/* probed from the physical device */
}
196
197 static void intel_gvt_test_and_emulate_vblank(struct intel_gvt *gvt)
198 {
199         struct intel_vgpu *vgpu;
200         int id;
201
202         mutex_lock(&gvt->lock);
203         idr_for_each_entry((&(gvt)->vgpu_idr), (vgpu), (id)) {
204                 if (test_and_clear_bit(INTEL_GVT_REQUEST_EMULATE_VBLANK + id,
205                                        (void *)&gvt->service_request)) {
206                         if (vgpu->active)
207                                 intel_vgpu_emulate_vblank(vgpu);
208                 }
209         }
210         mutex_unlock(&gvt->lock);
211 }
212
/*
 * Service thread main loop: sleeps until a service-request bit is raised,
 * then dispatches vblank emulation and (re)scheduling work.  Exits when
 * kthread_stop() is called.
 */
static int gvt_service_thread(void *data)
{
	struct intel_gvt *gvt = (struct intel_gvt *)data;
	int ret;

	gvt_dbg_core("service thread start\n");

	while (!kthread_should_stop()) {
		/* Wake on any pending request or on a stop signal. */
		ret = wait_event_interruptible(gvt->service_thread_wq,
				kthread_should_stop() || gvt->service_request);

		if (kthread_should_stop())
			break;

		/* ret != 0 means a signal interrupted the wait: nothing to do. */
		if (WARN_ONCE(ret, "service thread is waken up by signal.\n"))
			continue;

		intel_gvt_test_and_emulate_vblank(gvt);

		/* Both scheduling request types funnel into intel_gvt_schedule(). */
		if (test_bit(INTEL_GVT_REQUEST_SCHED,
				(void *)&gvt->service_request) ||
			test_bit(INTEL_GVT_REQUEST_EVENT_SCHED,
					(void *)&gvt->service_request)) {
			intel_gvt_schedule(gvt);
		}
	}

	return 0;
}
242
/* Stop the service thread started by init_service_thread(). */
static void clean_service_thread(struct intel_gvt *gvt)
{
	kthread_stop(gvt->service_thread);
}
247
/*
 * Start the GVT service thread and its wait queue.
 *
 * Returns 0 on success or the kthread_run() error code.  Note that on
 * failure gvt->service_thread holds the ERR_PTR value, not NULL.
 */
static int init_service_thread(struct intel_gvt *gvt)
{
	init_waitqueue_head(&gvt->service_thread_wq);

	gvt->service_thread = kthread_run(gvt_service_thread,
			gvt, "gvt_service_thread");
	if (IS_ERR(gvt->service_thread)) {
		gvt_err("fail to start service thread.\n");
		return PTR_ERR(gvt->service_thread);
	}
	return 0;
}
260
261 /**
262  * intel_gvt_clean_device - clean a GVT device
263  * @i915: i915 private
264  *
265  * This function is called at the driver unloading stage, to free the
266  * resources owned by a GVT device.
267  *
268  */
269 void intel_gvt_clean_device(struct drm_i915_private *i915)
270 {
271         struct intel_gvt *gvt = fetch_and_zero(&i915->gvt);
272
273         if (drm_WARN_ON(&i915->drm, !gvt))
274                 return;
275
276         intel_gvt_destroy_idle_vgpu(gvt->idle_vgpu);
277         intel_gvt_cleanup_vgpu_type_groups(gvt);
278         intel_gvt_clean_vgpu_types(gvt);
279
280         intel_gvt_debugfs_clean(gvt);
281         clean_service_thread(gvt);
282         intel_gvt_clean_cmd_parser(gvt);
283         intel_gvt_clean_sched_policy(gvt);
284         intel_gvt_clean_workload_scheduler(gvt);
285         intel_gvt_clean_gtt(gvt);
286         intel_gvt_free_firmware(gvt);
287         intel_gvt_clean_mmio_info(gvt);
288         idr_destroy(&gvt->vgpu_idr);
289
290         kfree(i915->gvt);
291 }
292
/**
 * intel_gvt_init_device - initialize a GVT device
 * @i915: drm i915 private data
 *
 * This function is called at the initialization stage, to initialize
 * necessary GVT components.  Each init step is unwound in reverse order
 * via the goto chain at the bottom on failure.
 *
 * Returns:
 * Zero on success, negative error code if failed.
 *
 */
int intel_gvt_init_device(struct drm_i915_private *i915)
{
	struct intel_gvt *gvt;
	struct intel_vgpu *vgpu;
	int ret;

	/* Refuse double initialization. */
	if (drm_WARN_ON(&i915->drm, i915->gvt))
		return -EEXIST;

	gvt = kzalloc(sizeof(struct intel_gvt), GFP_KERNEL);
	if (!gvt)
		return -ENOMEM;

	gvt_dbg_core("init gvt device\n");

	/* vGPU ids start at 1; 0 is reserved. */
	idr_init_base(&gvt->vgpu_idr, 1);
	spin_lock_init(&gvt->scheduler.mmio_context_lock);
	mutex_init(&gvt->lock);
	mutex_init(&gvt->sched_lock);
	gvt->gt = &i915->gt;
	/* Publish early: some init steps below reach gvt through i915. */
	i915->gvt = gvt;

	init_device_info(gvt);

	ret = intel_gvt_setup_mmio_info(gvt);
	if (ret)
		goto out_clean_idr;

	intel_gvt_init_engine_mmio_context(gvt);

	ret = intel_gvt_load_firmware(gvt);
	if (ret)
		goto out_clean_mmio_info;

	ret = intel_gvt_init_irq(gvt);
	if (ret)
		goto out_free_firmware;

	ret = intel_gvt_init_gtt(gvt);
	if (ret)
		goto out_free_firmware;

	ret = intel_gvt_init_workload_scheduler(gvt);
	if (ret)
		goto out_clean_gtt;

	ret = intel_gvt_init_sched_policy(gvt);
	if (ret)
		goto out_clean_workload_scheduler;

	ret = intel_gvt_init_cmd_parser(gvt);
	if (ret)
		goto out_clean_sched_policy;

	ret = init_service_thread(gvt);
	if (ret)
		goto out_clean_cmd_parser;

	ret = intel_gvt_init_vgpu_types(gvt);
	if (ret)
		goto out_clean_thread;

	ret = intel_gvt_init_vgpu_type_groups(gvt);
	if (ret) {
		gvt_err("failed to init vgpu type groups: %d\n", ret);
		goto out_clean_types;
	}

	/* The idle vGPU acts as the placeholder owner when no real vGPU runs. */
	vgpu = intel_gvt_create_idle_vgpu(gvt);
	if (IS_ERR(vgpu)) {
		ret = PTR_ERR(vgpu);
		gvt_err("failed to create idle vgpu\n");
		goto out_clean_types;
	}
	gvt->idle_vgpu = vgpu;

	intel_gvt_debugfs_init(gvt);

	gvt_dbg_core("gvt device initialization is done\n");
	/* Mark host state ready so a hypervisor module may register. */
	intel_gvt_host.dev = i915->drm.dev;
	intel_gvt_host.initialized = true;
	return 0;

out_clean_types:
	intel_gvt_clean_vgpu_types(gvt);
out_clean_thread:
	clean_service_thread(gvt);
out_clean_cmd_parser:
	intel_gvt_clean_cmd_parser(gvt);
out_clean_sched_policy:
	intel_gvt_clean_sched_policy(gvt);
out_clean_workload_scheduler:
	intel_gvt_clean_workload_scheduler(gvt);
out_clean_gtt:
	intel_gvt_clean_gtt(gvt);
out_free_firmware:
	intel_gvt_free_firmware(gvt);
out_clean_mmio_info:
	intel_gvt_clean_mmio_info(gvt);
out_clean_idr:
	idr_destroy(&gvt->vgpu_idr);
	kfree(gvt);
	i915->gvt = NULL;
	return ret;
}
409
/*
 * intel_gvt_pm_resume - restore GVT-tracked hardware state on resume
 * @gvt: GVT device
 *
 * Restores fence registers, saved MMIO state and the global GTT contents.
 * Always returns 0.
 */
int
intel_gvt_pm_resume(struct intel_gvt *gvt)
{
	intel_gvt_restore_fence(gvt);
	intel_gvt_restore_mmio(gvt);
	intel_gvt_restore_ggtt(gvt);
	return 0;
}
418
/*
 * intel_gvt_register_hypervisor - register a hypervisor (MPT) backend
 * @m: hypervisor callback table; only KVM and Xen types are accepted
 *
 * Pins this module for the lifetime of the registration and hands the GVT
 * device plus the intel_gvt_ops callback table to the backend.
 *
 * Returns: 0 on success, -ENODEV if GVT is not initialized, the module
 * reference cannot be taken, or backend init fails; -EINVAL for an
 * unsupported hypervisor type.
 */
int
intel_gvt_register_hypervisor(const struct intel_gvt_mpt *m)
{
	int ret;
	void *gvt;

	if (!intel_gvt_host.initialized)
		return -ENODEV;

	if (m->type != INTEL_GVT_HYPERVISOR_KVM &&
	    m->type != INTEL_GVT_HYPERVISOR_XEN)
		return -EINVAL;

	/* Get a reference for device model module */
	if (!try_module_get(THIS_MODULE))
		return -ENODEV;

	intel_gvt_host.mpt = m;
	intel_gvt_host.hypervisor_type = m->type;
	gvt = (void *)kdev_to_i915(intel_gvt_host.dev)->gvt;

	ret = intel_gvt_hypervisor_host_init(intel_gvt_host.dev, gvt,
					     &intel_gvt_ops);
	if (ret < 0) {
		gvt_err("Failed to init %s hypervisor module\n",
			supported_hypervisors[intel_gvt_host.hypervisor_type]);
		/* NOTE(review): the backend's specific error (ret) is discarded here. */
		module_put(THIS_MODULE);
		return -ENODEV;
	}
	gvt_dbg_core("Running with hypervisor %s in host mode\n",
		     supported_hypervisors[intel_gvt_host.hypervisor_type]);
	return 0;
}
EXPORT_SYMBOL_GPL(intel_gvt_register_hypervisor);
453
/*
 * intel_gvt_unregister_hypervisor - unregister the current hypervisor backend
 *
 * Tears down the backend's host-side state, then drops the module reference
 * taken by intel_gvt_register_hypervisor().
 */
void
intel_gvt_unregister_hypervisor(void)
{
	intel_gvt_hypervisor_host_exit(intel_gvt_host.dev);
	module_put(THIS_MODULE);
}
EXPORT_SYMBOL_GPL(intel_gvt_unregister_hypervisor);