/*
 * Source: Linux kernel tree (linux-2.6-microblaze.git mirror),
 * drivers/s390/cio/vfio_ccw_drv.c, as of commit
 * "vfio/mdev: add mdev available instance checking to the core".
 */
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * VFIO based Physical Subchannel device driver
4  *
5  * Copyright IBM Corp. 2017
6  * Copyright Red Hat, Inc. 2019
7  *
8  * Author(s): Dong Jia Shi <bjsdjshi@linux.vnet.ibm.com>
9  *            Xiao Feng Ren <renxiaof@linux.vnet.ibm.com>
10  *            Cornelia Huck <cohuck@redhat.com>
11  */
12
13 #include <linux/module.h>
14 #include <linux/init.h>
15 #include <linux/slab.h>
16 #include <linux/mdev.h>
17
18 #include <asm/isc.h>
19
20 #include "chp.h"
21 #include "ioasm.h"
22 #include "css.h"
23 #include "vfio_ccw_private.h"
24
/* Single workqueue shared by all vfio-ccw subchannels for deferred work. */
struct workqueue_struct *vfio_ccw_work_q;
/* Slab caches backing the per-device vfio regions (allocated with GFP_DMA). */
static struct kmem_cache *vfio_ccw_io_region;
static struct kmem_cache *vfio_ccw_cmd_region;
static struct kmem_cache *vfio_ccw_schib_region;
static struct kmem_cache *vfio_ccw_crw_region;

/* s390 debug feature areas: formatted messages and hex/ascii traces. */
debug_info_t *vfio_ccw_debug_msg_id;
debug_info_t *vfio_ccw_debug_trace_id;
33
34 /*
35  * Helpers
36  */
/*
 * vfio_ccw_sch_quiesce - quiesce a subchannel
 * @sch: subchannel to quiesce
 *
 * Repeatedly issue cancel/halt/clear and flush outstanding work until
 * the subchannel can be disabled.  Called with sch->lock held; the lock
 * is dropped and re-acquired while waiting for I/O to drain.
 *
 * Returns 0 on success or a negative errno from
 * cio_cancel_halt_clear()/cio_disable_subchannel().
 */
int vfio_ccw_sch_quiesce(struct subchannel *sch)
{
	struct vfio_ccw_private *private = dev_get_drvdata(&sch->dev);
	DECLARE_COMPLETION_ONSTACK(completion);
	int iretry, ret = 0;

	/* Retry budget consumed by cio_cancel_halt_clear(). */
	iretry = 255;
	do {

		ret = cio_cancel_halt_clear(sch, &iretry);

		if (ret == -EIO) {
			pr_err("vfio_ccw: could not quiesce subchannel 0.%x.%04x!\n",
			       sch->schid.ssid, sch->schid.sch_no);
			break;
		}

		/*
		 * Flush all I/O and wait for
		 * cancel/halt/clear completion.
		 */
		private->completion = &completion;
		spin_unlock_irq(sch->lock);

		/* -EBUSY: request accepted, wait for the final interrupt. */
		if (ret == -EBUSY)
			wait_for_completion_timeout(&completion, 3*HZ);

		private->completion = NULL;
		flush_workqueue(vfio_ccw_work_q);
		spin_lock_irq(sch->lock);
		ret = cio_disable_subchannel(sch);
	} while (ret == -EBUSY);

	return ret;
}
72
/*
 * vfio_ccw_sch_io_todo - deferred handling of an I/O interrupt
 * @work: io_work member of a struct vfio_ccw_private
 *
 * Copy the IRB stored by the interrupt handler into the io_region so
 * userspace can read it, update the channel-program state, and signal
 * the io eventfd if one has been registered.
 */
static void vfio_ccw_sch_io_todo(struct work_struct *work)
{
	struct vfio_ccw_private *private;
	struct irb *irb;
	bool is_final;
	bool cp_is_finished = false;

	private = container_of(work, struct vfio_ccw_private, io_work);
	irb = &private->irb;

	/* Final when neither device nor subchannel activity remains. */
	is_final = !(scsw_actl(&irb->scsw) &
		     (SCSW_ACTL_DEVACT | SCSW_ACTL_SCHACT));
	if (scsw_is_solicited(&irb->scsw)) {
		cp_update_scsw(&private->cp, &irb->scsw);
		if (is_final && private->state == VFIO_CCW_STATE_CP_PENDING) {
			cp_free(&private->cp);
			cp_is_finished = true;
		}
	}
	/* io_mutex serializes against userspace access to io_region. */
	mutex_lock(&private->io_mutex);
	memcpy(private->io_region->irb_area, irb, sizeof(*irb));
	mutex_unlock(&private->io_mutex);

	/*
	 * Reset to IDLE only if processing of a channel program
	 * has finished. Do not overwrite a possible processing
	 * state if the interrupt was unsolicited, or if the final
	 * interrupt was for HSCH or CSCH.
	 */
	if (cp_is_finished)
		private->state = VFIO_CCW_STATE_IDLE;

	if (private->io_trigger)
		eventfd_signal(private->io_trigger, 1);
}
108
109 static void vfio_ccw_crw_todo(struct work_struct *work)
110 {
111         struct vfio_ccw_private *private;
112
113         private = container_of(work, struct vfio_ccw_private, crw_work);
114
115         if (!list_empty(&private->crw) && private->crw_trigger)
116                 eventfd_signal(private->crw_trigger, 1);
117 }
118
119 /*
120  * Css driver callbacks
121  */
122 static void vfio_ccw_sch_irq(struct subchannel *sch)
123 {
124         struct vfio_ccw_private *private = dev_get_drvdata(&sch->dev);
125
126         inc_irq_stat(IRQIO_CIO);
127         vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_INTERRUPT);
128 }
129
130 static struct vfio_ccw_private *vfio_ccw_alloc_private(struct subchannel *sch)
131 {
132         struct vfio_ccw_private *private;
133
134         private = kzalloc(sizeof(*private), GFP_KERNEL);
135         if (!private)
136                 return ERR_PTR(-ENOMEM);
137
138         private->sch = sch;
139         mutex_init(&private->io_mutex);
140         private->state = VFIO_CCW_STATE_STANDBY;
141         INIT_LIST_HEAD(&private->crw);
142         INIT_WORK(&private->io_work, vfio_ccw_sch_io_todo);
143         INIT_WORK(&private->crw_work, vfio_ccw_crw_todo);
144
145         private->cp.guest_cp = kcalloc(CCWCHAIN_LEN_MAX, sizeof(struct ccw1),
146                                        GFP_KERNEL);
147         if (!private->cp.guest_cp)
148                 goto out_free_private;
149
150         private->io_region = kmem_cache_zalloc(vfio_ccw_io_region,
151                                                GFP_KERNEL | GFP_DMA);
152         if (!private->io_region)
153                 goto out_free_cp;
154
155         private->cmd_region = kmem_cache_zalloc(vfio_ccw_cmd_region,
156                                                 GFP_KERNEL | GFP_DMA);
157         if (!private->cmd_region)
158                 goto out_free_io;
159
160         private->schib_region = kmem_cache_zalloc(vfio_ccw_schib_region,
161                                                   GFP_KERNEL | GFP_DMA);
162
163         if (!private->schib_region)
164                 goto out_free_cmd;
165
166         private->crw_region = kmem_cache_zalloc(vfio_ccw_crw_region,
167                                                 GFP_KERNEL | GFP_DMA);
168
169         if (!private->crw_region)
170                 goto out_free_schib;
171         return private;
172
173 out_free_schib:
174         kmem_cache_free(vfio_ccw_schib_region, private->schib_region);
175 out_free_cmd:
176         kmem_cache_free(vfio_ccw_cmd_region, private->cmd_region);
177 out_free_io:
178         kmem_cache_free(vfio_ccw_io_region, private->io_region);
179 out_free_cp:
180         kfree(private->cp.guest_cp);
181 out_free_private:
182         mutex_destroy(&private->io_mutex);
183         kfree(private);
184         return ERR_PTR(-ENOMEM);
185 }
186
187 static void vfio_ccw_free_private(struct vfio_ccw_private *private)
188 {
189         struct vfio_ccw_crw *crw, *temp;
190
191         list_for_each_entry_safe(crw, temp, &private->crw, next) {
192                 list_del(&crw->next);
193                 kfree(crw);
194         }
195
196         kmem_cache_free(vfio_ccw_crw_region, private->crw_region);
197         kmem_cache_free(vfio_ccw_schib_region, private->schib_region);
198         kmem_cache_free(vfio_ccw_cmd_region, private->cmd_region);
199         kmem_cache_free(vfio_ccw_io_region, private->io_region);
200         kfree(private->cp.guest_cp);
201         mutex_destroy(&private->io_mutex);
202         kfree(private);
203 }
204 static int vfio_ccw_sch_probe(struct subchannel *sch)
205 {
206         struct pmcw *pmcw = &sch->schib.pmcw;
207         struct vfio_ccw_private *private;
208         int ret = -ENOMEM;
209
210         if (pmcw->qf) {
211                 dev_warn(&sch->dev, "vfio: ccw: does not support QDIO: %s\n",
212                          dev_name(&sch->dev));
213                 return -ENODEV;
214         }
215
216         private = vfio_ccw_alloc_private(sch);
217         if (IS_ERR(private))
218                 return PTR_ERR(private);
219
220         dev_set_drvdata(&sch->dev, private);
221
222         private->mdev_type.sysfs_name = "io";
223         private->mdev_type.pretty_name = "I/O subchannel (Non-QDIO)";
224         private->mdev_types[0] = &private->mdev_type;
225         ret = mdev_register_parent(&private->parent, &sch->dev,
226                                    &vfio_ccw_mdev_driver,
227                                    private->mdev_types, 1);
228         if (ret)
229                 goto out_free;
230
231         VFIO_CCW_MSG_EVENT(4, "bound to subchannel %x.%x.%04x\n",
232                            sch->schid.cssid, sch->schid.ssid,
233                            sch->schid.sch_no);
234         return 0;
235
236 out_free:
237         dev_set_drvdata(&sch->dev, NULL);
238         vfio_ccw_free_private(private);
239         return ret;
240 }
241
242 static void vfio_ccw_sch_remove(struct subchannel *sch)
243 {
244         struct vfio_ccw_private *private = dev_get_drvdata(&sch->dev);
245
246         mdev_unregister_parent(&private->parent);
247
248         dev_set_drvdata(&sch->dev, NULL);
249
250         vfio_ccw_free_private(private);
251
252         VFIO_CCW_MSG_EVENT(4, "unbound from subchannel %x.%x.%04x\n",
253                            sch->schid.cssid, sch->schid.ssid,
254                            sch->schid.sch_no);
255 }
256
257 static void vfio_ccw_sch_shutdown(struct subchannel *sch)
258 {
259         struct vfio_ccw_private *private = dev_get_drvdata(&sch->dev);
260
261         vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_CLOSE);
262         vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_NOT_OPER);
263 }
264
/**
 * vfio_ccw_sch_event - process subchannel event
 * @sch: subchannel
 * @process: non-zero if function is called in process context
 *
 * An unspecified event occurred for this subchannel. Adjust data according
 * to the current operational state of the subchannel. Return zero when the
 * event has been handled sufficiently or -EAGAIN when this function should
 * be called again in process context.
 */
static int vfio_ccw_sch_event(struct subchannel *sch, int process)
{
	struct vfio_ccw_private *private = dev_get_drvdata(&sch->dev);
	unsigned long flags;
	int rc = -EAGAIN;

	spin_lock_irqsave(sch->lock, flags);
	/* Device gone or going away: nothing we can decide here. */
	if (!device_is_registered(&sch->dev))
		goto out_unlock;

	/* Defer (-EAGAIN) while todo work is still outstanding. */
	if (work_pending(&sch->todo_work))
		goto out_unlock;

	rc = 0;

	/* If the schib can no longer be fetched, the device is gone. */
	if (cio_update_schib(sch))
		vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_NOT_OPER);

out_unlock:
	spin_unlock_irqrestore(sch->lock, flags);

	return rc;
}
298
299 static void vfio_ccw_queue_crw(struct vfio_ccw_private *private,
300                                unsigned int rsc,
301                                unsigned int erc,
302                                unsigned int rsid)
303 {
304         struct vfio_ccw_crw *crw;
305
306         /*
307          * If unable to allocate a CRW, just drop the event and
308          * carry on.  The guest will either see a later one or
309          * learn when it issues its own store subchannel.
310          */
311         crw = kzalloc(sizeof(*crw), GFP_ATOMIC);
312         if (!crw)
313                 return;
314
315         /*
316          * Build the CRW based on the inputs given to us.
317          */
318         crw->crw.rsc = rsc;
319         crw->crw.erc = erc;
320         crw->crw.rsid = rsid;
321
322         list_add_tail(&crw->next, &private->crw);
323         queue_work(vfio_ccw_work_q, &private->crw_work);
324 }
325
/*
 * vfio_ccw_chp_event - handle a channel-path event for a subchannel
 * @sch: subchannel the event applies to
 * @link: channel-path link descriptor
 * @event: CHP_* event code
 *
 * Adjusts the subchannel's path masks and, for path gone/available
 * events, queues a CRW so the guest learns of the change.  Returns 0,
 * or -ENODEV when the subchannel information can no longer be read.
 */
static int vfio_ccw_chp_event(struct subchannel *sch,
			      struct chp_link *link, int event)
{
	struct vfio_ccw_private *private = dev_get_drvdata(&sch->dev);
	int mask = chp_ssd_get_mask(&sch->ssd_info, link);
	/* Retry budget for cio_cancel_halt_clear(). */
	int retry = 255;

	/* Ignore events for unbound devices or unaffected paths. */
	if (!private || !mask)
		return 0;

	trace_vfio_ccw_chp_event(private->sch->schid, mask, event);
	VFIO_CCW_MSG_EVENT(2, "sch %x.%x.%04x: mask=0x%x event=%d\n",
			   sch->schid.cssid,
			   sch->schid.ssid, sch->schid.sch_no,
			   mask, event);

	if (cio_update_schib(sch))
		return -ENODEV;

	switch (event) {
	case CHP_VARY_OFF:
		/* Path logically turned off */
		sch->opm &= ~mask;
		sch->lpm &= ~mask;
		if (sch->schib.pmcw.lpum & mask)
			cio_cancel_halt_clear(sch, &retry);
		break;
	case CHP_OFFLINE:
		/* Path is gone */
		if (sch->schib.pmcw.lpum & mask)
			cio_cancel_halt_clear(sch, &retry);
		vfio_ccw_queue_crw(private, CRW_RSC_CPATH, CRW_ERC_PERRN,
				   link->chpid.id);
		break;
	case CHP_VARY_ON:
		/* Path logically turned on */
		sch->opm |= mask;
		sch->lpm |= mask;
		break;
	case CHP_ONLINE:
		/* Path became available */
		sch->lpm |= mask & sch->opm;
		vfio_ccw_queue_crw(private, CRW_RSC_CPATH, CRW_ERC_INIT,
				   link->chpid.id);
		break;
	}

	return 0;
}
375
/* Subchannel types handled by this driver: I/O subchannels only. */
static struct css_device_id vfio_ccw_sch_ids[] = {
	{ .match_flags = 0x1, .type = SUBCHANNEL_TYPE_IO, },
	{ /* end of list */ },
};
MODULE_DEVICE_TABLE(css, vfio_ccw_sch_ids);
381
/* Channel-subsystem driver callbacks for vfio-ccw subchannels. */
static struct css_driver vfio_ccw_sch_driver = {
	.drv = {
		.name = "vfio_ccw",
		.owner = THIS_MODULE,
	},
	.subchannel_type = vfio_ccw_sch_ids,
	.irq = vfio_ccw_sch_irq,
	.probe = vfio_ccw_sch_probe,
	.remove = vfio_ccw_sch_remove,
	.shutdown = vfio_ccw_sch_shutdown,
	.sch_event = vfio_ccw_sch_event,
	.chp_event = vfio_ccw_chp_event,
};
395
396 static int __init vfio_ccw_debug_init(void)
397 {
398         vfio_ccw_debug_msg_id = debug_register("vfio_ccw_msg", 16, 1,
399                                                11 * sizeof(long));
400         if (!vfio_ccw_debug_msg_id)
401                 goto out_unregister;
402         debug_register_view(vfio_ccw_debug_msg_id, &debug_sprintf_view);
403         debug_set_level(vfio_ccw_debug_msg_id, 2);
404         vfio_ccw_debug_trace_id = debug_register("vfio_ccw_trace", 16, 1, 16);
405         if (!vfio_ccw_debug_trace_id)
406                 goto out_unregister;
407         debug_register_view(vfio_ccw_debug_trace_id, &debug_hex_ascii_view);
408         debug_set_level(vfio_ccw_debug_trace_id, 2);
409         return 0;
410
411 out_unregister:
412         debug_unregister(vfio_ccw_debug_msg_id);
413         debug_unregister(vfio_ccw_debug_trace_id);
414         return -1;
415 }
416
417 static void vfio_ccw_debug_exit(void)
418 {
419         debug_unregister(vfio_ccw_debug_msg_id);
420         debug_unregister(vfio_ccw_debug_trace_id);
421 }
422
/*
 * vfio_ccw_destroy_regions - destroy the region slab caches.
 * kmem_cache_destroy() is a no-op for caches that were never created.
 */
static void vfio_ccw_destroy_regions(void)
{
	kmem_cache_destroy(vfio_ccw_crw_region);
	kmem_cache_destroy(vfio_ccw_schib_region);
	kmem_cache_destroy(vfio_ccw_cmd_region);
	kmem_cache_destroy(vfio_ccw_io_region);
}
430
431 static int __init vfio_ccw_sch_init(void)
432 {
433         int ret;
434
435         ret = vfio_ccw_debug_init();
436         if (ret)
437                 return ret;
438
439         vfio_ccw_work_q = create_singlethread_workqueue("vfio-ccw");
440         if (!vfio_ccw_work_q) {
441                 ret = -ENOMEM;
442                 goto out_regions;
443         }
444
445         vfio_ccw_io_region = kmem_cache_create_usercopy("vfio_ccw_io_region",
446                                         sizeof(struct ccw_io_region), 0,
447                                         SLAB_ACCOUNT, 0,
448                                         sizeof(struct ccw_io_region), NULL);
449         if (!vfio_ccw_io_region) {
450                 ret = -ENOMEM;
451                 goto out_regions;
452         }
453
454         vfio_ccw_cmd_region = kmem_cache_create_usercopy("vfio_ccw_cmd_region",
455                                         sizeof(struct ccw_cmd_region), 0,
456                                         SLAB_ACCOUNT, 0,
457                                         sizeof(struct ccw_cmd_region), NULL);
458         if (!vfio_ccw_cmd_region) {
459                 ret = -ENOMEM;
460                 goto out_regions;
461         }
462
463         vfio_ccw_schib_region = kmem_cache_create_usercopy("vfio_ccw_schib_region",
464                                         sizeof(struct ccw_schib_region), 0,
465                                         SLAB_ACCOUNT, 0,
466                                         sizeof(struct ccw_schib_region), NULL);
467
468         if (!vfio_ccw_schib_region) {
469                 ret = -ENOMEM;
470                 goto out_regions;
471         }
472
473         vfio_ccw_crw_region = kmem_cache_create_usercopy("vfio_ccw_crw_region",
474                                         sizeof(struct ccw_crw_region), 0,
475                                         SLAB_ACCOUNT, 0,
476                                         sizeof(struct ccw_crw_region), NULL);
477
478         if (!vfio_ccw_crw_region) {
479                 ret = -ENOMEM;
480                 goto out_regions;
481         }
482
483         ret = mdev_register_driver(&vfio_ccw_mdev_driver);
484         if (ret)
485                 goto out_regions;
486
487         isc_register(VFIO_CCW_ISC);
488         ret = css_driver_register(&vfio_ccw_sch_driver);
489         if (ret) {
490                 isc_unregister(VFIO_CCW_ISC);
491                 goto out_driver;
492         }
493
494         return ret;
495
496 out_driver:
497         mdev_unregister_driver(&vfio_ccw_mdev_driver);
498 out_regions:
499         vfio_ccw_destroy_regions();
500         destroy_workqueue(vfio_ccw_work_q);
501         vfio_ccw_debug_exit();
502         return ret;
503 }
504
/*
 * vfio_ccw_sch_exit - module teardown, the reverse of
 * vfio_ccw_sch_init().  Drivers are unregistered before their shared
 * resources (caches, workqueue, debug areas) are destroyed.
 */
static void __exit vfio_ccw_sch_exit(void)
{
	css_driver_unregister(&vfio_ccw_sch_driver);
	mdev_unregister_driver(&vfio_ccw_mdev_driver);
	isc_unregister(VFIO_CCW_ISC);
	vfio_ccw_destroy_regions();
	destroy_workqueue(vfio_ccw_work_q);
	vfio_ccw_debug_exit();
}
/* Module entry/exit points and license. */
module_init(vfio_ccw_sch_init);
module_exit(vfio_ccw_sch_exit);

MODULE_LICENSE("GPL v2");