// SPDX-License-Identifier: GPL-2.0
/*
 * VFIO based Physical Subchannel device driver
 *
 * Copyright IBM Corp. 2017
 * Copyright Red Hat, Inc. 2019
 *
 * Author(s): Dong Jia Shi <bjsdjshi@linux.vnet.ibm.com>
 *            Xiao Feng Ren <renxiaof@linux.vnet.ibm.com>
 *            Cornelia Huck <cohuck@redhat.com>
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/uuid.h>
#include <linux/mdev.h>

#include <asm/isc.h>

#include "ioasm.h"
#include "css.h"
#include "vfio_ccw_private.h"

struct workqueue_struct *vfio_ccw_work_q;
static struct kmem_cache *vfio_ccw_io_region;
static struct kmem_cache *vfio_ccw_cmd_region;

/*
 * Helpers
 */
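/**
 * vfio_ccw_sch_quiesce - quiesce a subchannel
 * @sch: subchannel to quiesce
 *
 * Disable the subchannel, issuing cancel/halt/clear as needed and waiting
 * for outstanding I/O to be flushed before giving up.
 *
 * Returns 0 on success or a negative error code from
 * cio_disable_subchannel() or cio_cancel_halt_clear().
 */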
int vfio_ccw_sch_quiesce(struct subchannel *sch)
{
        struct vfio_ccw_private *private = dev_get_drvdata(&sch->dev);
        DECLARE_COMPLETION_ONSTACK(completion);
        int iretry, ret = 0;

        spin_lock_irq(sch->lock);
        if (!sch->schib.pmcw.ena)
                goto out_unlock;
        ret = cio_disable_subchannel(sch);
        if (ret != -EBUSY)
                goto out_unlock;

        iretry = 255;
        do {
                ret = cio_cancel_halt_clear(sch, &iretry);

                if (ret == -EIO) {
                        pr_err("vfio_ccw: could not quiesce subchannel 0.%x.%04x!\n",
                               sch->schid.ssid, sch->schid.sch_no);
                        break;
                }

                /*
                 * Flush all I/O and wait for
                 * cancel/halt/clear completion.
                 */
                private->completion = &completion;
                spin_unlock_irq(sch->lock);

                if (ret == -EBUSY)
                        wait_for_completion_timeout(&completion, 3*HZ);

                private->completion = NULL;
                flush_workqueue(vfio_ccw_work_q);
                spin_lock_irq(sch->lock);
                ret = cio_disable_subchannel(sch);
        } while (ret == -EBUSY);
out_unlock:
        private->state = VFIO_CCW_STATE_NOT_OPER;
        spin_unlock_irq(sch->lock);
        return ret;
}

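/*
 * vfio_ccw_sch_io_todo - deferred interrupt handling
 *
 * Runs on vfio_ccw_work_q after an interrupt was received: update the
 * channel program state from the IRB, copy the IRB into the I/O region
 * for userspace, and signal the eventfd so userspace can pick it up.
 */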
static void vfio_ccw_sch_io_todo(struct work_struct *work)
{
        struct vfio_ccw_private *private;
        struct irb *irb;
        bool is_final;

        private = container_of(work, struct vfio_ccw_private, io_work);
        irb = &private->irb;

        is_final = !(scsw_actl(&irb->scsw) &
                     (SCSW_ACTL_DEVACT | SCSW_ACTL_SCHACT));
        if (scsw_is_solicited(&irb->scsw)) {
                cp_update_scsw(&private->cp, &irb->scsw);
                if (is_final && private->state == VFIO_CCW_STATE_CP_PENDING)
                        cp_free(&private->cp);
        }
        mutex_lock(&private->io_mutex);
        memcpy(private->io_region->irb_area, irb, sizeof(*irb));
        mutex_unlock(&private->io_mutex);

        if (private->mdev && is_final)
                private->state = VFIO_CCW_STATE_IDLE;

        if (private->io_trigger)
                eventfd_signal(private->io_trigger, 1);
}

/*
 * Css driver callbacks
 */
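/* Account the interrupt and feed it to the state machine. */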
static void vfio_ccw_sch_irq(struct subchannel *sch)
{
        struct vfio_ccw_private *private = dev_get_drvdata(&sch->dev);

        inc_irq_stat(IRQIO_CIO);
        vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_INTERRUPT);
}

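/*
 * vfio_ccw_sch_probe - bind the driver to a subchannel
 *
 * Allocate the private data and the regions shared with userspace, enable
 * the subchannel for the vfio_ccw ISC and register the mediated device
 * infrastructure. QDIO subchannels are not supported.
 */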
static int vfio_ccw_sch_probe(struct subchannel *sch)
{
        struct pmcw *pmcw = &sch->schib.pmcw;
        struct vfio_ccw_private *private;
        int ret = -ENOMEM;

        if (pmcw->qf) {
                dev_warn(&sch->dev, "vfio: ccw: does not support QDIO: %s\n",
                         dev_name(&sch->dev));
                return -ENODEV;
        }

        private = kzalloc(sizeof(*private), GFP_KERNEL | GFP_DMA);
        if (!private)
                return -ENOMEM;

        private->cp.guest_cp = kcalloc(CCWCHAIN_LEN_MAX, sizeof(struct ccw1),
                                       GFP_KERNEL);
        if (!private->cp.guest_cp)
                goto out_free;

        private->io_region = kmem_cache_zalloc(vfio_ccw_io_region,
                                               GFP_KERNEL | GFP_DMA);
        if (!private->io_region)
                goto out_free;

        private->cmd_region = kmem_cache_zalloc(vfio_ccw_cmd_region,
                                                GFP_KERNEL | GFP_DMA);
        if (!private->cmd_region)
                goto out_free;

        private->sch = sch;
        dev_set_drvdata(&sch->dev, private);
        mutex_init(&private->io_mutex);

        spin_lock_irq(sch->lock);
        private->state = VFIO_CCW_STATE_NOT_OPER;
        sch->isc = VFIO_CCW_ISC;
        ret = cio_enable_subchannel(sch, (u32)(unsigned long)sch);
        spin_unlock_irq(sch->lock);
        if (ret)
                goto out_free;

        INIT_WORK(&private->io_work, vfio_ccw_sch_io_todo);
        atomic_set(&private->avail, 1);
        private->state = VFIO_CCW_STATE_STANDBY;

        ret = vfio_ccw_mdev_reg(sch);
        if (ret)
                goto out_disable;

        return 0;

out_disable:
        cio_disable_subchannel(sch);
out_free:
        dev_set_drvdata(&sch->dev, NULL);
        if (private->cmd_region)
                kmem_cache_free(vfio_ccw_cmd_region, private->cmd_region);
        if (private->io_region)
                kmem_cache_free(vfio_ccw_io_region, private->io_region);
        kfree(private->cp.guest_cp);
        kfree(private);
        return ret;
}

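/*
 * vfio_ccw_sch_remove - unbind the driver from a subchannel
 *
 * Quiesce the subchannel, unregister the mediated device and free all
 * resources allocated in vfio_ccw_sch_probe().
 */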
static int vfio_ccw_sch_remove(struct subchannel *sch)
{
        struct vfio_ccw_private *private = dev_get_drvdata(&sch->dev);

        vfio_ccw_sch_quiesce(sch);

        vfio_ccw_mdev_unreg(sch);

        dev_set_drvdata(&sch->dev, NULL);

        kmem_cache_free(vfio_ccw_cmd_region, private->cmd_region);
        kmem_cache_free(vfio_ccw_io_region, private->io_region);
        kfree(private->cp.guest_cp);
        kfree(private);

        return 0;
}

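/* Make sure no I/O is in flight when the system goes down. */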
static void vfio_ccw_sch_shutdown(struct subchannel *sch)
{
        vfio_ccw_sch_quiesce(sch);
}

/**
 * vfio_ccw_sch_event - process subchannel event
 * @sch: subchannel
 * @process: non-zero if function is called in process context
 *
 * An unspecified event occurred for this subchannel. Adjust data according
 * to the current operational state of the subchannel. Return zero when the
 * event has been handled sufficiently or -EAGAIN when this function should
 * be called again in process context.
 */
static int vfio_ccw_sch_event(struct subchannel *sch, int process)
{
        struct vfio_ccw_private *private = dev_get_drvdata(&sch->dev);
        unsigned long flags;
        int rc = -EAGAIN;

        spin_lock_irqsave(sch->lock, flags);
        if (!device_is_registered(&sch->dev))
                goto out_unlock;

        if (work_pending(&sch->todo_work))
                goto out_unlock;

        if (cio_update_schib(sch)) {
                vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_NOT_OPER);
                rc = 0;
                goto out_unlock;
        }

        private = dev_get_drvdata(&sch->dev);
        if (private->state == VFIO_CCW_STATE_NOT_OPER) {
                private->state = private->mdev ? VFIO_CCW_STATE_IDLE :
                                 VFIO_CCW_STATE_STANDBY;
        }
        rc = 0;

out_unlock:
        spin_unlock_irqrestore(sch->lock, flags);

        return rc;
}

static struct css_device_id vfio_ccw_sch_ids[] = {
        { .match_flags = 0x1, .type = SUBCHANNEL_TYPE_IO, },
        { /* end of list */ },
};
MODULE_DEVICE_TABLE(css, vfio_ccw_sch_ids);

static struct css_driver vfio_ccw_sch_driver = {
        .drv = {
                .name = "vfio_ccw",
                .owner = THIS_MODULE,
        },
        .subchannel_type = vfio_ccw_sch_ids,
        .irq = vfio_ccw_sch_irq,
        .probe = vfio_ccw_sch_probe,
        .remove = vfio_ccw_sch_remove,
        .shutdown = vfio_ccw_sch_shutdown,
        .sch_event = vfio_ccw_sch_event,
};

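/*
 * vfio_ccw_sch_init - module initialization
 *
 * Create the workqueue for deferred interrupt handling, set up the
 * usercopy slab caches for the I/O and async command regions, and
 * register both the vfio_ccw ISC and the css driver.
 */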
static int __init vfio_ccw_sch_init(void)
{
        int ret = -ENOMEM;

        vfio_ccw_work_q = create_singlethread_workqueue("vfio-ccw");
        if (!vfio_ccw_work_q)
                return -ENOMEM;

        vfio_ccw_io_region = kmem_cache_create_usercopy("vfio_ccw_io_region",
                                        sizeof(struct ccw_io_region), 0,
                                        SLAB_ACCOUNT, 0,
                                        sizeof(struct ccw_io_region), NULL);
        if (!vfio_ccw_io_region)
                goto out_err;

        vfio_ccw_cmd_region = kmem_cache_create_usercopy("vfio_ccw_cmd_region",
                                        sizeof(struct ccw_cmd_region), 0,
                                        SLAB_ACCOUNT, 0,
                                        sizeof(struct ccw_cmd_region), NULL);
        if (!vfio_ccw_cmd_region)
                goto out_err;

        isc_register(VFIO_CCW_ISC);
        ret = css_driver_register(&vfio_ccw_sch_driver);
        if (ret) {
                isc_unregister(VFIO_CCW_ISC);
                goto out_err;
        }

        return ret;

out_err:
        kmem_cache_destroy(vfio_ccw_cmd_region);
        kmem_cache_destroy(vfio_ccw_io_region);
        destroy_workqueue(vfio_ccw_work_q);
        return ret;
}

static void __exit vfio_ccw_sch_exit(void)
{
        css_driver_unregister(&vfio_ccw_sch_driver);
        isc_unregister(VFIO_CCW_ISC);
        kmem_cache_destroy(vfio_ccw_io_region);
        kmem_cache_destroy(vfio_ccw_cmd_region);
        destroy_workqueue(vfio_ccw_work_q);
}
module_init(vfio_ccw_sch_init);
module_exit(vfio_ccw_sch_exit);

MODULE_LICENSE("GPL v2");