drivers/s390/cio/vfio_ccw_drv.c
// SPDX-License-Identifier: GPL-2.0
/*
 * VFIO based Physical Subchannel device driver
 *
 * Copyright IBM Corp. 2017
 *
 * Author(s): Dong Jia Shi <bjsdjshi@linux.vnet.ibm.com>
 *            Xiao Feng Ren <renxiaof@linux.vnet.ibm.com>
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/uuid.h>
#include <linux/mdev.h>

#include <asm/isc.h>

#include "ioasm.h"
#include "css.h"
#include "vfio_ccw_private.h"

struct workqueue_struct *vfio_ccw_work_q;

/*
 * Helpers
 */
int vfio_ccw_sch_quiesce(struct subchannel *sch)
{
        struct vfio_ccw_private *private = dev_get_drvdata(&sch->dev);
        DECLARE_COMPLETION_ONSTACK(completion);
        int iretry, ret = 0;

        spin_lock_irq(sch->lock);
        if (!sch->schib.pmcw.ena)
                goto out_unlock;
        ret = cio_disable_subchannel(sch);
        if (ret != -EBUSY)
                goto out_unlock;

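        /*
         * Keep issuing cancel/halt/clear and waiting for the resulting
         * interrupts until the subchannel can finally be disabled.
         */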
        do {
                iretry = 255;

                ret = cio_cancel_halt_clear(sch, &iretry);
                while (ret == -EBUSY) {
                        /*
                         * Flush all I/O and wait for
                         * cancel/halt/clear completion.
                         */
                        private->completion = &completion;
                        spin_unlock_irq(sch->lock);

                        wait_for_completion_timeout(&completion, 3*HZ);

                        spin_lock_irq(sch->lock);
                        private->completion = NULL;
                        flush_workqueue(vfio_ccw_work_q);
                        ret = cio_cancel_halt_clear(sch, &iretry);
                }

                ret = cio_disable_subchannel(sch);
        } while (ret == -EBUSY);
out_unlock:
        private->state = VFIO_CCW_STATE_NOT_OPER;
        spin_unlock_irq(sch->lock);
        return ret;
}

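/*
 * Deliver the interrupt response block to userspace: update and release the
 * channel program if the interrupt was solicited, copy the IRB into the I/O
 * region and signal the eventfd registered by the mdev.
 */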
static void vfio_ccw_sch_io_todo(struct work_struct *work)
{
        struct vfio_ccw_private *private;
        struct irb *irb;

        private = container_of(work, struct vfio_ccw_private, io_work);
        irb = &private->irb;

        if (scsw_is_solicited(&irb->scsw)) {
                cp_update_scsw(&private->cp, &irb->scsw);
                cp_free(&private->cp);
        }
        memcpy(private->io_region.irb_area, irb, sizeof(*irb));

        if (private->io_trigger)
                eventfd_signal(private->io_trigger, 1);

        if (private->mdev)
                private->state = VFIO_CCW_STATE_IDLE;
}

/*
 * Css driver callbacks
 */
static void vfio_ccw_sch_irq(struct subchannel *sch)
{
        struct vfio_ccw_private *private = dev_get_drvdata(&sch->dev);

        inc_irq_stat(IRQIO_CIO);
        vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_INTERRUPT);
}

static int vfio_ccw_sch_probe(struct subchannel *sch)
{
        struct pmcw *pmcw = &sch->schib.pmcw;
        struct vfio_ccw_private *private;
        int ret;

        if (pmcw->qf) {
                dev_warn(&sch->dev, "vfio: ccw: does not support QDIO: %s\n",
                         dev_name(&sch->dev));
                return -ENODEV;
        }

        private = kzalloc(sizeof(*private), GFP_KERNEL | GFP_DMA);
        if (!private)
                return -ENOMEM;
        private->sch = sch;
        dev_set_drvdata(&sch->dev, private);

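        /* Enable the subchannel on the vfio-ccw ISC before registering the mdev. */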
        spin_lock_irq(sch->lock);
        private->state = VFIO_CCW_STATE_NOT_OPER;
        sch->isc = VFIO_CCW_ISC;
        ret = cio_enable_subchannel(sch, (u32)(unsigned long)sch);
        spin_unlock_irq(sch->lock);
        if (ret)
                goto out_free;

        ret = vfio_ccw_mdev_reg(sch);
        if (ret)
                goto out_disable;

        INIT_WORK(&private->io_work, vfio_ccw_sch_io_todo);
        atomic_set(&private->avail, 1);
        private->state = VFIO_CCW_STATE_STANDBY;

        return 0;

out_disable:
        cio_disable_subchannel(sch);
out_free:
        dev_set_drvdata(&sch->dev, NULL);
        kfree(private);
        return ret;
}

static int vfio_ccw_sch_remove(struct subchannel *sch)
{
        struct vfio_ccw_private *private = dev_get_drvdata(&sch->dev);

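        /* Quiesce any in-flight I/O before tearing down the mdev. */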
        vfio_ccw_sch_quiesce(sch);

        vfio_ccw_mdev_unreg(sch);

        dev_set_drvdata(&sch->dev, NULL);

        kfree(private);

        return 0;
}

static void vfio_ccw_sch_shutdown(struct subchannel *sch)
{
        vfio_ccw_sch_quiesce(sch);
}

/**
 * vfio_ccw_sch_event - process subchannel event
 * @sch: subchannel
 * @process: non-zero if function is called in process context
 *
 * An unspecified event occurred for this subchannel. Adjust data according
 * to the current operational state of the subchannel. Return zero when the
 * event has been handled sufficiently or -EAGAIN when this function should
 * be called again in process context.
 */
static int vfio_ccw_sch_event(struct subchannel *sch, int process)
{
        struct vfio_ccw_private *private = dev_get_drvdata(&sch->dev);
        unsigned long flags;

        spin_lock_irqsave(sch->lock, flags);
        if (!device_is_registered(&sch->dev))
                goto out_unlock;

        if (work_pending(&sch->todo_work))
                goto out_unlock;

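        /* If the schib can no longer be read, the subchannel is not operational. */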
        if (cio_update_schib(sch)) {
                vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_NOT_OPER);
                goto out_unlock;
        }

        private = dev_get_drvdata(&sch->dev);
        if (private->state == VFIO_CCW_STATE_NOT_OPER) {
                private->state = private->mdev ? VFIO_CCW_STATE_IDLE :
                                 VFIO_CCW_STATE_STANDBY;
        }

out_unlock:
        spin_unlock_irqrestore(sch->lock, flags);

        return 0;
}

static struct css_device_id vfio_ccw_sch_ids[] = {
        { .match_flags = 0x1, .type = SUBCHANNEL_TYPE_IO, },
        { /* end of list */ },
};
MODULE_DEVICE_TABLE(css, vfio_ccw_sch_ids);

static struct css_driver vfio_ccw_sch_driver = {
        .drv = {
                .name = "vfio_ccw",
                .owner = THIS_MODULE,
        },
        .subchannel_type = vfio_ccw_sch_ids,
        .irq = vfio_ccw_sch_irq,
        .probe = vfio_ccw_sch_probe,
        .remove = vfio_ccw_sch_remove,
        .shutdown = vfio_ccw_sch_shutdown,
        .sch_event = vfio_ccw_sch_event,
};

static int __init vfio_ccw_sch_init(void)
{
        int ret;

        vfio_ccw_work_q = create_singlethread_workqueue("vfio-ccw");
        if (!vfio_ccw_work_q)
                return -ENOMEM;

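        /* Make the ISC available before driver registration triggers probes. */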
        isc_register(VFIO_CCW_ISC);
        ret = css_driver_register(&vfio_ccw_sch_driver);
        if (ret) {
                isc_unregister(VFIO_CCW_ISC);
                destroy_workqueue(vfio_ccw_work_q);
        }

        return ret;
}

static void __exit vfio_ccw_sch_exit(void)
{
        css_driver_unregister(&vfio_ccw_sch_driver);
        isc_unregister(VFIO_CCW_ISC);
        destroy_workqueue(vfio_ccw_work_q);
}
module_init(vfio_ccw_sch_init);
module_exit(vfio_ccw_sch_exit);

MODULE_LICENSE("GPL v2");