// SPDX-License-Identifier: GPL-2.0 OR MIT

/*
 * Xen para-virtual sound device
 *
 * Copyright (C) 2016-2018 EPAM Systems Inc.
 *
 * Author: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>
 */

#include <linux/delay.h>
#include <linux/module.h>

#include <xen/page.h>
#include <xen/platform_pci.h>
#include <xen/xen.h>
#include <xen/xenbus.h>

#include <xen/interface/io/sndif.h>

#include "xen_snd_front.h"
#include "xen_snd_front_alsa.h"
#include "xen_snd_front_evtchnl.h"
#include "xen_snd_front_shbuf.h"

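/*
 * Reserve the next free slot on the shared request ring and stamp it with
 * the next event id. The caller must hold ring_io_lock.
 */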
static struct xensnd_req *
be_stream_prepare_req(struct xen_snd_front_evtchnl *evtchnl, u8 operation)
{
        struct xensnd_req *req;

        req = RING_GET_REQUEST(&evtchnl->u.req.ring,
                               evtchnl->u.req.ring.req_prod_pvt);
        req->operation = operation;
        req->id = evtchnl->evt_next_id++;
        evtchnl->evt_id = req->id;
        return req;
}

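/*
 * Re-arm the response completion and flush the request ring towards the
 * backend. Fails with -EIO if the event channel is not connected.
 */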
static int be_stream_do_io(struct xen_snd_front_evtchnl *evtchnl)
{
        if (unlikely(evtchnl->state != EVTCHNL_STATE_CONNECTED))
                return -EIO;

        reinit_completion(&evtchnl->u.req.completion);
        xen_snd_front_evtchnl_flush(evtchnl);
        return 0;
}

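/*
 * Wait up to VSND_WAIT_BACK_MS for the backend to complete the request and
 * return the status carried in its response.
 */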
static int be_stream_wait_io(struct xen_snd_front_evtchnl *evtchnl)
{
        if (wait_for_completion_timeout(&evtchnl->u.req.completion,
                        msecs_to_jiffies(VSND_WAIT_BACK_MS)) <= 0)
                return -ETIMEDOUT;

        return evtchnl->u.req.resp_status;
}

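/*
 * All stream requests below follow the same pattern: serialize on
 * req_io_lock, prepare a request under ring_io_lock, kick the backend and
 * wait for its response.
 */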
int xen_snd_front_stream_query_hw_param(struct xen_snd_front_evtchnl *evtchnl,
                                        struct xensnd_query_hw_param *hw_param_req,
                                        struct xensnd_query_hw_param *hw_param_resp)
{
        struct xensnd_req *req;
        int ret;

        mutex_lock(&evtchnl->u.req.req_io_lock);

        mutex_lock(&evtchnl->ring_io_lock);
        req = be_stream_prepare_req(evtchnl, XENSND_OP_HW_PARAM_QUERY);
        req->op.hw_param = *hw_param_req;
        mutex_unlock(&evtchnl->ring_io_lock);

        ret = be_stream_do_io(evtchnl);

        if (ret == 0)
                ret = be_stream_wait_io(evtchnl);

        if (ret == 0)
                *hw_param_resp = evtchnl->u.req.resp.hw_param;

        mutex_unlock(&evtchnl->u.req.req_io_lock);
        return ret;
}

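/*
 * Open the stream: pass the PCM parameters and the grant reference
 * directory of the shared buffer to the backend.
 */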
int xen_snd_front_stream_prepare(struct xen_snd_front_evtchnl *evtchnl,
                                 struct xen_snd_front_shbuf *sh_buf,
                                 u8 format, unsigned int channels,
                                 unsigned int rate, u32 buffer_sz,
                                 u32 period_sz)
{
        struct xensnd_req *req;
        int ret;

        mutex_lock(&evtchnl->u.req.req_io_lock);

        mutex_lock(&evtchnl->ring_io_lock);
        req = be_stream_prepare_req(evtchnl, XENSND_OP_OPEN);
        req->op.open.pcm_format = format;
        req->op.open.pcm_channels = channels;
        req->op.open.pcm_rate = rate;
        req->op.open.buffer_sz = buffer_sz;
        req->op.open.period_sz = period_sz;
        req->op.open.gref_directory = xen_snd_front_shbuf_get_dir_start(sh_buf);
        mutex_unlock(&evtchnl->ring_io_lock);

        ret = be_stream_do_io(evtchnl);

        if (ret == 0)
                ret = be_stream_wait_io(evtchnl);

        mutex_unlock(&evtchnl->u.req.req_io_lock);
        return ret;
}

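/* Ask the backend to close the stream. */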
int xen_snd_front_stream_close(struct xen_snd_front_evtchnl *evtchnl)
{
        struct xensnd_req *req;
        int ret;

        mutex_lock(&evtchnl->u.req.req_io_lock);

        mutex_lock(&evtchnl->ring_io_lock);
        req = be_stream_prepare_req(evtchnl, XENSND_OP_CLOSE);
        mutex_unlock(&evtchnl->ring_io_lock);

        ret = be_stream_do_io(evtchnl);

        if (ret == 0)
                ret = be_stream_wait_io(evtchnl);

        mutex_unlock(&evtchnl->u.req.req_io_lock);
        return ret;
}

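/*
 * Notify the backend that @count bytes at offset @pos of the shared buffer
 * have been written (playback); xen_snd_front_stream_read() below is the
 * capture counterpart.
 */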
int xen_snd_front_stream_write(struct xen_snd_front_evtchnl *evtchnl,
                               unsigned long pos, unsigned long count)
{
        struct xensnd_req *req;
        int ret;

        mutex_lock(&evtchnl->u.req.req_io_lock);

        mutex_lock(&evtchnl->ring_io_lock);
        req = be_stream_prepare_req(evtchnl, XENSND_OP_WRITE);
        req->op.rw.length = count;
        req->op.rw.offset = pos;
        mutex_unlock(&evtchnl->ring_io_lock);

        ret = be_stream_do_io(evtchnl);

        if (ret == 0)
                ret = be_stream_wait_io(evtchnl);

        mutex_unlock(&evtchnl->u.req.req_io_lock);
        return ret;
}

int xen_snd_front_stream_read(struct xen_snd_front_evtchnl *evtchnl,
                              unsigned long pos, unsigned long count)
{
        struct xensnd_req *req;
        int ret;

        mutex_lock(&evtchnl->u.req.req_io_lock);

        mutex_lock(&evtchnl->ring_io_lock);
        req = be_stream_prepare_req(evtchnl, XENSND_OP_READ);
        req->op.rw.length = count;
        req->op.rw.offset = pos;
        mutex_unlock(&evtchnl->ring_io_lock);

        ret = be_stream_do_io(evtchnl);

        if (ret == 0)
                ret = be_stream_wait_io(evtchnl);

        mutex_unlock(&evtchnl->u.req.req_io_lock);
        return ret;
}

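/* Forward a stream trigger request (e.g. start/stop) to the backend. */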
int xen_snd_front_stream_trigger(struct xen_snd_front_evtchnl *evtchnl,
                                 int type)
{
        struct xensnd_req *req;
        int ret;

        mutex_lock(&evtchnl->u.req.req_io_lock);

        mutex_lock(&evtchnl->ring_io_lock);
        req = be_stream_prepare_req(evtchnl, XENSND_OP_TRIGGER);
        req->op.trigger.type = type;
        mutex_unlock(&evtchnl->ring_io_lock);

        ret = be_stream_do_io(evtchnl);

        if (ret == 0)
                ret = be_stream_wait_io(evtchnl);

        mutex_unlock(&evtchnl->u.req.req_io_lock);
        return ret;
}

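/* Tear down the ALSA card and free all event channels. */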
static void xen_snd_drv_fini(struct xen_snd_front_info *front_info)
{
        xen_snd_front_alsa_fini(front_info);
        xen_snd_front_evtchnl_free_all(front_info);
}

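/*
 * Query the sound card configuration, then create the per-stream event
 * channels and publish them for the backend to pick up.
 */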
static int sndback_initwait(struct xen_snd_front_info *front_info)
{
        int num_streams;
        int ret;

        ret = xen_snd_front_cfg_card(front_info, &num_streams);
        if (ret < 0)
                return ret;

        /* create event channels for all streams and publish */
        ret = xen_snd_front_evtchnl_create_all(front_info, num_streams);
        if (ret < 0)
                return ret;

        return xen_snd_front_evtchnl_publish_all(front_info);
}

static int sndback_connect(struct xen_snd_front_info *front_info)
{
        return xen_snd_front_alsa_init(front_info);
}

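/* The backend has gone away: clean up and restart the XenBus handshake. */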
static void sndback_disconnect(struct xen_snd_front_info *front_info)
{
        xen_snd_drv_fini(front_info);
        xenbus_switch_state(front_info->xb_dev, XenbusStateInitialising);
}

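/* Drive the frontend state machine in response to backend XenBus state changes. */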
static void sndback_changed(struct xenbus_device *xb_dev,
                            enum xenbus_state backend_state)
{
        struct xen_snd_front_info *front_info = dev_get_drvdata(&xb_dev->dev);
        int ret;

        dev_dbg(&xb_dev->dev, "Backend state is %s, front is %s\n",
                xenbus_strstate(backend_state),
                xenbus_strstate(xb_dev->state));

        switch (backend_state) {
        case XenbusStateReconfiguring:
        case XenbusStateReconfigured:
        case XenbusStateInitialised:
                break;

        case XenbusStateInitialising:
                /* Recovering after an unexpected backend closure. */
                sndback_disconnect(front_info);
                break;

        case XenbusStateInitWait:
                /* Recovering after an unexpected backend closure. */
                sndback_disconnect(front_info);

                ret = sndback_initwait(front_info);
                if (ret < 0)
                        xenbus_dev_fatal(xb_dev, ret, "initializing frontend");
                else
                        xenbus_switch_state(xb_dev, XenbusStateInitialised);
                break;

        case XenbusStateConnected:
                if (xb_dev->state != XenbusStateInitialised)
                        break;

                ret = sndback_connect(front_info);
                if (ret < 0)
                        xenbus_dev_fatal(xb_dev, ret, "initializing frontend");
                else
                        xenbus_switch_state(xb_dev, XenbusStateConnected);
                break;

        case XenbusStateClosing:
                /*
                 * In this state the backend starts freeing its resources,
                 * so let it reach the Closed state first, so that we can
                 * remove ours as well.
                 */
                break;

        case XenbusStateUnknown:
        case XenbusStateClosed:
                if (xb_dev->state == XenbusStateClosed)
                        break;

                sndback_disconnect(front_info);
                break;
        }
}

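/* Allocate the per-device frontend context and start the XenBus handshake. */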
static int xen_drv_probe(struct xenbus_device *xb_dev,
                         const struct xenbus_device_id *id)
{
        struct xen_snd_front_info *front_info;

        front_info = devm_kzalloc(&xb_dev->dev,
                                  sizeof(*front_info), GFP_KERNEL);
        if (!front_info)
                return -ENOMEM;

        front_info->xb_dev = xb_dev;
        dev_set_drvdata(&xb_dev->dev, front_info);

        return xenbus_switch_state(xb_dev, XenbusStateInitialising);
}

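/*
 * Switch to Closing, wait for the backend to reach InitWait (see the
 * workaround note below) and then release the frontend resources.
 */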
static int xen_drv_remove(struct xenbus_device *dev)
{
        struct xen_snd_front_info *front_info = dev_get_drvdata(&dev->dev);
        int to = 100;

        xenbus_switch_state(dev, XenbusStateClosing);

        /*
         * On driver removal the device is disconnected from XenBus, so no
         * backend state change events arrive via the .otherend_changed
         * callback. This prevents us from exiting gracefully, e.g.
         * signaling the backend to free the event channels, waiting for its
         * state to change to XenbusStateClosed and then cleaning up at our
         * end. Normally, once the frontend driver is removed, the backend
         * eventually goes into the XenbusStateInitWait state.
         *
         * Workaround: read the backend's state manually and wait with a
         * timeout.
         */
        while ((xenbus_read_unsigned(front_info->xb_dev->otherend, "state",
                                     XenbusStateUnknown) != XenbusStateInitWait) &&
               --to)
                msleep(10);

        if (!to) {
                unsigned int state;

                state = xenbus_read_unsigned(front_info->xb_dev->otherend,
                                             "state", XenbusStateUnknown);
                pr_err("Backend state is %s while removing driver\n",
                       xenbus_strstate(state));
        }

        xen_snd_drv_fini(front_info);
        xenbus_frontend_closed(dev);
        return 0;
}

static const struct xenbus_device_id xen_drv_ids[] = {
        { XENSND_DRIVER_NAME },
        { "" }
};

static struct xenbus_driver xen_driver = {
        .ids = xen_drv_ids,
        .probe = xen_drv_probe,
        .remove = xen_drv_remove,
        .otherend_changed = sndback_changed,
};

static int __init xen_drv_init(void)
{
        if (!xen_domain())
                return -ENODEV;

        if (!xen_has_pv_devices())
                return -ENODEV;

        /* Only the XEN_PAGE_SIZE == PAGE_SIZE case is supported at the moment. */
        if (XEN_PAGE_SIZE != PAGE_SIZE) {
                pr_err(XENSND_DRIVER_NAME ": different kernel and Xen page sizes are not supported: XEN_PAGE_SIZE (%lu) != PAGE_SIZE (%lu)\n",
                       XEN_PAGE_SIZE, PAGE_SIZE);
                return -ENODEV;
        }

        pr_info("Initialising Xen " XENSND_DRIVER_NAME " frontend driver\n");
        return xenbus_register_frontend(&xen_driver);
}

static void __exit xen_drv_fini(void)
{
        pr_info("Unregistering Xen " XENSND_DRIVER_NAME " frontend driver\n");
        xenbus_unregister_driver(&xen_driver);
}

module_init(xen_drv_init);
module_exit(xen_drv_fini);

MODULE_DESCRIPTION("Xen virtual sound device frontend");
MODULE_LICENSE("GPL");
MODULE_ALIAS("xen:" XENSND_DRIVER_NAME);
MODULE_SUPPORTED_DEVICE("{{ALSA,Virtual soundcard}}");