drivers/hv/ring_buffer.c
/*
 *
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 *   K. Y. Srinivasan <kys@microsoft.com>
 *
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/hyperv.h>
#include <linux/uio.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/prefetch.h>

#include "hyperv_vmbus.h"

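/*
 * Every packet written to the ring is followed by an 8-byte trailer
 * carrying the "previous indices" (see hv_ringbuffer_write() and
 * __hv_pkt_iter_next()).
 */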
#define VMBUS_PKT_TRAILER	8

/*
 * When we write to the ring buffer, check if the host needs to
 * be signaled. Here are the details of this protocol:
 *
 *	1. The host guarantees that while it is draining the
 *	   ring buffer, it will set the interrupt_mask to
 *	   indicate it does not need to be interrupted when
 *	   new data is placed.
 *
 *	2. The host guarantees that it will completely drain
 *	   the ring buffer before exiting the read loop. Further,
 *	   once the ring buffer is empty, it will clear the
 *	   interrupt_mask and re-check to see if new data has
 *	   arrived.
 *
 * KYS: Oct. 30, 2016:
 * It looks like Windows hosts have logic to deal with DOS attacks that
 * can be triggered if it receives interrupts when it is not expecting
 * the interrupt. The host expects interrupts only when the ring
 * transitions from empty to non-empty (or full to non-full on the
 * guest-to-host ring).
 * So, base the signaling decision solely on the ring state until the
 * host logic is fixed.
 */

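/*
 * hv_signal_on_write - signal the host if this write made the ring
 * non-empty. @old_write is the write index captured before the write;
 * if it still equals the host's read index, the ring was empty when
 * the write began. No signal is sent while the host has set
 * interrupt_mask.
 */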
static void hv_signal_on_write(u32 old_write, struct vmbus_channel *channel)
{
	struct hv_ring_buffer_info *rbi = &channel->outbound;

	virt_mb();
	if (READ_ONCE(rbi->ring_buffer->interrupt_mask))
		return;

	/* check interrupt_mask before read_index */
	virt_rmb();
	/*
	 * This is the only case we need to signal: when the
	 * ring transitions from being empty to non-empty.
	 */
	if (old_write == READ_ONCE(rbi->ring_buffer->read_index))
		vmbus_setevent(channel);
}

/* Get the next write location for the specified ring buffer. */
static inline u32
hv_get_next_write_location(struct hv_ring_buffer_info *ring_info)
{
	u32 next = ring_info->ring_buffer->write_index;

	return next;
}

/* Set the next write location for the specified ring buffer. */
static inline void
hv_set_next_write_location(struct hv_ring_buffer_info *ring_info,
		     u32 next_write_location)
{
	ring_info->ring_buffer->write_index = next_write_location;
}

/* Set the next read location for the specified ring buffer. */
static inline void
hv_set_next_read_location(struct hv_ring_buffer_info *ring_info,
		    u32 next_read_location)
{
	ring_info->ring_buffer->read_index = next_read_location;
	ring_info->priv_read_index = next_read_location;
}

/* Get the size of the ring buffer. */
static inline u32
hv_get_ring_buffersize(const struct hv_ring_buffer_info *ring_info)
{
	return ring_info->ring_datasize;
}

/*
 * Get the "previous indices" trailer value for the specified ring
 * buffer: the current write index in the upper 32 bits; the lower
 * 32 bits (nominally the read index) are left as zero.
 */
static inline u64
hv_get_ring_bufferindices(struct hv_ring_buffer_info *ring_info)
{
	return (u64)ring_info->ring_buffer->write_index << 32;
}

/*
 * Helper routine to copy from source to ring buffer.
 * Assume there is enough room. Handles wrap-around of the destination
 * offset only; the copy itself may safely run past the end of the ring
 * because the ring data is double-mapped (see hv_ringbuffer_init()).
 */
static u32 hv_copyto_ringbuffer(
	struct hv_ring_buffer_info	*ring_info,
	u32				start_write_offset,
	const void			*src,
	u32				srclen)
{
	void *ring_buffer = hv_get_ring_buffer(ring_info);
	u32 ring_buffer_size = hv_get_ring_buffersize(ring_info);

	memcpy(ring_buffer + start_write_offset, src, srclen);

	start_write_offset += srclen;
	if (start_write_offset >= ring_buffer_size)
		start_write_offset -= ring_buffer_size;

	return start_write_offset;
}

/*
 * hv_get_ringbuffer_availbytes()
 *
 * Get the number of bytes available to read from and to write to
 * for the specified ring buffer.
 */
static void
hv_get_ringbuffer_availbytes(const struct hv_ring_buffer_info *rbi,
			     u32 *read, u32 *write)
{
	u32 read_loc, write_loc, dsize;

	/* Capture the read/write indices before they change */
	read_loc = READ_ONCE(rbi->ring_buffer->read_index);
	write_loc = READ_ONCE(rbi->ring_buffer->write_index);
	dsize = rbi->ring_datasize;

	*write = write_loc >= read_loc ? dsize - (write_loc - read_loc) :
		read_loc - write_loc;
	*read = dsize - *write;
}

/* Get various debug metrics for the specified ring buffer. */
void hv_ringbuffer_get_debuginfo(const struct hv_ring_buffer_info *ring_info,
				 struct hv_ring_buffer_debug_info *debug_info)
{
	u32 bytes_avail_towrite;
	u32 bytes_avail_toread;

	if (ring_info->ring_buffer) {
		hv_get_ringbuffer_availbytes(ring_info,
					&bytes_avail_toread,
					&bytes_avail_towrite);

		debug_info->bytes_avail_toread = bytes_avail_toread;
		debug_info->bytes_avail_towrite = bytes_avail_towrite;
		debug_info->current_read_index =
			ring_info->ring_buffer->read_index;
		debug_info->current_write_index =
			ring_info->ring_buffer->write_index;
		debug_info->current_interrupt_mask =
			ring_info->ring_buffer->interrupt_mask;
	}
}
EXPORT_SYMBOL_GPL(hv_ringbuffer_get_debuginfo);

/* Initialize the ring buffer. */
int hv_ringbuffer_init(struct hv_ring_buffer_info *ring_info,
		       struct page *pages, u32 page_cnt)
{
	int i;
	struct page **pages_wraparound;

	BUILD_BUG_ON(sizeof(struct hv_ring_buffer) != PAGE_SIZE);

	memset(ring_info, 0, sizeof(struct hv_ring_buffer_info));

	/*
	 * First page holds struct hv_ring_buffer, do wraparound mapping for
	 * the rest.
	 */
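	/*
	 * The resulting virtual layout is:
	 *   [ header page | data pages 1..n-1 | data pages 1..n-1 again ]
	 * so a copy that runs off the end of the ring data continues in
	 * the aliased second mapping and needs no explicit splitting.
	 */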
	pages_wraparound = kcalloc(page_cnt * 2 - 1, sizeof(struct page *),
				   GFP_KERNEL);
	if (!pages_wraparound)
		return -ENOMEM;

	pages_wraparound[0] = pages;
	for (i = 0; i < 2 * (page_cnt - 1); i++)
		pages_wraparound[i + 1] = &pages[i % (page_cnt - 1) + 1];

	ring_info->ring_buffer = (struct hv_ring_buffer *)
		vmap(pages_wraparound, page_cnt * 2 - 1, VM_MAP, PAGE_KERNEL);

	kfree(pages_wraparound);

	if (!ring_info->ring_buffer)
		return -ENOMEM;

	ring_info->ring_buffer->read_index =
		ring_info->ring_buffer->write_index = 0;

	/* Set the feature bit for enabling flow control (pending send size). */
	ring_info->ring_buffer->feature_bits.value = 1;

	ring_info->ring_size = page_cnt << PAGE_SHIFT;
	ring_info->ring_datasize = ring_info->ring_size -
		sizeof(struct hv_ring_buffer);

	spin_lock_init(&ring_info->ring_lock);

	return 0;
}

/* Cleanup the ring buffer. */
void hv_ringbuffer_cleanup(struct hv_ring_buffer_info *ring_info)
{
	vunmap(ring_info->ring_buffer);
}

/* Write to the ring buffer. */
int hv_ringbuffer_write(struct vmbus_channel *channel,
			const struct kvec *kv_list, u32 kv_count)
{
	int i;
	u32 bytes_avail_towrite;
	u32 totalbytes_towrite = sizeof(u64);
	u32 next_write_location;
	u32 old_write;
	u64 prev_indices;
	unsigned long flags;
	struct hv_ring_buffer_info *outring_info = &channel->outbound;

	if (channel->rescind)
		return -ENODEV;

	for (i = 0; i < kv_count; i++)
		totalbytes_towrite += kv_list[i].iov_len;

	spin_lock_irqsave(&outring_info->ring_lock, flags);

	bytes_avail_towrite = hv_get_bytes_to_write(outring_info);

	/*
	 * If there is only just enough room for the packet, treat the
	 * ring as full: otherwise the write would leave read index ==
	 * write index, and on the next pass the ring buffer would
	 * (wrongly) appear empty.
	 */
	if (bytes_avail_towrite <= totalbytes_towrite) {
		spin_unlock_irqrestore(&outring_info->ring_lock, flags);
		return -EAGAIN;
	}

	/* Write to the ring buffer */
	next_write_location = hv_get_next_write_location(outring_info);

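	/*
	 * Remember where this write begins: hv_signal_on_write() compares
	 * it with the host's read index to detect the empty-to-non-empty
	 * transition that requires a signal.
	 */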
	old_write = next_write_location;

	for (i = 0; i < kv_count; i++) {
		next_write_location = hv_copyto_ringbuffer(outring_info,
						     next_write_location,
						     kv_list[i].iov_base,
						     kv_list[i].iov_len);
	}

	/* Set previous packet start */
	prev_indices = hv_get_ring_bufferindices(outring_info);

	next_write_location = hv_copyto_ringbuffer(outring_info,
					     next_write_location,
					     &prev_indices,
					     sizeof(u64));

	/* Issue a full memory barrier before updating the write index */
	virt_mb();

	/* Now, update the write location */
	hv_set_next_write_location(outring_info, next_write_location);

	spin_unlock_irqrestore(&outring_info->ring_lock, flags);

	hv_signal_on_write(old_write, channel);

	if (channel->rescind)
		return -ENODEV;

	return 0;
}

int hv_ringbuffer_read(struct vmbus_channel *channel,
		       void *buffer, u32 buflen, u32 *buffer_actual_len,
		       u64 *requestid, bool raw)
{
	struct vmpacket_descriptor *desc;
	u32 packetlen, offset;

	if (unlikely(buflen == 0))
		return -EINVAL;

	*buffer_actual_len = 0;
	*requestid = 0;

	/* Make sure there is something to read */
	desc = hv_pkt_iter_first(channel);
	if (desc == NULL) {
		/*
		 * No error is returned even when there is no packet header;
		 * drivers are expected to check buffer_actual_len instead.
		 */
		return 0;
	}

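	/*
	 * offset8 and len8 are expressed in 8-byte units; a raw read
	 * returns the packet including its descriptor header.
	 */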
	offset = raw ? 0 : (desc->offset8 << 3);
	packetlen = (desc->len8 << 3) - offset;
	*buffer_actual_len = packetlen;
	*requestid = desc->trans_id;

	if (unlikely(packetlen > buflen))
		return -ENOBUFS;

	/* since ring is double mapped, only one copy is necessary */
	memcpy(buffer, (const char *)desc + offset, packetlen);

	/* Advance ring index to next packet descriptor */
	__hv_pkt_iter_next(channel, desc);

	/* Notify host of update */
	hv_pkt_iter_close(channel);

	return 0;
}

/*
 * Determine number of bytes available in ring buffer after
 * the current iterator (priv_read_index) location.
 *
 * This is similar to hv_get_bytes_to_read but with private
 * read index instead.
 */
static u32 hv_pkt_iter_avail(const struct hv_ring_buffer_info *rbi)
{
	u32 priv_read_loc = rbi->priv_read_index;
	u32 write_loc = READ_ONCE(rbi->ring_buffer->write_index);

	if (write_loc >= priv_read_loc)
		return write_loc - priv_read_loc;
	else
		return (rbi->ring_datasize - priv_read_loc) + write_loc;
}

/*
 * Get first vmbus packet from ring buffer after read_index
 *
 * If ring buffer is empty, returns NULL and no other action needed.
 */
struct vmpacket_descriptor *hv_pkt_iter_first(struct vmbus_channel *channel)
{
	struct hv_ring_buffer_info *rbi = &channel->inbound;
	struct vmpacket_descriptor *desc;

	if (hv_pkt_iter_avail(rbi) < sizeof(struct vmpacket_descriptor))
		return NULL;

	desc = hv_get_ring_buffer(rbi) + rbi->priv_read_index;
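	/* Start prefetching at what is likely the next packet descriptor. */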
	if (desc)
		prefetch((char *)desc + (desc->len8 << 3));

	return desc;
}
EXPORT_SYMBOL_GPL(hv_pkt_iter_first);

/*
 * Get next vmbus packet from ring buffer.
 *
 * Advances the current location (priv_read_index) and checks for more
 * data. If the end of the ring buffer is reached, then return NULL.
 */
struct vmpacket_descriptor *
__hv_pkt_iter_next(struct vmbus_channel *channel,
		   const struct vmpacket_descriptor *desc)
{
	struct hv_ring_buffer_info *rbi = &channel->inbound;
	u32 packetlen = desc->len8 << 3;
	u32 dsize = rbi->ring_datasize;

	/* bump offset to next potential packet */
	rbi->priv_read_index += packetlen + VMBUS_PKT_TRAILER;
	if (rbi->priv_read_index >= dsize)
		rbi->priv_read_index -= dsize;

	/* more data? */
	return hv_pkt_iter_first(channel);
}
EXPORT_SYMBOL_GPL(__hv_pkt_iter_next);

/* How many bytes were read in this iterator cycle */
static u32 hv_pkt_iter_bytes_read(const struct hv_ring_buffer_info *rbi,
					u32 start_read_index)
{
	if (rbi->priv_read_index >= start_read_index)
		return rbi->priv_read_index - start_read_index;
	else
		return rbi->ring_datasize - start_read_index +
			rbi->priv_read_index;
}

/*
 * Update host ring buffer after iterating over packets.
 */
void hv_pkt_iter_close(struct vmbus_channel *channel)
{
	struct hv_ring_buffer_info *rbi = &channel->inbound;
	u32 curr_write_sz, pending_sz, bytes_read, start_read_index;

	/*
	 * Make sure all reads are done before we update the read index since
	 * the writer may start writing to the read area once the read index
	 * is updated.
	 */
	virt_rmb();
	start_read_index = rbi->ring_buffer->read_index;
	rbi->ring_buffer->read_index = rbi->priv_read_index;

	if (!rbi->ring_buffer->feature_bits.feat_pending_send_sz)
		return;

	/*
	 * Issue a full memory barrier before making the signaling decision.
	 * If the read of pending_send_sz (below) were reordered ahead of
	 * the commit of the new read index (above), we could have a
	 * problem: if the host sets pending_send_sz after we have sampled
	 * it, and goes to sleep before we commit the read index, we would
	 * miss sending it the interrupt. The full memory barrier addresses
	 * this.
	 */
	virt_mb();

	pending_sz = READ_ONCE(rbi->ring_buffer->pending_send_sz);
	if (!pending_sz)
		return;

	/*
	 * Ensure the read of write_index in hv_get_bytes_to_write()
	 * happens after the read of pending_send_sz.
	 */
	virt_rmb();
	curr_write_sz = hv_get_bytes_to_write(rbi);
	bytes_read = hv_pkt_iter_bytes_read(rbi, start_read_index);

	/*
	 * If there was space before we began iteration, then the host
	 * was not blocked (curr_write_sz - bytes_read is the write space
	 * as it was before this read cycle advanced the read index).
	 */
	if (curr_write_sz - bytes_read > pending_sz)
		return;

	/* If pending write will not fit, don't give false hope. */
	if (curr_write_sz <= pending_sz)
		return;

	vmbus_setevent(channel);
}
EXPORT_SYMBOL_GPL(hv_pkt_iter_close);