drivers/hv/ring_buffer.c
/*
 *
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 *   K. Y. Srinivasan <kys@microsoft.com>
 *
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/hyperv.h>
#include <linux/uio.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/prefetch.h>

#include "hyperv_vmbus.h"

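/* Size of the u64 prev_indices trailer written after each packet. */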
#define VMBUS_PKT_TRAILER	8

/*
 * When we write to the ring buffer, check if the host needs to
 * be signaled. Here are the details of this protocol:
 *
 *	1. The host guarantees that while it is draining the
 *	   ring buffer, it will set the interrupt_mask to
 *	   indicate it does not need to be interrupted when
 *	   new data is placed.
 *
 *	2. The host guarantees that it will completely drain
 *	   the ring buffer before exiting the read loop. Further,
 *	   once the ring buffer is empty, it will clear the
 *	   interrupt_mask and re-check to see if new data has
 *	   arrived.
 *
 * KYS: Oct. 30, 2016:
 * It looks like Windows hosts have logic to deal with DOS attacks that
 * can be triggered if they receive interrupts when they are not
 * expecting them. The host expects interrupts only when the ring
 * transitions from empty to non-empty (or from full to non-full on the
 * guest-to-host ring).
 * So, base the signaling decision solely on the ring state until the
 * host logic is fixed.
 */

static void hv_signal_on_write(u32 old_write, struct vmbus_channel *channel)
{
	struct hv_ring_buffer_info *rbi = &channel->outbound;

	virt_mb();
	if (READ_ONCE(rbi->ring_buffer->interrupt_mask))
		return;

	/* check interrupt_mask before read_index */
	virt_rmb();
	/*
	 * This is the only case we need to signal: when the ring
	 * transitions from being empty to non-empty.
	 */
	if (old_write == READ_ONCE(rbi->ring_buffer->read_index))
		vmbus_setevent(channel);
}

/* Get the next write location for the specified ring buffer. */
static inline u32
hv_get_next_write_location(struct hv_ring_buffer_info *ring_info)
{
	u32 next = ring_info->ring_buffer->write_index;

	return next;
}

/* Set the next write location for the specified ring buffer. */
static inline void
hv_set_next_write_location(struct hv_ring_buffer_info *ring_info,
		     u32 next_write_location)
{
	ring_info->ring_buffer->write_index = next_write_location;
}

/* Set the next read location for the specified ring buffer. */
static inline void
hv_set_next_read_location(struct hv_ring_buffer_info *ring_info,
		    u32 next_read_location)
{
	ring_info->ring_buffer->read_index = next_read_location;
	ring_info->priv_read_index = next_read_location;
}

/* Get the size of the ring buffer. */
static inline u32
hv_get_ring_buffersize(const struct hv_ring_buffer_info *ring_info)
{
	return ring_info->ring_datasize;
}

/*
 * Get the ring buffer indices packed as a u64: the write index in the
 * upper 32 bits; the lower 32 bits (the read index slot) are left zero.
 */
static inline u64
hv_get_ring_bufferindices(struct hv_ring_buffer_info *ring_info)
{
	return (u64)ring_info->ring_buffer->write_index << 32;
}

/*
 * Helper routine to copy from source to ring buffer.
 * Assumes there is enough room. Handles wrap-around of the
 * destination only.
 */
static u32 hv_copyto_ringbuffer(
	struct hv_ring_buffer_info	*ring_info,
	u32				start_write_offset,
	const void			*src,
	u32				srclen)
{
	void *ring_buffer = hv_get_ring_buffer(ring_info);
	u32 ring_buffer_size = hv_get_ring_buffersize(ring_info);

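	/*
	 * The ring's data pages are mapped twice back-to-back (see
	 * hv_ringbuffer_init()), so a copy that runs past the end of
	 * the data area continues in the second mapping; the memcpy
	 * never needs to be split.
	 */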
	memcpy(ring_buffer + start_write_offset, src, srclen);

	start_write_offset += srclen;
	if (start_write_offset >= ring_buffer_size)
		start_write_offset -= ring_buffer_size;

	return start_write_offset;
}

/*
 * hv_get_ringbuffer_availbytes()
 *
 * Get the number of bytes available to read from and to write to
 * the specified ring buffer.
 */
static void
hv_get_ringbuffer_availbytes(const struct hv_ring_buffer_info *rbi,
			     u32 *read, u32 *write)
{
	u32 read_loc, write_loc, dsize;

	/* Capture the read/write indices before they change */
	read_loc = READ_ONCE(rbi->ring_buffer->read_index);
	write_loc = READ_ONCE(rbi->ring_buffer->write_index);
	dsize = rbi->ring_datasize;

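	/*
	 * Example: dsize = 4096, read_loc = 100, write_loc = 300:
	 * 200 bytes are in use, so *write = 3896 and *read = 200.
	 */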
	*write = write_loc >= read_loc ? dsize - (write_loc - read_loc) :
		read_loc - write_loc;
	*read = dsize - *write;
}

/* Get various debug metrics for the specified ring buffer. */
void hv_ringbuffer_get_debuginfo(const struct hv_ring_buffer_info *ring_info,
				 struct hv_ring_buffer_debug_info *debug_info)
{
	u32 bytes_avail_towrite;
	u32 bytes_avail_toread;

	if (ring_info->ring_buffer) {
		hv_get_ringbuffer_availbytes(ring_info,
					&bytes_avail_toread,
					&bytes_avail_towrite);

		debug_info->bytes_avail_toread = bytes_avail_toread;
		debug_info->bytes_avail_towrite = bytes_avail_towrite;
		debug_info->current_read_index =
			ring_info->ring_buffer->read_index;
		debug_info->current_write_index =
			ring_info->ring_buffer->write_index;
		debug_info->current_interrupt_mask =
			ring_info->ring_buffer->interrupt_mask;
	}
}
EXPORT_SYMBOL_GPL(hv_ringbuffer_get_debuginfo);

/* Initialize the ring buffer. */
int hv_ringbuffer_init(struct hv_ring_buffer_info *ring_info,
		       struct page *pages, u32 page_cnt)
{
	int i;
	struct page **pages_wraparound;

	BUILD_BUG_ON(sizeof(struct hv_ring_buffer) != PAGE_SIZE);

	memset(ring_info, 0, sizeof(struct hv_ring_buffer_info));

	/*
	 * First page holds struct hv_ring_buffer, do wraparound mapping for
	 * the rest.
	 */
	pages_wraparound = kcalloc(page_cnt * 2 - 1, sizeof(struct page *),
				   GFP_KERNEL);
	if (!pages_wraparound)
		return -ENOMEM;

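	/*
	 * Each data page is mapped twice so reads and writes can run off
	 * the end of the ring and continue at the start. For page_cnt = 3
	 * the vmap layout is: hdr, d1, d2, d1, d2.
	 */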
	pages_wraparound[0] = pages;
	for (i = 0; i < 2 * (page_cnt - 1); i++)
		pages_wraparound[i + 1] = &pages[i % (page_cnt - 1) + 1];

	ring_info->ring_buffer = (struct hv_ring_buffer *)
		vmap(pages_wraparound, page_cnt * 2 - 1, VM_MAP, PAGE_KERNEL);

	kfree(pages_wraparound);

	if (!ring_info->ring_buffer)
		return -ENOMEM;

	ring_info->ring_buffer->read_index =
		ring_info->ring_buffer->write_index = 0;

	/* Set the feature bit for enabling flow control. */
	ring_info->ring_buffer->feature_bits.value = 1;

	ring_info->ring_size = page_cnt << PAGE_SHIFT;
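	/*
	 * Precompute the reciprocal of ring_size/10 so that later
	 * fullness calculations can use reciprocal_divide() instead
	 * of a runtime division.
	 */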
	ring_info->ring_size_div10_reciprocal =
		reciprocal_value(ring_info->ring_size / 10);
	ring_info->ring_datasize = ring_info->ring_size -
		sizeof(struct hv_ring_buffer);

	spin_lock_init(&ring_info->ring_lock);

	return 0;
}

/* Cleanup the ring buffer. */
void hv_ringbuffer_cleanup(struct hv_ring_buffer_info *ring_info)
{
	vunmap(ring_info->ring_buffer);
}

/* Write to the ring buffer. */
int hv_ringbuffer_write(struct vmbus_channel *channel,
			const struct kvec *kv_list, u32 kv_count)
{
	int i;
	u32 bytes_avail_towrite;
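	/* sizeof(u64) reserves room for the prev_indices trailer below. */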
	u32 totalbytes_towrite = sizeof(u64);
	u32 next_write_location;
	u32 old_write;
	u64 prev_indices;
	unsigned long flags;
	struct hv_ring_buffer_info *outring_info = &channel->outbound;

	if (channel->rescind)
		return -ENODEV;

	for (i = 0; i < kv_count; i++)
		totalbytes_towrite += kv_list[i].iov_len;

	spin_lock_irqsave(&outring_info->ring_lock, flags);

	bytes_avail_towrite = hv_get_bytes_to_write(outring_info);

	/*
	 * If there is no more than exactly enough room for the packet,
	 * treat the ring as full: if the write filled the ring completely,
	 * the next time around we would think it is empty, since
	 * read index == write index.
	 */
	if (bytes_avail_towrite <= totalbytes_towrite) {
		spin_unlock_irqrestore(&outring_info->ring_lock, flags);
		return -EAGAIN;
	}

	/* Write to the ring buffer */
	next_write_location = hv_get_next_write_location(outring_info);

	old_write = next_write_location;

	for (i = 0; i < kv_count; i++) {
		next_write_location = hv_copyto_ringbuffer(outring_info,
						     next_write_location,
						     kv_list[i].iov_base,
						     kv_list[i].iov_len);
	}

	/* Set previous packet start */
	prev_indices = hv_get_ring_bufferindices(outring_info);

	next_write_location = hv_copyto_ringbuffer(outring_info,
					     next_write_location,
					     &prev_indices,
					     sizeof(u64));

	/* Issue a full memory barrier before updating the write index */
	virt_mb();

	/* Now, update the write location */
	hv_set_next_write_location(outring_info, next_write_location);

	spin_unlock_irqrestore(&outring_info->ring_lock, flags);

	hv_signal_on_write(old_write, channel);

	if (channel->rescind)
		return -ENODEV;

	return 0;
}

int hv_ringbuffer_read(struct vmbus_channel *channel,
		       void *buffer, u32 buflen, u32 *buffer_actual_len,
		       u64 *requestid, bool raw)
{
	struct vmpacket_descriptor *desc;
	u32 packetlen, offset;

	if (unlikely(buflen == 0))
		return -EINVAL;

	*buffer_actual_len = 0;
	*requestid = 0;

	/* Make sure there is something to read */
	desc = hv_pkt_iter_first(channel);
	if (desc == NULL) {
		/*
		 * No error is returned even when there is no packet header;
		 * drivers are expected to check buffer_actual_len instead.
		 */
		return 0;
	}

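	/* offset8 and len8 are in units of 8 bytes, hence the << 3. */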
	offset = raw ? 0 : (desc->offset8 << 3);
	packetlen = (desc->len8 << 3) - offset;
	*buffer_actual_len = packetlen;
	*requestid = desc->trans_id;

	if (unlikely(packetlen > buflen))
		return -ENOBUFS;

	/* Since the ring is double-mapped, only one copy is necessary. */
	memcpy(buffer, (const char *)desc + offset, packetlen);

	/* Advance ring index to next packet descriptor */
	__hv_pkt_iter_next(channel, desc);

	/* Notify host of update */
	hv_pkt_iter_close(channel);

	return 0;
}

/*
 * Determine the number of bytes available in the ring buffer after
 * the current iterator (priv_read_index) location.
 *
 * This is similar to hv_get_bytes_to_read but uses the private
 * read index instead.
 */
static u32 hv_pkt_iter_avail(const struct hv_ring_buffer_info *rbi)
{
	u32 priv_read_loc = rbi->priv_read_index;
	u32 write_loc = READ_ONCE(rbi->ring_buffer->write_index);

	if (write_loc >= priv_read_loc)
		return write_loc - priv_read_loc;
	else
		return (rbi->ring_datasize - priv_read_loc) + write_loc;
}

/*
 * Get the first vmbus packet from the ring buffer after read_index.
 *
 * If the ring buffer is empty, returns NULL and no other action is needed.
 */
struct vmpacket_descriptor *hv_pkt_iter_first(struct vmbus_channel *channel)
{
	struct hv_ring_buffer_info *rbi = &channel->inbound;
	struct vmpacket_descriptor *desc;

	if (hv_pkt_iter_avail(rbi) < sizeof(struct vmpacket_descriptor))
		return NULL;

	desc = hv_get_ring_buffer(rbi) + rbi->priv_read_index;
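	/* Prefetch the bytes just past this packet, near where the next
	 * descriptor begins. */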
	if (desc)
		prefetch((char *)desc + (desc->len8 << 3));

	return desc;
}
EXPORT_SYMBOL_GPL(hv_pkt_iter_first);

/*
 * Get next vmbus packet from ring buffer.
 *
 * Advances the current location (priv_read_index) and checks for more
 * data. If the end of the ring buffer is reached, then return NULL.
 */
struct vmpacket_descriptor *
__hv_pkt_iter_next(struct vmbus_channel *channel,
		   const struct vmpacket_descriptor *desc)
{
	struct hv_ring_buffer_info *rbi = &channel->inbound;
	u32 packetlen = desc->len8 << 3;
	u32 dsize = rbi->ring_datasize;

	/* bump offset to next potential packet */
	rbi->priv_read_index += packetlen + VMBUS_PKT_TRAILER;
	if (rbi->priv_read_index >= dsize)
		rbi->priv_read_index -= dsize;

	/* more data? */
	return hv_pkt_iter_first(channel);
}
EXPORT_SYMBOL_GPL(__hv_pkt_iter_next);

/* How many bytes were read in this iterator cycle */
static u32 hv_pkt_iter_bytes_read(const struct hv_ring_buffer_info *rbi,
					u32 start_read_index)
{
	if (rbi->priv_read_index >= start_read_index)
		return rbi->priv_read_index - start_read_index;
	else
		return rbi->ring_datasize - start_read_index +
			rbi->priv_read_index;
}

/*
 * Update host ring buffer after iterating over packets.
 */
void hv_pkt_iter_close(struct vmbus_channel *channel)
{
	struct hv_ring_buffer_info *rbi = &channel->inbound;
	u32 curr_write_sz, pending_sz, bytes_read, start_read_index;

	/*
	 * Make sure all reads are done before we update the read index since
	 * the writer may start writing to the read area once the read index
	 * is updated.
	 */
	virt_rmb();
	start_read_index = rbi->ring_buffer->read_index;
	rbi->ring_buffer->read_index = rbi->priv_read_index;

	if (!rbi->ring_buffer->feature_bits.feat_pending_send_sz)
		return;

	/*
	 * Issue a full memory barrier before making the signaling decision.
	 * If the read of pending_send_sz were reordered ahead of the commit
	 * of the new read index above, the host could set pending_send_sz
	 * after we sampled it and then go to sleep before we commit the
	 * read index; we would then miss sending the interrupt.
	 */
	virt_mb();

	pending_sz = READ_ONCE(rbi->ring_buffer->pending_send_sz);
	if (!pending_sz)
		return;

	/*
	 * Ensure the read of write_index in hv_get_bytes_to_write()
	 * happens after the read of pending_send_sz.
	 */
	virt_rmb();
	curr_write_sz = hv_get_bytes_to_write(rbi);
	bytes_read = hv_pkt_iter_bytes_read(rbi, start_read_index);

	/*
	 * If there was space before we began iteration,
	 * then the host was not blocked.
	 */
	if (curr_write_sz - bytes_read > pending_sz)
		return;

	/* If the pending write will not fit, don't give false hope. */
	if (curr_write_sz <= pending_sz)
		return;

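	/*
	 * The host was blocked: free space was at or below pending_sz
	 * before this read pass, and the bytes we consumed have now
	 * made room for the pending write.
	 */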
	vmbus_setevent(channel);
}
EXPORT_SYMBOL_GPL(hv_pkt_iter_close);