// SPDX-License-Identifier: GPL-2.0-only
/*
 *
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 *   K. Y. Srinivasan <kys@microsoft.com>
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/hyperv.h>
#include <linux/uio.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/prefetch.h>

#include "hyperv_vmbus.h"

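/*
 * Every packet written to the ring is followed by an 8-byte trailer:
 * the u64 of ring indices ("prev_indices") that hv_ringbuffer_write()
 * appends after the payload.  Readers must skip it when stepping to
 * the next descriptor (see __hv_pkt_iter_next()).
 */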
#define VMBUS_PKT_TRAILER       8

/*
 * When we write to the ring buffer, check if the host needs to
 * be signaled. Here are the details of this protocol:
 *
 *      1. The host guarantees that while it is draining the
 *         ring buffer, it will set the interrupt_mask to
 *         indicate it does not need to be interrupted when
 *         new data is placed.
 *
 *      2. The host guarantees that it will completely drain
 *         the ring buffer before exiting the read loop. Further,
 *         once the ring buffer is empty, it will clear the
 *         interrupt_mask and re-check to see if new data has
 *         arrived.
 *
 * KYS: Oct. 30, 2016:
 * It looks like Windows hosts have logic to deal with DOS attacks that
 * can be triggered if it receives interrupts when it is not expecting
 * them. The host expects interrupts only when the ring
 * transitions from empty to non-empty (or full to non-full on the guest
 * to host ring).
 * So, base the signaling decision solely on the ring state until the
 * host logic is fixed.
 */

static void hv_signal_on_write(u32 old_write, struct vmbus_channel *channel)
{
        struct hv_ring_buffer_info *rbi = &channel->outbound;

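        /*
         * The caller has already advanced write_index; the full barrier
         * makes that store visible before we sample interrupt_mask, so
         * we cannot read a stale mask and skip a needed signal.
         */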
        virt_mb();
        if (READ_ONCE(rbi->ring_buffer->interrupt_mask))
                return;

        /* check interrupt_mask before read_index */
        virt_rmb();
        /*
         * This is the only case we need to signal: when the
         * ring transitions from being empty to non-empty.
         */
        if (old_write == READ_ONCE(rbi->ring_buffer->read_index)) {
                ++channel->intr_out_empty;
                vmbus_setevent(channel);
        }
}

/* Get the next write location for the specified ring buffer. */
static inline u32
hv_get_next_write_location(struct hv_ring_buffer_info *ring_info)
{
        u32 next = ring_info->ring_buffer->write_index;

        return next;
}

/* Set the next write location for the specified ring buffer. */
static inline void
hv_set_next_write_location(struct hv_ring_buffer_info *ring_info,
                     u32 next_write_location)
{
        ring_info->ring_buffer->write_index = next_write_location;
}

/* Set the next read location for the specified ring buffer. */
static inline void
hv_set_next_read_location(struct hv_ring_buffer_info *ring_info,
                    u32 next_read_location)
{
        ring_info->ring_buffer->read_index = next_read_location;
        ring_info->priv_read_index = next_read_location;
}

/* Get the size of the ring buffer. */
static inline u32
hv_get_ring_buffersize(const struct hv_ring_buffer_info *ring_info)
{
        return ring_info->ring_datasize;
}

/* Get the read and write indices as u64 of the specified ring buffer. */
static inline u64
hv_get_ring_bufferindices(struct hv_ring_buffer_info *ring_info)
{
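        /* Only write_index is recorded, in the upper 32 bits; the lower
         * 32 bits are left as zero.
         */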
        return (u64)ring_info->ring_buffer->write_index << 32;
}

/*
 * Helper routine to copy from source to ring buffer.
 * Assumes there is enough room.  Handles wrap-around in the destination
 * only: because the data pages are mapped twice back-to-back, a copy
 * that runs past the end of the data area lands in the mirror mapping,
 * so a single memcpy suffices and only the returned offset is wrapped.
 */
static u32 hv_copyto_ringbuffer(
        struct hv_ring_buffer_info      *ring_info,
        u32                             start_write_offset,
        const void                      *src,
        u32                             srclen)
{
        void *ring_buffer = hv_get_ring_buffer(ring_info);
        u32 ring_buffer_size = hv_get_ring_buffersize(ring_info);

        memcpy(ring_buffer + start_write_offset, src, srclen);

        start_write_offset += srclen;
        if (start_write_offset >= ring_buffer_size)
                start_write_offset -= ring_buffer_size;

        return start_write_offset;
}

/*
 * hv_get_ringbuffer_availbytes()
 *
 * Get number of bytes available to read and to write to
 * for the specified ring buffer
 */
static void
hv_get_ringbuffer_availbytes(const struct hv_ring_buffer_info *rbi,
                             u32 *read, u32 *write)
{
        u32 read_loc, write_loc, dsize;

        /* Capture the read/write indices before they change */
        read_loc = READ_ONCE(rbi->ring_buffer->read_index);
        write_loc = READ_ONCE(rbi->ring_buffer->write_index);
        dsize = rbi->ring_datasize;

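        /*
         * Writable space is the gap from write_loc back around to
         * read_loc; everything else is readable, so the two always
         * sum to dsize.
         */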
        *write = write_loc >= read_loc ? dsize - (write_loc - read_loc) :
                read_loc - write_loc;
        *read = dsize - *write;
}

/* Get various debug metrics for the specified ring buffer. */
int hv_ringbuffer_get_debuginfo(struct hv_ring_buffer_info *ring_info,
                                struct hv_ring_buffer_debug_info *debug_info)
{
        u32 bytes_avail_towrite;
        u32 bytes_avail_toread;

        mutex_lock(&ring_info->ring_buffer_mutex);

        if (!ring_info->ring_buffer) {
                mutex_unlock(&ring_info->ring_buffer_mutex);
                return -EINVAL;
        }

        hv_get_ringbuffer_availbytes(ring_info,
                                     &bytes_avail_toread,
                                     &bytes_avail_towrite);
        debug_info->bytes_avail_toread = bytes_avail_toread;
        debug_info->bytes_avail_towrite = bytes_avail_towrite;
        debug_info->current_read_index = ring_info->ring_buffer->read_index;
        debug_info->current_write_index = ring_info->ring_buffer->write_index;
        debug_info->current_interrupt_mask
                = ring_info->ring_buffer->interrupt_mask;
        mutex_unlock(&ring_info->ring_buffer_mutex);

        return 0;
}
EXPORT_SYMBOL_GPL(hv_ringbuffer_get_debuginfo);

/* Initialize a channel's ring buffer info mutex locks */
void hv_ringbuffer_pre_init(struct vmbus_channel *channel)
{
        mutex_init(&channel->inbound.ring_buffer_mutex);
        mutex_init(&channel->outbound.ring_buffer_mutex);
}

/* Initialize the ring buffer. */
int hv_ringbuffer_init(struct hv_ring_buffer_info *ring_info,
                       struct page *pages, u32 page_cnt)
{
        int i;
        struct page **pages_wraparound;

        BUILD_BUG_ON(sizeof(struct hv_ring_buffer) != PAGE_SIZE);

        /*
         * First page holds struct hv_ring_buffer, do wraparound mapping for
         * the rest.
         */
        pages_wraparound = kcalloc(page_cnt * 2 - 1, sizeof(struct page *),
                                   GFP_KERNEL);
        if (!pages_wraparound)
                return -ENOMEM;

        pages_wraparound[0] = pages;
        for (i = 0; i < 2 * (page_cnt - 1); i++)
                pages_wraparound[i + 1] = &pages[i % (page_cnt - 1) + 1];

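        /*
         * The data pages are listed twice back-to-back:
         *   [hdr][d1 .. dN][d1 .. dN]
         * so after vmap() a packet that runs past the end of the data
         * area is still virtually contiguous in the mirror pages; this
         * is what lets readers and writers get by with a single memcpy.
         */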
        ring_info->ring_buffer = (struct hv_ring_buffer *)
                vmap(pages_wraparound, page_cnt * 2 - 1, VM_MAP, PAGE_KERNEL);

        kfree(pages_wraparound);

        if (!ring_info->ring_buffer)
                return -ENOMEM;

        ring_info->ring_buffer->read_index =
                ring_info->ring_buffer->write_index = 0;

        /* Set the feature bit for enabling flow control. */
        ring_info->ring_buffer->feature_bits.value = 1;
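        /*
         * Bit 0 is feat_pending_send_sz, advertising support for the
         * pending_send_sz flow-control scheme that hv_pkt_iter_close()
         * implements.
         */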

        ring_info->ring_size = page_cnt << PAGE_SHIFT;
        ring_info->ring_size_div10_reciprocal =
                reciprocal_value(ring_info->ring_size / 10);
        ring_info->ring_datasize = ring_info->ring_size -
                sizeof(struct hv_ring_buffer);
        ring_info->priv_read_index = 0;

        spin_lock_init(&ring_info->ring_lock);

        return 0;
}

/* Cleanup the ring buffer. */
void hv_ringbuffer_cleanup(struct hv_ring_buffer_info *ring_info)
{
        mutex_lock(&ring_info->ring_buffer_mutex);
        vunmap(ring_info->ring_buffer);
        ring_info->ring_buffer = NULL;
        mutex_unlock(&ring_info->ring_buffer_mutex);
}

/* Write to the ring buffer. */
int hv_ringbuffer_write(struct vmbus_channel *channel,
                        const struct kvec *kv_list, u32 kv_count,
                        u64 requestid)
{
        int i;
        u32 bytes_avail_towrite;
        u32 totalbytes_towrite = sizeof(u64);
        u32 next_write_location;
        u32 old_write;
        u64 prev_indices;
        unsigned long flags;
        struct hv_ring_buffer_info *outring_info = &channel->outbound;
        struct vmpacket_descriptor *desc = kv_list[0].iov_base;
        u64 rqst_id = VMBUS_NO_RQSTOR;
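        /*
         * totalbytes_towrite starts at sizeof(u64) to account for the
         * prev_indices trailer that is appended after the payload;
         * rqst_id stays VMBUS_NO_RQSTOR unless the packet asks for a
         * completion.
         */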

        if (channel->rescind)
                return -ENODEV;

        for (i = 0; i < kv_count; i++)
                totalbytes_towrite += kv_list[i].iov_len;

        spin_lock_irqsave(&outring_info->ring_lock, flags);

        bytes_avail_towrite = hv_get_bytes_to_write(outring_info);

        /*
         * If there is only just enough room for the packet, treat the
         * ring as full: if we wrote it, write_index would catch up with
         * read_index, and an exactly-full ring is indistinguishable
         * from an empty one.
         */
        if (bytes_avail_towrite <= totalbytes_towrite) {
                ++channel->out_full_total;

                if (!channel->out_full_flag) {
                        ++channel->out_full_first;
                        channel->out_full_flag = true;
                }

                spin_unlock_irqrestore(&outring_info->ring_lock, flags);
                return -EAGAIN;
        }

        channel->out_full_flag = false;

        /* Write to the ring buffer */
        next_write_location = hv_get_next_write_location(outring_info);

        old_write = next_write_location;

        for (i = 0; i < kv_count; i++) {
                next_write_location = hv_copyto_ringbuffer(outring_info,
                                                     next_write_location,
                                                     kv_list[i].iov_base,
                                                     kv_list[i].iov_len);
        }

        /*
         * Allocate the request ID after the data has been copied into the
         * ring buffer.  Once this request ID is allocated, the completion
         * path could find the data and free it.
         */

        if (desc->flags == VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED) {
                rqst_id = vmbus_next_request_id(&channel->requestor, requestid);
                if (rqst_id == VMBUS_RQST_ERROR) {
                        spin_unlock_irqrestore(&outring_info->ring_lock, flags);
                        return -EAGAIN;
                }
        }
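        /*
         * Stamp the copy of the descriptor that now lives in the ring:
         * the host echoes trans_id back in its completion, so record
         * the requestor slot when one was allocated, else the raw
         * requestid.
         */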
        desc = hv_get_ring_buffer(outring_info) + old_write;
        desc->trans_id = (rqst_id == VMBUS_NO_RQSTOR) ? requestid : rqst_id;

        /* Set previous packet start */
        prev_indices = hv_get_ring_bufferindices(outring_info);

        next_write_location = hv_copyto_ringbuffer(outring_info,
                                             next_write_location,
                                             &prev_indices,
                                             sizeof(u64));

        /* Issue a full memory barrier before updating the write index */
        virt_mb();

        /* Now, update the write location */
        hv_set_next_write_location(outring_info, next_write_location);

        spin_unlock_irqrestore(&outring_info->ring_lock, flags);

        hv_signal_on_write(old_write, channel);

        if (channel->rescind) {
                if (rqst_id != VMBUS_NO_RQSTOR) {
                        /* Reclaim the request ID to avoid leaking IDs */
                        vmbus_request_addr(&channel->requestor, rqst_id);
                }
                return -ENODEV;
        }

        return 0;
}

int hv_ringbuffer_read(struct vmbus_channel *channel,
                       void *buffer, u32 buflen, u32 *buffer_actual_len,
                       u64 *requestid, bool raw)
{
        struct vmpacket_descriptor *desc;
        u32 packetlen, offset;

        if (unlikely(buflen == 0))
                return -EINVAL;

        *buffer_actual_len = 0;
        *requestid = 0;

        /* Make sure there is something to read */
        desc = hv_pkt_iter_first(channel);
        if (desc == NULL) {
                /*
                 * No error is returned when there is not even a header;
                 * drivers are expected to check buffer_actual_len.
                 */
                return 0;
        }

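        /* offset8 and len8 are in units of 8 bytes, hence the << 3. */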
        offset = raw ? 0 : (desc->offset8 << 3);
        packetlen = (desc->len8 << 3) - offset;
        *buffer_actual_len = packetlen;
        *requestid = desc->trans_id;

        if (unlikely(packetlen > buflen))
                return -ENOBUFS;

        /* since ring is double mapped, only one copy is necessary */
        memcpy(buffer, (const char *)desc + offset, packetlen);

        /* Advance ring index to next packet descriptor */
        __hv_pkt_iter_next(channel, desc);

        /* Notify host of update */
        hv_pkt_iter_close(channel);

        return 0;
}

/*
 * Determine number of bytes available in ring buffer after
 * the current iterator (priv_read_index) location.
 *
 * This is similar to hv_get_bytes_to_read but with private
 * read index instead.
 */
static u32 hv_pkt_iter_avail(const struct hv_ring_buffer_info *rbi)
{
        u32 priv_read_loc = rbi->priv_read_index;
        u32 write_loc = READ_ONCE(rbi->ring_buffer->write_index);

        if (write_loc >= priv_read_loc)
                return write_loc - priv_read_loc;
        else
                return (rbi->ring_datasize - priv_read_loc) + write_loc;
}

/*
 * Get first vmbus packet from ring buffer after read_index
 *
 * If ring buffer is empty, returns NULL and no other action needed.
 */
struct vmpacket_descriptor *hv_pkt_iter_first(struct vmbus_channel *channel)
{
        struct hv_ring_buffer_info *rbi = &channel->inbound;
        struct vmpacket_descriptor *desc;

        hv_debug_delay_test(channel, MESSAGE_DELAY);
        if (hv_pkt_iter_avail(rbi) < sizeof(struct vmpacket_descriptor))
                return NULL;

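        /*
         * The descriptor is safe to dereference even if the packet
         * wraps, thanks to the double mapping.  Prefetch where the next
         * descriptor is likely to start, to warm the cache for the
         * iterator's next step.
         */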
        desc = hv_get_ring_buffer(rbi) + rbi->priv_read_index;
        if (desc)
                prefetch((char *)desc + (desc->len8 << 3));

        return desc;
}
EXPORT_SYMBOL_GPL(hv_pkt_iter_first);

/*
 * Get next vmbus packet from ring buffer.
 *
 * Advances the current location (priv_read_index) and checks for more
 * data. If the end of the ring buffer is reached, then return NULL.
 */
struct vmpacket_descriptor *
__hv_pkt_iter_next(struct vmbus_channel *channel,
                   const struct vmpacket_descriptor *desc)
{
        struct hv_ring_buffer_info *rbi = &channel->inbound;
        u32 packetlen = desc->len8 << 3;
        u32 dsize = rbi->ring_datasize;

        hv_debug_delay_test(channel, MESSAGE_DELAY);
        /* bump offset past the packet and its u64 prev_indices trailer */
        rbi->priv_read_index += packetlen + VMBUS_PKT_TRAILER;
        if (rbi->priv_read_index >= dsize)
                rbi->priv_read_index -= dsize;

        /* more data? */
        return hv_pkt_iter_first(channel);
}
EXPORT_SYMBOL_GPL(__hv_pkt_iter_next);

/* How many bytes were read in this iterator cycle */
static u32 hv_pkt_iter_bytes_read(const struct hv_ring_buffer_info *rbi,
                                        u32 start_read_index)
{
        if (rbi->priv_read_index >= start_read_index)
                return rbi->priv_read_index - start_read_index;
        else
                return rbi->ring_datasize - start_read_index +
                        rbi->priv_read_index;
}

/*
 * Update host ring buffer after iterating over packets. If the host has
 * stopped queuing new entries because it found the ring buffer full, and
 * sufficient space is being freed up, signal the host. But be careful to
 * only signal the host when necessary, both for performance reasons and
 * because Hyper-V protects itself by throttling guests that signal
 * inappropriately.
 *
 * Determining when to signal is tricky. There are three key data inputs
 * that must be handled in this order to avoid race conditions:
 *
 * 1. Update the read_index
 * 2. Read the pending_send_sz
 * 3. Read the current write_index
 *
 * The interrupt_mask is not used to determine when to signal. The
 * interrupt_mask is used only on the guest->host ring buffer when
 * sending requests to the host. The host does not use it on the host->
 * guest ring buffer to indicate whether it should be signaled.
 */
void hv_pkt_iter_close(struct vmbus_channel *channel)
{
        struct hv_ring_buffer_info *rbi = &channel->inbound;
        u32 curr_write_sz, pending_sz, bytes_read, start_read_index;

        /*
         * Make sure all reads are done before we update the read index since
         * the writer may start writing to the read area once the read index
         * is updated.
         */
        virt_rmb();
        start_read_index = rbi->ring_buffer->read_index;
        rbi->ring_buffer->read_index = rbi->priv_read_index;

        /*
         * Older versions of Hyper-V (before WS2012 and Win8) do not
         * implement pending_send_sz and simply poll if the host->guest
         * ring buffer is full.  No signaling is needed or expected.
         */
        if (!rbi->ring_buffer->feature_bits.feat_pending_send_sz)
                return;

        /*
         * Issue a full memory barrier before making the signaling decision.
         * If reading pending_send_sz were reordered to happen before we
         * commit the new read_index, a race could occur: the host could
         * set pending_send_sz after we have sampled it, and then block
         * before we commit the read index, and we would miss sending the
         * interrupt.  The full memory barrier addresses this.
         */
        virt_mb();

        /*
         * If the pending_send_sz is zero, then the ring buffer is not
         * blocked and there is no need to signal.  This is by far the
         * most common case, so exit quickly for best performance.
         */
        pending_sz = READ_ONCE(rbi->ring_buffer->pending_send_sz);
        if (!pending_sz)
                return;

        /*
         * Ensure the read of write_index in hv_get_bytes_to_write()
         * happens after the read of pending_send_sz.
         */
        virt_rmb();
        curr_write_sz = hv_get_bytes_to_write(rbi);
        bytes_read = hv_pkt_iter_bytes_read(rbi, start_read_index);

        /*
         * We want to signal the host only if we're transitioning
         * from a "not enough free space" state to an "enough free
         * space" state.  For example, it's possible that this function
         * could run and free up enough space to signal the host, and then
         * run again and free up additional space before the host has a
         * chance to clear the pending_send_sz.  The 2nd invocation would
         * be a null transition from "enough free space" to "enough free
         * space", which doesn't warrant a signal.
         *
         * Exactly filling the ring buffer is treated as "not enough
         * space". The ring buffer always must have at least one byte
         * empty so the empty and full conditions are distinguishable.
         * hv_get_bytes_to_write() doesn't fully tell the truth in
         * this regard.
         *
         * So first check if we were in the "enough free space" state
         * before we began the iteration. If so, the host was not
         * blocked, and there's no need to signal.
         */
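        /*
         * Worked example: pending_sz = 100, bytes_read = 50,
         * curr_write_sz = 120.  Before this iteration the host saw
         * 120 - 50 = 70 <= 100 writable bytes (blocked); now it has
         * 120 > 100, so this is the blocked -> unblocked transition
         * and the host must be signaled.
         */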
        if (curr_write_sz - bytes_read > pending_sz)
                return;

        /*
         * Similarly, if the new state is "not enough space", then
         * there's no need to signal.
         */
        if (curr_write_sz <= pending_sz)
                return;

        ++channel->intr_in_full;
        vmbus_setevent(channel);
}
EXPORT_SYMBOL_GPL(hv_pkt_iter_close);