Release 4.11 drivers/hv/ring_buffer.c
/*
 *
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen <hjanssen@microsoft.com>
 *   K. Y. Srinivasan <kys@microsoft.com>
 *
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/hyperv.h>
#include <linux/uio.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include "hyperv_vmbus.h"
/*
 * When we write to the ring buffer, check if the host needs to
 * be signaled. Here are the details of this protocol:
 *
 *	1. The host guarantees that while it is draining the
 *	   ring buffer, it will set the interrupt_mask to
 *	   indicate it does not need to be interrupted when
 *	   new data is placed.
 *
 *	2. The host guarantees that it will completely drain
 *	   the ring buffer before exiting the read loop. Further,
 *	   once the ring buffer is empty, it will clear the
 *	   interrupt_mask and re-check to see if new data has
 *	   arrived.
 *
 * KYS: Oct. 30, 2016:
 * It looks like Windows hosts have logic to deal with DOS attacks that
 * can be triggered if it receives interrupts when it is not expecting
 * the interrupt. The host expects interrupts only when the ring
 * transitions from empty to non-empty (or full to non-full on the guest
 * to host ring).
 * So, base the signaling decision solely on the ring state until the
 * host logic is fixed.
 */
static void hv_signal_on_write(u32 old_write, struct vmbus_channel *channel)
{
        struct hv_ring_buffer_info *rbi = &channel->outbound;

        virt_mb();
        if (READ_ONCE(rbi->ring_buffer->interrupt_mask))
                return;

        /* check interrupt_mask before read_index */
        virt_rmb();

        /*
         * This is the only case we need to signal: when the
         * ring transitions from being empty to non-empty.
         */
        if (old_write == READ_ONCE(rbi->ring_buffer->read_index))
                vmbus_setevent(channel);
}
Contributors
Person | Tokens | Token % | Commits | Commit % |
K. Y. Srinivasan | 60 | 95.24% | 5 | 83.33% |
Jason (Hui) Wang | 3 | 4.76% | 1 | 16.67% |
Total | 63 | 100.00% | 6 | 100.00% |
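To see why the write path only signals on old_write == read_index, recall that the ring is empty exactly when read_index == write_index: if the write index we started from equals the host's read index, our write just took the ring from empty to non-empty. A minimal user-space model of that decision (plain C; the integer indices are hypothetical stand-ins for the shared ring fields, and the memory barriers are omitted):

#include <stdbool.h>
#include <stdio.h>

/*
 * Hypothetical stand-in for the guest->host signaling decision: the
 * ring was empty before our write iff the write index we started from
 * (old_write) equals the host's read index.
 */
static bool needs_signal(unsigned int old_write, unsigned int read_index,
                         unsigned int interrupt_mask)
{
        if (interrupt_mask)     /* host is draining; it asked not to be poked */
                return false;
        return old_write == read_index; /* empty -> non-empty transition */
}

int main(void)
{
        /* Ring was empty (read == write == 40) before we wrote: signal. */
        printf("%d\n", needs_signal(40, 40, 0)); /* 1 */
        /* Host still has 24 bytes pending (read lags write): no signal. */
        printf("%d\n", needs_signal(64, 40, 0)); /* 0 */
        /* Host masked interrupts while draining: never signal. */
        printf("%d\n", needs_signal(40, 40, 1)); /* 0 */
        return 0;
}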
/* Get the next write location for the specified ring buffer. */
static inline u32
hv_get_next_write_location(struct hv_ring_buffer_info *ring_info)
{
        u32 next = ring_info->ring_buffer->write_index;

        return next;
}
Contributors
Person | Tokens | Token % | Commits | Commit % |
Hank Janssen | 15 | 62.50% | 1 | 16.67% |
Haiyang Zhang | 4 | 16.67% | 2 | 33.33% |
Greg Kroah-Hartman | 4 | 16.67% | 2 | 33.33% |
K. Y. Srinivasan | 1 | 4.17% | 1 | 16.67% |
Total | 24 | 100.00% | 6 | 100.00% |
/* Set the next write location for the specified ring buffer. */
static inline void
hv_set_next_write_location(struct hv_ring_buffer_info *ring_info,
                           u32 next_write_location)
{
        ring_info->ring_buffer->write_index = next_write_location;
}
Contributors
Person | Tokens | Token % | Commits | Commit % |
Hank Janssen | 13 | 56.52% | 1 | 16.67% |
Haiyang Zhang | 6 | 26.09% | 2 | 33.33% |
Greg Kroah-Hartman | 3 | 13.04% | 2 | 33.33% |
K. Y. Srinivasan | 1 | 4.35% | 1 | 16.67% |
Total | 23 | 100.00% | 6 | 100.00% |
/* Get the next read location for the specified ring buffer. */
static inline u32
hv_get_next_read_location(const struct hv_ring_buffer_info *ring_info)
{
        return ring_info->ring_buffer->read_index;
}
Contributors
Person | Tokens | Token % | Commits | Commit % |
Hank Janssen | 10 | 50.00% | 1 | 14.29% |
Haiyang Zhang | 4 | 20.00% | 2 | 28.57% |
Greg Kroah-Hartman | 3 | 15.00% | 2 | 28.57% |
Stephen Hemminger | 2 | 10.00% | 1 | 14.29% |
K. Y. Srinivasan | 1 | 5.00% | 1 | 14.29% |
Total | 20 | 100.00% | 7 | 100.00% |
/*
 * Get the next read location + offset for the specified ring buffer.
 * This allows the caller to skip over the packet header.
 */
static inline u32
hv_get_next_readlocation_withoffset(const struct hv_ring_buffer_info *ring_info,
                                    u32 offset)
{
        u32 next = ring_info->ring_buffer->read_index;

        next += offset;
        if (next >= ring_info->ring_datasize)
                next -= ring_info->ring_datasize;

        return next;
}
Contributors
Person | Tokens | Token % | Commits | Commit % |
Hank Janssen | 22 | 47.83% | 1 | 12.50% |
Stephen Hemminger | 10 | 21.74% | 2 | 25.00% |
Haiyang Zhang | 8 | 17.39% | 2 | 25.00% |
Greg Kroah-Hartman | 5 | 10.87% | 2 | 25.00% |
K. Y. Srinivasan | 1 | 2.17% | 1 | 12.50% |
Total | 46 | 100.00% | 8 | 100.00% |
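The single subtraction in hv_get_next_readlocation_withoffset() is enough because the offset being skipped is always smaller than ring_datasize, so the sum can exceed the ring size at most once. A standalone check of that arithmetic, with a hypothetical 4096-byte data area:

#include <assert.h>

/* Same wrap arithmetic as above, on hypothetical sizes. */
static unsigned int next_loc(unsigned int read_index, unsigned int offset,
                             unsigned int ring_datasize)
{
        unsigned int next = read_index + offset;

        if (next >= ring_datasize)
                next -= ring_datasize;
        return next;
}

int main(void)
{
        assert(next_loc(1000, 200, 4096) == 1200); /* no wrap */
        assert(next_loc(4000, 200, 4096) == 104);  /* wraps past the end */
        return 0;
}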
/* Set the next read location for the specified ring buffer. */
static inline void
hv_set_next_read_location(struct hv_ring_buffer_info *ring_info,
                          u32 next_read_location)
{
        ring_info->ring_buffer->read_index = next_read_location;
        ring_info->priv_read_index = next_read_location;
}
Contributors
Person | Tokens | Token % | Commits | Commit % |
Hank Janssen | 13 | 44.83% | 1 | 14.29% |
K. Y. Srinivasan | 7 | 24.14% | 2 | 28.57% |
Haiyang Zhang | 6 | 20.69% | 2 | 28.57% |
Greg Kroah-Hartman | 3 | 10.34% | 2 | 28.57% |
Total | 29 | 100.00% | 7 | 100.00% |
/* Get the size of the ring buffer. */
static inline u32
hv_get_ring_buffersize(const struct hv_ring_buffer_info *ring_info)
{
        return ring_info->ring_datasize;
}
Contributors
Person | Tokens | Token % | Commits | Commit % |
Hank Janssen | 10 | 55.56% | 1 | 14.29% |
Haiyang Zhang | 3 | 16.67% | 2 | 28.57% |
Greg Kroah-Hartman | 3 | 16.67% | 2 | 28.57% |
K. Y. Srinivasan | 1 | 5.56% | 1 | 14.29% |
Stephen Hemminger | 1 | 5.56% | 1 | 14.29% |
Total | 18 | 100.00% | 7 | 100.00% |
/*
 * Get the ring buffer indices packed as a u64; only the write index
 * is recorded, in the upper 32 bits (the lower 32 bits are zero).
 */
static inline u64
hv_get_ring_bufferindices(struct hv_ring_buffer_info *ring_info)
{
        return (u64)ring_info->ring_buffer->write_index << 32;
}
Contributors
Person | Tokens | Token % | Commits | Commit % |
Hank Janssen | 15 | 62.50% | 1 | 16.67% |
Haiyang Zhang | 4 | 16.67% | 2 | 33.33% |
Greg Kroah-Hartman | 4 | 16.67% | 2 | 33.33% |
K. Y. Srinivasan | 1 | 4.17% | 1 | 16.67% |
Total | 24 | 100.00% | 6 | 100.00% |
/*
 * Helper routine to copy data from the ring buffer into dest.
 * Assumes there is enough room. The memcpy itself may run past the
 * nominal end of the ring: this is safe because the data pages are
 * double-mapped (see hv_ringbuffer_init() below), so only the returned
 * offset needs to wrap.
 */
static u32 hv_copyfrom_ringbuffer(
        const struct hv_ring_buffer_info *ring_info,
        void                            *dest,
        u32                             destlen,
        u32                             start_read_offset)
{
        void *ring_buffer = hv_get_ring_buffer(ring_info);
        u32 ring_buffer_size = hv_get_ring_buffersize(ring_info);

        memcpy(dest, ring_buffer + start_read_offset, destlen);

        start_read_offset += destlen;
        if (start_read_offset >= ring_buffer_size)
                start_read_offset -= ring_buffer_size;

        return start_read_offset;
}
Contributors
Person | Tokens | Token % | Commits | Commit % |
K. Y. Srinivasan | 46 | 68.66% | 1 | 12.50% |
Stephen Hemminger | 8 | 11.94% | 2 | 25.00% |
Hank Janssen | 6 | 8.96% | 1 | 12.50% |
Greg Kroah-Hartman | 6 | 8.96% | 3 | 37.50% |
Haiyang Zhang | 1 | 1.49% | 1 | 12.50% |
Total | 67 | 100.00% | 8 | 100.00% |
/*
 * Helper routine to copy data from src into the ring buffer.
 * Assumes there is enough room. As with hv_copyfrom_ringbuffer(), the
 * memcpy may cross the nominal end of the ring thanks to the
 * double-mapped data pages; only the returned offset wraps.
 */
static u32 hv_copyto_ringbuffer(
        struct hv_ring_buffer_info      *ring_info,
        u32                             start_write_offset,
        const void                      *src,
        u32                             srclen)
{
        void *ring_buffer = hv_get_ring_buffer(ring_info);
        u32 ring_buffer_size = hv_get_ring_buffersize(ring_info);

        memcpy(ring_buffer + start_write_offset, src, srclen);

        start_write_offset += srclen;
        if (start_write_offset >= ring_buffer_size)
                start_write_offset -= ring_buffer_size;

        return start_write_offset;
}
Contributors
Person | Tokens | Token % | Commits | Commit % |
K. Y. Srinivasan | 46 | 68.66% | 2 | 22.22% |
Stephen Hemminger | 8 | 11.94% | 2 | 22.22% |
Greg Kroah-Hartman | 6 | 8.96% | 3 | 33.33% |
Hank Janssen | 6 | 8.96% | 1 | 11.11% |
Haiyang Zhang | 1 | 1.49% | 1 | 11.11% |
Total | 67 | 100.00% | 9 | 100.00% |
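A single memcpy suffices in both copy helpers because of the double mapping set up by hv_ringbuffer_init() below: the data pages appear twice, back to back, in virtual address space, so a copy that runs past the nominal end simply continues into the second view, which aliases the start of the ring. A user-space sketch that fakes the aliasing with a doubled array (hypothetical 16-byte ring; in the kernel the aliasing is real, provided by vmap(), and needs no fix-up copy):

#include <assert.h>
#include <string.h>

#define RING_SIZE 16

/* ring[RING_SIZE + i] stands in for the second mapping of ring[i]. */
static unsigned char ring[2 * RING_SIZE];

static unsigned int copyto(unsigned int woff, const void *src, unsigned int len)
{
        memcpy(ring + woff, src, len);  /* may cross RING_SIZE */
        if (woff + len > RING_SIZE)     /* fake the alias for the overflow */
                memcpy(ring, ring + RING_SIZE, woff + len - RING_SIZE);
        woff += len;
        if (woff >= RING_SIZE)
                woff -= RING_SIZE;
        return woff;
}

int main(void)
{
        /*
         * Write 8 bytes starting at offset 12: bytes 'A'..'D' land at
         * offsets 12..15, bytes 'E'..'H' alias offsets 0..3.
         */
        unsigned int w = copyto(12, "ABCDEFGH", 8);

        assert(w == 4);
        assert(ring[12] == 'A' && ring[15] == 'D');
        assert(ring[0] == 'E' && ring[3] == 'H');
        return 0;
}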
/* Get various debug metrics for the specified ring buffer. */
void hv_ringbuffer_get_debuginfo(const struct hv_ring_buffer_info *ring_info,
                                 struct hv_ring_buffer_debug_info *debug_info)
{
        u32 bytes_avail_towrite;
        u32 bytes_avail_toread;

        if (ring_info->ring_buffer) {
                hv_get_ringbuffer_availbytes(ring_info,
                                             &bytes_avail_toread,
                                             &bytes_avail_towrite);

                debug_info->bytes_avail_toread = bytes_avail_toread;
                debug_info->bytes_avail_towrite = bytes_avail_towrite;
                debug_info->current_read_index =
                        ring_info->ring_buffer->read_index;
                debug_info->current_write_index =
                        ring_info->ring_buffer->write_index;
                debug_info->current_interrupt_mask =
                        ring_info->ring_buffer->interrupt_mask;
        }
}
Contributors
Person | Tokens | Token % | Commits | Commit % |
Hank Janssen | 43 | 51.81% | 1 | 10.00% |
Haiyang Zhang | 24 | 28.92% | 2 | 20.00% |
Greg Kroah-Hartman | 13 | 15.66% | 4 | 40.00% |
K. Y. Srinivasan | 2 | 2.41% | 2 | 20.00% |
Stephen Hemminger | 1 | 1.20% | 1 | 10.00% |
Total | 83 | 100.00% | 10 | 100.00% |
/* Initialize the ring buffer. */
int hv_ringbuffer_init(struct hv_ring_buffer_info *ring_info,
                       struct page *pages, u32 page_cnt)
{
        int i;
        struct page **pages_wraparound;

        BUILD_BUG_ON((sizeof(struct hv_ring_buffer) != PAGE_SIZE));

        memset(ring_info, 0, sizeof(struct hv_ring_buffer_info));

        /*
         * First page holds struct hv_ring_buffer, do wraparound mapping for
         * the rest.
         */
        pages_wraparound = kzalloc(sizeof(struct page *) * (page_cnt * 2 - 1),
                                   GFP_KERNEL);
        if (!pages_wraparound)
                return -ENOMEM;

        pages_wraparound[0] = pages;
        for (i = 0; i < 2 * (page_cnt - 1); i++)
                pages_wraparound[i + 1] = &pages[i % (page_cnt - 1) + 1];

        ring_info->ring_buffer = (struct hv_ring_buffer *)
                vmap(pages_wraparound, page_cnt * 2 - 1, VM_MAP, PAGE_KERNEL);

        kfree(pages_wraparound);

        if (!ring_info->ring_buffer)
                return -ENOMEM;

        ring_info->ring_buffer->read_index =
                ring_info->ring_buffer->write_index = 0;

        /* Set the feature bit for enabling flow control. */
        ring_info->ring_buffer->feature_bits.value = 1;

        ring_info->ring_size = page_cnt << PAGE_SHIFT;
        ring_info->ring_datasize = ring_info->ring_size -
                sizeof(struct hv_ring_buffer);

        spin_lock_init(&ring_info->ring_lock);

        return 0;
}
Contributors
Person | Tokens | Token % | Commits | Commit % |
Vitaly Kuznetsov | 135 | 58.70% | 2 | 15.38% |
Hank Janssen | 51 | 22.17% | 1 | 7.69% |
Greg Kroah-Hartman | 17 | 7.39% | 5 | 38.46% |
Haiyang Zhang | 15 | 6.52% | 2 | 15.38% |
K. Y. Srinivasan | 11 | 4.78% | 2 | 15.38% |
Bill Pemberton | 1 | 0.43% | 1 | 7.69% |
Total | 230 | 100.00% | 13 | 100.00% |
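To make the wraparound mapping concrete: for a hypothetical page_cnt of 4 (one header page P0 plus three data pages P1..P3), the loop above fills pages_wraparound with [P0, P1, P2, P3, P1, P2, P3], so vmap() lays the data pages out twice in a row after the header. A standalone sketch of just that index arithmetic:

#include <stdio.h>

int main(void)
{
        unsigned int page_cnt = 4;      /* hypothetical: 1 header + 3 data pages */
        unsigned int i;

        printf("P0 ");                  /* pages_wraparound[0] = header page */
        for (i = 0; i < 2 * (page_cnt - 1); i++)
                printf("P%u ", i % (page_cnt - 1) + 1);
        printf("\n");                   /* prints: P0 P1 P2 P3 P1 P2 P3 */
        return 0;
}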
/* Cleanup the ring buffer. */
void hv_ringbuffer_cleanup(struct hv_ring_buffer_info *ring_info)
{
        vunmap(ring_info->ring_buffer);
}
Contributors
Person | Tokens | Token % | Commits | Commit % |
Vitaly Kuznetsov | 9 | 52.94% | 1 | 16.67% |
Hank Janssen | 3 | 17.65% | 1 | 16.67% |
Greg Kroah-Hartman | 3 | 17.65% | 2 | 33.33% |
K. Y. Srinivasan | 1 | 5.88% | 1 | 16.67% |
Haiyang Zhang | 1 | 5.88% | 1 | 16.67% |
Total | 17 | 100.00% | 6 | 100.00% |
/* Write to the ring buffer. */
int hv_ringbuffer_write(struct vmbus_channel *channel,
                        const struct kvec *kv_list, u32 kv_count)
{
        int i = 0;
        u32 bytes_avail_towrite;
        u32 totalbytes_towrite = 0;
        u32 next_write_location;
        u32 old_write;
        u64 prev_indices = 0;
        unsigned long flags = 0;
        struct hv_ring_buffer_info *outring_info = &channel->outbound;

        if (channel->rescind)
                return -ENODEV;

        for (i = 0; i < kv_count; i++)
                totalbytes_towrite += kv_list[i].iov_len;

        totalbytes_towrite += sizeof(u64);

        spin_lock_irqsave(&outring_info->ring_lock, flags);

        bytes_avail_towrite = hv_get_bytes_to_write(outring_info);

        /*
         * If there is only just enough room for the packet, treat the
         * ring as full. If we allowed the write, read index would equal
         * write index afterwards, and the next time around the ring
         * buffer would appear empty.
         */
        if (bytes_avail_towrite <= totalbytes_towrite) {
                spin_unlock_irqrestore(&outring_info->ring_lock, flags);
                return -EAGAIN;
        }

        /* Write to the ring buffer */
        next_write_location = hv_get_next_write_location(outring_info);

        old_write = next_write_location;

        for (i = 0; i < kv_count; i++) {
                next_write_location = hv_copyto_ringbuffer(outring_info,
                                                           next_write_location,
                                                           kv_list[i].iov_base,
                                                           kv_list[i].iov_len);
        }

        /* Set previous packet start */
        prev_indices = hv_get_ring_bufferindices(outring_info);

        next_write_location = hv_copyto_ringbuffer(outring_info,
                                                   next_write_location,
                                                   &prev_indices,
                                                   sizeof(u64));

        /* Issue a full memory barrier before updating the write index */
        virt_mb();

        /* Now, update the write location */
        hv_set_next_write_location(outring_info, next_write_location);

        spin_unlock_irqrestore(&outring_info->ring_lock, flags);

        hv_signal_on_write(old_write, channel);

        if (channel->rescind)
                return -ENODEV;

        return 0;
}
Contributors
Person | Tokens | Token % | Commits | Commit % |
K. Y. Srinivasan | 107 | 41.96% | 10 | 47.62% |
Hank Janssen | 88 | 34.51% | 1 | 4.76% |
Greg Kroah-Hartman | 28 | 10.98% | 5 | 23.81% |
Haiyang Zhang | 25 | 9.80% | 1 | 4.76% |
Bill Pemberton | 3 | 1.18% | 1 | 4.76% |
Nicolas Palix | 2 | 0.78% | 1 | 4.76% |
Vitaly Kuznetsov | 1 | 0.39% | 1 | 4.76% |
Stephen Hemminger | 1 | 0.39% | 1 | 4.76% |
Total | 255 | 100.00% | 21 | 100.00% |
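A hedged sketch of how a caller might drive hv_ringbuffer_write(), loosely modeled on what vmbus_sendpacket() does in this release: the descriptor and payload go in as a two-entry kvec, and the ring code appends the 8-byte index trailer itself. example_send() and its parameters are hypothetical; a real caller must also pad the payload to an 8-byte multiple.

/*
 * Hypothetical caller sketch (not a real driver): send one inband
 * packet consisting of a vmpacket_descriptor followed by a payload.
 */
static int example_send(struct vmbus_channel *channel,
                        void *payload, u32 payload_len, u64 req_id)
{
        struct vmpacket_descriptor desc;
        struct kvec kv[2];

        desc.type = VM_PKT_DATA_INBAND;
        desc.offset8 = sizeof(desc) >> 3;               /* payload offset, 8-byte units */
        desc.len8 = (sizeof(desc) + payload_len) >> 3;  /* total length, 8-byte units */
        desc.flags = VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED;
        desc.trans_id = req_id;

        kv[0].iov_base = &desc;
        kv[0].iov_len = sizeof(desc);
        kv[1].iov_base = payload;
        kv[1].iov_len = payload_len;    /* assumed already 8-byte aligned */

        return hv_ringbuffer_write(channel, kv, 2);
}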
int hv_ringbuffer_read(struct vmbus_channel *channel,
                       void *buffer, u32 buflen, u32 *buffer_actual_len,
                       u64 *requestid, bool raw)
{
        u32 bytes_avail_toread;
        u32 next_read_location = 0;
        u64 prev_indices = 0;
        struct vmpacket_descriptor desc;
        u32 offset;
        u32 packetlen;
        int ret = 0;
        struct hv_ring_buffer_info *inring_info = &channel->inbound;

        if (buflen <= 0)
                return -EINVAL;

        *buffer_actual_len = 0;
        *requestid = 0;

        bytes_avail_toread = hv_get_bytes_to_read(inring_info);
        /* Make sure there is something to read */
        if (bytes_avail_toread < sizeof(desc)) {
                /*
                 * No error is returned when there is not even a complete
                 * header; drivers are expected to check buffer_actual_len.
                 */
                return ret;
        }

        init_cached_read_index(channel);

        next_read_location = hv_get_next_read_location(inring_info);
        next_read_location = hv_copyfrom_ringbuffer(inring_info, &desc,
                                                    sizeof(desc),
                                                    next_read_location);

        offset = raw ? 0 : (desc.offset8 << 3);
        packetlen = (desc.len8 << 3) - offset;
        *buffer_actual_len = packetlen;
        *requestid = desc.trans_id;

        if (bytes_avail_toread < packetlen + offset)
                return -EAGAIN;

        if (packetlen > buflen)
                return -ENOBUFS;

        next_read_location =
                hv_get_next_readlocation_withoffset(inring_info, offset);

        next_read_location = hv_copyfrom_ringbuffer(inring_info,
                                                    buffer,
                                                    packetlen,
                                                    next_read_location);

        next_read_location = hv_copyfrom_ringbuffer(inring_info,
                                                    &prev_indices,
                                                    sizeof(u64),
                                                    next_read_location);

        /*
         * Make sure all reads are done before we update the read index since
         * the writer may start writing to the read area once the read index
         * is updated.
         */
        virt_mb();

        /* Update the read index */
        hv_set_next_read_location(inring_info, next_read_location);

        hv_signal_on_read(channel);

        return ret;
}
Contributors
Person | Tokens | Token % | Commits | Commit % |
Vitaly Kuznetsov | 119 | 46.30% | 3 | 14.29% |
Hank Janssen | 58 | 22.57% | 1 | 4.76% |
K. Y. Srinivasan | 32 | 12.45% | 7 | 33.33% |
Haiyang Zhang | 22 | 8.56% | 1 | 4.76% |
Greg Kroah-Hartman | 11 | 4.28% | 6 | 28.57% |
Bill Pemberton | 10 | 3.89% | 2 | 9.52% |
Dexuan Cui | 5 | 1.95% | 1 | 4.76% |
Total | 257 | 100.00% | 21 | 100.00% |
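On the consume side, note that hv_ringbuffer_read() returns 0 both for "got a packet" and for "ring empty", so callers must inspect buffer_actual_len to tell the two apart. A hedged consumer sketch (example_recv() and the 512-byte buffer are assumptions for illustration):

/* Hypothetical consumer sketch: drain one packet if one is available. */
static int example_recv(struct vmbus_channel *channel)
{
        u8 buf[512];                    /* assumed large enough for one packet */
        u32 actual_len = 0;
        u64 req_id = 0;
        int ret;

        ret = hv_ringbuffer_read(channel, buf, sizeof(buf), &actual_len,
                                 &req_id, false);
        if (ret)                        /* -EINVAL, -EAGAIN or -ENOBUFS */
                return ret;
        if (actual_len == 0)            /* ring was empty: not an error */
                return 0;

        /* ... process actual_len bytes of payload in buf for req_id ... */
        return 0;
}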
Overall Contributors
Person | Tokens | Token % | Commits | Commit % |
Hank Janssen | 362 | 28.59% | 2 | 3.92% |
K. Y. Srinivasan | 326 | 25.75% | 25 | 49.02% |
Vitaly Kuznetsov | 283 | 22.35% | 4 | 7.84% |
Haiyang Zhang | 124 | 9.79% | 2 | 3.92% |
Greg Kroah-Hartman | 116 | 9.16% | 10 | 19.61% |
Stephen Hemminger | 31 | 2.45% | 2 | 3.92% |
Bill Pemberton | 14 | 1.11% | 3 | 5.88% |
Dexuan Cui | 5 | 0.39% | 1 | 1.96% |
Jason (Hui) Wang | 3 | 0.24% | 1 | 1.96% |
Nicolas Palix | 2 | 0.16% | 1 | 1.96% |
Total | 1266 | 100.00% | 51 | 100.00% |