cregit-Linux how code gets into the kernel

Release 4.12 include/linux/ptr_ring.h

Directory: include/linux
/*
 *      Definitions for the 'struct ptr_ring' datastructure.
 *
 *      Author:
 *              Michael S. Tsirkin <mst@redhat.com>
 *
 *      Copyright (C) 2016 Red Hat, Inc.
 *
 *      This program is free software; you can redistribute it and/or modify it
 *      under the terms of the GNU General Public License as published by the
 *      Free Software Foundation; either version 2 of the License, or (at your
 *      option) any later version.
 *
 *      This is a limited-size FIFO maintaining pointers in FIFO order, with
 *      one CPU producing entries and another consuming them.
 *
 *      This implementation tries to minimize cache-contention when there is a
 *      single producer and a single consumer CPU.
 */

#ifndef _LINUX_PTR_RING_H
#define _LINUX_PTR_RING_H 1

#ifdef __KERNEL__
#include <linux/spinlock.h>
#include <linux/cache.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/cache.h>
#include <linux/slab.h>
#include <asm/errno.h>
#endif


struct ptr_ring {
	int producer ____cacheline_aligned_in_smp;
	spinlock_t producer_lock;
	int consumer_head ____cacheline_aligned_in_smp; /* next valid entry */
	int consumer_tail; /* next entry to invalidate */
	spinlock_t consumer_lock;
	/* Shared consumer/producer data */
	/* Read-only by both the producer and the consumer */
	int size ____cacheline_aligned_in_smp; /* max entries in queue */
	int batch; /* number of entries to consume in a batch */
	void **queue;
};

/* Note: callers invoking this in a loop must use a compiler barrier,
 * for example cpu_relax().  If ring is ever resized, callers must hold
 * producer_lock - see e.g. ptr_ring_full.  Otherwise, if callers don't hold
 * producer_lock, the next call to __ptr_ring_produce may fail.
 */

static inline bool __ptr_ring_full(struct ptr_ring *r)
{
	return r->queue[r->producer];
}

Contributors

Person               Tokens     Prop   Commits   CommitProp
Michael S. Tsirkin       22  100.00%         1      100.00%
Total                    22  100.00%         1      100.00%

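For illustration only (this sketch is not part of the header): a producer that must wait for space can spin on the full test while holding producer_lock, using cpu_relax() as the compiler barrier the note above calls for. The function name is hypothetical, and a non-zero ring size is assumed.

static inline void example_wait_for_space(struct ptr_ring *r)
{
	spin_lock(&r->producer_lock);
	while (__ptr_ring_full(r))
		cpu_relax();	/* compiler barrier between re-reads of the slot */
	/* ... produce here while still holding producer_lock ... */
	spin_unlock(&r->producer_lock);
}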

static inline bool ptr_ring_full(struct ptr_ring *r)
{
	bool ret;

	spin_lock(&r->producer_lock);
	ret = __ptr_ring_full(r);
	spin_unlock(&r->producer_lock);

	return ret;
}

Contributors

Person               Tokens     Prop   Commits   CommitProp
Michael S. Tsirkin       41  100.00%         2      100.00%
Total                    41  100.00%         2      100.00%


static inline bool ptr_ring_full_irq(struct ptr_ring *r)
{
	bool ret;

	spin_lock_irq(&r->producer_lock);
	ret = __ptr_ring_full(r);
	spin_unlock_irq(&r->producer_lock);

	return ret;
}

Contributors

Person               Tokens     Prop   Commits   CommitProp
Michael S. Tsirkin       41  100.00%         1      100.00%
Total                    41  100.00%         1      100.00%


static inline bool ptr_ring_full_any(struct ptr_ring *r)
{
	unsigned long flags;
	bool ret;

	spin_lock_irqsave(&r->producer_lock, flags);
	ret = __ptr_ring_full(r);
	spin_unlock_irqrestore(&r->producer_lock, flags);

	return ret;
}

Contributors

Person               Tokens     Prop   Commits   CommitProp
Michael S. Tsirkin       49  100.00%         2      100.00%
Total                    49  100.00%         2      100.00%


static inline bool ptr_ring_full_bh(struct ptr_ring *r)
{
	bool ret;

	spin_lock_bh(&r->producer_lock);
	ret = __ptr_ring_full(r);
	spin_unlock_bh(&r->producer_lock);

	return ret;
}

Contributors

Person               Tokens     Prop   Commits   CommitProp
Michael S. Tsirkin       41  100.00%         2      100.00%
Total                    41  100.00%         2      100.00%

/* Note: callers invoking this in a loop must use a compiler barrier,
 * for example cpu_relax(). Callers must hold producer_lock.
 */
static inline int __ptr_ring_produce(struct ptr_ring *r, void *ptr)
{
	if (unlikely(!r->size) || r->queue[r->producer])
		return -ENOSPC;

	r->queue[r->producer++] = ptr;
	if (unlikely(r->producer >= r->size))
		r->producer = 0;

	return 0;
}

Contributors

Person               Tokens     Prop   Commits   CommitProp
Michael S. Tsirkin       65   89.04%         2       66.67%
Jason (Hui) Wang          8   10.96%         1       33.33%
Total                    73  100.00%         3      100.00%

/*
 * Note: resize (below) nests producer lock within consumer lock, so if you
 * consume in interrupt or BH context, you must disable interrupts/BH when
 * calling this.
 */
static inline int ptr_ring_produce(struct ptr_ring *r, void *ptr)
{
	int ret;

	spin_lock(&r->producer_lock);
	ret = __ptr_ring_produce(r, ptr);
	spin_unlock(&r->producer_lock);

	return ret;
}

Contributors

Person               Tokens     Prop   Commits   CommitProp
Michael S. Tsirkin       47  100.00%         1      100.00%
Total                    47  100.00%         1      100.00%


static inline int ptr_ring_produce_irq(struct ptr_ring *r, void *ptr)
{
	int ret;

	spin_lock_irq(&r->producer_lock);
	ret = __ptr_ring_produce(r, ptr);
	spin_unlock_irq(&r->producer_lock);

	return ret;
}

Contributors

Person               Tokens     Prop   Commits   CommitProp
Michael S. Tsirkin       47  100.00%         1      100.00%
Total                    47  100.00%         1      100.00%


static inline int ptr_ring_produce_any(struct ptr_ring *r, void *ptr)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&r->producer_lock, flags);
	ret = __ptr_ring_produce(r, ptr);
	spin_unlock_irqrestore(&r->producer_lock, flags);

	return ret;
}

Contributors

Person               Tokens     Prop   Commits   CommitProp
Michael S. Tsirkin       55  100.00%         1      100.00%
Total                    55  100.00%         1      100.00%


static inline int ptr_ring_produce_bh(struct ptr_ring *r, void *ptr)
{
	int ret;

	spin_lock_bh(&r->producer_lock);
	ret = __ptr_ring_produce(r, ptr);
	spin_unlock_bh(&r->producer_lock);

	return ret;
}

Contributors

Person               Tokens     Prop   Commits   CommitProp
Michael S. Tsirkin       47  100.00%         1      100.00%
Total                    47  100.00%         1      100.00%

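As a hypothetical pairing (not part of this header): when the consumer runs in softirq/BH context, the nesting note above means the producer must keep BH disabled while holding producer_lock, which is exactly what the _bh variant does. The function name below is illustrative only.

static int example_enqueue(struct ptr_ring *r, void *item)
{
	/* The consumer runs in BH, so produce with BH disabled; the
	 * BH-context consumer itself can use plain ptr_ring_consume()
	 * (defined further below).
	 */
	return ptr_ring_produce_bh(r, item);	/* -ENOSPC when the ring is full */
}
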
/* Note: callers invoking this in a loop must use a compiler barrier,
 * for example cpu_relax(). Callers must take consumer_lock
 * if they dereference the pointer - see e.g. PTR_RING_PEEK_CALL.
 * If ring is never resized, and if the pointer is merely
 * tested, there's no need to take the lock - see e.g. __ptr_ring_empty.
 */
static inline void *__ptr_ring_peek(struct ptr_ring *r)
{
	if (likely(r->size))
		return r->queue[r->consumer_head];
	return NULL;
}

Contributors

Person               Tokens     Prop   Commits   CommitProp
Michael S. Tsirkin       23   65.71%         2       66.67%
Jason (Hui) Wang         12   34.29%         1       33.33%
Total                    35  100.00%         3      100.00%

/* Note: callers invoking this in a loop must use a compiler barrier,
 * for example cpu_relax(). Callers must take consumer_lock
 * if the ring is ever resized - see e.g. ptr_ring_empty.
 */
static inline bool __ptr_ring_empty(struct ptr_ring *r)
{
	return !__ptr_ring_peek(r);
}

Contributors

Person               Tokens     Prop   Commits   CommitProp
Michael S. Tsirkin       19  100.00%         1      100.00%
Total                    19  100.00%         1      100.00%

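A minimal sketch, assuming the ring is never resized: per the note above, merely testing for emptiness then needs no lock, for example in a polling loop. The helper name is hypothetical.

static inline bool example_has_pending(struct ptr_ring *r)
{
	return !__ptr_ring_empty(r);	/* lockless test is only safe if the
					 * ring is never resized */
}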

static inline bool ptr_ring_empty(struct ptr_ring *r)
{
	bool ret;

	spin_lock(&r->consumer_lock);
	ret = __ptr_ring_empty(r);
	spin_unlock(&r->consumer_lock);

	return ret;
}

Contributors

Person               Tokens     Prop   Commits   CommitProp
Michael S. Tsirkin       41  100.00%         2      100.00%
Total                    41  100.00%         2      100.00%


static inline bool ptr_ring_empty_irq(struct ptr_ring *r)
{
	bool ret;

	spin_lock_irq(&r->consumer_lock);
	ret = __ptr_ring_empty(r);
	spin_unlock_irq(&r->consumer_lock);

	return ret;
}

Contributors

Person               Tokens     Prop   Commits   CommitProp
Michael S. Tsirkin       41  100.00%         1      100.00%
Total                    41  100.00%         1      100.00%


static inline bool ptr_ring_empty_any(struct ptr_ring *r)
{
	unsigned long flags;
	bool ret;

	spin_lock_irqsave(&r->consumer_lock, flags);
	ret = __ptr_ring_empty(r);
	spin_unlock_irqrestore(&r->consumer_lock, flags);

	return ret;
}

Contributors

Person               Tokens     Prop   Commits   CommitProp
Michael S. Tsirkin       49  100.00%         1      100.00%
Total                    49  100.00%         1      100.00%


static inline bool ptr_ring_empty_bh(struct ptr_ring *r)
{
	bool ret;

	spin_lock_bh(&r->consumer_lock);
	ret = __ptr_ring_empty(r);
	spin_unlock_bh(&r->consumer_lock);

	return ret;
}

Contributors

Person               Tokens     Prop   Commits   CommitProp
Michael S. Tsirkin       41  100.00%         2      100.00%
Total                    41  100.00%         2      100.00%

/* Must only be called after __ptr_ring_peek returned !NULL */
static inline void __ptr_ring_discard_one(struct ptr_ring *r)
{
	/* Fundamentally, what we want to do is update consumer
	 * index and zero out the entry so producer can reuse it.
	 * Doing it naively at each consume would be as simple as:
	 *       r->queue[r->consumer++] = NULL;
	 *       if (unlikely(r->consumer >= r->size))
	 *               r->consumer = 0;
	 * but that is suboptimal when the ring is full as producer is writing
	 * out new entries in the same cache line. Defer these updates until a
	 * batch of entries has been consumed.
	 */
	int head = r->consumer_head++;

	/* Once we have processed enough entries invalidate them in
	 * the ring all at once so producer can reuse their space in the ring.
	 * We also do this when we reach end of the ring - not mandatory
	 * but helps keep the implementation simple.
	 */
	if (unlikely(r->consumer_head - r->consumer_tail >= r->batch ||
		     r->consumer_head >= r->size)) {
		/* Zero out entries in the reverse order: this way we touch the
		 * cache line that producer might currently be reading the last;
		 * producer won't make progress and touch other cache lines
		 * besides the first one until we write out all entries.
		 */
		while (likely(head >= r->consumer_tail))
			r->queue[head--] = NULL;
		r->consumer_tail = r->consumer_head;
	}
	if (unlikely(r->consumer_head >= r->size)) {
		r->consumer_head = 0;
		r->consumer_tail = 0;
	}
}

Contributors

Person               Tokens     Prop   Commits   CommitProp
Michael S. Tsirkin      106  100.00%         2      100.00%
Total                   106  100.00%         2      100.00%

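As a worked example of the batching above, assuming 64-byte cache lines and 8-byte pointers (which give a batch of 16, see __ptr_ring_set_size below) and a 64-entry ring: consumer_head advances on every consume, but only when it reaches 16, 32, 48 and 64 does the consumer NULL out the consumed slots, walking backwards from the most recently consumed slot down to consumer_tail, and then advance consumer_tail; reaching the end of the ring also forces the flush and wraps both indices back to 0.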

static inline void *__ptr_ring_consume(struct ptr_ring *r)
{
	void *ptr;

	ptr = __ptr_ring_peek(r);
	if (ptr)
		__ptr_ring_discard_one(r);

	return ptr;
}

Contributors

Person               Tokens     Prop   Commits   CommitProp
Michael S. Tsirkin       36  100.00%         1      100.00%
Total                    36  100.00%         1      100.00%

/*
 * Note: resize (below) nests producer lock within consumer lock, so if you
 * call this in interrupt or BH context, you must disable interrupts/BH when
 * producing.
 */
static inline void *ptr_ring_consume(struct ptr_ring *r)
{
	void *ptr;

	spin_lock(&r->consumer_lock);
	ptr = __ptr_ring_consume(r);
	spin_unlock(&r->consumer_lock);

	return ptr;
}

Contributors

Person               Tokens     Prop   Commits   CommitProp
Michael S. Tsirkin       43  100.00%         1      100.00%
Total                    43  100.00%         1      100.00%


static inline void *ptr_ring_consume_irq(struct ptr_ring *r)
{
	void *ptr;

	spin_lock_irq(&r->consumer_lock);
	ptr = __ptr_ring_consume(r);
	spin_unlock_irq(&r->consumer_lock);

	return ptr;
}

Contributors

Person               Tokens     Prop   Commits   CommitProp
Michael S. Tsirkin       43  100.00%         1      100.00%
Total                    43  100.00%         1      100.00%


static inline void *ptr_ring_consume_any(struct ptr_ring *r)
{
	unsigned long flags;
	void *ptr;

	spin_lock_irqsave(&r->consumer_lock, flags);
	ptr = __ptr_ring_consume(r);
	spin_unlock_irqrestore(&r->consumer_lock, flags);

	return ptr;
}

Contributors

Person               Tokens     Prop   Commits   CommitProp
Michael S. Tsirkin       51  100.00%         1      100.00%
Total                    51  100.00%         1      100.00%


static inline void *ptr_ring_consume_bh(struct ptr_ring *r)
{
	void *ptr;

	spin_lock_bh(&r->consumer_lock);
	ptr = __ptr_ring_consume(r);
	spin_unlock_bh(&r->consumer_lock);

	return ptr;
}

Contributors

Person               Tokens     Prop   Commits   CommitProp
Michael S. Tsirkin       43  100.00%         1      100.00%
Total                    43  100.00%         1      100.00%

/* Cast to structure type and call a function without discarding from FIFO.
 * Function must return a value.
 * Callers must take consumer_lock.
 */
#define __PTR_RING_PEEK_CALL(r, f) ((f)(__ptr_ring_peek(r)))

#define PTR_RING_PEEK_CALL(r, f) ({ \
	typeof((f)(NULL)) __PTR_RING_PEEK_CALL_v; \
	\
	spin_lock(&(r)->consumer_lock); \
	__PTR_RING_PEEK_CALL_v = __PTR_RING_PEEK_CALL(r, f); \
	spin_unlock(&(r)->consumer_lock); \
	__PTR_RING_PEEK_CALL_v; \
})

#define PTR_RING_PEEK_CALL_IRQ(r, f) ({ \
	typeof((f)(NULL)) __PTR_RING_PEEK_CALL_v; \
	\
	spin_lock_irq(&(r)->consumer_lock); \
	__PTR_RING_PEEK_CALL_v = __PTR_RING_PEEK_CALL(r, f); \
	spin_unlock_irq(&(r)->consumer_lock); \
	__PTR_RING_PEEK_CALL_v; \
})

#define PTR_RING_PEEK_CALL_BH(r, f) ({ \
	typeof((f)(NULL)) __PTR_RING_PEEK_CALL_v; \
	\
	spin_lock_bh(&(r)->consumer_lock); \
	__PTR_RING_PEEK_CALL_v = __PTR_RING_PEEK_CALL(r, f); \
	spin_unlock_bh(&(r)->consumer_lock); \
	__PTR_RING_PEEK_CALL_v; \
})

#define PTR_RING_PEEK_CALL_ANY(r, f) ({ \
	typeof((f)(NULL)) __PTR_RING_PEEK_CALL_v; \
	unsigned long __PTR_RING_PEEK_CALL_f; \
	\
	spin_lock_irqsave(&(r)->consumer_lock, __PTR_RING_PEEK_CALL_f); \
	__PTR_RING_PEEK_CALL_v = __PTR_RING_PEEK_CALL(r, f); \
	spin_unlock_irqrestore(&(r)->consumer_lock, __PTR_RING_PEEK_CALL_f); \
	__PTR_RING_PEEK_CALL_v; \
})
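A hypothetical use of PTR_RING_PEEK_CALL (not part of this header): inspect a field of the head entry without consuming it. The item type and helpers below are illustrative only; the helper must tolerate a NULL argument, since __ptr_ring_peek() returns NULL when the ring is empty.

struct example_item {
	int len;
};

static int example_item_len(void *ptr)
{
	struct example_item *item = ptr;

	return item ? item->len : 0;	/* NULL means the ring was empty */
}

/* Takes and releases consumer_lock around the peek. */
static int example_peek_len(struct ptr_ring *r)
{
	return PTR_RING_PEEK_CALL(r, example_item_len);
}
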
static inline void **__ptr_ring_init_queue_alloc(int size, gfp_t gfp)
{
	return kzalloc(ALIGN(size * sizeof(void *), SMP_CACHE_BYTES), gfp);
}

Contributors

Person               Tokens     Prop   Commits   CommitProp
Michael S. Tsirkin       34  100.00%         1      100.00%
Total                    34  100.00%         1      100.00%


static inline void __ptr_ring_set_size(struct ptr_ring *r, int size)
{
	r->size = size;
	r->batch = SMP_CACHE_BYTES * 2 / sizeof(*(r->queue));
	/* We need to set batch at least to 1 to make logic
	 * in __ptr_ring_discard_one work correctly.
	 * Batching too much (because ring is small) would cause a lot of
	 * burstiness. Needs tuning, for now disable batching.
	 */
	if (r->batch > r->size / 2 || !r->batch)
		r->batch = 1;
}

Contributors

Person               Tokens     Prop   Commits   CommitProp
Michael S. Tsirkin       63  100.00%         1      100.00%
Total                    63  100.00%         1      100.00%

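For example, assuming 64-byte cache lines (SMP_CACHE_BYTES == 64) and 8-byte pointers, this computes a batch of 128 / 8 = 16 entries; a ring with fewer than 32 entries then falls back to a batch of 1 via the size/2 check.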

static inline int ptr_ring_init(struct ptr_ring *r, int size, gfp_t gfp)
{
	r->queue = __ptr_ring_init_queue_alloc(size, gfp);
	if (!r->queue)
		return -ENOMEM;

	__ptr_ring_set_size(r, size);
	r->producer = r->consumer_head = r->consumer_tail = 0;
	spin_lock_init(&r->producer_lock);
	spin_lock_init(&r->consumer_lock);

	return 0;
}

Contributors

Person               Tokens     Prop   Commits   CommitProp
Michael S. Tsirkin       80  100.00%         3      100.00%
Total                    80  100.00%         3      100.00%

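A minimal end-to-end sketch, assuming process context, a single producer and consumer, and kmalloc'ed items owned by the caller; the function name is hypothetical and not part of this header.

static int example_roundtrip(void)
{
	struct ptr_ring ring;
	void *item, *out;
	int err;

	err = ptr_ring_init(&ring, 64, GFP_KERNEL);	/* 64-slot FIFO */
	if (err)
		return err;

	item = kmalloc(32, GFP_KERNEL);
	if (!item) {
		err = -ENOMEM;
		goto out;
	}

	err = ptr_ring_produce(&ring, item);	/* 0 on success, -ENOSPC if full */
	out = ptr_ring_consume(&ring);		/* NULL when the ring is empty */
	kfree(out);
out:
	ptr_ring_cleanup(&ring, NULL);	/* ring is empty, no destroy callback needed */
	return err;
}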

static inline void **__ptr_ring_swap_queue(struct ptr_ring *r, void **queue,
					   int size, gfp_t gfp,
					   void (*destroy)(void *))
{
	int producer = 0;
	void **old;
	void *ptr;

	while ((ptr = __ptr_ring_consume(r)))
		if (producer < size)
			queue[producer++] = ptr;
		else if (destroy)
			destroy(ptr);

	__ptr_ring_set_size(r, size);
	r->producer = producer;
	r->consumer_head = 0;
	r->consumer_tail = 0;
	old = r->queue;
	r->queue = queue;

	return old;
}

Contributors

Person               Tokens     Prop   Commits   CommitProp
Michael S. Tsirkin      124  100.00%         4      100.00%
Total                   124  100.00%         4      100.00%

/*
 * Note: producer lock is nested within consumer lock, so if you
 * resize you must make sure all uses nest correctly.
 * In particular if you consume ring in interrupt or BH context, you must
 * disable interrupts/BH when doing so.
 */
static inline int ptr_ring_resize(struct ptr_ring *r, int size, gfp_t gfp,
				  void (*destroy)(void *))
{
	unsigned long flags;
	void **queue = __ptr_ring_init_queue_alloc(size, gfp);
	void **old;

	if (!queue)
		return -ENOMEM;

	spin_lock_irqsave(&(r)->consumer_lock, flags);
	spin_lock(&(r)->producer_lock);

	old = __ptr_ring_swap_queue(r, queue, size, gfp, destroy);

	spin_unlock(&(r)->producer_lock);
	spin_unlock_irqrestore(&(r)->consumer_lock, flags);

	kfree(old);

	return 0;
}

Contributors

Person               Tokens     Prop   Commits   CommitProp
Michael S. Tsirkin      125  100.00%         3      100.00%
Total                   125  100.00%         3      100.00%

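A hypothetical shrink operation (not part of this header): entries that no longer fit in the smaller ring are handed to the destroy callback. GFP_KERNEL assumes process context; the resize itself takes consumer_lock and then producer_lock with interrupts disabled, matching the nesting rule above.

static void example_drop(void *ptr)
{
	kfree(ptr);	/* called only for entries that do not fit any more */
}

static int example_shrink(struct ptr_ring *r, int new_size)
{
	return ptr_ring_resize(r, new_size, GFP_KERNEL, example_drop);
}
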
/*
 * Note: producer lock is nested within consumer lock, so if you
 * resize you must make sure all uses nest correctly.
 * In particular if you consume ring in interrupt or BH context, you must
 * disable interrupts/BH when doing so.
 */
static inline int ptr_ring_resize_multiple(struct ptr_ring **rings, int nrings,
					   int size, gfp_t gfp,
					   void (*destroy)(void *))
{
	unsigned long flags;
	void ***queues;
	int i;

	queues = kmalloc(nrings * sizeof *queues, gfp);
	if (!queues)
		goto noqueues;

	for (i = 0; i < nrings; ++i) {
		queues[i] = __ptr_ring_init_queue_alloc(size, gfp);
		if (!queues[i])
			goto nomem;
	}

	for (i = 0; i < nrings; ++i) {
		spin_lock_irqsave(&(rings[i])->consumer_lock, flags);
		spin_lock(&(rings[i])->producer_lock);
		queues[i] = __ptr_ring_swap_queue(rings[i], queues[i],
						  size, gfp, destroy);
		spin_unlock(&(rings[i])->producer_lock);
		spin_unlock_irqrestore(&(rings[i])->consumer_lock, flags);
	}

	for (i = 0; i < nrings; ++i)
		kfree(queues[i]);

	kfree(queues);

	return 0;

nomem:
	while (--i >= 0)
		kfree(queues[i]);

	kfree(queues);

noqueues:
	return -ENOMEM;
}

Contributors

Person               Tokens     Prop   Commits   CommitProp
Michael S. Tsirkin      256  100.00%         2      100.00%
Total                   256  100.00%         2      100.00%


static inline void ptr_ring_cleanup(struct ptr_ring *r, void (*destroy)(void *))
{
	void *ptr;

	if (destroy)
		while ((ptr = ptr_ring_consume(r)))
			destroy(ptr);
	kfree(r->queue);
}

Contributors

Person               Tokens     Prop   Commits   CommitProp
Michael S. Tsirkin       53  100.00%         2      100.00%
Total                    53  100.00%         2      100.00%

#endif /* _LINUX_PTR_RING_H */

Overall Contributors

Person               Tokens     Prop   Commits   CommitProp
Michael S. Tsirkin     1858   98.94%         5       83.33%
Jason (Hui) Wang         20    1.06%         1       16.67%
Total                  1878  100.00%         6      100.00%
Information contained on this website is for historical information purposes only and does not indicate or represent copyright ownership.
Created with cregit.