Contributors: 8

Author               Tokens  Token Proportion  Commits  Commit Proportion
Linus Torvalds           55            37.41%        5             41.67%
Waiman Long              45            30.61%        1              8.33%
Steven Whitehouse        25            17.01%        1              8.33%
Peter Zijlstra           10             6.80%        1              8.33%
Andreas Gruenbacher       9             6.12%        1              8.33%
Yaowei Bai                1             0.68%        1              8.33%
Kirill A. Shutemov        1             0.68%        1              8.33%
Greg Kroah-Hartman        1             0.68%        1              8.33%
Total                   147                         12


/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_LOCKREF_H
#define __LINUX_LOCKREF_H

/*
 * Locked reference counts.
 *
 * These are different from just plain atomic refcounts in that they
 * are atomic with respect to the spinlock that goes with them.  In
 * particular, there can be implementations that don't actually get
 * the spinlock for the common decrement/increment operations, but they
 * still have to check that the operation is done semantically as if
 * the spinlock had been taken (using a cmpxchg operation that covers
 * both the lock and the count word, or using memory transactions, for
 * example).
 */
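
/*
 * Roughly, the lockless fast path sketched above (modeled on the
 * CMPXCHG_LOOP macro in lib/lockref.c; the loop below is a simplified
 * illustration, not the kernel's exact code) increments like so:
 *
 *	struct lockref old, new;
 *	u64 prev;
 *
 *	old.lock_count = READ_ONCE(lockref->lock_count);
 *	while (arch_spin_value_unlocked(old.lock.rlock.raw_lock)) {
 *		prev = old.lock_count;
 *		new.lock_count = old.lock_count;
 *		new.count++;
 *		old.lock_count = cmpxchg64_relaxed(&lockref->lock_count,
 *						   prev, new.lock_count);
 *		if (old.lock_count == prev)
 *			return;
 *		cpu_relax();
 *	}
 *
 * If the lock is seen held, or the cmpxchg keeps failing, the caller
 * falls back to spin_lock(), a plain count update, and spin_unlock().
 */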

#include <linux/spinlock.h>
#include <generated/bounds.h>

#define USE_CMPXCHG_LOCKREF \
	(IS_ENABLED(CONFIG_ARCH_USE_CMPXCHG_LOCKREF) && \
	 IS_ENABLED(CONFIG_SMP) && SPINLOCK_SIZE <= 4)
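
/*
 * The cmpxchg fast path operates on the lock and the count as one
 * 64-bit word, which is why it additionally requires SPINLOCK_SIZE <= 4:
 * a spinlock_t of at most 4 bytes plus the 4-byte count fits in the
 * aligned_u64 lock_count below.
 */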

struct lockref {
	union {
#if USE_CMPXCHG_LOCKREF
		aligned_u64 lock_count;
#endif
		struct {
			spinlock_t lock;
			int count;
		};
	};
};
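
/*
 * A lockref is meant to be embedded in a larger object and initialized
 * before use; a minimal sketch (struct my_obj is hypothetical, not a
 * kernel type):
 *
 *	struct my_obj {
 *		struct lockref ref;
 *	};
 *
 *	spin_lock_init(&obj->ref.lock);
 *	obj->ref.count = 1;
 */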

extern void lockref_get(struct lockref *);		/* unconditionally increment */
extern int lockref_put_return(struct lockref *);	/* decrement; new count, or -1 if the lockless update failed */
extern int lockref_get_not_zero(struct lockref *);	/* increment unless zero/dead; 1 on success */
extern int lockref_put_not_zero(struct lockref *);	/* decrement unless it would hit zero; 1 on success */
extern int lockref_get_or_lock(struct lockref *);	/* increment, or return 0 with the lock held */
extern int lockref_put_or_lock(struct lockref *);	/* decrement, or return 0 with the lock held */

extern void lockref_mark_dead(struct lockref *);	/* caller must hold the lock */
extern int lockref_get_not_dead(struct lockref *);	/* increment unless dead; 1 on success */
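
/*
 * A typical lifecycle (a sketch with a hypothetical object and
 * my_obj_release() destructor) pairs a conditional get with a put that
 * falls back to the lock for the last reference:
 *
 *	if (!lockref_get_not_dead(&obj->ref))
 *		return NULL;
 *
 * and when dropping the reference:
 *
 *	if (lockref_put_or_lock(&obj->ref))
 *		return;
 *	lockref_mark_dead(&obj->ref);
 *	spin_unlock(&obj->ref.lock);
 *	my_obj_release(obj);
 *
 * lockref_put_or_lock() returns 1 after a lockless decrement; a 0
 * return means the count would have dropped to zero and the spinlock
 * is now held, so the object can be marked dead and released under it.
 */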

/* Must be called under spinlock for reliable results */
static inline bool __lockref_is_dead(const struct lockref *l)
{
	return ((int)l->count < 0);
}
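
/*
 * Sketch: the dead state is only stable while the lock is held, so a
 * caller that must not resurrect a dying object checks under the lock
 * (the error value here is illustrative):
 *
 *	spin_lock(&obj->ref.lock);
 *	if (__lockref_is_dead(&obj->ref)) {
 *		spin_unlock(&obj->ref.lock);
 *		return -ESTALE;
 *	}
 *	obj->ref.count++;
 *	spin_unlock(&obj->ref.lock);
 */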

#endif /* __LINUX_LOCKREF_H */