tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/locks.h (Linux v4.18)

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef LOCKS_H
#define LOCKS_H

#include <errno.h>	/* EBUSY, used by the pthread lock_impl_trylock() */
#include <limits.h>
#include <pthread.h>
#include <stdbool.h>

#include "assume.h"
#include "bug_on.h"
#include "preempt.h"

int nondet_int(void);
#define __acquire(x)
#define __acquires(x)
#define __release(x)
#define __releases(x)
/* Only use one lock mechanism. Select which one. */
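/*
 * Hypothetical build sketch (not from the original file): the pthread
 * variant is selected by defining PTHREAD_LOCK on the command line,
 * e.g. "gcc -DPTHREAD_LOCK ...". Builds without it fall through to the
 * CBMC spinlock model below.
 */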
#ifdef PTHREAD_LOCK
struct lock_impl {
	pthread_mutex_t mutex;
};
static inline void lock_impl_lock(struct lock_impl *lock)
{
	BUG_ON(pthread_mutex_lock(&lock->mutex));
}

static inline void lock_impl_unlock(struct lock_impl *lock)
{
	BUG_ON(pthread_mutex_unlock(&lock->mutex));
}

static inline bool lock_impl_trylock(struct lock_impl *lock)
{
	int err = pthread_mutex_trylock(&lock->mutex);

	if (!err)
		return true;
	else if (err == EBUSY)
		return false;
	BUG();
}

static inline void lock_impl_init(struct lock_impl *lock)
{
	pthread_mutex_init(&lock->mutex, NULL);
}

#define LOCK_IMPL_INITIALIZER {.mutex = PTHREAD_MUTEX_INITIALIZER}

#else /* !defined(PTHREAD_LOCK) */
/* Spinlock that assumes that it always gets the lock immediately. */
struct lock_impl {
	bool locked;
};
static inline bool lock_impl_trylock(struct lock_impl *lock)
{
#ifdef RUN
	/* TODO: Should this be a test and set? */
	return __sync_bool_compare_and_swap(&lock->locked, false, true);
#else
	__CPROVER_atomic_begin();
	bool old_locked = lock->locked;
	lock->locked = true;
	__CPROVER_atomic_end();
	/* Minimal barrier to prevent accesses leaking out of lock. */
	__CPROVER_fence("RRfence", "RWfence");
	return !old_locked;
#endif
}

static inline void lock_impl_lock(struct lock_impl *lock)
{
	/*
	 * CBMC doesn't support busy waiting, so just assume that the
	 * lock is available.
	 */
	assume(lock_impl_trylock(lock));

	/*
	 * If the lock was already held by this thread then the assumption
	 * is unsatisfiable (deadlock).
	 */
}
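
/*
 * Illustrative sketch (hypothetical caller, not part of the model): a
 * self-deadlock is pruned rather than reported, because the second
 * trylock can never succeed on the same path:
 *
 *	struct lock_impl l;
 *
 *	lock_impl_init(&l);
 *	lock_impl_lock(&l);
 *	lock_impl_lock(&l);	// assume(false): CBMC discards this trace
 */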

static inline void lock_impl_unlock(struct lock_impl *lock)
{
#ifdef RUN
	BUG_ON(!__sync_bool_compare_and_swap(&lock->locked, true, false));
#else
	/* Minimal barrier to prevent accesses leaking out of lock. */
	__CPROVER_fence("RWfence", "WWfence");
	__CPROVER_atomic_begin();
	bool old_locked = lock->locked;
	lock->locked = false;
	__CPROVER_atomic_end();
	BUG_ON(!old_locked);
#endif
}

static inline void lock_impl_init(struct lock_impl *lock)
{
	lock->locked = false;
}

#define LOCK_IMPL_INITIALIZER {.locked = false}

#endif /* !defined(PTHREAD_LOCK) */
/*
 * Implement spinlocks using the lock mechanism. Wrap the lock to prevent mixing
 * locks of different types.
 */
typedef struct {
	struct lock_impl internal_lock;
} spinlock_t;
#define SPIN_LOCK_UNLOCKED {.internal_lock = LOCK_IMPL_INITIALIZER}
#define __SPIN_LOCK_UNLOCKED(x) SPIN_LOCK_UNLOCKED
#define DEFINE_SPINLOCK(x) spinlock_t x = SPIN_LOCK_UNLOCKED
static inline void spin_lock_init(spinlock_t *lock)
{
	lock_impl_init(&lock->internal_lock);
}
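
/*
 * Sketch of what the wrapping buys (hypothetical misuse): the distinct
 * struct type makes the compiler reject mixing the two lock APIs:
 *
 *	DEFINE_SPINLOCK(sl);
 *
 *	spin_lock(&sl);		// OK
 *	lock_impl_lock(&sl);	// compile error: incompatible pointer type
 */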

static inline void spin_lock(spinlock_t *lock)
{
	/*
	 * Spin locks also need to be removed in order to eliminate all
	 * memory barriers. They are only used by the write side anyway.
	 */
#ifndef NO_SYNC_SMP_MB
	preempt_disable();
	lock_impl_lock(&lock->internal_lock);
#endif
}

static inline void spin_unlock(spinlock_t *lock)
{
#ifndef NO_SYNC_SMP_MB
	lock_impl_unlock(&lock->internal_lock);
	preempt_enable();
#endif
}
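
/*
 * Hypothetical build note: defining NO_SYNC_SMP_MB (e.g. via
 * -DNO_SYNC_SMP_MB) compiles the spinlock bodies out entirely; that is
 * only sound for configurations that also eliminate the corresponding
 * memory barriers.
 */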

/* Don't bother with interrupts */
#define spin_lock_irq(lock) spin_lock(lock)
#define spin_unlock_irq(lock) spin_unlock(lock)
#define spin_lock_irqsave(lock, flags) spin_lock(lock)
#define spin_unlock_irqrestore(lock, flags) spin_unlock(lock)
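
/*
 * Usage sketch (hypothetical caller): flags is accepted but ignored,
 * since interrupts are not modeled here.
 *
 *	static DEFINE_SPINLOCK(update_lock);
 *	unsigned long flags;
 *
 *	spin_lock_irqsave(&update_lock, flags);
 *	// ... update shared state ...
 *	spin_unlock_irqrestore(&update_lock, flags);
 */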
/*
 * This is supposed to return an int, but I think that a bool should work as
 * well.
 */
static inline bool spin_trylock(spinlock_t *lock)
{
#ifndef NO_SYNC_SMP_MB
	preempt_disable();
	return lock_impl_trylock(&lock->internal_lock);
#else
	return true;
#endif
}

struct completion {
	/* Hopefully this won't overflow. */
	unsigned int count;
};
#define COMPLETION_INITIALIZER(x) {.count = 0}
#define DECLARE_COMPLETION(x) struct completion x = COMPLETION_INITIALIZER(x)
#define DECLARE_COMPLETION_ONSTACK(x) DECLARE_COMPLETION(x)
static inline void init_completion(struct completion *c)
{
	c->count = 0;
}

static inline void wait_for_completion(struct completion *c)
{
	unsigned int prev_count = __sync_fetch_and_sub(&c->count, 1);
	assume(prev_count);
}

static inline void complete(struct completion *c)
{
	unsigned int prev_count = __sync_fetch_and_add(&c->count, 1);
	BUG_ON(prev_count == UINT_MAX);
}
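
/*
 * Usage sketch (hypothetical threads): each wait_for_completion()
 * consumes one count, so it can only proceed on traces where a
 * matching complete() has already run.
 *
 *	static DECLARE_COMPLETION(done);
 *
 *	// worker thread:
 *	complete(&done);
 *
 *	// waiting thread:
 *	wait_for_completion(&done);
 */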

/* This function probably isn't very useful for CBMC. */
static inline bool try_wait_for_completion(struct completion *c)
{
	BUG();
}

static inline bool completion_done(struct completion *c)
{
	return c->count;
}

/* TODO: Implement complete_all */
static inline void complete_all(struct completion *c)
{
	BUG();
}

#endif