cregit-Linux: how code gets into the kernel

Release 4.15: kernel/locking/rwsem-xadd.c

Directory: kernel/locking
// SPDX-License-Identifier: GPL-2.0
/* rwsem.c: R/W semaphores: contention handling functions
 * Written by David Howells (
 * Derived from arch/i386/kernel/semaphore.c
 * Writer lock-stealing by Alex Shi <>
 * and Michel Lespinasse <>
 * Optimistic spinning by Tim Chen <>
 * and Davidlohr Bueso <>. Based on mutexes.
 */
#include <linux/rwsem.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/sched/signal.h>
#include <linux/sched/rt.h>
#include <linux/sched/wake_q.h>
#include <linux/sched/debug.h>
#include <linux/osq_lock.h>

#include "rwsem.h"

/*
 * Guide to the rw_semaphore's count field for common values.
 * (32-bit case illustrated, similar for 64-bit)
 * 0x0000000X   (1) X readers active or attempting lock, no writer waiting
 *                  X = #active_readers + #readers attempting to lock
 *                  (X*ACTIVE_BIAS)
 * 0x00000000   rwsem is unlocked, and no one is waiting for the lock or
 *              attempting to read lock or write lock.
 * 0xffff000X   (1) X readers active or attempting lock, with waiters for lock
 *                  X = #active readers + # readers attempting lock
 *                  (X*ACTIVE_BIAS + WAITING_BIAS)
 *              (2) 1 writer attempting lock, no waiters for lock
 *                  X-1 = #active readers + #readers attempting lock
 *                  ((X-1)*ACTIVE_BIAS + ACTIVE_WRITE_BIAS)
 *              (3) 1 writer active, no waiters for lock
 *                  X-1 = #active readers + #readers attempting lock
 *                  ((X-1)*ACTIVE_BIAS + ACTIVE_WRITE_BIAS)
 * 0xffff0001   (1) 1 reader active or attempting lock, waiters for lock
 *                  (WAITING_BIAS + ACTIVE_BIAS)
 *              (2) 1 writer active or attempting lock, no waiters for lock
 *                  (ACTIVE_WRITE_BIAS)
 * 0xffff0000   (1) There are writers or readers queued but none active
 *                  or in the process of attempting lock.
 *                  (WAITING_BIAS)
 *              Note: writer can attempt to steal lock for this count by adding
 *              ACTIVE_WRITE_BIAS in cmpxchg and checking the old count
 * 0xfffe0001   (1) 1 writer active, or attempting lock. Waiters on queue.
 *                  (ACTIVE_WRITE_BIAS + WAITING_BIAS)
 * Note: Readers attempt to lock by adding ACTIVE_BIAS in down_read and checking
 *       the count becomes more than 0 for successful lock acquisition,
 *       i.e. the case where there are only readers or nobody has lock.
 *       (1st and 2nd case above).
 *       Writers attempt to lock by adding ACTIVE_WRITE_BIAS in down_write and
 *       checking the count becomes ACTIVE_WRITE_BIAS for successful lock
 *       acquisition (i.e. nobody else has lock or attempts lock).  If
 *       unsuccessful, in rwsem_down_write_failed, we'll check to see if there
 *       are only waiters but none active (5th case above), and attempt to
 *       steal the lock.
 */
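
The bias arithmetic described above is easier to see with concrete numbers. The
following stand-alone sketch is not part of rwsem-xadd.c; it assumes the 32-bit
bias values the guide illustrates (RWSEM_ACTIVE_BIAS = 1, RWSEM_ACTIVE_MASK =
0xffff, RWSEM_WAITING_BIAS = -0x10000). The real definitions live in the rwsem
headers and differ on 64-bit.

/* Illustration only: reproduce a few of the count values from the guide. */
#include <stdio.h>

#define ACTIVE_BIAS             0x00000001L
#define ACTIVE_MASK             0x0000ffffL
#define WAITING_BIAS            (-ACTIVE_MASK - 1)      /* 0xffff0000 as a signed value */
#define ACTIVE_READ_BIAS        ACTIVE_BIAS
#define ACTIVE_WRITE_BIAS       (WAITING_BIAS + ACTIVE_BIAS)

int main(void)
{
        long two_readers          = 2 * ACTIVE_READ_BIAS;               /* first case above */
        long one_reader_w_waiters = ACTIVE_READ_BIAS + WAITING_BIAS;    /* 0xffff0001 */
        long one_writer_w_waiters = ACTIVE_WRITE_BIAS + WAITING_BIAS;   /* 0xfffe0001 */

        printf("two readers, no waiters:    0x%08lx\n", two_readers & 0xffffffffL);
        printf("one reader, waiters queued: 0x%08lx\n", one_reader_w_waiters & 0xffffffffL);
        printf("one writer, waiters queued: 0x%08lx\n", one_writer_w_waiters & 0xffffffffL);
        return 0;
}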

/*
 * Initialize an rwsem:
 */
void __init_rwsem(struct rw_semaphore *sem, const char *name,
                  struct lock_class_key *key)
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
        /*
         * Make sure we are not reinitializing a held semaphore:
         */
        debug_check_no_locks_freed((void *)sem, sizeof(*sem));
        lockdep_init_map(&sem->dep_map, name, key, 0);
#endif
        atomic_long_set(&sem->count, RWSEM_UNLOCKED_VALUE);
        raw_spin_lock_init(&sem->wait_lock);
        INIT_LIST_HEAD(&sem->wait_list);
#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
        sem->owner = NULL;
        osq_lock_init(&sem->osq);
#endif
}


Ingo Molnar: 73 tokens (73.00%), 1 commit (12.50%)
Davidlohr Bueso A: 15 tokens (15.00%), 2 commits (25.00%)
Jason Low: 9 tokens (9.00%), 3 commits (37.50%)
Peter Zijlstra: 2 tokens (2.00%), 1 commit (12.50%)
Thomas Gleixner: 1 token (1.00%), 1 commit (12.50%)

EXPORT_SYMBOL(__init_rwsem);

enum rwsem_waiter_type {
        RWSEM_WAITING_FOR_WRITE,
        RWSEM_WAITING_FOR_READ
};

struct rwsem_waiter {
        struct list_head list;
        struct task_struct *task;
        enum rwsem_waiter_type type;
};

enum rwsem_wake_type {
        RWSEM_WAKE_ANY,         /* Wake whatever's at head of wait list */
        RWSEM_WAKE_READERS,     /* Wake readers only */
        RWSEM_WAKE_READ_OWNED   /* Waker thread holds the read lock */
};

/*
 * handle the lock release when processes blocked on it can now run
 * - if we come here from up_xxxx(), then:
 *   - the 'active part' of count (&0x0000ffff) reached 0 (but may have changed)
 *   - the 'waiting part' of count (&0xffff0000) is -ve (and will still be so)
 * - there must be someone on the queue
 * - the wait_lock must be held by the caller
 * - tasks are marked for wakeup, the caller must later invoke wake_up_q()
 *   to actually wakeup the blocked task(s) and drop the reference count,
 *   preferably when the wait_lock is released
 * - woken process blocks are discarded from the list after having task zeroed
 * - writers are only marked woken if downgrading is false
 */
static void __rwsem_mark_wake(struct rw_semaphore *sem,
                              enum rwsem_wake_type wake_type,
                              struct wake_q_head *wake_q)
{
        struct rwsem_waiter *waiter, *tmp;
        long oldcount, woken = 0, adjustment = 0;

        /*
         * Take a peek at the queue head waiter such that we can determine
         * the wakeup(s) to perform.
         */
        waiter = list_first_entry(&sem->wait_list, struct rwsem_waiter, list);

        if (waiter->type == RWSEM_WAITING_FOR_WRITE) {
                if (wake_type == RWSEM_WAKE_ANY) {
                        /*
                         * Mark writer at the front of the queue for wakeup.
                         * Until the task is actually awoken later by the
                         * caller, other writers are able to steal it.
                         * Readers, on the other hand, will block as they
                         * will notice the queued writer.
                         */
                        wake_q_add(wake_q, waiter->task);
                }

                return;
        }

        /*
         * Writers might steal the lock before we grant it to the next reader.
         * We prefer to do the first reader grant before counting readers
         * so we can bail out early if a writer stole the lock.
         */
        if (wake_type != RWSEM_WAKE_READ_OWNED) {
                adjustment = RWSEM_ACTIVE_READ_BIAS;
 try_reader_grant:
                oldcount = atomic_long_fetch_add(adjustment, &sem->count);
                if (unlikely(oldcount < RWSEM_WAITING_BIAS)) {
                        /*
                         * If the count is still less than RWSEM_WAITING_BIAS
                         * after removing the adjustment, it is assumed that
                         * a writer has stolen the lock. We have to undo our
                         * reader grant.
                         */
                        if (atomic_long_add_return(-adjustment, &sem->count) <
                            RWSEM_WAITING_BIAS)
                                return;

                        /* Last active locker left. Retry waking readers. */
                        goto try_reader_grant;
                }
                /*
                 * It is not really necessary to set it to reader-owned here,
                 * but it gives the spinners an early indication that the
                 * readers now have the lock.
                 */
                rwsem_set_reader_owned(sem);
        }

        /*
         * Grant an infinite number of read locks to the readers at the front
         * of the queue. We know that woken will be at least 1 as we accounted
         * for above. Note we increment the 'active part' of the count by the
         * number of readers before waking any processes up.
         */
        list_for_each_entry_safe(waiter, tmp, &sem->wait_list, list) {
                struct task_struct *tsk;

                if (waiter->type == RWSEM_WAITING_FOR_WRITE)
                        break;

                woken++;
                tsk = waiter->task;

                wake_q_add(wake_q, tsk);
                list_del(&waiter->list);
                /*
                 * Ensure that the last operation is setting the reader
                 * waiter to nil such that rwsem_down_read_failed() cannot
                 * race with do_exit() by always holding a reference count
                 * to the task to wakeup.
                 */
                smp_store_release(&waiter->task, NULL);
        }

        adjustment = woken * RWSEM_ACTIVE_READ_BIAS - adjustment;
        if (list_empty(&sem->wait_list)) {
                /* hit end of list above */
                adjustment -= RWSEM_WAITING_BIAS;
        }

        if (adjustment)
                atomic_long_add(adjustment, &sem->count);
}


Davidlohr Bueso A: 79 tokens (31.60%), 3 commits (15.79%)
Michel Lespinasse: 75 tokens (30.00%), 7 commits (36.84%)
Linus Torvalds: 68 tokens (27.20%), 3 commits (15.79%)
Waiman Long: 11 tokens (4.40%), 2 commits (10.53%)
Jason Low: 10 tokens (4.00%), 1 commit (5.26%)
David Howells: 5 tokens (2.00%), 1 commit (5.26%)
Peter Zijlstra: 1 token (0.40%), 1 commit (5.26%)
Alex Shi: 1 token (0.40%), 1 commit (5.26%)

/*
 * Wait for the read lock to be granted
 */
static inline struct rw_semaphore __sched *
__rwsem_down_read_failed_common(struct rw_semaphore *sem, int state)
{
        long count, adjustment = -RWSEM_ACTIVE_READ_BIAS;
        struct rwsem_waiter waiter;
        DEFINE_WAKE_Q(wake_q);

        waiter.task = current;
        waiter.type = RWSEM_WAITING_FOR_READ;

        raw_spin_lock_irq(&sem->wait_lock);
        if (list_empty(&sem->wait_list))
                adjustment += RWSEM_WAITING_BIAS;
        list_add_tail(&waiter.list, &sem->wait_list);

        /* we're now waiting on the lock, but no longer actively locking */
        count = atomic_long_add_return(adjustment, &sem->count);

        /*
         * If there are no active locks, wake the front queued process(es).
         *
         * If there are no writers and we are first in the queue,
         * wake our own waiter to join the existing active readers !
         */
        if (count == RWSEM_WAITING_BIAS ||
            (count > RWSEM_WAITING_BIAS &&
             adjustment != -RWSEM_ACTIVE_READ_BIAS))
                __rwsem_mark_wake(sem, RWSEM_WAKE_ANY, &wake_q);

        raw_spin_unlock_irq(&sem->wait_lock);
        wake_up_q(&wake_q);

        /* wait to be given the lock */
        while (true) {
                set_current_state(state);
                if (!waiter.task)
                        break;
                if (signal_pending_state(state, current)) {
                        raw_spin_lock_irq(&sem->wait_lock);
                        if (waiter.task)
                                goto out_nolock;
                        raw_spin_unlock_irq(&sem->wait_lock);
                        break;
                }
                schedule();
        }

        __set_current_state(TASK_RUNNING);
        return sem;
out_nolock:
        list_del(&waiter.list);
        if (list_empty(&sem->wait_list))
                atomic_long_add(-RWSEM_WAITING_BIAS, &sem->count);
        raw_spin_unlock_irq(&sem->wait_lock);
        __set_current_state(TASK_RUNNING);
        return ERR_PTR(-EINTR);
}


Kirill Tkhai: 96 tokens (37.50%), 1 commit (3.85%)
Linus Torvalds: 68 tokens (26.56%), 5 commits (19.23%)
Michel Lespinasse: 62 tokens (24.22%), 9 commits (34.62%)
Davidlohr Bueso A: 22 tokens (8.59%), 6 commits (23.08%)
Jason Low: 4 tokens (1.56%), 1 commit (3.85%)
David Howells: 1 token (0.39%), 1 commit (3.85%)
Waiman Long: 1 token (0.39%), 1 commit (3.85%)
Livio Soares: 1 token (0.39%), 1 commit (3.85%)
Thomas Gleixner: 1 token (0.39%), 1 commit (3.85%)
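
To make the adjustment logic in __rwsem_down_read_failed_common() concrete, the
stand-alone sketch below (not part of this file) walks one scenario: a reader
fails its fast path while a writer holds the lock and becomes the first waiter.
The bias values are the assumed 32-bit ones from the guide near the top of the
file.

/* Illustration only: count transition when the first reader queues. */
#include <stdio.h>

#define ACTIVE_READ_BIAS        0x00000001L
#define WAITING_BIAS            (-0x0000ffffL - 1)
#define ACTIVE_WRITE_BIAS       (WAITING_BIAS + ACTIVE_READ_BIAS)

int main(void)
{
        /* A writer holds the lock and down_read() has already added its bias. */
        long count = ACTIVE_WRITE_BIAS + ACTIVE_READ_BIAS;

        /* The failing reader undoes its bias and, as first waiter, adds WAITING_BIAS. */
        long adjustment = -ACTIVE_READ_BIAS + WAITING_BIAS;

        count += adjustment;

        /* 0xfffe0001: one writer active, waiters on queue (see the guide above). */
        printf("count after queuing: 0x%08lx\n", count & 0xffffffffL);
        return 0;
}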

__visible struct rw_semaphore * __sched
rwsem_down_read_failed(struct rw_semaphore *sem)
{
        return __rwsem_down_read_failed_common(sem, TASK_UNINTERRUPTIBLE);
}


Kirill Tkhai: 21 tokens (95.45%), 1 commit (50.00%)
Linus Torvalds: 1 token (4.55%), 1 commit (50.00%)

__visible struct rw_semaphore * __sched
rwsem_down_read_failed_killable(struct rw_semaphore *sem)
{
        return __rwsem_down_read_failed_common(sem, TASK_KILLABLE);
}


Kirill Tkhai: 22 tokens (100.00%), 1 commit (100.00%)

EXPORT_SYMBOL(rwsem_down_read_failed_killable);

/*
 * This function must be called with the sem->wait_lock held to prevent
 * race conditions between checking the rwsem wait list and setting the
 * sem->count accordingly.
 */
static inline bool rwsem_try_write_lock(long count, struct rw_semaphore *sem)
{
        /*
         * Avoid trying to acquire write lock if count isn't RWSEM_WAITING_BIAS.
         */
        if (count != RWSEM_WAITING_BIAS)
                return false;

        /*
         * Acquire the lock by trying to set it to ACTIVE_WRITE_BIAS. If there
         * are other tasks on the wait list, we need to add on WAITING_BIAS.
         */
        count = list_is_singular(&sem->wait_list) ?
                        RWSEM_ACTIVE_WRITE_BIAS :
                        RWSEM_ACTIVE_WRITE_BIAS + RWSEM_WAITING_BIAS;

        if (atomic_long_cmpxchg_acquire(&sem->count, RWSEM_WAITING_BIAS, count)
                                                        == RWSEM_WAITING_BIAS) {
                rwsem_set_owner(sem);
                return true;
        }

        return false;
}


Davidlohr Bueso A: 27 tokens (38.03%), 2 commits (25.00%)
Jason Low: 25 tokens (35.21%), 2 commits (25.00%)
Linus Torvalds: 10 tokens (14.08%), 3 commits (37.50%)
Michel Lespinasse: 9 tokens (12.68%), 1 commit (12.50%)
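
rwsem_try_write_lock() acquires the lock with a single compare-and-exchange on
the count: the swap only succeeds if the count is still RWSEM_WAITING_BIAS,
i.e. nobody became active since it was sampled. The sketch below is a
user-space analogue of that steal-by-cmpxchg pattern using C11 atomics; the
names and constants are hypothetical, and the kernel itself uses
atomic_long_cmpxchg_acquire() on sem->count.

/* Illustration only: steal a "waiters queued, none active" lock via cmpxchg. */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define WAITING_BIAS            (-0x0000ffffL - 1)
#define ACTIVE_WRITE_BIAS       (WAITING_BIAS + 1)

static atomic_long count = ATOMIC_VAR_INIT(WAITING_BIAS);   /* waiters queued, none active */

/* Move count from "waiters only" to "writer active (+ remaining waiters)". */
static bool try_write_lock_steal(bool other_waiters_queued)
{
        long expected = WAITING_BIAS;
        long newval = other_waiters_queued ? ACTIVE_WRITE_BIAS + WAITING_BIAS
                                           : ACTIVE_WRITE_BIAS;

        /* Succeeds only if nobody changed count since it was sampled. */
        return atomic_compare_exchange_strong(&count, &expected, newval);
}

int main(void)
{
        printf("first attempt:  %s\n", try_write_lock_steal(true) ? "stolen" : "lost race");
        printf("second attempt: %s\n", try_write_lock_steal(true) ? "stolen" : "lost race");
        return 0;
}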

#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
/*
 * Try to acquire write lock before the writer has been put on wait queue.
 */
static inline bool rwsem_try_write_lock_unqueued(struct rw_semaphore *sem)
{
        long old, count = atomic_long_read(&sem->count);

        while (true) {
                if (!(count == 0 || count == RWSEM_WAITING_BIAS))
                        return false;

                old = atomic_long_cmpxchg_acquire(&sem->count, count,
                                      count + RWSEM_ACTIVE_WRITE_BIAS);
                if (old == count) {
                        rwsem_set_owner(sem);
                        return true;
                }

                count = old;
        }
}


Davidlohr Bueso A: 54 tokens (65.06%), 3 commits (37.50%)
Michel Lespinasse: 26 tokens (31.33%), 4 commits (50.00%)
Jason Low: 3 tokens (3.61%), 1 commit (12.50%)

static inline bool rwsem_can_spin_on_owner(struct rw_semaphore *sem)
{
        struct task_struct *owner;
        bool ret = true;

        if (need_resched())
                return false;

        rcu_read_lock();
        owner = READ_ONCE(sem->owner);
        if (!rwsem_owner_is_writer(owner)) {
                /*
                 * Don't spin if the rwsem is readers owned.
                 */
                ret = !rwsem_owner_is_reader(owner);
                goto done;
        }

        /*
         * To guard against lock holder preemption, skip spinning if the
         * owning task is not on a CPU or its CPU is preempted.
         */
        ret = owner->on_cpu && !vcpu_is_preempted(task_cpu(owner));
done:
        rcu_read_unlock();
        return ret;
}


Davidlohr Bueso A: 66 tokens (75.00%), 3 commits (37.50%)
Pan Xinhui: 10 tokens (11.36%), 1 commit (12.50%)
Waiman Long: 9 tokens (10.23%), 1 commit (12.50%)
Michel Lespinasse: 2 tokens (2.27%), 2 commits (25.00%)
Jason Low: 1 token (1.14%), 1 commit (12.50%)

/*
 * Return true only if we can still spin on the owner field of the rwsem.
 */
static noinline bool rwsem_spin_on_owner(struct rw_semaphore *sem)
{
        struct task_struct *owner = READ_ONCE(sem->owner);

        if (!rwsem_owner_is_writer(owner))
                goto out;

        rcu_read_lock();
        while (sem->owner == owner) {
                /*
                 * Ensure we emit the owner->on_cpu, dereference _after_
                 * checking sem->owner still matches owner, if that fails,
                 * owner might point to free()d memory, if it still matches,
                 * the rcu_read_lock() ensures the memory stays valid.
                 */
                barrier();

                /*
                 * abort spinning when need_resched or owner is not running or
                 * owner's cpu is preempted.
                 */
                if (!owner->on_cpu || need_resched() ||
                                vcpu_is_preempted(task_cpu(owner))) {
                        rcu_read_unlock();
                        return false;
                }

                cpu_relax();
        }
        rcu_read_unlock();
out:
        /*
         * If there is a new owner or the owner is not set, we continue
         * spinning.
         */
        return !rwsem_owner_is_reader(READ_ONCE(sem->owner));
}


Davidlohr Bueso A: 50 tokens (50.00%), 2 commits (28.57%)
Waiman Long: 28 tokens (28.00%), 2 commits (28.57%)
Jason Low: 12 tokens (12.00%), 1 commit (14.29%)
Pan Xinhui: 9 tokens (9.00%), 1 commit (14.29%)
Christian Bornträger: 1 token (1.00%), 1 commit (14.29%)
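
rwsem_spin_on_owner() keeps spinning only while the same writer is observed in
sem->owner and that writer is still running on a CPU. User space has no
"is that thread on a CPU?" test, so the sketch below (hypothetical names,
C11 atomics plus pthreads, not part of this file) only approximates the idea:
it spins while the observed owner value is unchanged and bails out after a
bounded number of iterations, standing in for the need_resched() and
vcpu_is_preempted() checks.

/* Illustration only: a rough user-space analogue of spinning on an owner field. */
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static atomic_intptr_t owner;   /* 0 means "no writer owns the lock" */

/* Spin while the same non-zero owner is observed; false means "give up and queue". */
static bool spin_on_owner(intptr_t seen, long max_spins)
{
        while (atomic_load(&owner) == seen && seen != 0) {
                if (max_spins-- == 0)
                        return false;   /* rough stand-in for need_resched() firing */
        }
        /* Owner changed or released the lock: worth another trylock attempt. */
        return true;
}

static void *release_later(void *arg)
{
        (void)arg;
        usleep(1000);                   /* the "owner" runs for a little while */
        atomic_store(&owner, 0);        /* and then releases the lock */
        return NULL;
}

int main(void)
{
        pthread_t t;

        atomic_store(&owner, (intptr_t)1);      /* pretend a writer owns the lock */
        pthread_create(&t, NULL, release_later, NULL);

        printf("spinner decision: %s\n",
               spin_on_owner(atomic_load(&owner), 2000000000L) ? "retry trylock" : "queue up");

        pthread_join(t, NULL);
        return 0;
}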

static bool rwsem_optimistic_spin(struct rw_semaphore *sem)
{
        bool taken = false;

        preempt_disable();

        /* sem->wait_lock should not be held when doing optimistic spinning */
        if (!rwsem_can_spin_on_owner(sem))
                goto done;

        if (!osq_lock(&sem->osq))
                goto done;

        /*
         * Optimistically spin on the owner field and attempt to acquire the
         * lock whenever the owner changes. Spinning will be stopped when:
         *  1) the owning writer isn't running; or
         *  2) readers own the lock as we can't determine if they are
         *     actively running or not.
         */
        while (rwsem_spin_on_owner(sem)) {
                /*
                 * Try to acquire the lock
                 */
                if (rwsem_try_write_lock_unqueued(sem)) {
                        taken = true;
                        break;
                }

                /*
                 * When there's no owner, we might have preempted between the
                 * owner acquiring the lock and setting the owner field. If
                 * we're an RT task that will live-lock because we won't let
                 * the owner complete.
                 */
                if (!sem->owner && (need_resched() || rt_task(current)))
                        break;

                /*
                 * The cpu_relax() call is a compiler barrier which forces
                 * everything in this loop to be re-loaded. We don't need
                 * memory barriers as we'll eventually observe the right
                 * values at the cost of a few extra spins.
                 */
                cpu_relax();
        }
        osq_unlock(&sem->osq);
done:
        preempt_enable();
        return taken;
}


Davidlohr Bueso A: 103 tokens (94.50%), 1 commit (33.33%)
Waiman Long: 5 tokens (4.59%), 1 commit (33.33%)
Christian Bornträger: 1 token (0.92%), 1 commit (33.33%)

/*
 * Return true if the rwsem has active spinner
 */
static inline bool rwsem_has_spinner(struct rw_semaphore *sem)
{
        return osq_is_locked(&sem->osq);
}


Waiman Long: 21 tokens (100.00%), 1 commit (100.00%)

#else
static bool rwsem_optimistic_spin(struct rw_semaphore *sem)
{
        return false;
}


Davidlohr Bueso A: 14 tokens (100.00%), 1 commit (100.00%)

static inline bool rwsem_has_spinner(struct rw_semaphore *sem)
{
        return false;
}


Waiman Long: 15 tokens (100.00%), 1 commit (100.00%)

#endif

/*
 * Wait until we successfully acquire the write lock
 */
static inline struct rw_semaphore *
__rwsem_down_write_failed_common(struct rw_semaphore *sem, int state)
{
        long count;
        bool waiting = true; /* any queued threads before us */
        struct rwsem_waiter waiter;
        struct rw_semaphore *ret = sem;
        DEFINE_WAKE_Q(wake_q);

        /* undo write bias from down_write operation, stop active locking */
        count = atomic_long_sub_return(RWSEM_ACTIVE_WRITE_BIAS, &sem->count);

        /* do optimistic spinning and steal lock if possible */
        if (rwsem_optimistic_spin(sem))
                return sem;

        /*
         * Optimistic spinning failed, proceed to the slowpath
         * and block until we can acquire the sem.
         */
        waiter.task = current;
        waiter.type = RWSEM_WAITING_FOR_WRITE;

        raw_spin_lock_irq(&sem->wait_lock);

        /* account for this before adding a new element to the list */
        if (list_empty(&sem->wait_list))
                waiting = false;

        list_add_tail(&waiter.list, &sem->wait_list);

        /* we're now waiting on the lock, but no longer actively locking */
        if (waiting) {
                count = atomic_long_read(&sem->count);

                /*
                 * If there were already threads queued before us and there are
                 * no active writers, the lock must be read owned; so we try to
                 * wake any read locks that were queued ahead of us.
                 */
                if (count > RWSEM_WAITING_BIAS) {
                        __rwsem_mark_wake(sem, RWSEM_WAKE_READERS, &wake_q);
                        /*
                         * The wakeup is normally called _after_ the wait_lock
                         * is released, but given that we are proactively waking
                         * readers we can deal with the wake_q overhead as it is
                         * similar to releasing and taking the wait_lock again
                         * for attempting rwsem_try_write_lock().
                         */
                        wake_up_q(&wake_q);

                        /*
                         * Reinitialize wake_q after use.
                         */
                        wake_q_init(&wake_q);
                }

        } else
                count = atomic_long_add_return(RWSEM_WAITING_BIAS, &sem->count);

        /* wait until we successfully acquire the lock */
        set_current_state(state);
        while (true) {
                if (rwsem_try_write_lock(count, sem))
                        break;
                raw_spin_unlock_irq(&sem->wait_lock);

                /* Block until there are no active lockers. */
                do {
                        if (signal_pending_state(state, current))
                                goto out_nolock;

                        schedule();
                        set_current_state(state);
                } while ((count = atomic_long_read(&sem->count)) & RWSEM_ACTIVE_MASK);

                raw_spin_lock_irq(&sem->wait_lock);
        }
        __set_current_state(TASK_RUNNING);
        list_del(&waiter.list);
        raw_spin_unlock_irq(&sem->wait_lock);

        return ret;

out_nolock:
        __set_current_state(TASK_RUNNING);
        raw_spin_lock_irq(&sem->wait_lock);
        list_del(&waiter.list);
        if (list_empty(&sem->wait_list))
                atomic_long_add(-RWSEM_WAITING_BIAS, &sem->count);
        else
                __rwsem_mark_wake(sem, RWSEM_WAKE_ANY, &wake_q);
        raw_spin_unlock_irq(&sem->wait_lock);
        wake_up_q(&wake_q);

        return ERR_PTR(-EINTR);
}


Davidlohr Bueso A: 196 tokens (54.90%), 2 commits (16.67%)
Peter Zijlstra: 63 tokens (17.65%), 1 commit (8.33%)
Michel Lespinasse: 42 tokens (11.76%), 4 commits (33.33%)
Michal Hocko: 29 tokens (8.12%), 1 commit (8.33%)
Jason Low: 18 tokens (5.04%), 1 commit (8.33%)
Waiman Long: 8 tokens (2.24%), 2 commits (16.67%)
Andrew Morton: 1 token (0.28%), 1 commit (8.33%)

__visible struct rw_semaphore * __sched
rwsem_down_write_failed(struct rw_semaphore *sem)
{
        return __rwsem_down_write_failed_common(sem, TASK_UNINTERRUPTIBLE);
}


Michal Hocko: 19 tokens (86.36%), 1 commit (33.33%)
Linus Torvalds: 2 tokens (9.09%), 1 commit (33.33%)
Michel Lespinasse: 1 token (4.55%), 1 commit (33.33%)

__visible struct rw_semaphore * __sched
rwsem_down_write_failed_killable(struct rw_semaphore *sem)
{
        return __rwsem_down_write_failed_common(sem, TASK_KILLABLE);
}


Michal Hocko: 22 tokens (100.00%), 1 commit (100.00%)

EXPORT_SYMBOL(rwsem_down_write_failed_killable);

/*
 * handle waking up a waiter on the semaphore
 * - up_read/up_write has decremented the active part of count if we come here
 */
__visible struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem)
{
        unsigned long flags;
        DEFINE_WAKE_Q(wake_q);

        /*
         * __rwsem_down_write_failed_common(sem)
         *   rwsem_optimistic_spin(sem)
         *     osq_unlock(sem->osq)
         *   ...
         *   atomic_long_add_return(&sem->count)
         *
         *      - VS -
         *
         *              __up_write()
         *                if (atomic_long_sub_return_release(&sem->count) < 0)
         *                  rwsem_wake(sem)
         *                    osq_is_locked(&sem->osq)
         *
         * And __up_write() must observe !osq_is_locked() when it observes the
         * atomic_long_add_return() in order to not miss a wakeup.
         *
         * This boils down to:
         *
         * [S.rel] X = 1                [RmW] r0 = (Y += 0)
         *         MB                         RMB
         * [RmW]   Y += 1               [L]   r1 = X
         *
         * exists (r0=1 /\ r1=0)
         */
        smp_rmb();

        /*
         * If a spinner is present, it is not necessary to do the wakeup.
         * Try to do wakeup only if the trylock succeeds to minimize
         * spinlock contention which may introduce too much delay in the
         * unlock operation.
         *
         *    spinning writer           up_write/up_read caller
         *    ---------------           -----------------------
         * [S]   osq_unlock()           [L]   osq
         *       MB                           RMB
         * [RmW] rwsem_try_write_lock() [RmW] spin_trylock(wait_lock)
         *
         * Here, it is important to make sure that there won't be a missed
         * wakeup while the rwsem is free and the only spinning writer goes
         * to sleep without taking the rwsem. Even when the spinning writer
         * is just going to break out of the waiting loop, it will still do
         * a trylock in rwsem_down_write_failed() before sleeping. IOW, if
         * rwsem_has_spinner() is true, it will guarantee at least one
         * trylock attempt on the rwsem later on.
         */
        if (rwsem_has_spinner(sem)) {
                /*
                 * The smp_rmb() here is to make sure that the spinner
                 * state is consulted before reading the wait_lock.
                 */
                smp_rmb();
                if (!raw_spin_trylock_irqsave(&sem->wait_lock, flags))
                        return sem;
                goto locked;
        }
        raw_spin_lock_irqsave(&sem->wait_lock, flags);
locked:

        if (!list_empty(&sem->wait_list))
                __rwsem_mark_wake(sem, RWSEM_WAKE_ANY, &wake_q);

        raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
        wake_up_q(&wake_q);

        return sem;
}


Linus Torvalds: 44 tokens (39.64%), 3 commits (25.00%)
Waiman Long: 36 tokens (32.43%), 2 commits (16.67%)
Davidlohr Bueso A: 14 tokens (12.61%), 1 commit (8.33%)
David Howells: 9 tokens (8.11%), 2 commits (16.67%)
Prateek Sood: 4 tokens (3.60%), 1 commit (8.33%)
Thomas Gleixner: 2 tokens (1.80%), 1 commit (8.33%)
Michel Lespinasse: 1 token (0.90%), 1 commit (8.33%)
Andi Kleen: 1 token (0.90%), 1 commit (8.33%)

EXPORT_SYMBOL(rwsem_wake);

/*
 * downgrade a write lock into a read lock
 * - caller incremented waiting part of count and discovered it still negative
 * - just wake up any readers at the front of the queue
 */
__visible struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem)
{
        unsigned long flags;
        DEFINE_WAKE_Q(wake_q);

        raw_spin_lock_irqsave(&sem->wait_lock, flags);

        if (!list_empty(&sem->wait_list))
                __rwsem_mark_wake(sem, RWSEM_WAKE_READ_OWNED, &wake_q);

        raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
        wake_up_q(&wake_q);

        return sem;
}


David Howells: 53 tokens (73.61%), 2 commits (28.57%)
Davidlohr Bueso A: 14 tokens (19.44%), 1 commit (14.29%)
Thomas Gleixner: 2 tokens (2.78%), 1 commit (14.29%)
Michel Lespinasse: 1 token (1.39%), 1 commit (14.29%)
Andi Kleen: 1 token (1.39%), 1 commit (14.29%)
Waiman Long: 1 token (1.39%), 1 commit (14.29%)


Overall Contributors

Davidlohr Bueso A: 690 tokens (37.18%), 15 commits (20.27%)
Michel Lespinasse: 243 tokens (13.09%), 16 commits (21.62%)
Linus Torvalds: 215 tokens (11.58%), 6 commits (8.11%)
Kirill Tkhai: 144 tokens (7.76%), 1 commit (1.35%)
Waiman Long: 137 tokens (7.38%), 6 commits (8.11%)
Ingo Molnar: 88 tokens (4.74%), 4 commits (5.41%)
Jason Low: 83 tokens (4.47%), 6 commits (8.11%)
Michal Hocko: 75 tokens (4.04%), 1 commit (1.35%)
David Howells: 72 tokens (3.88%), 3 commits (4.05%)
Peter Zijlstra: 66 tokens (3.56%), 3 commits (4.05%)
Pan Xinhui: 19 tokens (1.02%), 1 commit (1.35%)
Thomas Gleixner: 6 tokens (0.32%), 1 commit (1.35%)
Prateek Sood: 4 tokens (0.22%), 1 commit (1.35%)
Andrew Morton: 3 tokens (0.16%), 2 commits (2.70%)
Andi Kleen: 2 tokens (0.11%), 1 commit (1.35%)
Christian Bornträger: 2 tokens (0.11%), 1 commit (1.35%)
Nicholas Piggin: 2 tokens (0.11%), 1 commit (1.35%)
Geert Uytterhoeven: 1 token (0.05%), 1 commit (1.35%)
Alex Shi: 1 token (0.05%), 1 commit (1.35%)
Greg Kroah-Hartman: 1 token (0.05%), 1 commit (1.35%)
Tim Chen: 1 token (0.05%), 1 commit (1.35%)
Livio Soares: 1 token (0.05%), 1 commit (1.35%)
Created with cregit.