// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Reset Controller framework
 *
 * Copyright 2013 Philipp Zabel, Pengutronix
 */
#include <linux/atomic.h>
#include <linux/cleanup.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/gpio/driver.h>
#include <linux/gpio/machine.h>
#include <linux/idr.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/acpi.h>
#include <linux/platform_device.h>
#include <linux/reset.h>
#include <linux/reset-controller.h>
#include <linux/slab.h>

static DEFINE_MUTEX(reset_list_mutex);
static LIST_HEAD(reset_controller_list);

static DEFINE_MUTEX(reset_lookup_mutex);
static LIST_HEAD(reset_lookup_list);

/* Protects reset_gpio_lookup_list */
static DEFINE_MUTEX(reset_gpio_lookup_mutex);
static LIST_HEAD(reset_gpio_lookup_list);
static DEFINE_IDA(reset_gpio_ida);

/**
 * struct reset_control - a reset control
 * @rcdev: a pointer to the reset controller device
 *         this reset control belongs to
 * @list: list entry for the rcdev's reset controller list
 * @id: ID of the reset controller in the reset
 *      controller device
 * @refcnt: Number of gets of this reset_control
 * @acquired: Only one reset_control may be acquired for a given rcdev and id.
 * @shared: Is this a shared (1), or an exclusive (0) reset_control?
 * @array: Is this an array of reset controls (1)?
 * @deassert_count: Number of times this reset line has been deasserted
 * @triggered_count: Number of times this reset line has been reset. Currently
 *                   only used for shared resets, which means that the value
 *                   will be either 0 or 1.
 */
struct reset_control {
	struct reset_controller_dev *rcdev;
	struct list_head list;
	unsigned int id;
	struct kref refcnt;
	bool acquired;
	bool shared;
	bool array;
	atomic_t deassert_count;
	atomic_t triggered_count;
};

/**
 * struct reset_control_array - an array of reset controls
 * @base: reset control for compatibility with reset control API functions
 * @num_rstcs: number of reset controls
 * @rstc: array of reset controls
 */
struct reset_control_array {
	struct reset_control base;
	unsigned int num_rstcs;
	struct reset_control *rstc[] __counted_by(num_rstcs);
};

/**
 * struct reset_gpio_lookup - lookup key for ad-hoc created reset-gpio devices
 * @of_args: phandle to the reset controller with all the args like GPIO number
 * @list: list entry for the reset_gpio_lookup_list
 */
struct reset_gpio_lookup {
	struct of_phandle_args of_args;
	struct list_head list;
};

static const char *rcdev_name(struct reset_controller_dev *rcdev)
{
	if (rcdev->dev)
		return dev_name(rcdev->dev);

	if (rcdev->of_node)
		return rcdev->of_node->full_name;

	if (rcdev->of_args)
		return rcdev->of_args->np->full_name;

	return NULL;
}

/**
 * of_reset_simple_xlate - translate reset_spec to the reset line number
 * @rcdev: a pointer to the reset controller device
 * @reset_spec: reset line specifier as found in the device tree
 *
 * This static translation function is used by default if of_xlate in
 * :c:type:`reset_controller_dev` is not set. It is useful for all reset
 * controllers with 1:1 mapping, where reset lines can be indexed by number
 * without gaps.
 */
static int of_reset_simple_xlate(struct reset_controller_dev *rcdev,
				 const struct of_phandle_args *reset_spec)
{
	if (reset_spec->args[0] >= rcdev->nr_resets)
		return -EINVAL;

	return reset_spec->args[0];
}

/**
 * reset_controller_register - register a reset controller device
 * @rcdev: a pointer to the initialized reset controller device
 */
int reset_controller_register(struct reset_controller_dev *rcdev)
{
	if (rcdev->of_node && rcdev->of_args)
		return -EINVAL;

	if (!rcdev->of_xlate) {
		rcdev->of_reset_n_cells = 1;
		rcdev->of_xlate = of_reset_simple_xlate;
	}

	INIT_LIST_HEAD(&rcdev->reset_control_head);

	mutex_lock(&reset_list_mutex);
	list_add(&rcdev->list, &reset_controller_list);
	mutex_unlock(&reset_list_mutex);

	return 0;
}
EXPORT_SYMBOL_GPL(reset_controller_register);
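
/*
 * Illustrative provider-side sketch, not part of this file: a hypothetical
 * platform driver ("foo"; all names and registers below are assumptions)
 * exposing its reset lines through this framework. Leaving of_xlate unset
 * selects of_reset_simple_xlate() above, i.e. a single cell that directly
 * indexes the reset line.
 *
 *	static int foo_reset_assert(struct reset_controller_dev *rcdev,
 *				    unsigned long id)
 *	{
 *		struct foo_reset *priv =
 *			container_of(rcdev, struct foo_reset, rcdev);
 *
 *		writel(BIT(id), priv->base + FOO_RESET_SET);
 *		return 0;
 *	}
 *
 *	static const struct reset_control_ops foo_reset_ops = {
 *		.assert   = foo_reset_assert,
 *		.deassert = foo_reset_deassert,
 *	};
 *
 *	priv->rcdev.owner = THIS_MODULE;
 *	priv->rcdev.ops = &foo_reset_ops;
 *	priv->rcdev.of_node = pdev->dev.of_node;
 *	priv->rcdev.nr_resets = FOO_NR_RESETS;
 *	ret = reset_controller_register(&priv->rcdev);
 */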

/**
 * reset_controller_unregister - unregister a reset controller device
 * @rcdev: a pointer to the reset controller device
 */
void reset_controller_unregister(struct reset_controller_dev *rcdev)
{
	mutex_lock(&reset_list_mutex);
	list_del(&rcdev->list);
	mutex_unlock(&reset_list_mutex);
}
EXPORT_SYMBOL_GPL(reset_controller_unregister);

static void devm_reset_controller_release(struct device *dev, void *res)
{
	reset_controller_unregister(*(struct reset_controller_dev **)res);
}

/**
 * devm_reset_controller_register - resource managed reset_controller_register()
 * @dev: device that is registering this reset controller
 * @rcdev: a pointer to the initialized reset controller device
 *
 * Managed reset_controller_register(). For reset controllers registered by
 * this function, reset_controller_unregister() is automatically called on
 * driver detach. See reset_controller_register() for more information.
 */
int devm_reset_controller_register(struct device *dev,
				   struct reset_controller_dev *rcdev)
{
	struct reset_controller_dev **rcdevp;
	int ret;

	rcdevp = devres_alloc(devm_reset_controller_release, sizeof(*rcdevp),
			      GFP_KERNEL);
	if (!rcdevp)
		return -ENOMEM;

	ret = reset_controller_register(rcdev);
	if (ret) {
		devres_free(rcdevp);
		return ret;
	}

	*rcdevp = rcdev;
	devres_add(dev, rcdevp);

	return ret;
}
EXPORT_SYMBOL_GPL(devm_reset_controller_register);

/**
 * reset_controller_add_lookup - register a set of lookup entries
 * @lookup: array of reset lookup entries
 * @num_entries: number of entries in the lookup array
 */
void reset_controller_add_lookup(struct reset_control_lookup *lookup,
				 unsigned int num_entries)
{
	struct reset_control_lookup *entry;
	unsigned int i;

	mutex_lock(&reset_lookup_mutex);
	for (i = 0; i < num_entries; i++) {
		entry = &lookup[i];

		if (!entry->dev_id || !entry->provider) {
			pr_warn("%s(): reset lookup entry badly specified, skipping\n",
				__func__);
			continue;
		}

		list_add_tail(&entry->list, &reset_lookup_list);
	}
	mutex_unlock(&reset_lookup_mutex);
}
EXPORT_SYMBOL_GPL(reset_controller_add_lookup);
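
/*
 * Illustrative sketch for non-DT platforms, not part of this file: board code
 * (provider, device and connection names below are assumptions) can bind
 * consumer devices to provider reset lines with lookup entries, which are
 * matched later by __reset_control_get_from_lookup() below.
 *
 *	static struct reset_control_lookup foo_reset_lookup[] = {
 *		RESET_LOOKUP("foo-reset.0", 3, "bar-device.0", "bar"),
 *	};
 *
 *	reset_controller_add_lookup(foo_reset_lookup,
 *				    ARRAY_SIZE(foo_reset_lookup));
 */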

static inline struct reset_control_array *
rstc_to_array(struct reset_control *rstc) {
	return container_of(rstc, struct reset_control_array, base);
}

static int reset_control_array_reset(struct reset_control_array *resets)
{
	int ret, i;

	for (i = 0; i < resets->num_rstcs; i++) {
		ret = reset_control_reset(resets->rstc[i]);
		if (ret)
			return ret;
	}

	return 0;
}

static int reset_control_array_rearm(struct reset_control_array *resets)
{
	struct reset_control *rstc;
	int i;

	for (i = 0; i < resets->num_rstcs; i++) {
		rstc = resets->rstc[i];

		if (!rstc)
			continue;

		if (WARN_ON(IS_ERR(rstc)))
			return -EINVAL;

		if (rstc->shared) {
			if (WARN_ON(atomic_read(&rstc->deassert_count) != 0))
				return -EINVAL;
		} else {
			if (!rstc->acquired)
				return -EPERM;
		}
	}

	for (i = 0; i < resets->num_rstcs; i++) {
		rstc = resets->rstc[i];

		if (rstc && rstc->shared)
			WARN_ON(atomic_dec_return(&rstc->triggered_count) < 0);
	}

	return 0;
}

static int reset_control_array_assert(struct reset_control_array *resets)
{
	int ret, i;

	for (i = 0; i < resets->num_rstcs; i++) {
		ret = reset_control_assert(resets->rstc[i]);
		if (ret)
			goto err;
	}

	return 0;

err:
	while (i--)
		reset_control_deassert(resets->rstc[i]);
	return ret;
}

static int reset_control_array_deassert(struct reset_control_array *resets)
{
	int ret, i;

	for (i = 0; i < resets->num_rstcs; i++) {
		ret = reset_control_deassert(resets->rstc[i]);
		if (ret)
			goto err;
	}

	return 0;

err:
	while (i--)
		reset_control_assert(resets->rstc[i]);
	return ret;
}

static int reset_control_array_acquire(struct reset_control_array *resets)
{
	unsigned int i;
	int err;

	for (i = 0; i < resets->num_rstcs; i++) {
		err = reset_control_acquire(resets->rstc[i]);
		if (err < 0)
			goto release;
	}

	return 0;

release:
	while (i--)
		reset_control_release(resets->rstc[i]);

	return err;
}

static void reset_control_array_release(struct reset_control_array *resets)
{
	unsigned int i;

	for (i = 0; i < resets->num_rstcs; i++)
		reset_control_release(resets->rstc[i]);
}

static inline bool reset_control_is_array(struct reset_control *rstc)
{
	return rstc->array;
}

/**
 * reset_control_reset - reset the controlled device
 * @rstc: reset controller
 *
 * On a shared reset line the actual reset pulse is only triggered once for the
 * lifetime of the reset_control instance: for all but the first caller this is
 * a no-op.
 * Consumers must not use reset_control_(de)assert on shared reset lines when
 * reset_control_reset has been used.
 *
 * If rstc is NULL it is an optional reset and the function will just
 * return 0.
 */
int reset_control_reset(struct reset_control *rstc)
{
	int ret;

	if (!rstc)
		return 0;

	if (WARN_ON(IS_ERR(rstc)))
		return -EINVAL;

	if (reset_control_is_array(rstc))
		return reset_control_array_reset(rstc_to_array(rstc));

	if (!rstc->rcdev->ops->reset)
		return -ENOTSUPP;

	if (rstc->shared) {
		if (WARN_ON(atomic_read(&rstc->deassert_count) != 0))
			return -EINVAL;

		if (atomic_inc_return(&rstc->triggered_count) != 1)
			return 0;
	} else {
		if (!rstc->acquired)
			return -EPERM;
	}

	ret = rstc->rcdev->ops->reset(rstc->rcdev, rstc->id);
	if (rstc->shared && ret)
		atomic_dec(&rstc->triggered_count);

	return ret;
}
EXPORT_SYMBOL_GPL(reset_control_reset);
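
/*
 * Illustrative consumer-side sketch, not part of this file (device and
 * variable names are assumptions): pulsing a self-deasserting, exclusive
 * reset once at probe time.
 *
 *	rstc = devm_reset_control_get_exclusive(&pdev->dev, NULL);
 *	if (IS_ERR(rstc))
 *		return PTR_ERR(rstc);
 *
 *	ret = reset_control_reset(rstc);
 *	if (ret)
 *		return ret;
 *
 * On a shared reset control obtained with devm_reset_control_get_shared(),
 * only the first caller triggers the actual pulse; subsequent callers return
 * 0 without touching the hardware.
 */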

/**
 * reset_control_bulk_reset - reset the controlled devices in order
 * @num_rstcs: number of entries in rstcs array
 * @rstcs: array of struct reset_control_bulk_data with reset controls set
 *
 * Issue a reset on all provided reset controls, in order.
 *
 * See also: reset_control_reset()
 */
int reset_control_bulk_reset(int num_rstcs,
			     struct reset_control_bulk_data *rstcs)
{
	int ret, i;

	for (i = 0; i < num_rstcs; i++) {
		ret = reset_control_reset(rstcs[i].rstc);
		if (ret)
			return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(reset_control_bulk_reset);

/**
 * reset_control_rearm - allow shared reset line to be re-triggered
 * @rstc: reset controller
 *
 * On a shared reset line the actual reset pulse is only triggered once for the
 * lifetime of the reset_control instance, except if this call is used.
 *
 * Calls to this function must be balanced with calls to reset_control_reset();
 * a warning is issued if triggered_count ever dips below 0.
 *
 * Consumers must not use reset_control_(de)assert on shared reset lines when
 * reset_control_reset or reset_control_rearm have been used.
 *
 * If rstc is NULL the function will just return 0.
 */
int reset_control_rearm(struct reset_control *rstc)
{
	if (!rstc)
		return 0;

	if (WARN_ON(IS_ERR(rstc)))
		return -EINVAL;

	if (reset_control_is_array(rstc))
		return reset_control_array_rearm(rstc_to_array(rstc));

	if (rstc->shared) {
		if (WARN_ON(atomic_read(&rstc->deassert_count) != 0))
			return -EINVAL;

		WARN_ON(atomic_dec_return(&rstc->triggered_count) < 0);
	} else {
		if (!rstc->acquired)
			return -EPERM;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(reset_control_rearm);

/**
 * reset_control_assert - asserts the reset line
 * @rstc: reset controller
 *
 * Calling this on an exclusive reset controller guarantees that the reset
 * will be asserted. When called on a shared reset controller the line may
 * still be deasserted, as long as other users keep it so.
 *
 * For shared reset controls a driver cannot expect the hw's registers and
 * internal state to be reset, but must be prepared for this to happen.
 * Consumers must not use reset_control_reset on shared reset lines when
 * reset_control_(de)assert has been used.
 *
 * If rstc is NULL it is an optional reset and the function will just
 * return 0.
 */
int reset_control_assert(struct reset_control *rstc)
{
	if (!rstc)
		return 0;

	if (WARN_ON(IS_ERR(rstc)))
		return -EINVAL;

	if (reset_control_is_array(rstc))
		return reset_control_array_assert(rstc_to_array(rstc));

	if (rstc->shared) {
		if (WARN_ON(atomic_read(&rstc->triggered_count) != 0))
			return -EINVAL;

		if (WARN_ON(atomic_read(&rstc->deassert_count) == 0))
			return -EINVAL;

		if (atomic_dec_return(&rstc->deassert_count) != 0)
			return 0;

		/*
		 * Shared reset controls allow the reset line to be in any state
		 * after this call, so doing nothing is a valid option.
		 */
		if (!rstc->rcdev->ops->assert)
			return 0;
	} else {
		/*
		 * If the reset controller does not implement .assert(), there
		 * is no way to guarantee that the reset line is asserted after
		 * this call.
		 */
		if (!rstc->rcdev->ops->assert)
			return -ENOTSUPP;

		if (!rstc->acquired) {
			WARN(1, "reset %s (ID: %u) is not acquired\n",
			     rcdev_name(rstc->rcdev), rstc->id);
			return -EPERM;
		}
	}

	return rstc->rcdev->ops->assert(rstc->rcdev, rstc->id);
}
EXPORT_SYMBOL_GPL(reset_control_assert);

/**
 * reset_control_bulk_assert - asserts the reset lines in order
 * @num_rstcs: number of entries in rstcs array
 * @rstcs: array of struct reset_control_bulk_data with reset controls set
 *
 * Assert the reset lines for all provided reset controls, in order.
 * If an assertion fails, already asserted resets are deasserted again.
 *
 * See also: reset_control_assert()
 */
int reset_control_bulk_assert(int num_rstcs,
			      struct reset_control_bulk_data *rstcs)
{
	int ret, i;

	for (i = 0; i < num_rstcs; i++) {
		ret = reset_control_assert(rstcs[i].rstc);
		if (ret)
			goto err;
	}

	return 0;

err:
	while (i--)
		reset_control_deassert(rstcs[i].rstc);
	return ret;
}
EXPORT_SYMBOL_GPL(reset_control_bulk_assert);

/**
 * reset_control_deassert - deasserts the reset line
 * @rstc: reset controller
 *
 * After calling this function, the reset is guaranteed to be deasserted.
 * Consumers must not use reset_control_reset on shared reset lines when
 * reset_control_(de)assert has been used.
 *
 * If rstc is NULL it is an optional reset and the function will just
 * return 0.
 */
int reset_control_deassert(struct reset_control *rstc)
{
	if (!rstc)
		return 0;

	if (WARN_ON(IS_ERR(rstc)))
		return -EINVAL;

	if (reset_control_is_array(rstc))
		return reset_control_array_deassert(rstc_to_array(rstc));

	if (rstc->shared) {
		if (WARN_ON(atomic_read(&rstc->triggered_count) != 0))
			return -EINVAL;

		if (atomic_inc_return(&rstc->deassert_count) != 1)
			return 0;
	} else {
		if (!rstc->acquired) {
			WARN(1, "reset %s (ID: %u) is not acquired\n",
			     rcdev_name(rstc->rcdev), rstc->id);
			return -EPERM;
		}
	}

	/*
	 * If the reset controller does not implement .deassert(), we assume
	 * that it handles self-deasserting reset lines via .reset(). In that
	 * case, the reset lines are deasserted by default. If that is not the
	 * case, the reset controller driver should implement .deassert() and
	 * return -ENOTSUPP.
	 */
	if (!rstc->rcdev->ops->deassert)
		return 0;

	return rstc->rcdev->ops->deassert(rstc->rcdev, rstc->id);
}
EXPORT_SYMBOL_GPL(reset_control_deassert);
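
/*
 * Illustrative sketch of the level-controlled usage pattern, not part of this
 * file (names are assumptions): a consumer keeps the line deasserted while
 * the device is in use and asserts it again on remove or suspend.
 *
 *	rstc = devm_reset_control_get_shared(&pdev->dev, NULL);
 *	if (IS_ERR(rstc))
 *		return PTR_ERR(rstc);
 *
 *	ret = reset_control_deassert(rstc);
 *	if (ret)
 *		return ret;
 *	...
 *	reset_control_assert(rstc);
 *
 * With a shared control the assert only takes effect once the last user has
 * asserted; mixing this pattern with reset_control_reset() on the same shared
 * line is not allowed.
 */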

/**
 * reset_control_bulk_deassert - deasserts the reset lines in reverse order
 * @num_rstcs: number of entries in rstcs array
 * @rstcs: array of struct reset_control_bulk_data with reset controls set
 *
 * Deassert the reset lines for all provided reset controls, in reverse order.
 * If a deassertion fails, already deasserted resets are asserted again.
 *
 * See also: reset_control_deassert()
 */
int reset_control_bulk_deassert(int num_rstcs,
				struct reset_control_bulk_data *rstcs)
{
	int ret, i;

	for (i = num_rstcs - 1; i >= 0; i--) {
		ret = reset_control_deassert(rstcs[i].rstc);
		if (ret)
			goto err;
	}

	return 0;

err:
	while (i < num_rstcs)
		reset_control_assert(rstcs[i++].rstc);
	return ret;
}
EXPORT_SYMBOL_GPL(reset_control_bulk_deassert);

/**
 * reset_control_status - returns a negative errno if not supported, a
 * positive value if the reset line is asserted, or zero if the reset
 * line is not asserted or if rstc is NULL (optional reset).
 * @rstc: reset controller
 */
int reset_control_status(struct reset_control *rstc)
{
	if (!rstc)
		return 0;

	if (WARN_ON(IS_ERR(rstc)) || reset_control_is_array(rstc))
		return -EINVAL;

	if (rstc->rcdev->ops->status)
		return rstc->rcdev->ops->status(rstc->rcdev, rstc->id);

	return -ENOTSUPP;
}
EXPORT_SYMBOL_GPL(reset_control_status);

/**
 * reset_control_acquire() - acquires a reset control for exclusive use
 * @rstc: reset control
 *
 * This is used to explicitly acquire a reset control for exclusive use. Note
 * that exclusive resets are requested as acquired by default. In order for a
 * second consumer to be able to control the reset, the first consumer has to
 * release it first. Typically the easiest way to achieve this is to call the
 * reset_control_get_exclusive_released() to obtain an instance of the reset
 * control. Such reset controls are not acquired by default.
 *
 * Consumers implementing shared access to an exclusive reset need to follow
 * a specific protocol in order to work together. Before consumers can change
 * a reset they must acquire exclusive access using reset_control_acquire().
 * After they are done operating the reset, they must release exclusive access
 * with a call to reset_control_release(). A consumer is not granted exclusive
 * access to the reset until the previous holder has released it.
 *
 * See also: reset_control_release()
 */
int reset_control_acquire(struct reset_control *rstc)
{
	struct reset_control *rc;

	if (!rstc)
		return 0;

	if (WARN_ON(IS_ERR(rstc)))
		return -EINVAL;

	if (reset_control_is_array(rstc))
		return reset_control_array_acquire(rstc_to_array(rstc));

	mutex_lock(&reset_list_mutex);

	if (rstc->acquired) {
		mutex_unlock(&reset_list_mutex);
		return 0;
	}

	list_for_each_entry(rc, &rstc->rcdev->reset_control_head, list) {
		if (rstc != rc && rstc->id == rc->id) {
			if (rc->acquired) {
				mutex_unlock(&reset_list_mutex);
				return -EBUSY;
			}
		}
	}

	rstc->acquired = true;

	mutex_unlock(&reset_list_mutex);
	return 0;
}
EXPORT_SYMBOL_GPL(reset_control_acquire);

/**
 * reset_control_bulk_acquire - acquires reset controls for exclusive use
 * @num_rstcs: number of entries in rstcs array
 * @rstcs: array of struct reset_control_bulk_data with reset controls set
 *
 * This is used to explicitly acquire reset controls requested with
 * reset_control_bulk_get_exclusive_released() for temporary exclusive use.
 *
 * See also: reset_control_acquire(), reset_control_bulk_release()
 */
int reset_control_bulk_acquire(int num_rstcs,
			       struct reset_control_bulk_data *rstcs)
{
	int ret, i;

	for (i = 0; i < num_rstcs; i++) {
		ret = reset_control_acquire(rstcs[i].rstc);
		if (ret)
			goto err;
	}

	return 0;

err:
	while (i--)
		reset_control_release(rstcs[i].rstc);
	return ret;
}
EXPORT_SYMBOL_GPL(reset_control_bulk_acquire);

/**
 * reset_control_release() - releases exclusive access to a reset control
 * @rstc: reset control
 *
 * Releases exclusive access right to a reset control previously obtained by a
 * call to reset_control_acquire(). Until a consumer calls this function, no
 * other consumers will be granted exclusive access.
 *
 * See also: reset_control_acquire()
 */
void reset_control_release(struct reset_control *rstc)
{
	if (!rstc || WARN_ON(IS_ERR(rstc)))
		return;

	if (reset_control_is_array(rstc))
		reset_control_array_release(rstc_to_array(rstc));
	else
		rstc->acquired = false;
}
EXPORT_SYMBOL_GPL(reset_control_release);
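
/*
 * Illustrative sketch of the acquire/release protocol, not part of this file
 * (names are assumptions): two cooperating consumers of the same exclusive
 * reset line each request it in the released state and take turns acquiring
 * it.
 *
 *	rstc = devm_reset_control_get_exclusive_released(&pdev->dev, NULL);
 *	if (IS_ERR(rstc))
 *		return PTR_ERR(rstc);
 *
 *	ret = reset_control_acquire(rstc);
 *	if (ret)
 *		return ret;
 *
 *	reset_control_assert(rstc);
 *	...
 *	reset_control_deassert(rstc);
 *
 *	reset_control_release(rstc);
 *
 * reset_control_acquire() returns -EBUSY while the other consumer still holds
 * the line; after reset_control_release() the roles can be swapped.
 */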

/**
 * reset_control_bulk_release() - releases exclusive access to reset controls
 * @num_rstcs: number of entries in rstcs array
 * @rstcs: array of struct reset_control_bulk_data with reset controls set
 *
 * Releases exclusive access right to reset controls previously obtained by a
 * call to reset_control_bulk_acquire().
 *
 * See also: reset_control_release(), reset_control_bulk_acquire()
 */
void reset_control_bulk_release(int num_rstcs,
				struct reset_control_bulk_data *rstcs)
{
	int i;

	for (i = 0; i < num_rstcs; i++)
		reset_control_release(rstcs[i].rstc);
}
EXPORT_SYMBOL_GPL(reset_control_bulk_release);

static struct reset_control *
__reset_control_get_internal(struct reset_controller_dev *rcdev,
			     unsigned int index, bool shared, bool acquired)
{
	struct reset_control *rstc;

	lockdep_assert_held(&reset_list_mutex);

	list_for_each_entry(rstc, &rcdev->reset_control_head, list) {
		if (rstc->id == index) {
			/*
			 * Allow creating a secondary exclusive reset_control
			 * that is initially not acquired for an already
			 * controlled reset line.
			 */
			if (!rstc->shared && !shared && !acquired)
				break;

			if (WARN_ON(!rstc->shared || !shared))
				return ERR_PTR(-EBUSY);

			kref_get(&rstc->refcnt);
			return rstc;
		}
	}

	rstc = kzalloc(sizeof(*rstc), GFP_KERNEL);
	if (!rstc)
		return ERR_PTR(-ENOMEM);

	if (!try_module_get(rcdev->owner)) {
		kfree(rstc);
		return ERR_PTR(-ENODEV);
	}

	rstc->rcdev = rcdev;
	list_add(&rstc->list, &rcdev->reset_control_head);
	rstc->id = index;
	kref_init(&rstc->refcnt);
	rstc->acquired = acquired;
	rstc->shared = shared;

	return rstc;
}

static void __reset_control_release(struct kref *kref)
{
	struct reset_control *rstc = container_of(kref, struct reset_control,
						  refcnt);

	lockdep_assert_held(&reset_list_mutex);

	module_put(rstc->rcdev->owner);

	list_del(&rstc->list);
	kfree(rstc);
}

static void __reset_control_put_internal(struct reset_control *rstc)
{
	lockdep_assert_held(&reset_list_mutex);

	if (IS_ERR_OR_NULL(rstc))
		return;

	kref_put(&rstc->refcnt, __reset_control_release);
}

static int __reset_add_reset_gpio_lookup(int id, struct device_node *np,
					 unsigned int gpio,
					 unsigned int of_flags)
{
	const struct fwnode_handle *fwnode = of_fwnode_handle(np);
	unsigned int lookup_flags;
	const char *label_tmp;

	/*
	 * Later we map GPIO flags between OF and Linux, however not all
	 * constants from include/dt-bindings/gpio/gpio.h and
	 * include/linux/gpio/machine.h match each other.
	 */
	if (of_flags > GPIO_ACTIVE_LOW) {
		pr_err("reset-gpio code does not support GPIO flags %u for GPIO %u\n",
		       of_flags, gpio);
		return -EINVAL;
	}

	struct gpio_device *gdev __free(gpio_device_put) = gpio_device_find_by_fwnode(fwnode);
	if (!gdev)
		return -EPROBE_DEFER;

	label_tmp = gpio_device_get_label(gdev);
	if (!label_tmp)
		return -EINVAL;

	char *label __free(kfree) = kstrdup(label_tmp, GFP_KERNEL);
	if (!label)
		return -ENOMEM;

	/* Size: one lookup entry plus sentinel */
	struct gpiod_lookup_table *lookup __free(kfree) = kzalloc(struct_size(lookup, table, 2),
								  GFP_KERNEL);
	if (!lookup)
		return -ENOMEM;

	lookup->dev_id = kasprintf(GFP_KERNEL, "reset-gpio.%d", id);
	if (!lookup->dev_id)
		return -ENOMEM;

	lookup_flags = GPIO_PERSISTENT;
	lookup_flags |= of_flags & GPIO_ACTIVE_LOW;
	lookup->table[0] = GPIO_LOOKUP(no_free_ptr(label), gpio, "reset",
				       lookup_flags);

	/* Not freed on success, because it is persistent subsystem data. */
	gpiod_add_lookup_table(no_free_ptr(lookup));

	return 0;
}

/*
 * @args:	phandle to the GPIO provider with all the args like GPIO number
 */
static int __reset_add_reset_gpio_device(const struct of_phandle_args *args)
{
	struct reset_gpio_lookup *rgpio_dev;
	struct platform_device *pdev;
	int id, ret;

	/*
	 * Currently only #gpio-cells=2 is supported with the meaning of:
	 * args[0]: GPIO number
	 * args[1]: GPIO flags
	 * TODO: Handle other cases.
	 */
	if (args->args_count != 2)
		return -ENOENT;

	/*
	 * Registering the reset-gpio device might cause an immediate bind,
	 * resulting in its probe() registering a new reset controller and thus
	 * taking the reset_list_mutex lock via reset_controller_register().
	 */
	lockdep_assert_not_held(&reset_list_mutex);

	mutex_lock(&reset_gpio_lookup_mutex);

	list_for_each_entry(rgpio_dev, &reset_gpio_lookup_list, list) {
		if (args->np == rgpio_dev->of_args.np) {
			if (of_phandle_args_equal(args, &rgpio_dev->of_args))
				goto out; /* Already on the list, done */
		}
	}

	id = ida_alloc(&reset_gpio_ida, GFP_KERNEL);
	if (id < 0) {
		ret = id;
		goto err_unlock;
	}

	/* Not freed on success, because it is persistent subsystem data. */
	rgpio_dev = kzalloc(sizeof(*rgpio_dev), GFP_KERNEL);
	if (!rgpio_dev) {
		ret = -ENOMEM;
		goto err_ida_free;
	}

	ret = __reset_add_reset_gpio_lookup(id, args->np, args->args[0],
					    args->args[1]);
	if (ret < 0)
		goto err_kfree;

	rgpio_dev->of_args = *args;
	/*
	 * We keep the device_node reference, but of_args.np is put at the end
	 * of __of_reset_control_get(), so get it one more time.
	 * Hold reference as long as rgpio_dev memory is valid.
	 */
	of_node_get(rgpio_dev->of_args.np);
	pdev = platform_device_register_data(NULL, "reset-gpio", id,
					     &rgpio_dev->of_args,
					     sizeof(rgpio_dev->of_args));
	ret = PTR_ERR_OR_ZERO(pdev);
	if (ret)
		goto err_put;

	list_add(&rgpio_dev->list, &reset_gpio_lookup_list);

out:
	mutex_unlock(&reset_gpio_lookup_mutex);

	return 0;

err_put:
	of_node_put(rgpio_dev->of_args.np);
err_kfree:
	kfree(rgpio_dev);
err_ida_free:
	ida_free(&reset_gpio_ida, id);
err_unlock:
	mutex_unlock(&reset_gpio_lookup_mutex);

	return ret;
}

static struct reset_controller_dev *__reset_find_rcdev(const struct of_phandle_args *args,
						       bool gpio_fallback)
{
	struct reset_controller_dev *rcdev;

	lockdep_assert_held(&reset_list_mutex);

	list_for_each_entry(rcdev, &reset_controller_list, list) {
		if (gpio_fallback) {
			if (rcdev->of_args && of_phandle_args_equal(args,
								    rcdev->of_args))
				return rcdev;
		} else {
			if (args->np == rcdev->of_node)
				return rcdev;
		}
	}

	return NULL;
}

struct reset_control *
__of_reset_control_get(struct device_node *node, const char *id, int index,
		       bool shared, bool optional, bool acquired)
{
	bool gpio_fallback = false;
	struct reset_control *rstc;
	struct reset_controller_dev *rcdev;
	struct of_phandle_args args;
	int rstc_id;
	int ret;

	if (!node)
		return ERR_PTR(-EINVAL);

	if (id) {
		index = of_property_match_string(node,
						 "reset-names", id);
		if (index == -EILSEQ)
			return ERR_PTR(index);
		if (index < 0)
			return optional ? NULL : ERR_PTR(-ENOENT);
	}

	ret = of_parse_phandle_with_args(node, "resets", "#reset-cells",
					 index, &args);
	if (ret == -EINVAL)
		return ERR_PTR(ret);
	if (ret) {
		if (!IS_ENABLED(CONFIG_RESET_GPIO))
			return optional ? NULL : ERR_PTR(ret);

		/*
		 * There can be only one reset-gpio for regular devices, so
		 * don't bother with the "reset-gpios" phandle index.
		 */
		ret = of_parse_phandle_with_args(node, "reset-gpios", "#gpio-cells",
						 0, &args);
		if (ret)
			return optional ? NULL : ERR_PTR(ret);

		gpio_fallback = true;

		ret = __reset_add_reset_gpio_device(&args);
		if (ret) {
			rstc = ERR_PTR(ret);
			goto out_put;
		}
	}

	mutex_lock(&reset_list_mutex);
	rcdev = __reset_find_rcdev(&args, gpio_fallback);
	if (!rcdev) {
		rstc = ERR_PTR(-EPROBE_DEFER);
		goto out_unlock;
	}

	if (WARN_ON(args.args_count != rcdev->of_reset_n_cells)) {
		rstc = ERR_PTR(-EINVAL);
		goto out_unlock;
	}

	rstc_id = rcdev->of_xlate(rcdev, &args);
	if (rstc_id < 0) {
		rstc = ERR_PTR(rstc_id);
		goto out_unlock;
	}

	/* reset_list_mutex also protects the rcdev's reset_control list */
	rstc = __reset_control_get_internal(rcdev, rstc_id, shared, acquired);

out_unlock:
	mutex_unlock(&reset_list_mutex);
out_put:
	of_node_put(args.np);

	return rstc;
}
EXPORT_SYMBOL_GPL(__of_reset_control_get);

static struct reset_controller_dev *
__reset_controller_by_name(const char *name)
{
	struct reset_controller_dev *rcdev;

	lockdep_assert_held(&reset_list_mutex);

	list_for_each_entry(rcdev, &reset_controller_list, list) {
		if (!rcdev->dev)
			continue;

		if (!strcmp(name, dev_name(rcdev->dev)))
			return rcdev;
	}

	return NULL;
}

static struct reset_control *
__reset_control_get_from_lookup(struct device *dev, const char *con_id,
				bool shared, bool optional, bool acquired)
{
	const struct reset_control_lookup *lookup;
	struct reset_controller_dev *rcdev;
	const char *dev_id = dev_name(dev);
	struct reset_control *rstc = NULL;

	mutex_lock(&reset_lookup_mutex);

	list_for_each_entry(lookup, &reset_lookup_list, list) {
		if (strcmp(lookup->dev_id, dev_id))
			continue;

		if ((!con_id && !lookup->con_id) ||
		    ((con_id && lookup->con_id) &&
		     !strcmp(con_id, lookup->con_id))) {
			mutex_lock(&reset_list_mutex);
			rcdev = __reset_controller_by_name(lookup->provider);
			if (!rcdev) {
				mutex_unlock(&reset_list_mutex);
				mutex_unlock(&reset_lookup_mutex);
				/* Reset provider may not be ready yet. */
				return ERR_PTR(-EPROBE_DEFER);
			}

			rstc = __reset_control_get_internal(rcdev,
							    lookup->index,
							    shared, acquired);
			mutex_unlock(&reset_list_mutex);
			break;
		}
	}

	mutex_unlock(&reset_lookup_mutex);

	if (!rstc)
		return optional ? NULL : ERR_PTR(-ENOENT);

	return rstc;
}

struct reset_control *__reset_control_get(struct device *dev, const char *id,
					  int index, bool shared, bool optional,
					  bool acquired)
{
	if (WARN_ON(shared && acquired))
		return ERR_PTR(-EINVAL);

	if (dev->of_node)
		return __of_reset_control_get(dev->of_node, id, index, shared,
					      optional, acquired);

	return __reset_control_get_from_lookup(dev, id, shared, optional,
					       acquired);
}
EXPORT_SYMBOL_GPL(__reset_control_get);

int __reset_control_bulk_get(struct device *dev, int num_rstcs,
			     struct reset_control_bulk_data *rstcs,
			     bool shared, bool optional, bool acquired)
{
	int ret, i;

	for (i = 0; i < num_rstcs; i++) {
		rstcs[i].rstc = __reset_control_get(dev, rstcs[i].id, 0,
						    shared, optional, acquired);
		if (IS_ERR(rstcs[i].rstc)) {
			ret = PTR_ERR(rstcs[i].rstc);
			goto err;
		}
	}

	return 0;

err:
	mutex_lock(&reset_list_mutex);
	while (i--)
		__reset_control_put_internal(rstcs[i].rstc);
	mutex_unlock(&reset_list_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(__reset_control_bulk_get);

static void reset_control_array_put(struct reset_control_array *resets)
{
	int i;

	mutex_lock(&reset_list_mutex);
	for (i = 0; i < resets->num_rstcs; i++)
		__reset_control_put_internal(resets->rstc[i]);
	mutex_unlock(&reset_list_mutex);
	kfree(resets);
}

/**
 * reset_control_put - free the reset controller
 * @rstc: reset controller
 */
void reset_control_put(struct reset_control *rstc)
{
	if (IS_ERR_OR_NULL(rstc))
		return;

	if (reset_control_is_array(rstc)) {
		reset_control_array_put(rstc_to_array(rstc));
		return;
	}

	mutex_lock(&reset_list_mutex);
	__reset_control_put_internal(rstc);
	mutex_unlock(&reset_list_mutex);
}
EXPORT_SYMBOL_GPL(reset_control_put);

/**
 * reset_control_bulk_put - free the reset controllers
 * @num_rstcs: number of entries in rstcs array
 * @rstcs: array of struct reset_control_bulk_data with reset controls set
 */
void reset_control_bulk_put(int num_rstcs, struct reset_control_bulk_data *rstcs)
{
	mutex_lock(&reset_list_mutex);
	while (num_rstcs--)
		__reset_control_put_internal(rstcs[num_rstcs].rstc);
	mutex_unlock(&reset_list_mutex);
}
EXPORT_SYMBOL_GPL(reset_control_bulk_put);

static void devm_reset_control_release(struct device *dev, void *res)
{
	reset_control_put(*(struct reset_control **)res);
}

struct reset_control *
__devm_reset_control_get(struct device *dev, const char *id, int index,
			 bool shared, bool optional, bool acquired)
{
	struct reset_control **ptr, *rstc;

	ptr = devres_alloc(devm_reset_control_release, sizeof(*ptr),
			   GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	rstc = __reset_control_get(dev, id, index, shared, optional, acquired);
	if (IS_ERR_OR_NULL(rstc)) {
		devres_free(ptr);
		return rstc;
	}

	*ptr = rstc;
	devres_add(dev, ptr);

	return rstc;
}
EXPORT_SYMBOL_GPL(__devm_reset_control_get);

struct reset_control_bulk_devres {
	int num_rstcs;
	struct reset_control_bulk_data *rstcs;
};

static void devm_reset_control_bulk_release(struct device *dev, void *res)
{
	struct reset_control_bulk_devres *devres = res;

	reset_control_bulk_put(devres->num_rstcs, devres->rstcs);
}

int __devm_reset_control_bulk_get(struct device *dev, int num_rstcs,
				  struct reset_control_bulk_data *rstcs,
				  bool shared, bool optional, bool acquired)
{
	struct reset_control_bulk_devres *ptr;
	int ret;

	ptr = devres_alloc(devm_reset_control_bulk_release, sizeof(*ptr),
			   GFP_KERNEL);
	if (!ptr)
		return -ENOMEM;

	ret = __reset_control_bulk_get(dev, num_rstcs, rstcs, shared, optional, acquired);
	if (ret < 0) {
		devres_free(ptr);
		return ret;
	}

	ptr->num_rstcs = num_rstcs;
	ptr->rstcs = rstcs;
	devres_add(dev, ptr);

	return 0;
}
EXPORT_SYMBOL_GPL(__devm_reset_control_bulk_get);
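
/*
 * Illustrative sketch of the bulk API, not part of this file (consumer names
 * are assumptions): the caller fills in the .id fields, the get call fills in
 * the .rstc fields, and the whole set is then handled with one call per
 * operation.
 *
 *	struct reset_control_bulk_data resets[] = {
 *		{ .id = "bus" },
 *		{ .id = "core" },
 *	};
 *
 *	ret = devm_reset_control_bulk_get_exclusive(&pdev->dev,
 *						    ARRAY_SIZE(resets), resets);
 *	if (ret)
 *		return ret;
 *
 *	ret = reset_control_bulk_deassert(ARRAY_SIZE(resets), resets);
 */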

/**
 * __device_reset - find reset controller associated with the device
 *                  and perform reset
 * @dev: device to be reset by the controller
 * @optional: whether it is optional to reset the device
 *
 * Convenience wrapper for __reset_control_get() and reset_control_reset().
 * This is useful for the common case of devices with single, dedicated reset
 * lines. The _RST firmware method is invoked for devices with ACPI.
 */
int __device_reset(struct device *dev, bool optional)
{
	struct reset_control *rstc;
	int ret;

#ifdef CONFIG_ACPI
	acpi_handle handle = ACPI_HANDLE(dev);

	if (handle) {
		if (!acpi_has_method(handle, "_RST"))
			return optional ? 0 : -ENOENT;
		if (ACPI_FAILURE(acpi_evaluate_object(handle, "_RST", NULL,
						      NULL)))
			return -EIO;
	}
#endif

	rstc = __reset_control_get(dev, NULL, 0, 0, optional, true);
	if (IS_ERR(rstc))
		return PTR_ERR(rstc);

	ret = reset_control_reset(rstc);

	reset_control_put(rstc);

	return ret;
}
EXPORT_SYMBOL_GPL(__device_reset);
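
/*
 * Illustrative sketch, not part of this file (consumer names are assumptions):
 * the device_reset() and device_reset_optional() wrappers in <linux/reset.h>
 * funnel into __device_reset() above.
 *
 *	ret = device_reset(&pdev->dev);
 *	if (ret)
 *		return dev_err_probe(&pdev->dev, ret, "failed to reset device\n");
 */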

/*
 * APIs to manage an array of reset controls.
 */

/**
 * of_reset_control_get_count - Count number of resets available with a device
 *
 * @node: device node that contains 'resets'.
 *
 * Returns a positive reset count on success, or a negative error number on
 * failure or if the count is zero.
 */
static int of_reset_control_get_count(struct device_node *node)
{
	int count;

	if (!node)
		return -EINVAL;

	count = of_count_phandle_with_args(node, "resets", "#reset-cells");
	if (count == 0)
		count = -ENOENT;

	return count;
}

/**
 * of_reset_control_array_get - Get a list of reset controls using
 *				device node.
 *
 * @np: device node for the device that requests the reset controls array
 * @shared: whether reset controls are shared or not
 * @optional: whether it is optional to get the reset controls
 * @acquired: only one reset control may be acquired for a given controller
 *            and ID
 *
 * Returns pointer to allocated reset_control on success or error on failure
 */
struct reset_control *
of_reset_control_array_get(struct device_node *np, bool shared, bool optional,
			   bool acquired)
{
	struct reset_control_array *resets;
	struct reset_control *rstc;
	int num, i;

	num = of_reset_control_get_count(np);
	if (num < 0)
		return optional ? NULL : ERR_PTR(num);

	resets = kzalloc(struct_size(resets, rstc, num), GFP_KERNEL);
	if (!resets)
		return ERR_PTR(-ENOMEM);
	resets->num_rstcs = num;

	for (i = 0; i < num; i++) {
		rstc = __of_reset_control_get(np, NULL, i, shared, optional,
					      acquired);
		if (IS_ERR(rstc))
			goto err_rst;
		resets->rstc[i] = rstc;
	}
	resets->base.array = true;

	return &resets->base;

err_rst:
	mutex_lock(&reset_list_mutex);
	while (--i >= 0)
		__reset_control_put_internal(resets->rstc[i]);
	mutex_unlock(&reset_list_mutex);

	kfree(resets);

	return rstc;
}
EXPORT_SYMBOL_GPL(of_reset_control_array_get);

/**
 * devm_reset_control_array_get - Resource managed reset control array get
 *
 * @dev: device that requests the list of reset controls
 * @shared: whether reset controls are shared or not
 * @optional: whether it is optional to get the reset controls
 *
 * The reset control array APIs are intended for a list of resets
 * that just have to be asserted or deasserted, without any
 * requirements on the order.
 *
 * Returns pointer to allocated reset_control on success or error on failure
 */
struct reset_control *
devm_reset_control_array_get(struct device *dev, bool shared, bool optional)
{
	struct reset_control **ptr, *rstc;

	ptr = devres_alloc(devm_reset_control_release, sizeof(*ptr),
			   GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	rstc = of_reset_control_array_get(dev->of_node, shared, optional, true);
	if (IS_ERR_OR_NULL(rstc)) {
		devres_free(ptr);
		return rstc;
	}

	*ptr = rstc;
	devres_add(dev, ptr);

	return rstc;
}
EXPORT_SYMBOL_GPL(devm_reset_control_array_get);
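
/*
 * Illustrative sketch, not part of this file (names are assumptions): the
 * devm_reset_control_array_get_exclusive() wrapper in <linux/reset.h> grabs
 * every reset listed in the consumer's "resets" property as a single handle,
 * which can then be asserted or deasserted as one.
 *
 *	rstc = devm_reset_control_array_get_exclusive(&pdev->dev);
 *	if (IS_ERR(rstc))
 *		return PTR_ERR(rstc);
 *
 *	ret = reset_control_deassert(rstc);
 */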

static int reset_control_get_count_from_lookup(struct device *dev)
{
	const struct reset_control_lookup *lookup;
	const char *dev_id;
	int count = 0;

	if (!dev)
		return -EINVAL;

	dev_id = dev_name(dev);
	mutex_lock(&reset_lookup_mutex);

	list_for_each_entry(lookup, &reset_lookup_list, list) {
		if (!strcmp(lookup->dev_id, dev_id))
			count++;
	}

	mutex_unlock(&reset_lookup_mutex);

	if (count == 0)
		count = -ENOENT;

	return count;
}

/**
 * reset_control_get_count - Count number of resets available with a device
 *
 * @dev: device for which to return the number of resets
 *
 * Returns a positive reset count on success, or a negative error number on
 * failure or if the count is zero.
 */
int reset_control_get_count(struct device *dev)
{
	if (dev->of_node)
		return of_reset_control_get_count(dev->of_node);

	return reset_control_get_count_from_lookup(dev);
}
EXPORT_SYMBOL_GPL(reset_control_get_count);