Contributors: 8

Author             Tokens  Token Proportion  Commits  Commit Proportion
Sergei Shtylyov      1626            92.28%        4             25.00%
Kazuya Mizuguchi       59             3.35%        2             12.50%
Jacob E Keller         42             2.38%        3             18.75%
Biju Das               26             1.48%        3             18.75%
Phil Edworthy           3             0.17%        1              6.25%
Kuninori Morimoto       2             0.11%        1              6.25%
Richard Cochran         2             0.11%        1              6.25%
Dan Carpenter           2             0.11%        1              6.25%
Total                1762                         16


// SPDX-License-Identifier: GPL-2.0+
/* PTP 1588 clock using the Renesas Ethernet AVB
 *
 * Copyright (C) 2013-2015 Renesas Electronics Corporation
 * Copyright (C) 2015 Renesas Solutions Corp.
 * Copyright (C) 2015-2016 Cogent Embedded, Inc. <source@cogentembedded.com>
 */

#include "ravb.h"

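/* Issue a timer control request: wait for GCCR.TCR to report that no request
 * is pending, set the requested operation bits, then wait again for the
 * hardware to finish and return to the "no request" state.
 */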
static int ravb_ptp_tcr_request(struct ravb_private *priv, u32 request)
{
	struct net_device *ndev = priv->ndev;
	int error;

	error = ravb_wait(ndev, GCCR, GCCR_TCR, GCCR_TCR_NOREQ);
	if (error)
		return error;

	ravb_modify(ndev, GCCR, request, request);
	return ravb_wait(ndev, GCCR, GCCR_TCR, GCCR_TCR_NOREQ);
}

/* Caller must hold the lock */
static int ravb_ptp_time_read(struct ravb_private *priv, struct timespec64 *ts)
{
	struct net_device *ndev = priv->ndev;
	int error;

	error = ravb_ptp_tcr_request(priv, GCCR_TCR_CAPTURE);
	if (error)
		return error;

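	/* The captured time is spread over three registers: GCT0 holds the
	 * nanoseconds part, GCT1 the lower 32 bits of the seconds and GCT2
	 * the upper bits (apparently only 16 of them, judging by the mask
	 * used on the write path below).
	 */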
	ts->tv_nsec = ravb_read(ndev, GCT0);
	ts->tv_sec  = ravb_read(ndev, GCT1) |
		((s64)ravb_read(ndev, GCT2) << 32);

	return 0;
}

/* Caller must hold the lock */
static int ravb_ptp_time_write(struct ravb_private *priv,
				const struct timespec64 *ts)
{
	struct net_device *ndev = priv->ndev;
	int error;
	u32 gccr;

	error = ravb_ptp_tcr_request(priv, GCCR_TCR_RESET);
	if (error)
		return error;

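	/* Load the new time through the offset registers GTO0..GTO2 and set
	 * GCCR.LTO so the hardware latches it into the running timer; bail
	 * out if a previous load (LTO still set) has not completed yet.
	 */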
	gccr = ravb_read(ndev, GCCR);
	if (gccr & GCCR_LTO)
		return -EBUSY;
	ravb_write(ndev, ts->tv_nsec, GTO0);
	ravb_write(ndev, ts->tv_sec,  GTO1);
	ravb_write(ndev, (ts->tv_sec >> 32) & 0xffff, GTO2);
	ravb_write(ndev, gccr | GCCR_LTO, GCCR);

	return 0;
}

/* Caller must hold the lock */
static int ravb_ptp_update_compare(struct ravb_private *priv, u32 ns)
{
	struct net_device *ndev = priv->ndev;
	/* When the comparison value (GPTC.PTCV) is in range of
	 * [x-1 to x+1] (x is the configured increment value in
	 * GTI.TIV), it may happen that a comparison match is
	 * not detected when the timer wraps around.
	 */
	u32 gti_ns_plus_1 = (priv->ptp.current_addend >> 20) + 1;
	u32 gccr;

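	/* Keep the comparison value at least one increment away from both
	 * ends of the 32-bit range; 0 - gti_ns_plus_1 relies on unsigned
	 * wrap-around and equals 2^32 - gti_ns_plus_1.
	 */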
	if (ns < gti_ns_plus_1)
		ns = gti_ns_plus_1;
	else if (ns > 0 - gti_ns_plus_1)
		ns = 0 - gti_ns_plus_1;

	gccr = ravb_read(ndev, GCCR);
	if (gccr & GCCR_LPTC)
		return -EBUSY;
	ravb_write(ndev, ns, GPTC);
	ravb_write(ndev, gccr | GCCR_LPTC, GCCR);

	return 0;
}

/* PTP clock operations */
static int ravb_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
{
	struct ravb_private *priv = container_of(ptp, struct ravb_private,
						 ptp.info);
	struct net_device *ndev = priv->ndev;
	unsigned long flags;
	u32 addend;
	u32 gccr;

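	/* adjust_by_scaled_ppm() scales the nominal increment by the
	 * requested frequency offset (scaled_ppm is ppm with a 16-bit binary
	 * fractional part); the result is written to GTI and latched with
	 * GCCR.LTI below. The increment appears to be a fixed-point value
	 * with 20 fractional bits, judging by the >> 20 in
	 * ravb_ptp_update_compare().
	 */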
	addend = (u32)adjust_by_scaled_ppm(priv->ptp.default_addend,
					   scaled_ppm);

	spin_lock_irqsave(&priv->lock, flags);

	priv->ptp.current_addend = addend;

	gccr = ravb_read(ndev, GCCR);
	if (gccr & GCCR_LTI) {
		spin_unlock_irqrestore(&priv->lock, flags);
		return -EBUSY;
	}
	ravb_write(ndev, addend & GTI_TIV, GTI);
	ravb_write(ndev, gccr | GCCR_LTI, GCCR);

	spin_unlock_irqrestore(&priv->lock, flags);

	return 0;
}

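/* Shift the timer by @delta nanoseconds: capture the current time, add the
 * delta and write the result back, all under a single hold of the device
 * lock.
 */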
static int ravb_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
	struct ravb_private *priv = container_of(ptp, struct ravb_private,
						 ptp.info);
	struct timespec64 ts;
	unsigned long flags;
	int error;

	spin_lock_irqsave(&priv->lock, flags);
	error = ravb_ptp_time_read(priv, &ts);
	if (!error) {
		u64 now = ktime_to_ns(timespec64_to_ktime(ts));

		ts = ns_to_timespec64(now + delta);
		error = ravb_ptp_time_write(priv, &ts);
	}
	spin_unlock_irqrestore(&priv->lock, flags);

	return error;
}

static int ravb_ptp_gettime64(struct ptp_clock_info *ptp, struct timespec64 *ts)
{
	struct ravb_private *priv = container_of(ptp, struct ravb_private,
						 ptp.info);
	unsigned long flags;
	int error;

	spin_lock_irqsave(&priv->lock, flags);
	error = ravb_ptp_time_read(priv, ts);
	spin_unlock_irqrestore(&priv->lock, flags);

	return error;
}

static int ravb_ptp_settime64(struct ptp_clock_info *ptp,
			      const struct timespec64 *ts)
{
	struct ravb_private *priv = container_of(ptp, struct ravb_private,
						 ptp.info);
	unsigned long flags;
	int error;

	spin_lock_irqsave(&priv->lock, flags);
	error = ravb_ptp_time_write(priv, ts);
	spin_unlock_irqrestore(&priv->lock, flags);

	return error;
}

static int ravb_ptp_extts(struct ptp_clock_info *ptp,
			  struct ptp_extts_request *req, int on)
{
	struct ravb_private *priv = container_of(ptp, struct ravb_private,
						 ptp.info);
	const struct ravb_hw_info *info = priv->info;
	struct net_device *ndev = priv->ndev;
	unsigned long flags;

	/* Reject requests with unsupported flags */
	if (req->flags & ~(PTP_ENABLE_FEATURE |
			   PTP_RISING_EDGE |
			   PTP_FALLING_EDGE |
			   PTP_STRICT_FLAGS))
		return -EOPNOTSUPP;

	if (req->index)
		return -EINVAL;

	if (priv->ptp.extts[req->index] == on)
		return 0;
	priv->ptp.extts[req->index] = on;

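	/* Enable or disable the timer capture interrupt: either by modifying
	 * the GIC.PTCE bit or, on SoCs with the irq_en_dis flag, by writing
	 * the dedicated interrupt enable (GIE) / disable (GID) registers.
	 */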
	spin_lock_irqsave(&priv->lock, flags);
	if (!info->irq_en_dis)
		ravb_modify(ndev, GIC, GIC_PTCE, on ? GIC_PTCE : 0);
	else if (on)
		ravb_write(ndev, GIE_PTCS, GIE);
	else
		ravb_write(ndev, GID_PTCD, GID);
	spin_unlock_irqrestore(&priv->lock, flags);

	return 0;
}

static int ravb_ptp_perout(struct ptp_clock_info *ptp,
			   struct ptp_perout_request *req, int on)
{
	struct ravb_private *priv = container_of(ptp, struct ravb_private,
						 ptp.info);
	const struct ravb_hw_info *info = priv->info;
	struct net_device *ndev = priv->ndev;
	struct ravb_ptp_perout *perout;
	unsigned long flags;
	int error = 0;

	/* Reject requests with unsupported flags */
	if (req->flags)
		return -EOPNOTSUPP;

	if (req->index)
		return -EINVAL;

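	/* The compare value written by ravb_ptp_update_compare() is only
	 * 32 bits wide (hence the U32_MAX checks below); the pulse train is
	 * kept going from the interrupt handler, which advances the target
	 * by one period on every match.
	 */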
	if (on) {
		u64 start_ns;
		u64 period_ns;

		start_ns = req->start.sec * NSEC_PER_SEC + req->start.nsec;
		period_ns = req->period.sec * NSEC_PER_SEC + req->period.nsec;

		if (start_ns > U32_MAX) {
			netdev_warn(ndev,
				    "ptp: start value (nsec) is over limit. Maximum size of start is only 32 bits\n");
			return -ERANGE;
		}

		if (period_ns > U32_MAX) {
			netdev_warn(ndev,
				    "ptp: period value (nsec) is over limit. Maximum size of period is only 32 bits\n");
			return -ERANGE;
		}

		spin_lock_irqsave(&priv->lock, flags);

		perout = &priv->ptp.perout[req->index];
		perout->target = (u32)start_ns;
		perout->period = (u32)period_ns;
		error = ravb_ptp_update_compare(priv, (u32)start_ns);
		if (!error) {
			/* Unmask interrupt */
			if (!info->irq_en_dis)
				ravb_modify(ndev, GIC, GIC_PTME, GIC_PTME);
			else
				ravb_write(ndev, GIE_PTMS0, GIE);
		}
	} else {
		spin_lock_irqsave(&priv->lock, flags);

		perout = &priv->ptp.perout[req->index];
		perout->period = 0;

		/* Mask interrupt */
		if (!info->irq_en_dis)
			ravb_modify(ndev, GIC, GIC_PTME, 0);
		else
			ravb_write(ndev, GID_PTMD0, GID);
	}
	spin_unlock_irqrestore(&priv->lock, flags);

	return error;
}

static int ravb_ptp_enable(struct ptp_clock_info *ptp,
			   struct ptp_clock_request *req, int on)
{
	switch (req->type) {
	case PTP_CLK_REQ_EXTTS:
		return ravb_ptp_extts(ptp, &req->extts, on);
	case PTP_CLK_REQ_PEROUT:
		return ravb_ptp_perout(ptp, &req->perout, on);
	default:
		return -EOPNOTSUPP;
	}
}

static const struct ptp_clock_info ravb_ptp_info = {
	.owner		= THIS_MODULE,
	.name		= "ravb clock",
	.max_adj	= 50000000,
	.n_ext_ts	= N_EXT_TS,
	.n_per_out	= N_PER_OUT,
	.adjfine	= ravb_ptp_adjfine,
	.adjtime	= ravb_ptp_adjtime,
	.gettime64	= ravb_ptp_gettime64,
	.settime64	= ravb_ptp_settime64,
	.enable		= ravb_ptp_enable,
};

/* Caller must hold the lock */
void ravb_ptp_interrupt(struct net_device *ndev)
{
	struct ravb_private *priv = netdev_priv(ndev);
	u32 gis = ravb_read(ndev, GIS);

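	/* Only act on status bits whose interrupt source is enabled. */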
	gis &= ravb_read(ndev, GIC);
	if (gis & GIS_PTCF) {
		struct ptp_clock_event event;

		event.type = PTP_CLOCK_EXTTS;
		event.index = 0;
		event.timestamp = ravb_read(ndev, GCPT);
		ptp_clock_event(priv->ptp.clock, &event);
	}
	if (gis & GIS_PTMF) {
		struct ravb_ptp_perout *perout = priv->ptp.perout;

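		/* Periodic output match: re-arm the next pulse by moving the
		 * compare target forward by one period.
		 */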
		if (perout->period) {
			perout->target += perout->period;
			ravb_ptp_update_compare(priv, perout->target);
		}
	}

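	/* Acknowledge the sources handled above; status bits appear to be
	 * cleared by writing 0 to them.
	 */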
	ravb_write(ndev, ~(gis | GIS_RESERVED), GIS);
}

void ravb_ptp_init(struct net_device *ndev, struct platform_device *pdev)
{
	struct ravb_private *priv = netdev_priv(ndev);
	unsigned long flags;

	priv->ptp.info = ravb_ptp_info;

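	/* Use the increment already programmed into GTI during hardware
	 * setup as the nominal (zero frequency offset) addend.
	 */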
	priv->ptp.default_addend = ravb_read(ndev, GTI);
	priv->ptp.current_addend = priv->ptp.default_addend;

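	/* Once no timer control request is pending, select the adjustable
	 * gPTP timer as the clock source (TCSS field of GCCR).
	 */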
	spin_lock_irqsave(&priv->lock, flags);
	ravb_wait(ndev, GCCR, GCCR_TCR, GCCR_TCR_NOREQ);
	ravb_modify(ndev, GCCR, GCCR_TCSS, GCCR_TCSS_ADJGPTP);
	spin_unlock_irqrestore(&priv->lock, flags);

	priv->ptp.clock = ptp_clock_register(&priv->ptp.info, &pdev->dev);
}

void ravb_ptp_stop(struct net_device *ndev)
{
	struct ravb_private *priv = netdev_priv(ndev);

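	/* Disable all gPTP interrupt sources and clear any pending status
	 * before unregistering the clock.
	 */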
	ravb_write(ndev, 0, GIC);
	ravb_write(ndev, 0, GIS);

	ptp_clock_unregister(priv->ptp.clock);
}