Release 4.15 drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c
  
  
  
/*
 * Copyright (c) 2014, The Linux Foundation. All rights reserved.
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include "mdp5_kms.h"
#include "mdp5_smp.h"
/*
 * Driver-private state for the MDP5 Shared Memory Pool (SMP): the fixed
 * per-client MMB reservations, the pool geometry, and a software cache
 * of the allocation/watermark registers that is flushed to hardware at
 * commit time (see write_smp_alloc_regs()/write_smp_fifo_regs()).
 */
struct mdp5_smp {
	struct drm_device *dev;

	uint8_t reserved[MAX_CLIENTS]; /* fixed MMBs allocation per client */

	int blk_cnt;	/* total number of MMBs in the pool */
	int blk_size;	/* size of one MMB in bytes (cfg->mmb_size) */

	/* register cache */
	/* one ALLOC reg covers 3 MMB client fields; 22 regs assumed
	 * sufficient for all supported block counts -- TODO confirm
	 */
	u32 alloc_w[22];
	u32 alloc_r[22];

	/* per-pipe REQPRIO FIFO watermark cache (levels 0/1/2): */
	u32 pipe_reqprio_fifo_wm0[SSPP_MAX];
	u32 pipe_reqprio_fifo_wm1[SSPP_MAX];
	u32 pipe_reqprio_fifo_wm2[SSPP_MAX];
};
/* Look up the mdp5_kms instance that owns this SMP block. */
static inline
struct mdp5_kms *get_kms(struct mdp5_smp *smp)
{
	struct msm_drm_private *priv;

	priv = smp->dev->dev_private;

	return to_mdp5_kms(to_mdp_kms(priv->kms));
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp | 
| Stephane Viau | 36 | 100.00% | 1 | 100.00% | 
| Total | 36 | 100.00% | 1 | 100.00% | 
/*
 * Map a (pipe, plane-component) pair to its SMP client ID.
 *
 * For ViG pipes the fetch clients for the Y/Cr/Cb components are always
 * consecutive, in that order, starting from the base client ID in
 * mdp5_cfg->smp.clients[pipe]:
 *
 *   Y  -> base, Cr -> base + 1, Cb -> base + 2
 */
static inline u32 pipe2client(enum mdp5_pipe pipe, int plane)
{
#define CID_UNUSED	0
	u32 base = mdp5_cfg->smp.clients[pipe];

	/* reject component indices beyond what this pipe actually fetches: */
	if (WARN_ON(plane >= pipe2nclients(pipe)))
		return CID_UNUSED;

	return base + plane;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp | 
| Stephane Viau | 43 | 93.48% | 2 | 66.67% | 
| Rob Clark | 3 | 6.52% | 1 | 33.33% | 
| Total | 46 | 100.00% | 3 | 100.00% | 
/* allocate blocks for the specified request: */
static int smp_request_block(struct mdp5_smp *smp,
		struct mdp5_smp_state *state,
		u32 cid, int nblks)
{
	void *cs = state->client_state[cid];
	int cnt = smp->blk_cnt;
	uint8_t fixed = smp->reserved[cid];
	int avail, i;

	/* we shouldn't be requesting blocks for an in-use client: */
	WARN_ON(bitmap_weight(cs, cnt) > 0);

	/* statically reserved MMBs are already paid for, don't count them: */
	if (fixed) {
		nblks = max(0, nblks - fixed);
		DBG("%d MMBs allocated (%d reserved)", nblks, fixed);
	}

	avail = cnt - bitmap_weight(state->state, cnt);
	if (nblks > avail) {
		dev_err(smp->dev->dev, "out of blks (req=%d > avail=%d)\n",
				nblks, avail);
		return -ENOSPC;
	}

	/* grab the lowest free MMBs, marking each in both the client's
	 * bitmap and the global pool bitmap:
	 */
	for (i = 0; i < nblks; i++) {
		int blk = find_first_zero_bit(state->state, cnt);

		set_bit(blk, cs);
		set_bit(blk, state->state);
	}

	return 0;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp | 
| Rob Clark | 108 | 59.67% | 2 | 22.22% | 
| Stephane Viau | 67 | 37.02% | 5 | 55.56% | 
| Wentao Xu | 6 | 3.31% | 2 | 22.22% | 
| Total | 181 | 100.00% | 9 | 100.00% | 
/*
 * Update the cached REQPRIO FIFO watermarks for @pipe.  Watermarks are
 * expressed in SMP entries (one entry is 128 bits) and are placed at
 * 1/4, 2/4 and 3/4 of the pipe's allocated fetch capacity.
 */
static void set_fifo_thresholds(struct mdp5_smp *smp,
		enum mdp5_pipe pipe, int nblks)
{
	u32 entries_per_blk = smp->blk_size / (128 / BITS_PER_BYTE);
	u32 quarter;

	/* 1/4 of SMP pool that is being fetched */
	quarter = (nblks * entries_per_blk) / 4;

	smp->pipe_reqprio_fifo_wm0[pipe] = quarter;
	smp->pipe_reqprio_fifo_wm1[pipe] = quarter * 2;
	smp->pipe_reqprio_fifo_wm2[pipe] = quarter * 3;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp | 
| Stephane Viau | 60 | 76.92% | 1 | 50.00% | 
| Archit Taneja | 18 | 23.08% | 1 | 50.00% | 
| Total | 78 | 100.00% | 2 | 100.00% | 
/*
 * NOTE: looks like if horizontal decimation is used (if we supported that)
 * then the width used to calculate SMP block requirements is the post-
 * decimated width.  Ie. SMP buffering sits downstream of decimation (which
 * presumably happens during the dma from scanout buffer).
 */
/*
 * Compute how many SMP blocks each plane of @format needs to fetch
 * @width pixels.  The result is packed one byte per plane (plane i's
 * count in bits [8*i, 8*i+7]) and is consumed by mdp5_smp_assign().
 */
uint32_t mdp5_smp_calculate(struct mdp5_smp *smp,
		const struct mdp_format *format,
		u32 width, bool hdecim)
{
	struct mdp5_kms *mdp5_kms = get_kms(smp);
	int rev = mdp5_cfg_get_hw_rev(mdp5_kms->cfg);
	int i, hsub, nplanes, nlines;
	u32 fmt = format->base.pixel_format;
	uint32_t blkcfg = 0;

	nplanes = drm_format_num_planes(fmt);
	hsub = drm_format_horz_chroma_subsampling(fmt);

	/* different if BWC (compressed framebuffer?) enabled: */
	nlines = 2;

	/* Newer MDPs have split/packing logic, which fetches sub-sampled
	 * U and V components (splits them from Y if necessary) and packs
	 * them together, writes to SMP using a single client.
	 */
	if ((rev > 0) && (format->chroma_sample > CHROMA_FULL)) {
		/* treat chroma as one interleaved NV24-style plane; note
		 * this also changes the cpp lookup in the loop below:
		 */
		fmt = DRM_FORMAT_NV24;
		nplanes = 2;

		/* if decimation is enabled, HW decimates less on the
		 * sub sampled chroma components
		 */
		if (hdecim && (hsub > 1))
			hsub = 1;
	}

	for (i = 0; i < nplanes; i++) {
		int n, fetch_stride, cpp;

		cpp = drm_format_plane_cpp(fmt, i);
		/* chroma planes (i > 0) fetch fewer bytes per line when
		 * horizontally sub-sampled:
		 */
		fetch_stride = width * cpp / (i ? hsub : 1);

		/* blocks needed to buffer nlines worth of fetched data: */
		n = DIV_ROUND_UP(fetch_stride * nlines, smp->blk_size);

		/* for hw rev v1.00 */
		if (rev == 0)
			n = roundup_pow_of_two(n);

		blkcfg |= (n << (8 * i));
	}

	return blkcfg;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp | 
| Stephane Viau | 133 | 62.44% | 2 | 40.00% | 
| Wentao Xu | 59 | 27.70% | 1 | 20.00% | 
| Rob Clark | 21 | 9.86% | 2 | 40.00% | 
| Total | 213 | 100.00% | 5 | 100.00% | 
/*
 * Reserve SMP blocks for every client (plane component) of @pipe, as
 * encoded in @blkcfg: one byte per plane, as produced by
 * mdp5_smp_calculate().  On success the pipe is flagged in
 * state->assigned so mdp5_smp_prepare_commit() will program it.
 *
 * Return: 0 on success, negative errno (-ENOSPC from smp_request_block())
 * if the pool cannot satisfy the request.
 */
int mdp5_smp_assign(struct mdp5_smp *smp, struct mdp5_smp_state *state,
		enum mdp5_pipe pipe, uint32_t blkcfg)
{
	struct mdp5_kms *mdp5_kms = get_kms(smp);
	struct drm_device *dev = mdp5_kms->dev;
	int i, ret;

	/* NOTE: blkcfg must be shifted once per iteration, unconditionally:
	 * previously the shift sat after a 'continue' taken when a plane
	 * needed zero blocks, so later planes re-read the same stale byte
	 * and were silently skipped.
	 */
	for (i = 0; i < pipe2nclients(pipe); i++, blkcfg >>= 8) {
		u32 cid = pipe2client(pipe, i);
		int n = blkcfg & 0xff;

		if (!n)
			continue;

		DBG("%s[%d]: request %d SMP blocks", pipe2name(pipe), i, n);
		ret = smp_request_block(smp, state, cid, n);
		if (ret) {
			dev_err(dev->dev, "Cannot allocate %d SMP blocks: %d\n",
					n, ret);
			return ret;
		}
	}

	state->assigned |= (1 << pipe);

	return 0;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp | 
| Rob Clark | 90 | 58.82% | 3 | 60.00% | 
| Wentao Xu | 33 | 21.57% | 1 | 20.00% | 
| Stephane Viau | 30 | 19.61% | 1 | 20.00% | 
| Total | 153 | 100.00% | 5 | 100.00% | 
/* Release SMP blocks for all clients of the pipe */
void mdp5_smp_release(struct mdp5_smp *smp, struct mdp5_smp_state *state,
		enum mdp5_pipe pipe)
{
	int cnt = smp->blk_cnt;
	int i;

	for (i = 0; i < pipe2nclients(pipe); i++) {
		void *cs = state->client_state[pipe2client(pipe, i)];

		/* return this client's blocks to the global pool... */
		bitmap_andnot(state->state, state->state, cs, cnt);
		/* ...and forget them on the client side as well: */
		bitmap_zero(cs, cnt);
	}

	state->released |= (1 << pipe);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp | 
| Rob Clark | 83 | 81.37% | 2 | 50.00% | 
| Wentao Xu | 18 | 17.65% | 1 | 25.00% | 
| Stephane Viau | 1 | 0.98% | 1 | 25.00% | 
| Total | 102 | 100.00% | 4 | 100.00% | 
/* NOTE: SMP_ALLOC_* regs are *not* double buffered, so release has to
 * happen after scanout completes.
 */
/*
 * Write client ID @cid into the cached ALLOC_W (and mirrored ALLOC_R)
 * register fields for every MMB set in @assigned.  Each 32-bit ALLOC
 * register packs three 8-bit client fields, so block @blk maps to
 * register index blk/3, field blk%3.
 *
 * Returns the number of blocks touched; the caller accumulates this for
 * the FIFO watermark calculation.
 */
static unsigned update_smp_state(struct mdp5_smp *smp,
		u32 cid, mdp5_smp_state_t *assigned)
{
	int cnt = smp->blk_cnt;
	unsigned nblks = 0;
	u32 blk, val;

	for_each_set_bit(blk, *assigned, cnt) {
		int idx = blk / 3;
		int fld = blk % 3;

		val = smp->alloc_w[idx];

		/* replace only this block's 8-bit client field: */
		switch (fld) {
		case 0:
			val &= ~MDP5_SMP_ALLOC_W_REG_CLIENT0__MASK;
			val |= MDP5_SMP_ALLOC_W_REG_CLIENT0(cid);
			break;
		case 1:
			val &= ~MDP5_SMP_ALLOC_W_REG_CLIENT1__MASK;
			val |= MDP5_SMP_ALLOC_W_REG_CLIENT1(cid);
			break;
		case 2:
			val &= ~MDP5_SMP_ALLOC_W_REG_CLIENT2__MASK;
			val |= MDP5_SMP_ALLOC_W_REG_CLIENT2(cid);
			break;
		}

		/* keep the read- and write-client banks in sync: */
		smp->alloc_w[idx] = val;
		smp->alloc_r[idx] = val;

		nblks++;
	}

	return nblks;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp | 
| Rob Clark | 103 | 70.55% | 2 | 33.33% | 
| Archit Taneja | 37 | 25.34% | 2 | 33.33% | 
| Stephane Viau | 6 | 4.11% | 2 | 33.33% | 
| Total | 146 | 100.00% | 6 | 100.00% | 
/* Flush the cached SMP allocation state (W and R banks) to hardware. */
static void write_smp_alloc_regs(struct mdp5_smp *smp)
{
	struct mdp5_kms *mdp5_kms = get_kms(smp);
	/* each ALLOC register holds three 8-bit client fields: */
	int num_regs = smp->blk_cnt / 3 + 1;
	int i;

	for (i = 0; i < num_regs; i++) {
		mdp5_write(mdp5_kms, REG_MDP5_SMP_ALLOC_W_REG(i),
			   smp->alloc_w[i]);
		mdp5_write(mdp5_kms, REG_MDP5_SMP_ALLOC_R_REG(i),
			   smp->alloc_r[i]);
	}
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp | 
| Archit Taneja | 67 | 78.82% | 2 | 66.67% | 
| Rob Clark | 18 | 21.18% | 1 | 33.33% | 
| Total | 85 | 100.00% | 3 | 100.00% | 
/* Push the cached per-pipe REQPRIO FIFO watermarks to hardware for every
 * hw pipe known to the kms instance.
 */
static void write_smp_fifo_regs(struct mdp5_smp *smp)
{
	struct mdp5_kms *mdp5_kms = get_kms(smp);
	int i;

	for (i = 0; i < mdp5_kms->num_hwpipes; i++) {
		enum mdp5_pipe pipe = mdp5_kms->hwpipes[i]->pipe;

		mdp5_write(mdp5_kms, REG_MDP5_PIPE_REQPRIO_FIFO_WM_0(pipe),
			   smp->pipe_reqprio_fifo_wm0[pipe]);
		mdp5_write(mdp5_kms, REG_MDP5_PIPE_REQPRIO_FIFO_WM_1(pipe),
			   smp->pipe_reqprio_fifo_wm1[pipe]);
		mdp5_write(mdp5_kms, REG_MDP5_PIPE_REQPRIO_FIFO_WM_2(pipe),
			   smp->pipe_reqprio_fifo_wm2[pipe]);
	}
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp | 
| Archit Taneja | 108 | 96.43% | 1 | 33.33% | 
| Rob Clark | 4 | 3.57% | 2 | 66.67% | 
| Total | 112 | 100.00% | 3 | 100.00% | 
/*
 * Program the register cache (client allocations + FIFO watermarks) for
 * every pipe flagged in state->assigned, then flush the cache to
 * hardware.  Clears the assigned mask when done.
 */
void mdp5_smp_prepare_commit(struct mdp5_smp *smp, struct mdp5_smp_state *state)
{
	enum mdp5_pipe pipe;

	for_each_set_bit(pipe, &state->assigned, sizeof(state->assigned) * 8) {
		unsigned i, nblks = 0;

		/* accumulate block count across all of the pipe's clients: */
		for (i = 0; i < pipe2nclients(pipe); i++) {
			u32 cid = pipe2client(pipe, i);
			void *cs = state->client_state[cid];

			nblks += update_smp_state(smp, cid, cs);

			DBG("assign %s:%u, %u blks",
				pipe2name(pipe), i, nblks);
		}

		/* watermarks are derived from the pipe's total allocation: */
		set_fifo_thresholds(smp, pipe, nblks);
	}

	write_smp_alloc_regs(smp);
	write_smp_fifo_regs(smp);

	state->assigned = 0;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp | 
| Rob Clark | 70 | 55.56% | 3 | 42.86% | 
| Stephane Viau | 41 | 32.54% | 2 | 28.57% | 
| Archit Taneja | 10 | 7.94% | 1 | 14.29% | 
| Wentao Xu | 5 | 3.97% | 1 | 14.29% | 
| Total | 126 | 100.00% | 7 | 100.00% | 
/*
 * Post-commit cleanup: zero the FIFO watermarks of every pipe whose
 * blocks were released this cycle, flush them to hardware, and clear
 * the released mask.
 */
void mdp5_smp_complete_commit(struct mdp5_smp *smp, struct mdp5_smp_state *state)
{
	enum mdp5_pipe pipe;

	for_each_set_bit(pipe, &state->released, sizeof(state->released) * 8) {
		DBG("release %s", pipe2name(pipe));

		/* no blocks allocated anymore -> watermarks go to zero: */
		set_fifo_thresholds(smp, pipe, 0);
	}

	write_smp_fifo_regs(smp);

	state->released = 0;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp | 
| Rob Clark | 52 | 86.67% | 3 | 60.00% | 
| Archit Taneja | 5 | 8.33% | 1 | 20.00% | 
| Stephane Viau | 3 | 5.00% | 1 | 20.00% | 
| Total | 60 | 100.00% | 5 | 100.00% | 
/*
 * Dump SMP allocation state (for debugfs): per-client in-use block
 * counts with the owning plane's name, followed by pool totals.
 */
void mdp5_smp_dump(struct mdp5_smp *smp, struct drm_printer *p)
{
	struct mdp5_kms *mdp5_kms = get_kms(smp);
	struct mdp5_hw_pipe_state *hwpstate;
	struct mdp5_smp_state *state;
	int total = 0, i, j;

	drm_printf(p, "name\tinuse\tplane\n");
	drm_printf(p, "----\t-----\t-----\n");

	if (drm_can_sleep())
		drm_modeset_lock(&mdp5_kms->state_lock, NULL);

	/* grab these *after* we hold the state_lock */
	hwpstate = &mdp5_kms->state->hwpipe;
	state = &mdp5_kms->state->smp;

	for (i = 0; i < mdp5_kms->num_hwpipes; i++) {
		struct mdp5_hw_pipe *hwpipe = mdp5_kms->hwpipes[i];
		struct drm_plane *plane = hwpstate->hwpipe_to_plane[hwpipe->idx];
		enum mdp5_pipe pipe = hwpipe->pipe;
		for (j = 0; j < pipe2nclients(pipe); j++) {
			u32 cid = pipe2client(pipe, j);
			void *cs = state->client_state[cid];
			int inuse = bitmap_weight(cs, smp->blk_cnt);

			/* don't hand a NULL pointer to %s: use an explicit
			 * placeholder (same text printk would emit) rather
			 * than relying on vsnprintf's NULL fallback.
			 */
			drm_printf(p, "%s:%d\t%d\t%s\n",
				pipe2name(pipe), j, inuse,
				plane ? plane->name : "(null)");

			total += inuse;
		}
	}

	drm_printf(p, "TOTAL:\t%d\t(of %d)\n", total, smp->blk_cnt);
	drm_printf(p, "AVAIL:\t%d\n", smp->blk_cnt -
			bitmap_weight(state->state, smp->blk_cnt));

	if (drm_can_sleep())
		drm_modeset_unlock(&mdp5_kms->state_lock);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp | 
| Rob Clark | 270 | 100.00% | 2 | 100.00% | 
| Total | 270 | 100.00% | 2 | 100.00% | 
/* Free an mdp5_smp instance allocated by mdp5_smp_init(). */
void mdp5_smp_destroy(struct mdp5_smp *smp)
{
	kfree(smp);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp | 
| Stephane Viau | 13 | 86.67% | 1 | 50.00% | 
| Rob Clark | 2 | 13.33% | 1 | 50.00% | 
| Total | 15 | 100.00% | 2 | 100.00% | 
/*
 * Allocate and initialize the SMP bookkeeping for @mdp5_kms from the
 * per-SoC configuration @cfg, pre-marking statically reserved MMBs as
 * in-use in the global state.
 *
 * Return: the new instance, or ERR_PTR(-ENOMEM) on allocation failure.
 * Free with mdp5_smp_destroy().
 */
struct mdp5_smp *mdp5_smp_init(struct mdp5_kms *mdp5_kms, const struct mdp5_smp_block *cfg)
{
	struct mdp5_smp_state *state = &mdp5_kms->state->smp;
	struct mdp5_smp *smp;

	smp = kzalloc(sizeof(*smp), GFP_KERNEL);
	if (unlikely(!smp))
		return ERR_PTR(-ENOMEM);

	smp->dev = mdp5_kms->dev;
	smp->blk_cnt = cfg->mmb_count;
	smp->blk_size = cfg->mmb_size;

	/* statically tied MMBs cannot be re-allocated: */
	bitmap_copy(state->state, cfg->reserved_state, smp->blk_cnt);
	memcpy(smp->reserved, cfg->reserved, sizeof(smp->reserved));

	return smp;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp | 
| Stephane Viau | 130 | 87.25% | 2 | 50.00% | 
| Rob Clark | 19 | 12.75% | 2 | 50.00% | 
| Total | 149 | 100.00% | 4 | 100.00% | 
Overall Contributors
| Person | Tokens | Prop | Commits | CommitProp | 
| Rob Clark | 852 | 46.38% | 5 | 35.71% | 
| Stephane Viau | 587 | 31.95% | 5 | 35.71% | 
| Archit Taneja | 276 | 15.02% | 2 | 14.29% | 
| Wentao Xu | 122 | 6.64% | 2 | 14.29% | 
| Total | 1837 | 100.00% | 14 | 100.00% | 
Information contained on this website is for historical information purposes only and does not indicate or represent copyright ownership.