Release 4.15 drivers/gpu/drm/msm/adreno/adreno_gpu.c
  
  
  
/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * Copyright (c) 2014 The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include "adreno_gpu.h"
#include "msm_gem.h"
#include "msm_mmu.h"
/*
 * adreno_get_param() - handle MSM_PARAM_* queries from userspace
 * @gpu: the GPU being queried
 * @param: one of the MSM_PARAM_* tokens from the msm UABI
 * @value: out-parameter, filled in on success
 *
 * Returns 0 on success or -EINVAL for unknown/unsupported params.
 */
int adreno_get_param(struct msm_gpu *gpu, uint32_t param, uint64_t *value)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	switch (param) {
	case MSM_PARAM_GPU_ID:
		*value = adreno_gpu->info->revn;
		return 0;
	case MSM_PARAM_GMEM_SIZE:
		*value = adreno_gpu->gmem;
		return 0;
	case MSM_PARAM_GMEM_BASE:
		/* fixed GMEM base reported for all targets handled here */
		*value = 0x100000;
		return 0;
	case MSM_PARAM_CHIP_ID:
		/* pack the revision as core.major.minor.patchid, one byte each */
		*value = adreno_gpu->rev.patchid |
				(adreno_gpu->rev.minor << 8) |
				(adreno_gpu->rev.major << 16) |
				(adreno_gpu->rev.core << 24);
		return 0;
	case MSM_PARAM_MAX_FREQ:
		*value = adreno_gpu->base.fast_rate;
		return 0;
	case MSM_PARAM_TIMESTAMP:
		/* reading the timestamp touches hw, so hold a runtime-PM ref */
		if (adreno_gpu->funcs->get_timestamp) {
			int ret;
			pm_runtime_get_sync(&gpu->pdev->dev);
			ret = adreno_gpu->funcs->get_timestamp(gpu, value);
			pm_runtime_put_autosuspend(&gpu->pdev->dev);
			return ret;
		}
		return -EINVAL;
	case MSM_PARAM_NR_RINGS:
		*value = gpu->nr_rings;
		return 0;
	default:
		DBG("%s: invalid param: %u", gpu->name, param);
		return -EINVAL;
	}
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp | 
| Rob Clark | 163 | 75.12% | 4 | 57.14% | 
| Archit Taneja | 30 | 13.82% | 1 | 14.29% | 
| Jordan Crouse | 24 | 11.06% | 2 | 28.57% | 
| Total | 217 | 100.00% | 7 | 100.00% | 
const struct firmware *
adreno_request_fw(struct adreno_gpu *adreno_gpu, const char *fwname)
{
	struct drm_device *drm = adreno_gpu->base.dev;
	const struct firmware *fw = NULL;
	char newname[strlen("qcom/") + strlen(fwname) + 1];
	int ret;
	sprintf(newname, "qcom/%s", fwname);
	/*
         * Try first to load from qcom/$fwfile using a direct load (to avoid
         * a potential timeout waiting for usermode helper)
         */
	if ((adreno_gpu->fwloc == FW_LOCATION_UNKNOWN) ||
	    (adreno_gpu->fwloc == FW_LOCATION_NEW)) {
		ret = request_firmware_direct(&fw, newname, drm->dev);
		if (!ret) {
			dev_info(drm->dev, "loaded %s from new location\n",
				newname);
			adreno_gpu->fwloc = FW_LOCATION_NEW;
			return fw;
		} else if (adreno_gpu->fwloc != FW_LOCATION_UNKNOWN) {
			dev_err(drm->dev, "failed to load %s: %d\n",
				newname, ret);
			return ERR_PTR(ret);
		}
	}
	/*
         * Then try the legacy location without qcom/ prefix
         */
	if ((adreno_gpu->fwloc == FW_LOCATION_UNKNOWN) ||
	    (adreno_gpu->fwloc == FW_LOCATION_LEGACY)) {
		ret = request_firmware_direct(&fw, fwname, drm->dev);
		if (!ret) {
			dev_info(drm->dev, "loaded %s from legacy location\n",
				newname);
			adreno_gpu->fwloc = FW_LOCATION_LEGACY;
			return fw;
		} else if (adreno_gpu->fwloc != FW_LOCATION_UNKNOWN) {
			dev_err(drm->dev, "failed to load %s: %d\n",
				fwname, ret);
			return ERR_PTR(ret);
		}
	}
	/*
         * Finally fall back to request_firmware() for cases where the
         * usermode helper is needed (I think mainly android)
         */
	if ((adreno_gpu->fwloc == FW_LOCATION_UNKNOWN) ||
	    (adreno_gpu->fwloc == FW_LOCATION_HELPER)) {
		ret = request_firmware(&fw, newname, drm->dev);
		if (!ret) {
			dev_info(drm->dev, "loaded %s with helper\n",
				newname);
			adreno_gpu->fwloc = FW_LOCATION_HELPER;
			return fw;
		} else if (adreno_gpu->fwloc != FW_LOCATION_UNKNOWN) {
			dev_err(drm->dev, "failed to load %s: %d\n",
				newname, ret);
			return ERR_PTR(ret);
		}
	}
	dev_err(drm->dev, "failed to load %s\n", fwname);
	return ERR_PTR(-ENOENT);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp | 
| Rob Clark | 359 | 100.00% | 3 | 100.00% | 
| Total | 359 | 100.00% | 3 | 100.00% | 
/*
 * Fetch the PM4 and PFP microcode images for this target, unless a
 * previous init already loaded them.  Returns 0 or a negative errno.
 */
static int adreno_load_fw(struct adreno_gpu *adreno_gpu)
{
	const struct firmware *pm4_fw, *pfp_fw;

	/* nothing to do if a prior init already populated these */
	if (adreno_gpu->pm4)
		return 0;

	pm4_fw = adreno_request_fw(adreno_gpu, adreno_gpu->info->pm4fw);
	if (IS_ERR(pm4_fw))
		return PTR_ERR(pm4_fw);

	pfp_fw = adreno_request_fw(adreno_gpu, adreno_gpu->info->pfpfw);
	if (IS_ERR(pfp_fw)) {
		/* don't keep a half-loaded pair around */
		release_firmware(pm4_fw);
		return PTR_ERR(pfp_fw);
	}

	adreno_gpu->pm4 = pm4_fw;
	adreno_gpu->pfp = pfp_fw;

	return 0;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp | 
| Rob Clark | 108 | 100.00% | 2 | 100.00% | 
| Total | 108 | 100.00% | 2 | 100.00% | 
/*
 * adreno_hw_init() - common hw (re)initialization for adreno GPUs
 * @gpu: the GPU to initialize
 *
 * Loads microcode if needed, pins and resets every ringbuffer, then
 * programs the CP ringbuffer registers using ring 0.  Called from the
 * target-specific hw_init with the GPU powered.
 */
int adreno_hw_init(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	int ret, i;
	DBG("%s", gpu->name);
	ret = adreno_load_fw(adreno_gpu);
	if (ret)
		return ret;
	for (i = 0; i < gpu->nr_rings; i++) {
		struct msm_ringbuffer *ring = gpu->rb[i];
		if (!ring)
			continue;
		/* pin the ringbuffer BO into the GPU address space */
		ret = msm_gem_get_iova(ring->bo, gpu->aspace, &ring->iova);
		if (ret) {
			ring->iova = 0;
			dev_err(gpu->dev->dev,
				"could not map ringbuffer %d: %d\n", i, ret);
			return ret;
		}
		/* rewind write pointers back to the start of the buffer */
		ring->cur = ring->start;
		ring->next = ring->start;
		/* reset completed fence seqno: */
		ring->memptrs->fence = ring->seqno;
		ring->memptrs->rptr = 0;
	}
	/*
	 * Setup REG_CP_RB_CNTL.  The same value is used across targets (with
	 * the exception of A430 that disables the RPTR shadow) - the calculation
	 * for the ringbuffer size and block size is moved to msm_gpu.h for the
	 * pre-processor to deal with and the A430 variant is ORed in here
	 */
	adreno_gpu_write(adreno_gpu, REG_ADRENO_CP_RB_CNTL,
		MSM_GPU_RB_CNTL_DEFAULT |
		(adreno_is_a430(adreno_gpu) ? AXXX_CP_RB_CNTL_NO_UPDATE : 0));
	/* Setup ringbuffer address - use ringbuffer[0] for GPU init */
	adreno_gpu_write64(adreno_gpu, REG_ADRENO_CP_RB_BASE,
		REG_ADRENO_CP_RB_BASE_HI, gpu->rb[0]->iova);
	/* a430 has no usable RPTR shadow, so skip programming its address */
	if (!adreno_is_a430(adreno_gpu)) {
		adreno_gpu_write64(adreno_gpu, REG_ADRENO_CP_RB_RPTR_ADDR,
			REG_ADRENO_CP_RB_RPTR_ADDR_HI,
			rbmemptr(gpu->rb[0], rptr));
	}
	return 0;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp | 
| Rob Clark | 134 | 55.83% | 5 | 41.67% | 
| Jordan Crouse | 82 | 34.17% | 5 | 41.67% | 
| Craig Stout | 18 | 7.50% | 1 | 8.33% | 
| Aravind Ganesan | 6 | 2.50% | 1 | 8.33% | 
| Total | 240 | 100.00% | 12 | 100.00% | 
/* Use this helper to read rptr, since a430 doesn't update rptr in memory */
static uint32_t get_rptr(struct adreno_gpu *adreno_gpu,
		struct msm_ringbuffer *ring)
{
	/*
	 * On a430 the shadow in memory is stale; read the register
	 * directly and refresh the cached copy as a side effect.
	 */
	if (adreno_is_a430(adreno_gpu))
		ring->memptrs->rptr =
			adreno_gpu_read(adreno_gpu, REG_ADRENO_CP_RB_RPTR);

	return ring->memptrs->rptr;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp | 
| Craig Stout | 30 | 66.67% | 1 | 25.00% | 
| Jordan Crouse | 10 | 22.22% | 2 | 50.00% | 
| Rob Clark | 5 | 11.11% | 1 | 25.00% | 
| Total | 45 | 100.00% | 4 | 100.00% | 
/* Default active-ring hook: targets handled here only use ring 0. */
struct msm_ringbuffer *adreno_active_ring(struct msm_gpu *gpu)
{
	return gpu->rb[0];
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp | 
| Jordan Crouse | 20 | 100.00% | 1 | 100.00% | 
| Total | 20 | 100.00% | 1 | 100.00% | 
/* Power-cycle the GPU and re-run hw init to recover from a hang. */
void adreno_recover(struct msm_gpu *gpu)
{
	struct drm_device *dev = gpu->dev;
	int err;

	// XXX pm-runtime??  we *need* the device to be off after this
	// so maybe continuing to call ->pm_suspend/resume() is better?
	gpu->funcs->pm_suspend(gpu);
	gpu->funcs->pm_resume(gpu);

	err = msm_gpu_hw_init(gpu);
	if (err)
		dev_err(dev->dev, "gpu hw init failed: %d\n", err);
	/* hmm, oh well? - nothing more we can do if re-init fails */
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp | 
| Rob Clark | 67 | 100.00% | 2 | 100.00% | 
| Total | 67 | 100.00% | 2 | 100.00% | 
/*
 * adreno_submit() - write a submit's commands into its ringbuffer
 * @gpu: the GPU
 * @submit: the validated submission (cmd list, seqno, target ring)
 * @ctx: submitting file context, used to skip redundant ctx restores
 *
 * Emits the IBs, a fence write + CACHE_FLUSH_TS event, and a CP
 * interrupt, then flushes the ring so the CP sees the new wptr.
 */
void adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
		struct msm_file_private *ctx)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	struct msm_drm_private *priv = gpu->dev->dev_private;
	struct msm_ringbuffer *ring = submit->ring;
	unsigned i;
	for (i = 0; i < submit->nr_cmds; i++) {
		switch (submit->cmd[i].type) {
		case MSM_SUBMIT_CMD_IB_TARGET_BUF:
			/* ignore IB-targets */
			break;
		case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
			/* ignore if there has not been a ctx switch: */
			if (priv->lastctx == ctx)
				break;
			/* fall-thru */
		case MSM_SUBMIT_CMD_BUF:
			OUT_PKT3(ring, adreno_is_a430(adreno_gpu) ?
				CP_INDIRECT_BUFFER_PFE : CP_INDIRECT_BUFFER_PFD, 2);
			OUT_RING(ring, lower_32_bits(submit->cmd[i].iova));
			OUT_RING(ring, submit->cmd[i].size);
			OUT_PKT2(ring);
			break;
		}
	}
	/* stash the submit seqno in a scratch reg for debugging */
	OUT_PKT0(ring, REG_AXXX_CP_SCRATCH_REG2, 1);
	OUT_RING(ring, submit->seqno);
	if (adreno_is_a3xx(adreno_gpu) || adreno_is_a4xx(adreno_gpu)) {
		/* Flush HLSQ lazy updates to make sure there is nothing
                 * pending for indirect loads after the timestamp has
                 * passed:
                 */
		OUT_PKT3(ring, CP_EVENT_WRITE, 1);
		OUT_RING(ring, HLSQ_FLUSH);
		OUT_PKT3(ring, CP_WAIT_FOR_IDLE, 1);
		OUT_RING(ring, 0x00000000);
	}
	/* write the fence seqno to memory once the GPU reaches this point */
	OUT_PKT3(ring, CP_EVENT_WRITE, 3);
	OUT_RING(ring, CACHE_FLUSH_TS);
	OUT_RING(ring, rbmemptr(ring, fence));
	OUT_RING(ring, submit->seqno);
	/* we could maybe be clever and only CP_COND_EXEC the interrupt: */
	OUT_PKT3(ring, CP_INTERRUPT, 1);
	OUT_RING(ring, 0x80000000);
	/* Workaround for missing irq issue on 8x16/a306.  Unsure if the
         * root cause is a platform issue or some a306 quirk, but this
         * keeps things humming along:
         */
	if (adreno_is_a306(adreno_gpu)) {
		OUT_PKT3(ring, CP_WAIT_FOR_IDLE, 1);
		OUT_RING(ring, 0x00000000);
		OUT_PKT3(ring, CP_INTERRUPT, 1);
		OUT_RING(ring, 0x80000000);
	}
#if 0
	if (adreno_is_a3xx(adreno_gpu)) {
		/* Dummy set-constant to trigger context rollover */
		OUT_PKT3(ring, CP_SET_CONSTANT, 2);
                OUT_RING(ring, CP_REG(REG_A3XX_HLSQ_CL_KERNEL_GROUP_X_REG));
                OUT_RING(ring, 0x00000000);
        }
#endif
	gpu->funcs->flush(gpu, ring);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp | 
| Rob Clark | 317 | 94.07% | 5 | 55.56% | 
| Jordan Crouse | 8 | 2.37% | 2 | 22.22% | 
| Craig Stout | 7 | 2.08% | 1 | 11.11% | 
| Aravind Ganesan | 5 | 1.48% | 1 | 11.11% | 
| Total | 337 | 100.00% | 9 | 100.00% | 
/* Make queued ringbuffer writes visible to the CP by updating WPTR. */
void adreno_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	uint32_t new_wptr;

	/* publish the shadow write pointer */
	ring->cur = ring->next;

	/*
	 * get_wptr() masks the value into the HW range, covering the case
	 * where the last command ended exactly at the end of the buffer
	 * and ring->next has not wrapped to zero yet.
	 */
	new_wptr = get_wptr(ring);

	/* ensure ringbuffer contents have hit system memory first: */
	mb();

	adreno_gpu_write(adreno_gpu, REG_ADRENO_CP_RB_WPTR, new_wptr);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp | 
| Jordan Crouse | 23 | 39.66% | 4 | 66.67% | 
| Rob Clark | 22 | 37.93% | 1 | 16.67% | 
| Aravind Ganesan | 13 | 22.41% | 1 | 16.67% | 
| Total | 58 | 100.00% | 6 | 100.00% | 
/* Returns true once the CP has consumed everything queued on @ring. */
bool adreno_idle(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	uint32_t target = get_wptr(ring);

	/* wait for CP to drain ringbuffer: rptr catching up to wptr */
	if (!spin_until(get_rptr(adreno_gpu, ring) == target))
		return true;

	/* TODO maybe we need to reset GPU here to recover from hang? */
	DRM_ERROR("%s: timeout waiting to drain ringbuffer %d rptr/wptr = %X/%X\n",
		gpu->name, ring->id, get_rptr(adreno_gpu, ring), target);

	return false;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp | 
| Rob Clark | 42 | 53.85% | 2 | 33.33% | 
| Jordan Crouse | 33 | 42.31% | 3 | 50.00% | 
| Craig Stout | 3 | 3.85% | 1 | 16.67% | 
| Total | 78 | 100.00% | 6 | 100.00% | 
#ifdef CONFIG_DEBUG_FS
/*
 * adreno_show() - debugfs dump of GPU revision, ring state and registers
 * @gpu: the GPU
 * @m: seq_file to print into
 *
 * The register dump format is shared with adreno_dump() so it can be
 * parsed by the demsm tool.
 */
void adreno_show(struct msm_gpu *gpu, struct seq_file *m)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	int i;
	seq_printf(m, "revision: %d (%d.%d.%d.%d)\n",
			adreno_gpu->info->revn, adreno_gpu->rev.core,
			adreno_gpu->rev.major, adreno_gpu->rev.minor,
			adreno_gpu->rev.patchid);
	for (i = 0; i < gpu->nr_rings; i++) {
		struct msm_ringbuffer *ring = gpu->rb[i];
		seq_printf(m, "rb %d: fence:    %d/%d\n", i,
			ring->memptrs->fence, ring->seqno);
		seq_printf(m, "      rptr:     %d\n",
			get_rptr(adreno_gpu, ring));
		seq_printf(m, "rb wptr:  %d\n", get_wptr(ring));
	}
	/* dump these out in a form that can be parsed by demsm: */
	seq_printf(m, "IO:region %s 00000000 00020000\n", gpu->name);
	/* registers[] holds {start, end} range pairs, terminated by ~0 */
	for (i = 0; adreno_gpu->registers[i] != ~0; i += 2) {
		uint32_t start = adreno_gpu->registers[i];
		uint32_t end   = adreno_gpu->registers[i+1];
		uint32_t addr;
		for (addr = start; addr <= end; addr++) {
			uint32_t val = gpu_read(gpu, addr);
			/* addr<<2 converts the dword index to a byte offset */
			seq_printf(m, "IO:R %08x %08x\n", addr<<2, val);
		}
	}
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp | 
| Rob Clark | 194 | 82.20% | 3 | 60.00% | 
| Jordan Crouse | 39 | 16.53% | 1 | 20.00% | 
| Craig Stout | 3 | 1.27% | 1 | 20.00% | 
| Total | 236 | 100.00% | 5 | 100.00% | 
#endif
/* Dump common gpu status and scratch registers on any hang, to make
 * the hangcheck logs more useful.  The scratch registers seem always
 * safe to read when GPU has hung (unlike some other regs, depending
 * on how the GPU hung), and they are useful to match up to cmdstream
 * dumps when debugging hangs:
 */
void adreno_dump_info(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	int i;
	printk("revision: %d (%d.%d.%d.%d)\n",
			adreno_gpu->info->revn, adreno_gpu->rev.core,
			adreno_gpu->rev.major, adreno_gpu->rev.minor,
			adreno_gpu->rev.patchid);
	/* per-ring fence/rptr/wptr snapshot, mirroring adreno_show() */
	for (i = 0; i < gpu->nr_rings; i++) {
		struct msm_ringbuffer *ring = gpu->rb[i];
		printk("rb %d: fence:    %d/%d\n", i,
			ring->memptrs->fence,
			ring->seqno);
		printk("rptr:     %d\n", get_rptr(adreno_gpu, ring));
		printk("rb wptr:  %d\n", get_wptr(ring));
	}
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp | 
| Rob Clark | 82 | 65.08% | 3 | 60.00% | 
| Jordan Crouse | 41 | 32.54% | 1 | 20.00% | 
| Craig Stout | 3 | 2.38% | 1 | 20.00% | 
| Total | 126 | 100.00% | 5 | 100.00% | 
/* would be nice to not have to duplicate the _show() stuff with printk(): */
/* Dump the full register set to the kernel log in demsm format. */
void adreno_dump(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	int i;

	/* dump these out in a form that can be parsed by demsm: */
	printk("IO:region %s 00000000 00020000\n", gpu->name);

	/* registers[] holds {start, end} range pairs, terminated by ~0 */
	for (i = 0; adreno_gpu->registers[i] != ~0; i += 2) {
		uint32_t reg;

		for (reg = adreno_gpu->registers[i];
				reg <= adreno_gpu->registers[i + 1]; reg++) {
			uint32_t val = gpu_read(gpu, reg);

			/* reg << 2 converts dword index to byte offset */
			printk("IO:R %08x %08x\n", reg << 2, val);
		}
	}
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp | 
| Rob Clark | 116 | 100.00% | 3 | 100.00% | 
| Total | 116 | 100.00% | 3 | 100.00% | 
/* Number of free dwords in @ring, always leaving one slot unused. */
static uint32_t ring_freewords(struct msm_ringbuffer *ring)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(ring->gpu);
	uint32_t size = MSM_GPU_RINGBUFFER_SZ >> 2;
	/* Use ring->next so not-yet-flushed writes still count as used */
	uint32_t wr = ring->next - ring->start;
	uint32_t rd = get_rptr(adreno_gpu, ring);

	/* modular distance from write to read, minus the reserved slot */
	return (rd + (size - 1) - wr) % size;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp | 
| Rob Clark | 47 | 70.15% | 2 | 40.00% | 
| Jordan Crouse | 17 | 25.37% | 2 | 40.00% | 
| Craig Stout | 3 | 4.48% | 1 | 20.00% | 
| Total | 67 | 100.00% | 5 | 100.00% | 
/*
 * adreno_wait_ring() - busy-wait for @ndwords of free space in @ring
 * @ring: the ringbuffer to wait on
 * @ndwords: number of dwords the caller is about to write
 *
 * Logs an error on timeout ("ringbuffer" typo in the message fixed);
 * the caller proceeds regardless, matching the previous behavior.
 */
void adreno_wait_ring(struct msm_ringbuffer *ring, uint32_t ndwords)
{
	if (spin_until(ring_freewords(ring) >= ndwords))
		DRM_DEV_ERROR(ring->gpu->dev->dev,
			"timeout waiting for space in ringbuffer %d\n",
			ring->id);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp | 
| Rob Clark | 27 | 64.29% | 3 | 75.00% | 
| Jordan Crouse | 15 | 35.71% | 1 | 25.00% | 
| Total | 42 | 100.00% | 4 | 100.00% | 
/*
 * adreno_gpu_init() - common init for all adreno GPU targets
 * @drm: the drm device
 * @pdev: platform device carrying the adreno_platform_config
 * @adreno_gpu: target-allocated GPU struct to fill in
 * @funcs: target-specific function table
 * @nr_rings: number of ringbuffers to create
 *
 * Copies config/info into @adreno_gpu, enables runtime PM with
 * autosuspend, and hands off to msm_gpu_init().  Returns its result.
 */
int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
		struct adreno_gpu *adreno_gpu,
		const struct adreno_gpu_funcs *funcs, int nr_rings)
{
	struct adreno_platform_config *config = pdev->dev.platform_data;
	struct msm_gpu_config adreno_gpu_config  = { 0 };
	struct msm_gpu *gpu = &adreno_gpu->base;
	adreno_gpu->funcs = funcs;
	adreno_gpu->info = adreno_info(config->rev);
	adreno_gpu->gmem = adreno_gpu->info->gmem;
	adreno_gpu->revn = adreno_gpu->info->revn;
	adreno_gpu->rev = config->rev;
	gpu->fast_rate = config->fast_rate;
	gpu->bus_freq  = config->bus_freq;
#ifdef DOWNSTREAM_CONFIG_MSM_BUS_SCALING
	gpu->bus_scale_table = config->bus_scale_table;
#endif
	DBG("fast_rate=%u, slow_rate=27000000, bus_freq=%u",
			gpu->fast_rate, gpu->bus_freq);
	/* resource names match the downstream kgsl device tree bindings */
	adreno_gpu_config.ioname = "kgsl_3d0_reg_memory";
	adreno_gpu_config.irqname = "kgsl_3d0_irq";
	/* GPU virtual address range handed to the address space */
	adreno_gpu_config.va_start = SZ_16M;
	adreno_gpu_config.va_end = 0xffffffff;
	adreno_gpu_config.nr_rings = nr_rings;
	pm_runtime_set_autosuspend_delay(&pdev->dev, DRM_MSM_INACTIVE_PERIOD);
	pm_runtime_use_autosuspend(&pdev->dev);
	pm_runtime_enable(&pdev->dev);
	return msm_gpu_init(drm, pdev, &adreno_gpu->base, &funcs->base,
			adreno_gpu->info->name, &adreno_gpu_config);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp | 
| Rob Clark | 157 | 68.86% | 7 | 58.33% | 
| Jordan Crouse | 71 | 31.14% | 5 | 41.67% | 
| Total | 228 | 100.00% | 12 | 100.00% | 
/*
 * adreno_gpu_cleanup() - release resources acquired by adreno_gpu_init()
 *
 * release_firmware() tolerates NULL, so this is safe even when the
 * microcode was never loaded.
 */
void adreno_gpu_cleanup(struct adreno_gpu *adreno_gpu)
{
	release_firmware(adreno_gpu->pm4);
	release_firmware(adreno_gpu->pfp);
	msm_gpu_cleanup(&adreno_gpu->base);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp | 
| Rob Clark | 22 | 68.75% | 1 | 33.33% | 
| Jordan Crouse | 10 | 31.25% | 2 | 66.67% | 
| Total | 32 | 100.00% | 3 | 100.00% | 
Overall Contributors
| Person | Tokens | Prop | Commits | CommitProp | 
| Rob Clark | 1878 | 78.45% | 28 | 59.57% | 
| Jordan Crouse | 393 | 16.42% | 14 | 29.79% | 
| Craig Stout | 68 | 2.84% | 2 | 4.26% | 
| Archit Taneja | 30 | 1.25% | 1 | 2.13% | 
| Aravind Ganesan | 25 | 1.04% | 2 | 4.26% | 
| Total | 2394 | 100.00% | 47 | 100.00% | 
Information contained on this website is for historical information purposes only and does not indicate or represent copyright ownership.