
Release 4.13 drivers/media/platform/s5p-mfc/s5p_mfc.c

/*
 * Samsung S5P Multi Format Codec v 5.1
 *
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 * Kamil Debski, <k.debski@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/videodev2.h>
#include <media/v4l2-event.h>
#include <linux/workqueue.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_reserved_mem.h>
#include <media/videobuf2-v4l2.h>
#include "s5p_mfc_common.h"
#include "s5p_mfc_ctrl.h"
#include "s5p_mfc_debug.h"
#include "s5p_mfc_dec.h"
#include "s5p_mfc_enc.h"
#include "s5p_mfc_intr.h"
#include "s5p_mfc_iommu.h"
#include "s5p_mfc_opr.h"
#include "s5p_mfc_cmd.h"
#include "s5p_mfc_pm.h"


#define S5P_MFC_DEC_NAME	"s5p-mfc-dec"

#define S5P_MFC_ENC_NAME	"s5p-mfc-enc"


int mfc_debug_level;
module_param_named(debug, mfc_debug_level, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "Debug level - higher value produces more verbose messages");


static char *mfc_mem_size;
module_param_named(mem, mfc_mem_size, charp, 0644);
MODULE_PARM_DESC(mem, "Preallocated memory size for the firmware and context buffers");
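
/*
 * The "mem" parameter accepts size strings such as "8M", which the driver
 * later hands to the kernel's memparse() in s5p_mfc_configure_common_memory()
 * below. What follows is a minimal user-space sketch of that K/M/G suffix
 * handling, assuming only the standard C library; parse_mem_size() is a
 * hypothetical stand-in, not the kernel helper.
 */
#include <stdio.h>
#include <stdlib.h>

/* Scale a leading number by an optional K/M/G suffix (1024, 1024^2, 1024^3). */
static unsigned long long parse_mem_size(const char *s)
{
    char *end;
    unsigned long long val = strtoull(s, &end, 0);

    switch (*end) {
    case 'G': case 'g':
        val <<= 10;
        /* fall through */
    case 'M': case 'm':
        val <<= 10;
        /* fall through */
    case 'K': case 'k':
        val <<= 10;
    }
    return val;
}

int main(void)
{
    printf("%llu\n", parse_mem_size("8M")); /* prints 8388608 */
    return 0;
}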

/* Helper functions for interrupt processing */

/* Remove from hw execution round robin */

void clear_work_bit(struct s5p_mfc_ctx *ctx)
{
    struct s5p_mfc_dev *dev = ctx->dev;

    spin_lock(&dev->condlock);
    __clear_bit(ctx->num, &dev->ctx_work_bits);
    spin_unlock(&dev->condlock);
}

Contributors

Person                     Tokens     Prop  Commits  CommitProp
Kamil Debski                   26   55.32%        1      50.00%
Andrzej Hajda                  21   44.68%        1      50.00%
Total                          47  100.00%        2     100.00%

/* Add to hw execution round robin */
void set_work_bit(struct s5p_mfc_ctx *ctx)
{
    struct s5p_mfc_dev *dev = ctx->dev;

    spin_lock(&dev->condlock);
    __set_bit(ctx->num, &dev->ctx_work_bits);
    spin_unlock(&dev->condlock);
}

Contributors

Person                     Tokens     Prop  Commits  CommitProp
Andrzej Hajda                  27   57.45%        1      50.00%
Kamil Debski                   20   42.55%        1      50.00%
Total                          47  100.00%        2     100.00%

/* Remove from hw execution round robin */
void clear_work_bit_irqsave(struct s5p_mfc_ctx *ctx)
{
    struct s5p_mfc_dev *dev = ctx->dev;
    unsigned long flags;

    spin_lock_irqsave(&dev->condlock, flags);
    __clear_bit(ctx->num, &dev->ctx_work_bits);
    spin_unlock_irqrestore(&dev->condlock, flags);
}

Contributors

Person                     Tokens     Prop  Commits  CommitProp
Andrzej Hajda                  55  100.00%        1     100.00%
Total                          55  100.00%        1     100.00%

/* Add to hw execution round robin */
void set_work_bit_irqsave(struct s5p_mfc_ctx *ctx)
{
    struct s5p_mfc_dev *dev = ctx->dev;
    unsigned long flags;

    spin_lock_irqsave(&dev->condlock, flags);
    __set_bit(ctx->num, &dev->ctx_work_bits);
    spin_unlock_irqrestore(&dev->condlock, flags);
}

Contributors

Person                     Tokens     Prop  Commits  CommitProp
Andrzej Hajda                  55  100.00%        1     100.00%
Total                          55  100.00%        1     100.00%


int s5p_mfc_get_new_ctx(struct s5p_mfc_dev *dev)
{
    unsigned long flags;
    int ctx;

    spin_lock_irqsave(&dev->condlock, flags);
    ctx = dev->curr_ctx;
    do {
        ctx = (ctx + 1) % MFC_NUM_CONTEXTS;
        if (ctx == dev->curr_ctx) {
            if (!test_bit(ctx, &dev->ctx_work_bits))
                ctx = -EAGAIN;
            break;
        }
    } while (!test_bit(ctx, &dev->ctx_work_bits));
    spin_unlock_irqrestore(&dev->condlock, flags);

    return ctx;
}

Contributors

Person                     Tokens     Prop  Commits  CommitProp
Andrzej Hajda                 102  100.00%        1     100.00%
Total                         102  100.00%        1     100.00%
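
/*
 * The selection loop above implements a simple round-robin schedule over the
 * ctx_work_bits mask: starting just after curr_ctx, take the next context
 * whose work bit is set, wrapping around once; if the scan comes back to
 * curr_ctx and even its own bit is clear, there is nothing to run. Below is
 * a minimal user-space sketch of the same walk, with NUM_CTX and the plain
 * bitmask as stand-ins for MFC_NUM_CONTEXTS and dev->ctx_work_bits.
 */
#define NUM_CTX 16  /* stand-in for MFC_NUM_CONTEXTS */

/* Returns the next ready context after curr, or -1 (the driver's -EAGAIN
 * case) when no work bit is set anywhere. */
static int next_ready_ctx(unsigned long work_bits, int curr)
{
    int ctx = curr;

    do {
        ctx = (ctx + 1) % NUM_CTX;
        if (ctx == curr)
            return (work_bits >> ctx) & 1 ? ctx : -1;
    } while (!((work_bits >> ctx) & 1));

    return ctx;
}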

/* Wake up context wait_queue */
static void wake_up_ctx(struct s5p_mfc_ctx *ctx, unsigned int reason,
            unsigned int err)
{
    ctx->int_cond = 1;
    ctx->int_type = reason;
    ctx->int_err = err;
    wake_up(&ctx->queue);
}

Contributors

Person                     Tokens     Prop  Commits  CommitProp
Kamil Debski                   45  100.00%        1     100.00%
Total                          45  100.00%        1     100.00%

/* Wake up device wait_queue */
static void wake_up_dev(struct s5p_mfc_dev *dev, unsigned int reason,
            unsigned int err)
{
    dev->int_cond = 1;
    dev->int_type = reason;
    dev->int_err = err;
    wake_up(&dev->queue);
}

Contributors

Person                     Tokens     Prop  Commits  CommitProp
Kamil Debski                   45  100.00%        1     100.00%
Total                          45  100.00%        1     100.00%


void s5p_mfc_cleanup_queue(struct list_head *lh, struct vb2_queue *vq)
{
    struct s5p_mfc_buf *b;
    int i;

    while (!list_empty(lh)) {
        b = list_entry(lh->next, struct s5p_mfc_buf, list);
        for (i = 0; i < b->b->vb2_buf.num_planes; i++)
            vb2_set_plane_payload(&b->b->vb2_buf, i, 0);
        vb2_buffer_done(&b->b->vb2_buf, VB2_BUF_STATE_ERROR);
        list_del(&b->list);
    }
}

Contributors

Person                     Tokens     Prop  Commits  CommitProp
Andrzej Hajda                 100  100.00%        1     100.00%
Total                         100  100.00%        1     100.00%


static void s5p_mfc_watchdog(unsigned long arg)
{
    struct s5p_mfc_dev *dev = (struct s5p_mfc_dev *)arg;

    if (test_bit(0, &dev->hw_lock))
        atomic_inc(&dev->watchdog_cnt);
    if (atomic_read(&dev->watchdog_cnt) >= MFC_WATCHDOG_CNT) {
        /*
         * This means that hw is busy and no interrupts were
         * generated by hw for the Nth time of running this
         * watchdog timer. This usually means a serious hw
         * error. Now it is time to kill all instances and
         * reset the MFC.
         */
        mfc_err("Time out during waiting for HW\n");
        schedule_work(&dev->watchdog_work);
    }
    dev->watchdog_timer.expires = jiffies +
                    msecs_to_jiffies(MFC_WATCHDOG_INTERVAL);
    add_timer(&dev->watchdog_timer);
}

Contributors

Person                     Tokens     Prop  Commits  CommitProp
Kamil Debski                   89   97.80%        1      33.33%
Bhaktipriya Shridhar            1    1.10%        1      33.33%
Sachin Kamat                    1    1.10%        1      33.33%
Total                          91  100.00%        3     100.00%
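
/*
 * The watchdog here is a counting one: the timer fires every
 * MFC_WATCHDOG_INTERVAL, increments a counter whenever the hardware still
 * holds hw_lock, and declares a hang after MFC_WATCHDOG_CNT consecutive busy
 * ticks; the interrupt handler (s5p_mfc_irq() below) zeroes the counter on
 * every interrupt. A toy sketch of just that policy, with plain ints in
 * place of the driver's atomics and bit ops.
 */
#define WATCHDOG_CNT 10  /* stand-in for MFC_WATCHDOG_CNT */

struct toy_watchdog {
    int hw_busy;  /* mirrors test_bit(0, &dev->hw_lock) */
    int cnt;      /* mirrors dev->watchdog_cnt */
};

/* Called once per timer tick; returns nonzero when recovery work should be
 * scheduled. The IRQ handler resets cnt to 0, so cnt only reaches the limit
 * if no interrupt arrived for WATCHDOG_CNT ticks in a row. */
static int watchdog_tick(struct toy_watchdog *w)
{
    if (w->hw_busy)
        w->cnt++;
    return w->cnt >= WATCHDOG_CNT;
}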


static void s5p_mfc_watchdog_worker(struct work_struct *work)
{
    struct s5p_mfc_dev *dev;
    struct s5p_mfc_ctx *ctx;
    unsigned long flags;
    int mutex_locked;
    int i, ret;

    dev = container_of(work, struct s5p_mfc_dev, watchdog_work);
    mfc_err("Driver timeout error handling\n");
    /*
     * Lock the mutex that protects open and release.
     * This is necessary as they may load and unload firmware.
     */
    mutex_locked = mutex_trylock(&dev->mfc_mutex);
    if (!mutex_locked)
        mfc_err("Error: some instance may be closing/opening\n");
    spin_lock_irqsave(&dev->irqlock, flags);

    s5p_mfc_clock_off();

    for (i = 0; i < MFC_NUM_CONTEXTS; i++) {
        ctx = dev->ctx[i];
        if (!ctx)
            continue;
        ctx->state = MFCINST_ERROR;
        s5p_mfc_cleanup_queue(&ctx->dst_queue, &ctx->vq_dst);
        s5p_mfc_cleanup_queue(&ctx->src_queue, &ctx->vq_src);
        clear_work_bit(ctx);
        wake_up_ctx(ctx, S5P_MFC_R2H_CMD_ERR_RET, 0);
    }
    clear_bit(0, &dev->hw_lock);
    spin_unlock_irqrestore(&dev->irqlock, flags);

    /* De-init MFC */
    s5p_mfc_deinit_hw(dev);

    /*
     * Double check if there is at least one instance running.
     * If no instance is in memory then no firmware should be present.
     */
    if (dev->num_inst > 0) {
        ret = s5p_mfc_load_firmware(dev);
        if (ret) {
            mfc_err("Failed to reload FW\n");
            goto unlock;
        }
        s5p_mfc_clock_on();
        ret = s5p_mfc_init_hw(dev);
        s5p_mfc_clock_off();
        if (ret)
            mfc_err("Failed to reinit FW\n");
    }
unlock:
    if (mutex_locked)
        mutex_unlock(&dev->mfc_mutex);
}

Contributors

Person                     Tokens     Prop  Commits  CommitProp
Kamil Debski                  241   94.88%        1      16.67%
Arun Mankuzhi                   6    2.36%        1      16.67%
Marek Szyprowski                3    1.18%        1      16.67%
Andrzej Hajda                   2    0.79%        1      16.67%
Arun Kumar K                    2    0.79%        2      33.33%
Total                         254  100.00%        6     100.00%


static void s5p_mfc_handle_frame_all_extracted(struct s5p_mfc_ctx *ctx)
{
    struct s5p_mfc_buf *dst_buf;
    struct s5p_mfc_dev *dev = ctx->dev;

    ctx->state = MFCINST_FINISHED;
    ctx->sequence++;
    while (!list_empty(&ctx->dst_queue)) {
        dst_buf = list_entry(ctx->dst_queue.next, struct s5p_mfc_buf, list);
        mfc_debug(2, "Cleaning up buffer: %d\n", dst_buf->b->vb2_buf.index);
        vb2_set_plane_payload(&dst_buf->b->vb2_buf, 0, 0);
        vb2_set_plane_payload(&dst_buf->b->vb2_buf, 1, 0);
        list_del(&dst_buf->list);
        dst_buf->flags |= MFC_BUF_FLAG_EOS;
        ctx->dst_queue_cnt--;
        dst_buf->b->sequence = (ctx->sequence++);

        if (s5p_mfc_hw_call(dev->mfc_ops, get_pic_type_top, ctx) ==
            s5p_mfc_hw_call(dev->mfc_ops, get_pic_type_bot, ctx))
            dst_buf->b->field = V4L2_FIELD_NONE;
        else
            dst_buf->b->field = V4L2_FIELD_INTERLACED;
        dst_buf->b->flags |= V4L2_BUF_FLAG_LAST;

        ctx->dec_dst_flag &= ~(1 << dst_buf->b->vb2_buf.index);
        vb2_buffer_done(&dst_buf->b->vb2_buf, VB2_BUF_STATE_DONE);
    }
}

Contributors

Person                     Tokens     Prop  Commits  CommitProp
Kamil Debski                  170   77.98%        1      20.00%
Arun Kumar K                   23   10.55%        1      20.00%
Junghak Sung                   11    5.05%        1      20.00%
Philipp Zabel                   8    3.67%        1      20.00%
Andrzej Hajda                   6    2.75%        1      20.00%
Total                         218  100.00%        5     100.00%


static void s5p_mfc_handle_frame_copy_time(struct s5p_mfc_ctx *ctx)
{
    struct s5p_mfc_dev *dev = ctx->dev;
    struct s5p_mfc_buf *dst_buf, *src_buf;
    size_t dec_y_addr;
    unsigned int frame_type;

    /* Make sure we actually have a new frame before continuing. */
    frame_type = s5p_mfc_hw_call(dev->mfc_ops, get_dec_frame_type, dev);
    if (frame_type == S5P_FIMV_DECODE_FRAME_SKIPPED)
        return;
    dec_y_addr = s5p_mfc_hw_call(dev->mfc_ops, get_dec_y_adr, dev);

    /* Copy timestamp / timecode from decoded src to dst and set appropriate flags. */
    src_buf = list_entry(ctx->src_queue.next, struct s5p_mfc_buf, list);
    list_for_each_entry(dst_buf, &ctx->dst_queue, list) {
        if (vb2_dma_contig_plane_dma_addr(&dst_buf->b->vb2_buf, 0)
                == dec_y_addr) {
            dst_buf->b->timecode = src_buf->b->timecode;
            dst_buf->b->vb2_buf.timestamp = src_buf->b->vb2_buf.timestamp;
            dst_buf->b->flags &= ~V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
            dst_buf->b->flags |=
                src_buf->b->flags & V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
            switch (frame_type) {
            case S5P_FIMV_DECODE_FRAME_I_FRAME:
                dst_buf->b->flags |= V4L2_BUF_FLAG_KEYFRAME;
                break;
            case S5P_FIMV_DECODE_FRAME_P_FRAME:
                dst_buf->b->flags |= V4L2_BUF_FLAG_PFRAME;
                break;
            case S5P_FIMV_DECODE_FRAME_B_FRAME:
                dst_buf->b->flags |= V4L2_BUF_FLAG_BFRAME;
                break;
            default:
                /* Don't know how to handle S5P_FIMV_DECODE_FRAME_OTHER_FRAME. */
                mfc_debug(2, "Unexpected frame type: %d\n", frame_type);
            }
            break;
        }
    }
}

Contributors

Person                     Tokens     Prop  Commits  CommitProp
Kamil Debski                  142   64.84%        2      25.00%
Sakari Ailus                   23   10.50%        1      12.50%
Arun Kumar K                   23   10.50%        1      12.50%
Ilja Friedel                   23   10.50%        1      12.50%
Junghak Sung                    7    3.20%        2      25.00%
Marek Szyprowski                1    0.46%        1      12.50%
Total                         219  100.00%        8     100.00%


static void s5p_mfc_handle_frame_new(struct s5p_mfc_ctx *ctx, unsigned int err)
{
    struct s5p_mfc_dev *dev = ctx->dev;
    struct s5p_mfc_buf *dst_buf;
    size_t dspl_y_addr;
    unsigned int frame_type;

    dspl_y_addr = s5p_mfc_hw_call(dev->mfc_ops, get_dspl_y_adr, dev);
    if (IS_MFCV6_PLUS(dev))
        frame_type = s5p_mfc_hw_call(dev->mfc_ops,
                get_disp_frame_type, ctx);
    else
        frame_type = s5p_mfc_hw_call(dev->mfc_ops,
                get_dec_frame_type, dev);

    /* If frame is same as previous then skip and do not dequeue */
    if (frame_type == S5P_FIMV_DECODE_FRAME_SKIPPED) {
        if (!ctx->after_packed_pb)
            ctx->sequence++;
        ctx->after_packed_pb = 0;
        return;
    }
    ctx->sequence++;
    /*
     * The MFC returns the address of the buffer; now we have to
     * check to which vb2 buffer it corresponds.
     */
    list_for_each_entry(dst_buf, &ctx->dst_queue, list) {
        /* Check if this is the buffer we're looking for */
        if (vb2_dma_contig_plane_dma_addr(&dst_buf->b->vb2_buf, 0)
                == dspl_y_addr) {
            list_del(&dst_buf->list);
            ctx->dst_queue_cnt--;
            dst_buf->b->sequence = ctx->sequence;
            if (s5p_mfc_hw_call(dev->mfc_ops, get_pic_type_top, ctx) ==
                s5p_mfc_hw_call(dev->mfc_ops, get_pic_type_bot, ctx))
                dst_buf->b->field = V4L2_FIELD_NONE;
            else
                dst_buf->b->field = V4L2_FIELD_INTERLACED;
            vb2_set_plane_payload(&dst_buf->b->vb2_buf, 0, ctx->luma_size);
            vb2_set_plane_payload(&dst_buf->b->vb2_buf, 1, ctx->chroma_size);
            clear_bit(dst_buf->b->vb2_buf.index, &ctx->dec_dst_flag);

            vb2_buffer_done(&dst_buf->b->vb2_buf,
                err ? VB2_BUF_STATE_ERROR : VB2_BUF_STATE_DONE);

            break;
        }
    }
}

Contributors

Person                     Tokens     Prop  Commits  CommitProp
Kamil Debski                  200   72.73%        1      16.67%
Arun Kumar K                   40   14.55%        2      33.33%
Sjoerd Simons                  21    7.64%        1      16.67%
Junghak Sung                   13    4.73%        1      16.67%
Marek Szyprowski                1    0.36%        1      16.67%
Total                         275  100.00%        6     100.00%

/* Handle frame decoding interrupt */
static void s5p_mfc_handle_frame(struct s5p_mfc_ctx *ctx,
                 unsigned int reason, unsigned int err)
{
    struct s5p_mfc_dev *dev = ctx->dev;
    unsigned int dst_frame_status;
    unsigned int dec_frame_status;
    struct s5p_mfc_buf *src_buf;
    unsigned int res_change;

    dst_frame_status = s5p_mfc_hw_call(dev->mfc_ops, get_dspl_status, dev)
                & S5P_FIMV_DEC_STATUS_DECODING_STATUS_MASK;
    dec_frame_status = s5p_mfc_hw_call(dev->mfc_ops, get_dec_status, dev)
                & S5P_FIMV_DEC_STATUS_DECODING_STATUS_MASK;
    res_change = (s5p_mfc_hw_call(dev->mfc_ops, get_dspl_status, dev)
                & S5P_FIMV_DEC_STATUS_RESOLUTION_MASK)
                >> S5P_FIMV_DEC_STATUS_RESOLUTION_SHIFT;
    mfc_debug(2, "Frame Status: %x\n", dst_frame_status);
    if (ctx->state == MFCINST_RES_CHANGE_INIT)
        ctx->state = MFCINST_RES_CHANGE_FLUSH;
    if (res_change == S5P_FIMV_RES_INCREASE ||
        res_change == S5P_FIMV_RES_DECREASE) {
        ctx->state = MFCINST_RES_CHANGE_INIT;
        s5p_mfc_hw_call(dev->mfc_ops, clear_int_flags, dev);
        wake_up_ctx(ctx, reason, err);
        WARN_ON(test_and_clear_bit(0, &dev->hw_lock) == 0);
        s5p_mfc_clock_off();
        s5p_mfc_hw_call(dev->mfc_ops, try_run, dev);
        return;
    }
    if (ctx->dpb_flush_flag)
        ctx->dpb_flush_flag = 0;

    /* All frames remaining in the buffer have been extracted */
    if (dst_frame_status == S5P_FIMV_DEC_STATUS_DECODING_EMPTY) {
        if (ctx->state == MFCINST_RES_CHANGE_FLUSH) {
            static const struct v4l2_event ev_src_ch = {
                .type = V4L2_EVENT_SOURCE_CHANGE,
                .u.src_change.changes = V4L2_EVENT_SRC_CH_RESOLUTION,
            };

            s5p_mfc_handle_frame_all_extracted(ctx);
            ctx->state = MFCINST_RES_CHANGE_END;
            v4l2_event_queue_fh(&ctx->fh, &ev_src_ch);
            goto leave_handle_frame;
        } else {
            s5p_mfc_handle_frame_all_extracted(ctx);
        }
    }

    if (dec_frame_status == S5P_FIMV_DEC_STATUS_DECODING_DISPLAY)
        s5p_mfc_handle_frame_copy_time(ctx);

    /* A frame has been decoded and is in the buffer */
    if (dst_frame_status == S5P_FIMV_DEC_STATUS_DISPLAY_ONLY ||
        dst_frame_status == S5P_FIMV_DEC_STATUS_DECODING_DISPLAY) {
        s5p_mfc_handle_frame_new(ctx, err);
    } else {
        mfc_debug(2, "No frame decode\n");
    }
    /* Mark source buffer as complete */
    if (dst_frame_status != S5P_FIMV_DEC_STATUS_DISPLAY_ONLY
        && !list_empty(&ctx->src_queue)) {
        src_buf = list_entry(ctx->src_queue.next, struct s5p_mfc_buf, list);
        ctx->consumed_stream += s5p_mfc_hw_call(dev->mfc_ops,
                        get_consumed_stream, dev);
        if (ctx->codec_mode != S5P_MFC_CODEC_H264_DEC &&
            ctx->codec_mode != S5P_MFC_CODEC_VP8_DEC &&
            ctx->consumed_stream + STUFF_BYTE <
            src_buf->b->vb2_buf.planes[0].bytesused) {
            /* Run MFC again on the same buffer */
            mfc_debug(2, "Running again the same buffer\n");
            ctx->after_packed_pb = 1;
        } else {
            mfc_debug(2, "MFC needs next buffer\n");
            ctx->consumed_stream = 0;
            if (src_buf->flags & MFC_BUF_FLAG_EOS)
                ctx->state = MFCINST_FINISHING;
            list_del(&src_buf->list);
            ctx->src_queue_cnt--;
            if (s5p_mfc_hw_call(dev->mfc_ops, err_dec, err) > 0)
                vb2_buffer_done(&src_buf->b->vb2_buf,
                        VB2_BUF_STATE_ERROR);
            else
                vb2_buffer_done(&src_buf->b->vb2_buf,
                        VB2_BUF_STATE_DONE);
        }
    }
leave_handle_frame:
    if ((ctx->src_queue_cnt == 0 && ctx->state != MFCINST_FINISHING)
        || ctx->dst_queue_cnt < ctx->pb_count)
        clear_work_bit(ctx);
    s5p_mfc_hw_call(dev->mfc_ops, clear_int_flags, dev);
    wake_up_ctx(ctx, reason, err);
    WARN_ON(test_and_clear_bit(0, &dev->hw_lock) == 0);
    s5p_mfc_clock_off();
    /* If suspending, wake up the device and do not try_run again */
    if (test_bit(0, &dev->enter_suspend))
        wake_up_dev(dev, reason, err);
    else
        s5p_mfc_hw_call(dev->mfc_ops, try_run, dev);
}

Contributors

Person                     Tokens     Prop  Commits  CommitProp
Kamil Debski                  422   70.81%        2      16.67%
Pawel Osciak                   65   10.91%        4      33.33%
Arun Kumar K                   63   10.57%        2      16.67%
Prathyush K                    23    3.86%        1       8.33%
Jeongtae Park                  10    1.68%        1       8.33%
Junghak Sung                    9    1.51%        1       8.33%
Andrzej Hajda                   4    0.67%        1       8.33%
Total                         596  100.00%       12     100.00%
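
/*
 * The resolution-change handling above is a three-state walk: a reported
 * resolution change puts the context into MFCINST_RES_CHANGE_INIT, the next
 * frame interrupt moves it to MFCINST_RES_CHANGE_FLUSH while the pre-change
 * frames drain, and an empty decode status in the FLUSH state ends in
 * MFCINST_RES_CHANGE_END plus a V4L2_EVENT_SOURCE_CHANGE event to user
 * space. A schematic sketch of just those transitions follows; the names
 * are illustrative, not the driver's.
 */
enum res_state { RES_NONE, RES_INIT, RES_FLUSH, RES_END };

static enum res_state res_change_step(enum res_state s,
                      int hw_reports_change,
                      int decoding_empty)
{
    if (hw_reports_change)
        return RES_INIT;      /* S5P_FIMV_RES_INCREASE/_DECREASE seen */
    if (s == RES_INIT)
        return RES_FLUSH;     /* next frame IRQ: start draining */
    if (s == RES_FLUSH && decoding_empty)
        return RES_END;       /* all pre-change frames extracted */
    return s;
}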

/* Error handling for interrupt */
static void s5p_mfc_handle_error(struct s5p_mfc_dev *dev,
        struct s5p_mfc_ctx *ctx, unsigned int reason, unsigned int err)
{
    mfc_err("Interrupt Error: %08x\n", err);

    if (ctx != NULL) {
        /* Error recovery is dependent on the state of context */
        switch (ctx->state) {
        case MFCINST_RES_CHANGE_INIT:
        case MFCINST_RES_CHANGE_FLUSH:
        case MFCINST_RES_CHANGE_END:
        case MFCINST_FINISHING:
        case MFCINST_FINISHED:
        case MFCINST_RUNNING:
            /*
             * It is highly probable that an error occurred
             * while decoding a frame
             */
            clear_work_bit(ctx);
            ctx->state = MFCINST_ERROR;
            /* Mark all dst buffers as having an error */
            s5p_mfc_cleanup_queue(&ctx->dst_queue, &ctx->vq_dst);
            /* Mark all src buffers as having an error */
            s5p_mfc_cleanup_queue(&ctx->src_queue, &ctx->vq_src);
            wake_up_ctx(ctx, reason, err);
            break;
        default:
            clear_work_bit(ctx);
            ctx->state = MFCINST_ERROR;
            wake_up_ctx(ctx, reason, err);
            break;
        }
    }
    WARN_ON(test_and_clear_bit(0, &dev->hw_lock) == 0);
    s5p_mfc_hw_call(dev->mfc_ops, clear_int_flags, dev);
    s5p_mfc_clock_off();
    wake_up_dev(dev, reason, err);
}

Contributors

Person                     Tokens     Prop  Commits  CommitProp
Kamil Debski                  169   96.02%        2      33.33%
Pawel Osciak                    3    1.70%        1      16.67%
Andrzej Hajda                   3    1.70%        2      33.33%
Jonathan McCrohan               1    0.57%        1      16.67%
Total                         176  100.00%        6     100.00%

/* Header parsing interrupt handling */
static void s5p_mfc_handle_seq_done(struct s5p_mfc_ctx *ctx,
                    unsigned int reason, unsigned int err)
{
    struct s5p_mfc_dev *dev;

    if (ctx == NULL)
        return;
    dev = ctx->dev;
    if (ctx->c_ops->post_seq_start) {
        if (ctx->c_ops->post_seq_start(ctx))
            mfc_err("post_seq_start() failed\n");
    } else {
        ctx->img_width = s5p_mfc_hw_call(dev->mfc_ops, get_img_width, dev);
        ctx->img_height = s5p_mfc_hw_call(dev->mfc_ops, get_img_height, dev);

        s5p_mfc_hw_call(dev->mfc_ops, dec_calc_dpb_size, ctx);

        ctx->pb_count = s5p_mfc_hw_call(dev->mfc_ops, get_dpb_count, dev);
        ctx->mv_count = s5p_mfc_hw_call(dev->mfc_ops, get_mv_count, dev);
        if (ctx->img_width == 0 || ctx->img_height == 0)
            ctx->state = MFCINST_ERROR;
        else
            ctx->state = MFCINST_HEAD_PARSED;

        if ((ctx->codec_mode == S5P_MFC_CODEC_H264_DEC ||
            ctx->codec_mode == S5P_MFC_CODEC_H264_MVC_DEC) &&
                !list_empty(&ctx->src_queue)) {
            struct s5p_mfc_buf *src_buf;

            src_buf = list_entry(ctx->src_queue.next,
                    struct s5p_mfc_buf, list);
            if (s5p_mfc_hw_call(dev->mfc_ops, get_consumed_stream, dev) <
                    src_buf->b->vb2_buf.planes[0].bytesused)
                ctx->head_processed = 0;
            else
                ctx->head_processed = 1;
        } else {
            ctx->head_processed = 1;
        }
    }
    s5p_mfc_hw_call(dev->mfc_ops, clear_int_flags, dev);
    clear_work_bit(ctx);
    WARN_ON(test_and_clear_bit(0, &dev->hw_lock) == 0);
    s5p_mfc_clock_off();
    s5p_mfc_hw_call(dev->mfc_ops, try_run, dev);
    wake_up_ctx(ctx, reason, err);
}

Contributors

Person                     Tokens     Prop  Commits  CommitProp
Kamil Debski                  149   47.45%        1      10.00%
Jeongtae Park                 108   34.39%        1      10.00%
Arun Kumar K                   46   14.65%        3      30.00%
Andrzej Hajda                   3    0.96%        1      10.00%
Junghak Sung                    3    0.96%        1      10.00%
Pawel Osciak                    3    0.96%        1      10.00%
Julia Lawall                    1    0.32%        1      10.00%
Sachin Kamat                    1    0.32%        1      10.00%
Total                         314  100.00%       10     100.00%

/* Header parsing interrupt handling */
static void s5p_mfc_handle_init_buffers(struct s5p_mfc_ctx *ctx,
                    unsigned int reason, unsigned int err)
{
    struct s5p_mfc_buf *src_buf;
    struct s5p_mfc_dev *dev;

    if (ctx == NULL)
        return;
    dev = ctx->dev;
    s5p_mfc_hw_call(dev->mfc_ops, clear_int_flags, dev);
    ctx->int_type = reason;
    ctx->int_err = err;
    ctx->int_cond = 1;
    clear_work_bit(ctx);
    if (err == 0) {
        ctx->state = MFCINST_RUNNING;
        if (!ctx->dpb_flush_flag && ctx->head_processed) {
            if (!list_empty(&ctx->src_queue)) {
                src_buf = list_entry(ctx->src_queue.next,
                        struct s5p_mfc_buf, list);
                list_del(&src_buf->list);
                ctx->src_queue_cnt--;
                vb2_buffer_done(&src_buf->b->vb2_buf,
                        VB2_BUF_STATE_DONE);
            }
        } else {
            ctx->dpb_flush_flag = 0;
        }
        WARN_ON(test_and_clear_bit(0, &dev->hw_lock) == 0);

        s5p_mfc_clock_off();

        wake_up(&ctx->queue);
        s5p_mfc_hw_call(dev->mfc_ops, try_run, dev);
    } else {
        WARN_ON(test_and_clear_bit(0, &dev->hw_lock) == 0);

        s5p_mfc_clock_off();

        wake_up(&ctx->queue);
    }
}

Contributors

Person                     Tokens     Prop  Commits  CommitProp
Kamil Debski                  203   87.50%        1      12.50%
Arun Kumar K                   12    5.17%        1      12.50%
Pawel Osciak                    6    2.59%        1      12.50%
Jeongtae Park                   4    1.72%        1      12.50%
Andrzej Hajda                   3    1.29%        2      25.00%
Junghak Sung                    3    1.29%        1      12.50%
Sachin Kamat                    1    0.43%        1      12.50%
Total                         232  100.00%        8     100.00%


static void s5p_mfc_handle_stream_complete(struct s5p_mfc_ctx *ctx)
{
    struct s5p_mfc_dev *dev = ctx->dev;
    struct s5p_mfc_buf *mb_entry;

    mfc_debug(2, "Stream completed\n");

    ctx->state = MFCINST_FINISHED;

    if (!list_empty(&ctx->dst_queue)) {
        mb_entry = list_entry(ctx->dst_queue.next,
                    struct s5p_mfc_buf, list);
        list_del(&mb_entry->list);
        ctx->dst_queue_cnt--;
        vb2_set_plane_payload(&mb_entry->b->vb2_buf, 0, 0);
        vb2_buffer_done(&mb_entry->b->vb2_buf, VB2_BUF_STATE_DONE);
    }
    clear_work_bit(ctx);

    WARN_ON(test_and_clear_bit(0, &dev->hw_lock) == 0);

    s5p_mfc_clock_off();
    wake_up(&ctx->queue);
    s5p_mfc_hw_call(dev->mfc_ops, try_run, dev);
}

Contributors

Person                     Tokens     Prop  Commits  CommitProp
Andrzej Hajda                 134   90.54%        3      50.00%
Junghak Sung                    6    4.05%        1      16.67%
Arun Kumar K                    6    4.05%        1      16.67%
Sachin Kamat                    2    1.35%        1      16.67%
Total                         148  100.00%        6     100.00%

/* Interrupt processing */
static irqreturn_t s5p_mfc_irq(int irq, void *priv)
{
    struct s5p_mfc_dev *dev = priv;
    struct s5p_mfc_ctx *ctx;
    unsigned int reason;
    unsigned int err;

    mfc_debug_enter();
    /* Reset the timeout watchdog */
    atomic_set(&dev->watchdog_cnt, 0);
    spin_lock(&dev->irqlock);
    ctx = dev->ctx[dev->curr_ctx];
    /* Get the reason of interrupt and the error code */
    reason = s5p_mfc_hw_call(dev->mfc_ops, get_int_reason, dev);
    err = s5p_mfc_hw_call(dev->mfc_ops, get_int_err, dev);
    mfc_debug(1, "Int reason: %d (err: %08x)\n", reason, err);
    switch (reason) {
    case S5P_MFC_R2H_CMD_ERR_RET:
        /* An error has occurred */
        if (ctx->state == MFCINST_RUNNING &&
            (s5p_mfc_hw_call(dev->mfc_ops, err_dec, err) >=
                dev->warn_start ||
             err == S5P_FIMV_ERR_NO_VALID_SEQ_HDR ||
             err == S5P_FIMV_ERR_INCOMPLETE_FRAME ||
             err == S5P_FIMV_ERR_TIMEOUT))
            s5p_mfc_handle_frame(ctx, reason, err);
        else
            s5p_mfc_handle_error(dev, ctx, reason, err);
        clear_bit(0, &dev->enter_suspend);
        break;

    case S5P_MFC_R2H_CMD_SLICE_DONE_RET:
    case S5P_MFC_R2H_CMD_FIELD_DONE_RET:
    case S5P_MFC_R2H_CMD_FRAME_DONE_RET:
        if (ctx->c_ops->post_frame_start) {
            if (ctx->c_ops->post_frame_start(ctx))
                mfc_err("post_frame_start() failed\n");

            if (ctx->state == MFCINST_FINISHING &&
                list_empty(&ctx->ref_queue)) {
                s5p_mfc_hw_call(dev->mfc_ops, clear_int_flags, dev);
                s5p_mfc_handle_stream_complete(ctx);
                break;
            }
            s5p_mfc_hw_call(dev->mfc_ops, clear_int_flags, dev);
            WARN_ON(test_and_clear_bit(0, &dev->hw_lock) == 0);
            s5p_mfc_clock_off();
            wake_up_ctx(ctx, reason, err);
            s5p_mfc_hw_call(dev->mfc_ops, try_run, dev);
        } else {
            s5p_mfc_handle_frame(ctx, reason, err);
        }
        break;

    case S5P_MFC_R2H_CMD_SEQ_DONE_RET:
        s5p_mfc_handle_seq_done(ctx, reason, err);
        break;

    case S5P_MFC_R2H_CMD_OPEN_INSTANCE_RET:
        ctx->inst_no = s5p_mfc_hw_call(dev->mfc_ops, get_inst_no, dev);
        ctx->state = MFCINST_GOT_INST;
        goto irq_cleanup_hw;

    case S5P_MFC_R2H_CMD_CLOSE_INSTANCE_RET:
        ctx->inst_no = MFC_NO_INSTANCE_SET;
        ctx->state = MFCINST_FREE;
        goto irq_cleanup_hw;

    case S5P_MFC_R2H_CMD_SYS_INIT_RET:
    case S5P_MFC_R2H_CMD_FW_STATUS_RET:
    case S5P_MFC_R2H_CMD_SLEEP_RET:
    case S5P_MFC_R2H_CMD_WAKEUP_RET:
        if (ctx)
            clear_work_bit(ctx);
        s5p_mfc_hw_call(dev->mfc_ops, clear_int_flags, dev);
        clear_bit(0, &dev->hw_lock);
        clear_bit(0, &dev->enter_suspend);
        wake_up_dev(dev, reason, err);
        break;

    case S5P_MFC_R2H_CMD_INIT_BUFFERS_RET:
        s5p_mfc_handle_init_buffers(ctx, reason, err);
        break;

    case S5P_MFC_R2H_CMD_COMPLETE_SEQ_RET:
        s5p_mfc_hw_call(dev->mfc_ops, clear_int_flags, dev);
        ctx->int_type = reason;
        ctx->int_err = err;
        s5p_mfc_handle_stream_complete(ctx);
        break;

    case S5P_MFC_R2H_CMD_DPB_FLUSH_RET:
        ctx->state = MFCINST_RUNNING;
        goto irq_cleanup_hw;

    default:
        mfc_debug(2, "Unknown int reason\n");
        s5p_mfc_hw_call(dev->mfc_ops, clear_int_flags, dev);
    }
    spin_unlock(&dev->irqlock);
    mfc_debug_leave();
    return IRQ_HANDLED;

irq_cleanup_hw:
    s5p_mfc_hw_call(dev->mfc_ops, clear_int_flags, dev);
    ctx->int_type = reason;
    ctx->int_err = err;
    ctx->int_cond = 1;
    if (test_and_clear_bit(0, &dev->hw_lock) == 0)
        mfc_err("Failed to unlock hw\n");

    s5p_mfc_clock_off();
    clear_work_bit(ctx);
    wake_up(&ctx->queue);

    s5p_mfc_hw_call(dev->mfc_ops, try_run, dev);
    spin_unlock(&dev->irqlock);
    mfc_debug(2, "Exit via irq_cleanup_hw\n");
    return IRQ_HANDLED;
}

Contributors

Person                     Tokens     Prop  Commits  CommitProp
Kamil Debski                  366   59.03%        2      15.38%
Arun Kumar K                  103   16.61%        2      15.38%
Andrzej Hajda                  96   15.48%        4      30.77%
Marek Szyprowski               31    5.00%        1       7.69%
Donghwa Lee                    14    2.26%        1       7.69%
Pawel Osciak                    9    1.45%        2      15.38%
Jonathan McCrohan               1    0.16%        1       7.69%
Total                         620  100.00%       13     100.00%

/* Open an MFC node */
static int s5p_mfc_open(struct file *file)
{
    struct video_device *vdev = video_devdata(file);
    struct s5p_mfc_dev *dev = video_drvdata(file);
    struct s5p_mfc_ctx *ctx = NULL;
    struct vb2_queue *q;
    int ret = 0;

    mfc_debug_enter();
    if (mutex_lock_interruptible(&dev->mfc_mutex))
        return -ERESTARTSYS;
    dev->num_inst++;    /* It is guarded by mfc_mutex in vfd */
    /* Allocate memory for context */
    ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
    if (!ctx) {
        ret = -ENOMEM;
        goto err_alloc;
    }
    init_waitqueue_head(&ctx->queue);
    v4l2_fh_init(&ctx->fh, vdev);
    file->private_data = &ctx->fh;
    v4l2_fh_add(&ctx->fh);
    ctx->dev = dev;
    INIT_LIST_HEAD(&ctx->src_queue);
    INIT_LIST_HEAD(&ctx->dst_queue);
    ctx->src_queue_cnt = 0;
    ctx->dst_queue_cnt = 0;
    /* Get context number */
    ctx->num = 0;
    while (dev->ctx[ctx->num]) {
        ctx->num++;
        if (ctx->num >= MFC_NUM_CONTEXTS) {
            mfc_debug(2, "Too many open contexts\n");
            ret = -EBUSY;
            goto err_no_ctx;
        }
    }
    /* Mark context as idle */
    clear_work_bit_irqsave(ctx);
    dev->ctx[ctx->num] = ctx;
    if (vdev == dev->vfd_dec) {
        ctx->type = MFCINST_DECODER;
        ctx->c_ops = get_dec_codec_ops();
        s5p_mfc_dec_init(ctx);
        /* Setup ctrl handler */
        ret = s5p_mfc_dec_ctrls_setup(ctx);
        if (ret) {
            mfc_err("Failed to setup mfc controls\n");
            goto err_ctrls_setup;
        }
    } else if (vdev == dev->vfd_enc) {
        ctx->type = MFCINST_ENCODER;
        ctx->c_ops = get_enc_codec_ops();
        /* only for encoder */
        INIT_LIST_HEAD(&ctx->ref_queue);
        ctx->ref_queue_cnt = 0;
        s5p_mfc_enc_init(ctx);
        /* Setup ctrl handler */
        ret = s5p_mfc_enc_ctrls_setup(ctx);
        if (ret) {
            mfc_err("Failed to setup mfc controls\n");
            goto err_ctrls_setup;
        }
    } else {
        ret = -ENOENT;
        goto err_bad_node;
    }
    ctx->fh.ctrl_handler = &ctx->ctrl_handler;
    ctx->inst_no = MFC_NO_INSTANCE_SET;
    /* Load firmware if this is the first instance */
    if (dev->num_inst == 1) {
        dev->watchdog_timer.expires = jiffies +
                    msecs_to_jiffies(MFC_WATCHDOG_INTERVAL);
        add_timer(&dev->watchdog_timer);
        ret = s5p_mfc_power_on();
        if (ret < 0) {
            mfc_err("power on failed\n");
            goto err_pwr_enable;
        }
        s5p_mfc_clock_on();
        ret = s5p_mfc_load_firmware(dev);
        if (ret) {
            s5p_mfc_clock_off();
            goto err_load_fw;
        }
        /* Init the FW */
        ret = s5p_mfc_init_hw(dev);
        s5p_mfc_clock_off();
        if (ret)
            goto err_init_hw;
    }
    /* Init videobuf2 queue for CAPTURE */
    q = &ctx->vq_dst;
    q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
    q->drv_priv = &ctx->fh;
    q->lock = &dev->mfc_mutex;
    if (vdev == dev->vfd_dec) {
        q->io_modes = VB2_MMAP;
        q->ops = get_dec_queue_ops();
    } else if (vdev == dev->vfd_enc) {
        q->io_modes = VB2_MMAP | VB2_USERPTR;
        q->ops = get_enc_queue_ops();
    } else {
        ret = -ENOENT;
        goto err_queue_init;
    }
    /*
     * We'll do mostly sequential access, so sacrifice TLB efficiency for
     * faster allocation.
     */
    q->dma_attrs = DMA_ATTR_ALLOC_SINGLE_PAGES;
    q->mem_ops = &vb2_dma_contig_memops;
    q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
    ret = vb2_queue_init(q);
    if (ret) {
        mfc_err("Failed to initialize videobuf2 queue(capture)\n");
        goto err_queue_init;
    }
    /* Init videobuf2 queue for OUTPUT */
    q = &ctx->vq_src;
    q->type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
    q->drv_priv = &ctx->fh;
    q->lock = &dev->mfc_mutex;
    if (vdev == dev->vfd_dec) {
        q->io_modes = VB2_MMAP;
        q->ops = get_dec_queue_ops();
    } else if (vdev == dev->vfd_enc) {
        q->io_modes = VB2_MMAP | VB2_USERPTR;
        q->ops = get_enc_queue_ops();
    } else {
        ret = -ENOENT;
        goto err_queue_init;
    }
    /*
     * One way to indicate end-of-stream for MFC is to set the
     * bytesused == 0. However by default videobuf2 handles bytesused
     * equal to 0 as a special case and changes its value to the size
     * of the buffer. Set the allow_zero_bytesused flag so that videobuf2
     * will keep the value of bytesused intact.
     */
    q->allow_zero_bytesused = 1;

    /*
     * We'll do mostly sequential access, so sacrifice TLB efficiency for
     * faster allocation.
     */
    q->dma_attrs = DMA_ATTR_ALLOC_SINGLE_PAGES;
    q->mem_ops = &vb2_dma_contig_memops;
    q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
    ret = vb2_queue_init(q);
    if (ret) {
        mfc_err("Failed to initialize videobuf2 queue(output)\n");
        goto err_queue_init;
    }
    mutex_unlock(&dev->mfc_mutex);
    mfc_debug_leave();
    return ret;

    /* Deinit when failure occurred */
err_queue_init:
    if (dev->num_inst == 1)
        s5p_mfc_deinit_hw(dev);
err_init_hw:
err_load_fw:
err_pwr_enable:
    if (dev->num_inst == 1) {
        if (s5p_mfc_power_off() < 0)
            mfc_err("power off failed\n");
        del_timer_sync(&dev->watchdog_timer);
    }
err_ctrls_setup:
    s5p_mfc_dec_ctrls_delete(ctx);
err_bad_node:
    dev->ctx[ctx->num] = NULL;
err_no_ctx:
    v4l2_fh_del(&ctx->fh);
    v4l2_fh_exit(&ctx->fh);
    kfree(ctx);
err_alloc:
    dev->num_inst--;
    mutex_unlock(&dev->mfc_mutex);
    mfc_debug_leave();
    return ret;
}

Contributors

Person                     Tokens     Prop  Commits  CommitProp
Kamil Debski                  759   86.64%        6      31.58%
Marek Szyprowski               42    4.79%        2      10.53%
Hans Verkuil                   30    3.42%        1       5.26%
Douglas Anderson               14    1.60%        1       5.26%
Arun Kumar K                   10    1.14%        1       5.26%
Lad Prabhakar                   9    1.03%        1       5.26%
Shuah Khan                      3    0.34%        1       5.26%
Sachin Kamat                    3    0.34%        1       5.26%
Sakari Ailus                    2    0.23%        1       5.26%
Pawel Osciak                    1    0.11%        1       5.26%
Zhaowei Yuan                    1    0.11%        1       5.26%
Jonathan McCrohan               1    0.11%        1       5.26%
Andrzej Hajda                   1    0.11%        1       5.26%
Total                         876  100.00%       19     100.00%

/* Release MFC context */
static int s5p_mfc_release(struct file *file)
{
    struct s5p_mfc_ctx *ctx = fh_to_ctx(file->private_data);
    struct s5p_mfc_dev *dev = ctx->dev;

    /* if dev is null, do cleanup that doesn't need dev */
    mfc_debug_enter();
    if (dev)
        mutex_lock(&dev->mfc_mutex);
    vb2_queue_release(&ctx->vq_src);
    vb2_queue_release(&ctx->vq_dst);
    if (dev) {
        s5p_mfc_clock_on();

        /* Mark context as idle */
        clear_work_bit_irqsave(ctx);
        /*
         * If instance was initialised and not yet freed,
         * return instance and free resources
         */
        if (ctx->state != MFCINST_FREE && ctx->state != MFCINST_INIT) {
            mfc_debug(2, "Has to free instance\n");
            s5p_mfc_close_mfc_inst(dev, ctx);
        }
        /* hardware locking scheme */
        if (dev->curr_ctx == ctx->num)
            clear_bit(0, &dev->hw_lock);
        dev->num_inst--;
        if (dev->num_inst == 0) {
            mfc_debug(2, "Last instance\n");
            s5p_mfc_deinit_hw(dev);
            del_timer_sync(&dev->watchdog_timer);
            s5p_mfc_clock_off();
            if (s5p_mfc_power_off() < 0)
                mfc_err("Power off failed\n");
        } else {
            mfc_debug(2, "Shutting down clock\n");
            s5p_mfc_clock_off();
        }
    }
    if (dev)
        dev->ctx[ctx->num] = NULL;
    s5p_mfc_dec_ctrls_delete(ctx);
    v4l2_fh_del(&ctx->fh);
    /* vdev is gone if dev is null */
    if (dev)
        v4l2_fh_exit(&ctx->fh);
    kfree(ctx);
    mfc_debug_leave();
    if (dev)
        mutex_unlock(&dev->mfc_mutex);

    return 0;
}

Contributors

Person                     Tokens     Prop  Commits  CommitProp
Kamil Debski                  190   73.64%        2      18.18%
Shuah Khan                     24    9.30%        1       9.09%
Hans Verkuil                   16    6.20%        1       9.09%
Marek Szyprowski               10    3.88%        2      18.18%
Pawel Osciak                    9    3.49%        2      18.18%
Arun Kumar K                    7    2.71%        1       9.09%
Andrzej Hajda                   1    0.39%        1       9.09%
Sachin Kamat                    1    0.39%        1       9.09%
Total                         258  100.00%       11     100.00%

/* Poll */
static unsigned int s5p_mfc_poll(struct file *file,
                 struct poll_table_struct *wait)
{
    struct s5p_mfc_ctx *ctx = fh_to_ctx(file->private_data);
    struct s5p_mfc_dev *dev = ctx->dev;
    struct vb2_queue *src_q, *dst_q;
    struct vb2_buffer *src_vb = NULL, *dst_vb = NULL;
    unsigned int rc = 0;
    unsigned long flags;

    mutex_lock(&dev->mfc_mutex);
    src_q = &ctx->vq_src;
    dst_q = &ctx->vq_dst;
    /*
     * There has to be at least one buffer queued on each queued_list, which
     * means either in driver already or waiting for driver to claim it
     * and start processing.
     */
    if ((!src_q->streaming || list_empty(&src_q->queued_list))
        && (!dst_q->streaming || list_empty(&dst_q->queued_list))) {
        rc = POLLERR;
        goto end;
    }
    mutex_unlock(&dev->mfc_mutex);
    poll_wait(file, &ctx->fh.wait, wait);
    poll_wait(file, &src_q->done_wq, wait);
    poll_wait(file, &dst_q->done_wq, wait);
    mutex_lock(&dev->mfc_mutex);
    if (v4l2_event_pending(&ctx->fh))
        rc |= POLLPRI;
    spin_lock_irqsave(&src_q->done_lock, flags);
    if (!list_empty(&src_q->done_list))
        src_vb = list_first_entry(&src_q->done_list, struct vb2_buffer,
                        done_entry);
    if (src_vb && (src_vb->state == VB2_BUF_STATE_DONE
                || src_vb->state == VB2_BUF_STATE_ERROR))
        rc |= POLLOUT | POLLWRNORM;
    spin_unlock_irqrestore(&src_q->done_lock, flags);
    spin_lock_irqsave(&dst_q->done_lock, flags);
    if (!list_empty(&dst_q->done_list))
        dst_vb = list_first_entry(&dst_q->done_list, struct vb2_buffer,
                        done_entry);
    if (dst_vb && (dst_vb->state == VB2_BUF_STATE_DONE
                || dst_vb->state == VB2_BUF_STATE_ERROR))
        rc |= POLLIN | POLLRDNORM;
    spin_unlock_irqrestore(&dst_q->done_lock, flags);
end:
    mutex_unlock(&dev->mfc_mutex);
    return rc;
}

Contributors

Person                     Tokens     Prop  Commits  CommitProp
Kamil Debski                  309   87.54%        1      33.33%
Andrzej Hajda                  28    7.93%        1      33.33%
Hans Verkuil                   16    4.53%        1      33.33%
Total                         353  100.00%        3     100.00%

/* Mmap */
static int s5p_mfc_mmap(struct file *file, struct vm_area_struct *vma)
{
    struct s5p_mfc_ctx *ctx = fh_to_ctx(file->private_data);
    struct s5p_mfc_dev *dev = ctx->dev;
    unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
    int ret;

    if (mutex_lock_interruptible(&dev->mfc_mutex))
        return -ERESTARTSYS;
    if (offset < DST_QUEUE_OFF_BASE) {
        mfc_debug(2, "mmapping source\n");
        ret = vb2_mmap(&ctx->vq_src, vma);
    } else {        /* capture */
        mfc_debug(2, "mmapping destination\n");
        vma->vm_pgoff -= (DST_QUEUE_OFF_BASE >> PAGE_SHIFT);
        ret = vb2_mmap(&ctx->vq_dst, vma);
    }
    mutex_unlock(&dev->mfc_mutex);

    return ret;
}

Contributors

Person                     Tokens     Prop  Commits  CommitProp
Kamil Debski                  104   77.04%        1      50.00%
Hans Verkuil                   31   22.96%        1      50.00%
Total                         135  100.00%        2     100.00%

/* v4l2 ops */
static const struct v4l2_file_operations s5p_mfc_fops = {
    .owner = THIS_MODULE,
    .open = s5p_mfc_open,
    .release = s5p_mfc_release,
    .poll = s5p_mfc_poll,
    .unlocked_ioctl = video_ioctl2,
    .mmap = s5p_mfc_mmap,
};

/* DMA memory related helper functions */
static void s5p_mfc_memdev_release(struct device *dev)
{
    of_reserved_mem_device_release(dev);
}

Contributors

Person                     Tokens     Prop  Commits  CommitProp
Javier Martinez Canillas       15   93.75%        1      50.00%
Marek Szyprowski                1    6.25%        1      50.00%
Total                          16  100.00%        2     100.00%


static struct device *s5p_mfc_alloc_memdev(struct device *dev,
                       const char *name, unsigned int idx)
{
    struct device *child;
    int ret;

    child = devm_kzalloc(dev, sizeof(struct device), GFP_KERNEL);
    if (!child)
        return NULL;

    device_initialize(child);
    dev_set_name(child, "%s:%s", dev_name(dev), name);
    child->parent = dev;
    child->bus = dev->bus;
    child->coherent_dma_mask = dev->coherent_dma_mask;
    child->dma_mask = dev->dma_mask;
    child->release = s5p_mfc_memdev_release;

    if (device_add(child) == 0) {
        ret = of_reserved_mem_device_init_by_idx(child, dev->of_node,
                             idx);
        if (ret == 0)
            return child;
        device_del(child);
    }

    put_device(child);
    return NULL;
}

Contributors

Person                     Tokens     Prop  Commits  CommitProp
Marek Szyprowski               89   57.79%        2      33.33%
Arun Kumar K                   56   36.36%        2      33.33%
Javier Martinez Canillas        9    5.84%        2      33.33%
Total                         154  100.00%        6     100.00%


static int s5p_mfc_configure_2port_memory(struct s5p_mfc_dev *mfc_dev)
{
    struct device *dev = &mfc_dev->plat_dev->dev;
    void *bank2_virt;
    dma_addr_t bank2_dma_addr;
    unsigned long align_size = 1 << MFC_BASE_ALIGN_ORDER;
    int ret;

    /*
     * Create and initialize virtual devices for accessing
     * reserved memory regions.
     */
    mfc_dev->mem_dev[BANK_L_CTX] = s5p_mfc_alloc_memdev(dev, "left",
                                BANK_L_CTX);
    if (!mfc_dev->mem_dev[BANK_L_CTX])
        return -ENODEV;
    mfc_dev->mem_dev[BANK_R_CTX] = s5p_mfc_alloc_memdev(dev, "right",
                                BANK_R_CTX);
    if (!mfc_dev->mem_dev[BANK_R_CTX]) {
        device_unregister(mfc_dev->mem_dev[BANK_L_CTX]);
        return -ENODEV;
    }

    /* Allocate memory for firmware and initialize both banks addresses */
    ret = s5p_mfc_alloc_firmware(mfc_dev);
    if (ret) {
        device_unregister(mfc_dev->mem_dev[BANK_R_CTX]);
        device_unregister(mfc_dev->mem_dev[BANK_L_CTX]);
        return ret;
    }

    mfc_dev->dma_base[BANK_L_CTX] = mfc_dev->fw_buf.dma;

    bank2_virt = dma_alloc_coherent(mfc_dev->mem_dev[BANK_R_CTX],
                    align_size, &bank2_dma_addr, GFP_KERNEL);
    if (!bank2_virt) {
        mfc_err("Allocating bank2 base failed\n");
        s5p_mfc_release_firmware(mfc_dev);
        device_unregister(mfc_dev->mem_dev[BANK_R_CTX]);
        device_unregister(mfc_dev->mem_dev[BANK_L_CTX]);
        return -ENOMEM;
    }

    /*
     * Valid buffers passed to MFC encoder with LAST_FRAME command
     * should not have address of bank2 - MFC will treat it as a null frame.
     * To avoid such situation we set bank2 address below the pool address.
     */
    mfc_dev->dma_base[BANK_R_CTX] = bank2_dma_addr - align_size;

    dma_free_coherent(mfc_dev->mem_dev[BANK_R_CTX], align_size,
              bank2_virt, bank2_dma_addr);

    vb2_dma_contig_set_max_seg_size(mfc_dev->mem_dev[BANK_L_CTX],
                    DMA_BIT_MASK(32));
    vb2_dma_contig_set_max_seg_size(mfc_dev->mem_dev[BANK_R_CTX],
                    DMA_BIT_MASK(32));

    return 0;
}

Contributors

Person                     Tokens     Prop  Commits  CommitProp
Marek Szyprowski              243   85.26%        6      75.00%
Arun Kumar K                   40   14.04%        1      12.50%
Javier Martinez Canillas        2    0.70%        1      12.50%
Total                         285  100.00%        8     100.00%


static void s5p_mfc_unconfigure_2port_memory(struct s5p_mfc_dev *mfc_dev)
{
    device_unregister(mfc_dev->mem_dev[BANK_L_CTX]);
    device_unregister(mfc_dev->mem_dev[BANK_R_CTX]);
    vb2_dma_contig_clear_max_seg_size(mfc_dev->mem_dev[BANK_L_CTX]);
    vb2_dma_contig_clear_max_seg_size(mfc_dev->mem_dev[BANK_R_CTX]);
}

Contributors

Person                     Tokens     Prop  Commits  CommitProp
Marek Szyprowski               51  100.00%        3     100.00%
Total                          51  100.00%        3     100.00%


static int s5p_mfc_configure_common_memory(struct s5p_mfc_dev *mfc_dev)
{
    struct device *dev = &mfc_dev->plat_dev->dev;
    unsigned long mem_size = SZ_4M;
    unsigned int bitmap_size;

    if (IS_ENABLED(CONFIG_DMA_CMA) || exynos_is_iommu_available(dev))
        mem_size = SZ_8M;

    if (mfc_mem_size)
        mem_size = memparse(mfc_mem_size, NULL);

    bitmap_size = BITS_TO_LONGS(mem_size >> PAGE_SHIFT) * sizeof(long);

    mfc_dev->mem_bitmap = kzalloc(bitmap_size, GFP_KERNEL);
    if (!mfc_dev->mem_bitmap)
        return -ENOMEM;

    mfc_dev->mem_virt = dma_alloc_coherent(dev, mem_size,
                           &mfc_dev->mem_base, GFP_KERNEL);
    if (!mfc_dev->mem_virt) {
        kfree(mfc_dev->mem_bitmap);
        dev_err(dev, "failed to preallocate %ld MiB for the firmware and context buffers\n",
            (mem_size / SZ_1M));
        return -ENOMEM;
    }
    mfc_dev->mem_size = mem_size;
    mfc_dev->dma_base[BANK_L_CTX] = mfc_dev->mem_base;
    mfc_dev->dma_base[BANK_R_CTX] = mfc_dev->mem_base;

    /*
     * MFC hardware cannot handle 0 as a base address, so mark first 128K
     * as used (to keep required base alignment) and adjust base address
     */
    if (mfc_dev->mem_base == (dma_addr_t)0) {
        unsigned int offset = 1 << MFC_BASE_ALIGN_ORDER;

        bitmap_set(mfc_dev->mem_bitmap, 0, offset >> PAGE_SHIFT);
        mfc_dev->dma_base[BANK_L_CTX] += offset;
        mfc_dev->dma_base[BANK_R_CTX] += offset;
    }

    /* Firmware allocation cannot fail in this case */
    s5p_mfc_alloc_firmware(mfc_dev);

    mfc_dev->mem_dev[BANK_L_CTX] = mfc_dev->mem_dev[BANK_R_CTX] = dev;
    vb2_dma_contig_set_max_seg_size(dev, DMA_BIT_MASK(32));

    dev_info(dev, "preallocated %ld MiB buffer for the firmware and context buffers\n",
         (mem_size / SZ_1M));
    return 0;
}

Contributors

Person                     Tokens     Prop  Commits  CommitProp
Marek Szyprowski              278  100.00%        9     100.00%
Total                         278  100.00%        9     100.00%
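
/*
 * s5p_mfc_configure_common_memory() carves firmware and context buffers out
 * of one preallocated DMA region using a page-granular bitmap, and biases
 * both bank base addresses by 1 << MFC_BASE_ALIGN_ORDER when the region
 * happens to start at bus address 0, since the hardware cannot use a zero
 * base. Below is a toy user-space sketch of the page-bitmap carving idea;
 * pool_alloc_pages() is hypothetical, a first-fit stand-in for the kernel
 * bitmap helpers the driver uses.
 */
#define TOY_PAGE_SHIFT 12
#define POOL_PAGES     2048            /* 8 MiB / 4 KiB */

static unsigned char used[POOL_PAGES]; /* one flag per page */

/* First-fit search for 'pages' free pages; returns a byte offset into the
 * pool, or -1 when no run of free pages is large enough. */
static long pool_alloc_pages(unsigned int pages)
{
    unsigned int i, j;

    for (i = 0; i + pages <= POOL_PAGES; i++) {
        for (j = 0; j < pages && !used[i + j]; j++)
            ;
        if (j == pages) {
            for (j = 0; j < pages; j++)
                used[i + j] = 1;
            return (long)i << TOY_PAGE_SHIFT;
        }
    }
    return -1;
}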


static void s5p_mfc_unconfigure_common_memory(struct s5p_mfc_dev *mfc_dev)
{
    struct device *dev = &mfc_dev->plat_dev->dev;

    dma_free_coherent(dev, mfc_dev->mem_size, mfc_dev->mem_virt,
              mfc_dev->mem_base);
    kfree(mfc_dev->mem_bitmap);
    vb2_dma_contig_clear_max_seg_size(dev);
}

Contributors

Person                     Tokens     Prop  Commits  CommitProp
Marek Szyprowski               52  100.00%        3     100.00%
Total                          52  100.00%        3     100.00%


static int s5p_mfc_configure_dma_memory(struct s5p_mfc_dev *mfc_dev)
{
    struct device *dev = &mfc_dev->plat_dev->dev;

    if (exynos_is_iommu_available(dev) || !IS_TWOPORT(mfc_dev))
        return s5p_mfc_configure_common_memory(mfc_dev);
    else
        return s5p_mfc_configure_2port_memory(mfc_dev);
}

Contributors

Person                     Tokens     Prop  Commits  CommitProp
Marek Szyprowski               49  100.00%        3     100.00%
Total                          49  100.00%        3     100.00%


static void s5p_mfc_unconfigure_dma_memory(struct s5p_mfc_dev *mfc_dev)
{
    struct device *dev = &mfc_dev->plat_dev->dev;

    s5p_mfc_release_firmware(mfc_dev);
    if (exynos_is_iommu_available(dev) || !IS_TWOPORT(mfc_dev))
        s5p_mfc_unconfigure_common_memory(mfc_dev);
    else
        s5p_mfc_unconfigure_2port_memory(mfc_dev);
}

Contributors

Person                     Tokens     Prop  Commits  CommitProp
Marek Szyprowski               52  100.00%        4     100.00%
Total                          52  100.00%        4     100.00%

/* MFC probe function */
static int s5p_mfc_probe(struct platform_device *pdev)
{
    struct s5p_mfc_dev *dev;
    struct video_device *vfd;
    struct resource *res;
    int ret;

    pr_debug("%s++\n", __func__);
    dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
    if (!dev) {
        dev_err(&pdev->dev, "Not enough memory for MFC device\n");
        return -ENOMEM;
    }

    spin_lock_init(&dev->irqlock);
    spin_lock_init(&dev->condlock);
    dev->plat_dev = pdev;
    if (!dev->plat_dev) {
        dev_err(&pdev->dev, "No platform data specified\n");
        return -ENODEV;
    }

    dev->variant = of_device_get_match_data(&pdev->dev);

    res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
    dev->regs_base = devm_ioremap_resource(&pdev->dev, res);
    if (IS_ERR(dev->regs_base))
        return PTR_ERR(dev->regs_base);

    res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
    if (res == NULL) {
        dev_err(&pdev->dev, "failed to get irq resource\n");
        return -ENOENT;
    }
    dev->irq = res->start;
    ret = devm_request_irq(&pdev->dev, dev->irq, s5p_mfc_irq,
                    0, pdev->name, dev);
    if (ret) {
        dev_err(&pdev->dev, "Failed to install irq (%d)\n", ret);
        return ret;
    }

    ret = s5p_mfc_configure_dma_memory(dev);
    if (ret < 0) {
        dev_err(&pdev->dev, "failed to configure DMA memory\n");
        return ret;
    }

    ret = s5p_mfc_init_pm(dev);
    if (ret < 0) {
        dev_err(&pdev->dev, "failed to get mfc clock source\n");
        goto err_dma;
    }

    mutex_init(&dev->mfc_mutex);
    init_waitqueue_head(&dev->queue);
    dev->hw_lock = 0;
    INIT_WORK(&dev->watchdog_work, s5p_mfc_watchdog_worker);
    atomic_set(&dev->watchdog_cnt, 0);
    init_timer(&dev->watchdog_timer);
    dev->watchdog_timer.data = (unsigned long)dev;
    dev->watchdog_timer.function = s5p_mfc_watchdog;

    ret = v4l2_device_register(&pdev->dev, &dev->v4l2_dev);
    if (ret)
        goto err_v4l2_dev_reg;

    /* decoder */
    vfd = video_device_alloc();
    if (!vfd) {
        v4l2_err(&dev->v4l2_dev, "Failed to allocate video device\n");
        ret = -ENOMEM;
        goto err_dec_alloc;
    }
    vfd->fops = &s5p_mfc_fops;
    vfd->ioctl_ops = get_dec_v4l2_ioctl_ops();
    vfd->release = video_device_release;
    vfd->lock = &dev->mfc_mutex;
    vfd->v4l2_dev = &dev->v4l2_dev;
    vfd->vfl_dir = VFL_DIR_M2M;
    snprintf(vfd->name, sizeof(vfd->name), "%s", S5P_MFC_DEC_NAME);
    dev->vfd_dec = vfd;
    video_set_drvdata(vfd, dev);

    /* encoder */
    vfd = video_device_alloc();
    if (!vfd) {
        v4l2_err(&dev->v4l2_dev, "Failed to allocate video device\n");
        ret = -ENOMEM;
        goto err_enc_alloc;
    }
    vfd->fops = &s5p_mfc_fops;
    vfd->ioctl_ops = get_enc_v4l2_ioctl_ops();
    vfd->release = video_device_release;
    vfd->lock = &dev->mfc_mutex;
    vfd->v4l2_dev = &dev->v4l2_dev;
    vfd->vfl_dir = VFL_DIR_M2M;
    snprintf(vfd->name, sizeof(vfd->name), "%s", S5P_MFC_ENC_NAME);
    dev->vfd_enc = vfd;
    video_set_drvdata(vfd, dev);
    platform_set_drvdata(pdev, dev);

    /* Initialize HW ops and commands based on MFC version */
    s5p_mfc_init_hw_ops(dev);
    s5p_mfc_init_hw_cmds(dev);
    s5p_mfc_init_regs(dev);

    /* Register decoder and encoder */
    ret = video_register_device(dev->vfd_dec, VFL_TYPE_GRABBER, 0);
    if (ret) {
        v4l2_err(&dev->v4l2_dev, "Failed to register video device\n");
        goto err_dec_reg;
    }
    v4l2_info(&dev->v4l2_dev,
          "decoder registered as /dev/video%d\n", dev->vfd_dec->num);

    ret = video_register_device(dev->vfd_enc, VFL_TYPE_GRABBER, 0);
    if (ret) {
        v4l2_err(&dev->v4l2_dev, "Failed to register video device\n");
        goto err_enc_reg;
    }
    v4l2_info(&dev->v4l2_dev,
          "encoder registered as /dev/video%d\n", dev->vfd_enc->num);

    pr_debug("%s--\n", __func__);
    return 0;

    /* Deinit MFC if probe had failed */
err_enc_reg:
    video_unregister_device(dev->vfd_dec);
err_dec_reg:
    video_device_release(dev->vfd_enc);
err_enc_alloc:
    video_device_release(dev->vfd_dec);
err_dec_alloc:
    v4l2_device_unregister(&dev->v4l2_dev);
err_v4l2_dev_reg:
    s5p_mfc_final_pm(dev);
err_dma:
    s5p_mfc_unconfigure_dma_memory(dev);

    pr_debug("%s-- with error\n", __func__);
    return ret;
}

Contributors

Person                     Tokens     Prop  Commits  CommitProp
Kamil Debski                  527   64.90%        1       5.26%
Javier Martinez Canillas      103   12.68%        1       5.26%
Marek Szyprowski               99   12.19%        4      21.05%
Arun Kumar K                   33    4.06%        5      26.32%
Sachin Kamat                   20    2.46%        2      10.53%
Thierry Reding                  8    0.99%        1       5.26%
Hans Verkuil                    6    0.74%        1       5.26%
Wei Yongjun                     6    0.74%        1       5.26%
Kiran AVND                      5    0.62%        1       5.26%
Joonyoung Shim                  4    0.49%        1       5.26%
Michael Opdenacker              1    0.12%        1       5.26%
Total                         812  100.00%       19     100.00%

/* Remove the driver */
static int s5p_mfc_remove(struct platform_device *pdev)
{
    struct s5p_mfc_dev *dev = platform_get_drvdata(pdev);
    struct s5p_mfc_ctx *ctx;
    int i;

    v4l2_info(&dev->v4l2_dev, "Removing %s\n", pdev->name);

    /*
     * Clear ctx dev pointer to avoid races between s5p_mfc_remove()
     * and s5p_mfc_release() and s5p_mfc_release() accessing ctx->dev
     * after s5p_mfc_remove() is run during unbind.
     */
    mutex_lock(&dev->mfc_mutex);
    for (i = 0; i < MFC_NUM_CONTEXTS; i++) {
        ctx = dev->ctx[i];
        if (!ctx)
            continue;
        /* clear ctx->dev */
        ctx->dev = NULL;
    }
    mutex_unlock(&dev->mfc_mutex);

    del_timer_sync(&dev->watchdog_timer);
    flush_work(&dev->watchdog_work);

    video_unregister_device(dev->vfd_enc);
    video_unregister_device(dev->vfd_dec);
    video_device_release(dev->vfd_enc);
    video_device_release(dev->vfd_dec);
    v4l2_device_unregister(&dev->v4l2_dev);
    s5p_mfc_unconfigure_dma_memory(dev);

    s5p_mfc_final_pm(dev);
    return 0;
}

Contributors

Person                     Tokens     Prop  Commits  CommitProp
Kamil Debski                   78   48.15%        1      16.67%
Shuah Khan                     76   46.91%        2      33.33%
Marek Szyprowski                5    3.09%        2      33.33%
Bhaktipriya Shridhar            3    1.85%        1      16.67%
Total                         162  100.00%        6     100.00%

#ifdef CONFIG_PM_SLEEP
static int s5p_mfc_suspend(struct device *dev)
{
    struct platform_device *pdev = to_platform_device(dev);
    struct s5p_mfc_dev *m_dev = platform_get_drvdata(pdev);
    int ret;

    if (m_dev->num_inst == 0)
        return 0;

    if (test_and_set_bit(0, &m_dev->enter_suspend) != 0) {
        mfc_err("Error: going to suspend for a second time\n");
        return -EIO;
    }

    /* Check if we're processing, then wait if necessary. */
    while (test_and_set_bit(0, &m_dev->hw_lock) != 0) {
        /* Try and lock the HW */
        /* Wait on the interrupt waitqueue */
        ret = wait_event_interruptible_timeout(m_dev->queue,
            m_dev->int_cond, msecs_to_jiffies(MFC_INT_TIMEOUT));
        if (ret == 0) {
            mfc_err("Waiting for hardware to finish timed out\n");
            clear_bit(0, &m_dev->enter_suspend);
            return -EIO;
        }
    }

    ret = s5p_mfc_sleep(m_dev);
    if (ret) {
        clear_bit(0, &m_dev->enter_suspend);
        clear_bit(0, &m_dev->hw_lock);
    }
    return ret;
}

Contributors

Person                     Tokens     Prop  Commits  CommitProp
Kamil Debski                  125   73.53%        1      33.33%
Prathyush K                    41   24.12%        1      33.33%
Sachin Kamat                    4    2.35%        1      33.33%
Total                         170  100.00%        3     100.00%


static int s5p_mfc_resume(struct device *dev)
{
    struct platform_device *pdev = to_platform_device(dev);
    struct s5p_mfc_dev *m_dev = platform_get_drvdata(pdev);

    if (m_dev->num_inst == 0)
        return 0;
    return s5p_mfc_wakeup(m_dev);
}

Contributors

Person                     Tokens     Prop  Commits  CommitProp
Kamil Debski                   48  100.00%        1     100.00%
Total                          48  100.00%        1     100.00%

#endif

/* Power management */
static const struct dev_pm_ops s5p_mfc_pm_ops = {
    SET_SYSTEM_SLEEP_PM_OPS(s5p_mfc_suspend, s5p_mfc_resume)
};

static struct s5p_mfc_buf_size_v5 mfc_buf_size_v5 = {
    .h264_ctx = MFC_H264_CTX_BUF_SIZE,
    .non_h264_ctx = MFC_CTX_BUF_SIZE,
    .dsc = DESC_BUF_SIZE,
    .shm = SHARED_BUF_SIZE,
};

static struct s5p_mfc_buf_size buf_size_v5 = {
    .fw = MAX_FW_SIZE,
    .cpb = MAX_CPB_SIZE,
    .priv = &mfc_buf_size_v5,
};

static struct s5p_mfc_variant mfc_drvdata_v5 = {
    .version = MFC_VERSION,
    .version_bit = MFC_V5_BIT,
    .port_num = MFC_NUM_PORTS,
    .buf_size = &buf_size_v5,
    .fw_name[0] = "s5p-mfc.fw",
    .clk_names = {"mfc", "sclk_mfc"},
    .num_clocks = 2,
    .use_clock_gating = true,
};

static struct s5p_mfc_buf_size_v6 mfc_buf_size_v6 = {
    .dev_ctx = MFC_CTX_BUF_SIZE_V6,
    .h264_dec_ctx = MFC_H264_DEC_CTX_BUF_SIZE_V6,
    .other_dec_ctx = MFC_OTHER_DEC_CTX_BUF_SIZE_V6,
    .h264_enc_ctx = MFC_H264_ENC_CTX_BUF_SIZE_V6,
    .other_enc_ctx = MFC_OTHER_ENC_CTX_BUF_SIZE_V6,
};

static struct s5p_mfc_buf_size buf_size_v6 = {
    .fw = MAX_FW_SIZE_V6,
    .cpb = MAX_CPB_SIZE_V6,
    .priv = &mfc_buf_size_v6,
};

static struct s5p_mfc_variant mfc_drvdata_v6 = {
    .version = MFC_VERSION_V6,
    .version_bit = MFC_V6_BIT,
    .port_num = MFC_NUM_PORTS_V6,
    .buf_size = &buf_size_v6,
    .fw_name[0] = "s5p-mfc-v6.fw",
    /*
     * v6-v2 firmware contains bug fixes and interface change
     * for init buffer command
     */
    .fw_name[1] = "s5p-mfc-v6-v2.fw",
    .clk_names = {"mfc"},
    .num_clocks = 1,
};

static struct s5p_mfc_buf_size_v6 mfc_buf_size_v7 = {
    .dev_ctx = MFC_CTX_BUF_SIZE_V7,
    .h264_dec_ctx = MFC_H264_DEC_CTX_BUF_SIZE_V7,
    .other_dec_ctx = MFC_OTHER_DEC_CTX_BUF_SIZE_V7,
    .h264_enc_ctx = MFC_H264_ENC_CTX_BUF_SIZE_V7,
    .other_enc_ctx = MFC_OTHER_ENC_CTX_BUF_SIZE_V7,
};

static struct s5p_mfc_buf_size buf_size_v7 = {
    .fw = MAX_FW_SIZE_V7,
    .cpb = MAX_CPB_SIZE_V7,
    .priv = &mfc_buf_size_v7,
};

static struct s5p_mfc_variant mfc_drvdata_v7 = {
    .version = MFC_VERSION_V7,
    .version_bit = MFC_V7_BIT,
    .port_num = MFC_NUM_PORTS_V7,
    .buf_size = &buf_size_v7,
    .fw_name[0] = "s5p-mfc-v7.fw",
    .clk_names = {"mfc", "sclk_mfc"},
    .num_clocks = 2,
};

static struct s5p_mfc_buf_size_v6 mfc_buf_size_v8 = {
    .dev_ctx = MFC_CTX_BUF_SIZE_V8,
    .h264_dec_ctx = MFC_H264_DEC_CTX_BUF_SIZE_V8,
    .other_dec_ctx = MFC_OTHER_DEC_CTX_BUF_SIZE_V8,
    .h264_enc_ctx = MFC_H264_ENC_CTX_BUF_SIZE_V8,
    .other_enc_ctx = MFC_OTHER_ENC_CTX_BUF_SIZE_V8,
};

static struct s5p_mfc_buf_size buf_size_v8 = {
    .fw = MAX_FW_SIZE_V8,
    .cpb = MAX_CPB_SIZE_V8,
    .priv = &mfc_buf_size_v8,
};

static struct s5p_mfc_variant mfc_drvdata_v8 = {
    .version = MFC_VERSION_V8,
    .version_bit = MFC_V8_BIT,
    .port_num = MFC_NUM_PORTS_V8,
    .buf_size = &buf_size_v8,
    .fw_name[0] = "s5p-mfc-v8.fw",
    .clk_names = {"mfc"},
    .num_clocks = 1,
};

static struct s5p_mfc_variant mfc_drvdata_v8_5433 = {
    .version = MFC_VERSION_V8,
    .version_bit = MFC_V8_BIT,
    .port_num = MFC_NUM_PORTS_V8,
    .buf_size = &buf_size_v8,
    .fw_name[0] = "s5p-mfc-v8.fw",
    .clk_names = {"pclk", "aclk", "aclk_xiu"},
    .num_clocks = 3,
};

static const struct of_device_id exynos_mfc_match[] = {
    {
        .compatible = "samsung,mfc-v5",
        .data = &mfc_drvdata_v5,
    }, {
        .compatible = "samsung,mfc-v6",
        .data = &mfc_drvdata_v6,
    }, {
        .compatible = "samsung,mfc-v7",
        .data = &mfc_drvdata_v7,
    }, {
        .compatible = "samsung,mfc-v8",
        .data = &mfc_drvdata_v8,
    }, {
        .compatible = "samsung,exynos5433-mfc",
        .data = &mfc_drvdata_v8_5433,
    },
    {},
};
MODULE_DEVICE_TABLE(of, exynos_mfc_match);

static struct platform_driver s5p_mfc_driver = {
    .probe = s5p_mfc_probe,
    .remove = s5p_mfc_remove,
    .driver = {
        .name = S5P_MFC_NAME,
        .pm = &s5p_mfc_pm_ops,
        .of_match_table = exynos_mfc_match,
    },
};

module_platform_driver(s5p_mfc_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Kamil Debski <k.debski@samsung.com>");
MODULE_DESCRIPTION("Samsung S5P Multi Format Codec V4L2 driver");

Overall Contributors

Person                     Tokens     Prop  Commits  CommitProp
Kamil Debski                 4649   56.58%       11      10.58%
Marek Szyprowski             1162   14.14%       24      23.08%
Arun Kumar K                  710    8.64%       11      10.58%
Andrzej Hajda                 647    7.87%        9       8.65%
Jeongtae Park                 206    2.51%        1       0.96%
Javier Martinez Canillas      129    1.57%        3       2.88%
Kiran AVND                    104    1.27%        3       2.88%
Shuah Khan                    103    1.25%        3       2.88%
Hans Verkuil                   99    1.20%        2       1.92%
Pawel Osciak                   96    1.17%        6       5.77%
Prathyush K                    64    0.78%        2       1.92%
Junghak Sung                   53    0.65%        3       2.88%
Sachin Kamat                   33    0.40%        6       5.77%
Sakari Ailus                   25    0.30%        2       1.92%
Ilja Friedel                   23    0.28%        1       0.96%
Sjoerd Simons                  21    0.26%        1       0.96%
Donghwa Lee                    14    0.17%        1       0.96%
Douglas Anderson               14    0.17%        1       0.96%
Mauro Carvalho Chehab          12    0.15%        2       1.92%
Lad Prabhakar                   9    0.11%        1       0.96%
Philipp Zabel                   8    0.10%        1       0.96%
Thierry Reding                  8    0.10%        1       0.96%
Arun Mankuzhi                   6    0.07%        1       0.96%
Wei Yongjun                     6    0.07%        1       0.96%
Joonyoung Shim                  4    0.05%        1       0.96%
Bhaktipriya Shridhar            4    0.05%        1       0.96%
Jonathan McCrohan               3    0.04%        1       0.96%
Axel Lin                        2    0.02%        1       0.96%
Zhaowei Yuan                    1    0.01%        1       0.96%
Julia Lawall                    1    0.01%        1       0.96%
Michael Opdenacker              1    0.01%        1       0.96%
Total                        8217  100.00%      104     100.00%
Created with cregit.