cregit-Linux how code gets into the kernel

Release 4.11 drivers/scsi/ufs/ufs-qcom.c

Directory: drivers/scsi/ufs
/*
 * Copyright (c) 2013-2016, Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <linux/time.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/phy/phy.h>
#include <linux/phy/phy-qcom-ufs.h>

#include "ufshcd.h"
#include "ufshcd-pltfrm.h"
#include "unipro.h"
#include "ufs-qcom.h"
#include "ufshci.h"
#include "ufs_quirks.h"

#define UFS_QCOM_DEFAULT_DBG_PRINT_EN	\
	(UFS_QCOM_DBG_PRINT_REGS_EN | UFS_QCOM_DBG_PRINT_TEST_BUS_EN)

/* Selector values for the UFS-Qcom hardware test bus sub-modules. */
enum {
	TSTBUS_UAWM,
	TSTBUS_UARM,
	TSTBUS_TXUC,
	TSTBUS_RXUC,
	TSTBUS_DFC,
	TSTBUS_TRLUT,
	TSTBUS_TMRLUT,
	TSTBUS_OCSC,
	TSTBUS_UTP_HCI,
	TSTBUS_COMBINED,
	TSTBUS_WRAPPER,
	TSTBUS_UNIPRO,
	TSTBUS_MAX,
};


static struct ufs_qcom_host *ufs_qcom_hosts[MAX_UFS_QCOM_HOSTS];

static int ufs_qcom_set_bus_vote(struct ufs_qcom_host *host, int vote);
static void ufs_qcom_get_default_testbus_cfg(struct ufs_qcom_host *host);
static int ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(struct ufs_hba *hba,
						       u32 clk_cycles);


/*
 * ufs_qcom_dump_regs - hex-dump a range of host controller registers
 * @hba: host controller instance
 * @offset: byte offset into the controller's MMIO space
 * @len: number of 32-bit registers to dump
 * @prefix: tag prepended to every dumped line
 *
 * Dumps with offsets when more than 4 registers are requested, without
 * otherwise.
 */
static void ufs_qcom_dump_regs(struct ufs_hba *hba, int offset, int len,
			       char *prefix)
{
	print_hex_dump(KERN_ERR, prefix,
		       len > 4 ? DUMP_PREFIX_OFFSET : DUMP_PREFIX_NONE,
		       16, 4, (void __force *)hba->mmio_base + offset,
		       len * 4, false);
}

Contributors

PersonTokensPropCommitsCommitProp
Yaniv Gardi57100.00%1100.00%
Total57100.00%1100.00%


/* Adapter matching the ufshcd dump callback signature; @priv is unused. */
static void ufs_qcom_dump_regs_wrapper(struct ufs_hba *hba, int offset, int len,
				       char *prefix, void *priv)
{
	ufs_qcom_dump_regs(hba, offset, len, prefix);
}

Contributors

PersonTokensPropCommitsCommitProp
Yaniv Gardi36100.00%1100.00%
Total36100.00%1100.00%


/*
 * ufs_qcom_get_connected_tx_lanes - read PA_CONNECTEDTXDATALANES over DME
 * @hba: host controller instance
 * @tx_lanes: output, number of connected TX data lanes
 *
 * Returns zero on success, the DME access error code otherwise.
 */
static int ufs_qcom_get_connected_tx_lanes(struct ufs_hba *hba, u32 *tx_lanes)
{
	int err;

	err = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
			     tx_lanes);
	if (err)
		dev_err(hba->dev, "%s: couldn't read PA_CONNECTEDTXDATALANES %d\n",
			__func__, err);

	return err;
}

Contributors

PersonTokensPropCommitsCommitProp
Yaniv Gardi54100.00%1100.00%
Total54100.00%1100.00%


/*
 * ufs_qcom_host_clk_get - look up a clock by name via devm_clk_get()
 * @dev: device owning the clock
 * @name: clock consumer name
 * @clk_out: output, set only on success
 *
 * Returns zero on success, the devm_clk_get() error code otherwise.
 */
static int ufs_qcom_host_clk_get(struct device *dev,
				 const char *name, struct clk **clk_out)
{
	struct clk *clk = devm_clk_get(dev, name);

	if (IS_ERR(clk)) {
		int err = PTR_ERR(clk);

		dev_err(dev, "%s: failed to get %s err %d",
			__func__, name, err);
		return err;
	}

	*clk_out = clk;
	return 0;
}

Contributors

PersonTokensPropCommitsCommitProp
Yaniv Gardi81100.00%1100.00%
Total81100.00%1100.00%


/*
 * ufs_qcom_host_clk_enable - prepare and enable one clock
 * @dev: device used for error logging
 * @name: clock name, used only in the error message
 * @clk: clock to enable
 *
 * Returns zero on success, the clk_prepare_enable() error code otherwise.
 */
static int ufs_qcom_host_clk_enable(struct device *dev,
				    const char *name, struct clk *clk)
{
	int err;

	err = clk_prepare_enable(clk);
	if (err)
		dev_err(dev, "%s: %s enable failed %d\n", __func__, name, err);

	return err;
}

Contributors

PersonTokensPropCommitsCommitProp
Yaniv Gardi53100.00%1100.00%
Total53100.00%1100.00%


/*
 * ufs_qcom_disable_lane_clks - disable the tx/rx lane symbol clocks
 * @host: qcom host private data
 *
 * No-op if the lane clocks are not currently enabled. TX clocks are
 * disabled before RX; lane-1 clocks exist only on hosts with more than
 * one lane per direction.
 */
static void ufs_qcom_disable_lane_clks(struct ufs_qcom_host *host)
{
	if (!host->is_lane_clks_enabled)
		return;

	if (host->hba->lanes_per_direction > 1)
		clk_disable_unprepare(host->tx_l1_sync_clk);
	clk_disable_unprepare(host->tx_l0_sync_clk);

	if (host->hba->lanes_per_direction > 1)
		clk_disable_unprepare(host->rx_l1_sync_clk);
	clk_disable_unprepare(host->rx_l0_sync_clk);

	host->is_lane_clks_enabled = false;
}

Contributors

PersonTokensPropCommitsCommitProp
Yaniv Gardi73100.00%2100.00%
Total73100.00%2100.00%


static int ufs_qcom_enable_lane_clks(struct ufs_qcom_host *host) { int err = 0; struct device *dev = host->hba->dev; if (host->is_lane_clks_enabled) return 0; err = ufs_qcom_host_clk_enable(dev, "rx_lane0_sync_clk", host->rx_l0_sync_clk); if (err) goto out; err = ufs_qcom_host_clk_enable(dev, "tx_lane0_sync_clk", host->tx_l0_sync_clk); if (err) goto disable_rx_l0; if (host->hba->lanes_per_direction > 1) { err = ufs_qcom_host_clk_enable(dev, "rx_lane1_sync_clk", host->rx_l1_sync_clk); if (err) goto disable_tx_l0; err = ufs_qcom_host_clk_enable(dev, "tx_lane1_sync_clk", host->tx_l1_sync_clk); if (err) goto disable_rx_l1; } host->is_lane_clks_enabled = true; goto out; disable_rx_l1: if (host->hba->lanes_per_direction > 1) clk_disable_unprepare(host->rx_l1_sync_clk); disable_tx_l0: clk_disable_unprepare(host->tx_l0_sync_clk); disable_rx_l0: clk_disable_unprepare(host->rx_l0_sync_clk); out: return err; }

Contributors

PersonTokensPropCommitsCommitProp
Yaniv Gardi179100.00%2100.00%
Total179100.00%2100.00%


/*
 * ufs_qcom_init_lane_clks - look up all lane symbol clocks
 * @host: qcom host private data
 *
 * Fetches the lane-0 rx/tx clocks and, on multi-lane hosts, the lane-1
 * clocks as well. Clock references are devm-managed; nothing to undo on
 * failure.
 *
 * Returns zero on success, negative errno otherwise.
 */
static int ufs_qcom_init_lane_clks(struct ufs_qcom_host *host)
{
	struct device *dev = host->hba->dev;
	int err;

	err = ufs_qcom_host_clk_get(dev, "rx_lane0_sync_clk",
				    &host->rx_l0_sync_clk);
	if (err)
		return err;

	err = ufs_qcom_host_clk_get(dev, "tx_lane0_sync_clk",
				    &host->tx_l0_sync_clk);
	if (err)
		return err;

	/* In case of single lane per direction, don't read lane1 clocks */
	if (host->hba->lanes_per_direction > 1) {
		err = ufs_qcom_host_clk_get(dev, "rx_lane1_sync_clk",
					    &host->rx_l1_sync_clk);
		if (err)
			return err;

		err = ufs_qcom_host_clk_get(dev, "tx_lane1_sync_clk",
					    &host->tx_l1_sync_clk);
	}

	return err;
}

Contributors

PersonTokensPropCommitsCommitProp
Yaniv Gardi122100.00%2100.00%
Total122100.00%2100.00%


/*
 * ufs_qcom_link_startup_post_change - post link-startup PHY configuration
 * @hba: host controller instance
 *
 * Reads the number of connected TX data lanes and programs the PHY's TX
 * lane enable accordingly.
 *
 * Returns zero on success, negative errno otherwise.
 */
static int ufs_qcom_link_startup_post_change(struct ufs_hba *hba)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
	u32 tx_lanes;
	int err;

	err = ufs_qcom_get_connected_tx_lanes(hba, &tx_lanes);
	if (err)
		return err;

	err = ufs_qcom_phy_set_tx_lane_enable(host->generic_phy, tx_lanes);
	if (err)
		dev_err(hba->dev, "%s: ufs_qcom_phy_set_tx_lane_enable failed\n",
			__func__);

	return err;
}

Contributors

PersonTokensPropCommitsCommitProp
Yaniv Gardi84100.00%2100.00%
Total84100.00%2100.00%


static int ufs_qcom_check_hibern8(struct ufs_hba *hba) { int err; u32 tx_fsm_val = 0; unsigned long timeout = jiffies + msecs_to_jiffies(HBRN8_POLL_TOUT_MS); do { err = ufshcd_dme_get(hba, UIC_ARG_MIB_SEL(MPHY_TX_FSM_STATE, UIC_ARG_MPHY_TX_GEN_SEL_INDEX(0)), &tx_fsm_val); if (err || tx_fsm_val == TX_FSM_HIBERN8) break; /* sleep for max. 200us */ usleep_range(100, 200); } while (time_before(jiffies, timeout)); /* * we might have scheduled out for long during polling so * check the state again. */ if (time_after(jiffies, timeout)) err = ufshcd_dme_get(hba, UIC_ARG_MIB_SEL(MPHY_TX_FSM_STATE, UIC_ARG_MPHY_TX_GEN_SEL_INDEX(0)), &tx_fsm_val); if (err) { dev_err(hba->dev, "%s: unable to get TX_FSM_STATE, err %d\n", __func__, err); } else if (tx_fsm_val != TX_FSM_HIBERN8) { err = tx_fsm_val; dev_err(hba->dev, "%s: invalid TX_FSM_STATE = %d\n", __func__, err); } return err; }

Contributors

PersonTokensPropCommitsCommitProp
Yaniv Gardi158100.00%2100.00%
Total158100.00%2100.00%


static void ufs_qcom_select_unipro_mode(struct ufs_qcom_host *host) { ufshcd_rmwl(host->hba, QUNIPRO_SEL, ufs_qcom_cap_qunipro(host) ? QUNIPRO_SEL : 0, REG_UFS_CFG1); /* make sure above configuration is applied before we return */ mb(); }

Contributors

PersonTokensPropCommitsCommitProp
Yaniv Gardi35100.00%1100.00%
Total35100.00%1100.00%


static int ufs_qcom_power_up_sequence(struct ufs_hba *hba) { struct ufs_qcom_host *host = ufshcd_get_variant(hba); struct phy *phy = host->generic_phy; int ret = 0; bool is_rate_B = (UFS_QCOM_LIMIT_HS_RATE == PA_HS_MODE_B) ? true : false; /* Assert PHY reset and apply PHY calibration values */ ufs_qcom_assert_reset(hba); /* provide 1ms delay to let the reset pulse propagate */ usleep_range(1000, 1100); ret = ufs_qcom_phy_calibrate_phy(phy, is_rate_B); if (ret) { dev_err(hba->dev, "%s: ufs_qcom_phy_calibrate_phy() failed, ret = %d\n", __func__, ret); goto out; } /* De-assert PHY reset and start serdes */ ufs_qcom_deassert_reset(hba); /* * after reset deassertion, phy will need all ref clocks, * voltage, current to settle down before starting serdes. */ usleep_range(1000, 1100); ret = ufs_qcom_phy_start_serdes(phy); if (ret) { dev_err(hba->dev, "%s: ufs_qcom_phy_start_serdes() failed, ret = %d\n", __func__, ret); goto out; } ret = ufs_qcom_phy_is_pcs_ready(phy); if (ret) dev_err(hba->dev, "%s: is_physical_coding_sublayer_ready() failed, ret = %d\n", __func__, ret); ufs_qcom_select_unipro_mode(host); out: return ret; }

Contributors

PersonTokensPropCommitsCommitProp
Yaniv Gardi170100.00%4100.00%
Total170100.00%4100.00%

/* * The UTP controller has a number of internal clock gating cells (CGCs). * Internal hardware sub-modules within the UTP controller control the CGCs. * Hardware CGCs disable the clock to inactivate UTP sub-modules not involved * in a specific operation, UTP controller CGCs are by default disabled and * this function enables them (after every UFS link startup) to save some power * leakage. */
static void ufs_qcom_enable_hw_clk_gating(struct ufs_hba *hba) { ufshcd_writel(hba, ufshcd_readl(hba, REG_UFS_CFG2) | REG_UFS_CFG2_CGC_EN_ALL, REG_UFS_CFG2); /* Ensure that HW clock gating is enabled before next operations */ mb(); }

Contributors

PersonTokensPropCommitsCommitProp
Yaniv Gardi31100.00%1100.00%
Total31100.00%1100.00%


static int ufs_qcom_hce_enable_notify(struct ufs_hba *hba, enum ufs_notify_change_status status) { struct ufs_qcom_host *host = ufshcd_get_variant(hba); int err = 0; switch (status) { case PRE_CHANGE: ufs_qcom_power_up_sequence(hba); /* * The PHY PLL output is the source of tx/rx lane symbol * clocks, hence, enable the lane clocks only after PHY * is initialized. */ err = ufs_qcom_enable_lane_clks(host); break; case POST_CHANGE: /* check if UFS PHY moved from DISABLED to HIBERN8 */ err = ufs_qcom_check_hibern8(hba); ufs_qcom_enable_hw_clk_gating(hba); break; default: dev_err(hba->dev, "%s: invalid status %d\n", __func__, status); err = -EINVAL; break; } return err; }

Contributors

PersonTokensPropCommitsCommitProp
Yaniv Gardi93100.00%3100.00%
Total93100.00%3100.00%

/** * Returns zero for success and non-zero in case of a failure */
/*
 * ufs_qcom_cfg_timers - program UFS core clock timing registers
 * @hba: host controller instance
 * @gear: gear the link will run in (1-based table index)
 * @hs: power mode (FAST_MODE/FASTAUTO_MODE/SLOW_MODE/SLOWAUTO_MODE)
 * @rate: HS rate, PA_HS_MODE_A or PA_HS_MODE_B
 * @update_link_startup_timer: also program REG_UFS_PA_LINK_STARTUP_TIMER
 *
 * Returns zero for success and non-zero in case of a failure.
 */
static int ufs_qcom_cfg_timers(struct ufs_hba *hba, u32 gear,
			       u32 hs, u32 rate, bool update_link_startup_timer)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
	struct ufs_clk_info *clki;
	unsigned long core_clk_rate = 0;
	u32 core_clk_period_in_ns;
	u32 core_clk_cycles_per_us;
	u32 tx_clk_cycles_per_us = 0;
	/* {gear, TX symbol clock cycles per us} lookup tables */
	static u32 pwm_fr_table[][2] = {
		{UFS_PWM_G1, 0x1},
		{UFS_PWM_G2, 0x1},
		{UFS_PWM_G3, 0x1},
		{UFS_PWM_G4, 0x1},
	};
	static u32 hs_fr_table_rA[][2] = {
		{UFS_HS_G1, 0x1F},
		{UFS_HS_G2, 0x3e},
		{UFS_HS_G3, 0x7D},
	};
	static u32 hs_fr_table_rB[][2] = {
		{UFS_HS_G1, 0x24},
		{UFS_HS_G2, 0x49},
		{UFS_HS_G3, 0x92},
	};

	/*
	 * The Qunipro controller does not use following registers:
	 * SYS1CLK_1US_REG, TX_SYMBOL_CLK_1US_REG, CLK_NS_REG &
	 * UFS_REG_PA_LINK_STARTUP_TIMER.
	 * But UTP controller uses SYS1CLK_1US_REG register for Interrupt
	 * Aggregation logic.
	 */
	if (ufs_qcom_cap_qunipro(host) && !ufshcd_is_intr_aggr_allowed(hba))
		return 0;

	if (gear == 0) {
		dev_err(hba->dev, "%s: invalid gear = %d\n", __func__, gear);
		return -EINVAL;
	}

	list_for_each_entry(clki, &hba->clk_list_head, list) {
		if (!strcmp(clki->name, "core_clk"))
			core_clk_rate = clk_get_rate(clki->clk);
	}

	/* If frequency is smaller than 1MHz, set to 1MHz */
	if (core_clk_rate < DEFAULT_CLK_RATE_HZ)
		core_clk_rate = DEFAULT_CLK_RATE_HZ;

	core_clk_cycles_per_us = core_clk_rate / USEC_PER_SEC;
	if (ufshcd_readl(hba, REG_UFS_SYS1CLK_1US) != core_clk_cycles_per_us) {
		ufshcd_writel(hba, core_clk_cycles_per_us, REG_UFS_SYS1CLK_1US);
		/*
		 * make sure above write gets applied before we return from
		 * this function.
		 */
		mb();
	}

	/* QUnipro does not need the symbol clock / link startup registers */
	if (ufs_qcom_cap_qunipro(host))
		return 0;

	core_clk_period_in_ns = NSEC_PER_SEC / core_clk_rate;
	core_clk_period_in_ns <<= OFFSET_CLK_NS_REG;
	core_clk_period_in_ns &= MASK_CLK_NS_REG;

	switch (hs) {
	case FASTAUTO_MODE:
	case FAST_MODE:
		if (rate == PA_HS_MODE_A) {
			if (gear > ARRAY_SIZE(hs_fr_table_rA)) {
				dev_err(hba->dev,
					"%s: index %d exceeds table size %zu\n",
					__func__, gear,
					ARRAY_SIZE(hs_fr_table_rA));
				return -EINVAL;
			}
			tx_clk_cycles_per_us = hs_fr_table_rA[gear-1][1];
		} else if (rate == PA_HS_MODE_B) {
			if (gear > ARRAY_SIZE(hs_fr_table_rB)) {
				dev_err(hba->dev,
					"%s: index %d exceeds table size %zu\n",
					__func__, gear,
					ARRAY_SIZE(hs_fr_table_rB));
				return -EINVAL;
			}
			tx_clk_cycles_per_us = hs_fr_table_rB[gear-1][1];
		} else {
			dev_err(hba->dev, "%s: invalid rate = %d\n",
				__func__, rate);
			return -EINVAL;
		}
		break;
	case SLOWAUTO_MODE:
	case SLOW_MODE:
		if (gear > ARRAY_SIZE(pwm_fr_table)) {
			dev_err(hba->dev,
				"%s: index %d exceeds table size %zu\n",
				__func__, gear, ARRAY_SIZE(pwm_fr_table));
			return -EINVAL;
		}
		tx_clk_cycles_per_us = pwm_fr_table[gear-1][1];
		break;
	case UNCHANGED:
	default:
		dev_err(hba->dev, "%s: invalid mode = %d\n", __func__, hs);
		return -EINVAL;
	}

	if (ufshcd_readl(hba, REG_UFS_TX_SYMBOL_CLK_NS_US) !=
	    (core_clk_period_in_ns | tx_clk_cycles_per_us)) {
		/* this register 2 fields shall be written at once */
		ufshcd_writel(hba, core_clk_period_in_ns | tx_clk_cycles_per_us,
			      REG_UFS_TX_SYMBOL_CLK_NS_US);
		/*
		 * make sure above write gets applied before we return from
		 * this function.
		 */
		mb();
	}

	if (update_link_startup_timer) {
		ufshcd_writel(hba, ((core_clk_rate / MSEC_PER_SEC) * 100),
			      REG_UFS_PA_LINK_STARTUP_TIMER);
		/*
		 * make sure that this configuration is applied before
		 * we return
		 */
		mb();
	}

	return 0;
}

Contributors

PersonTokensPropCommitsCommitProp
Yaniv Gardi574100.00%4100.00%
Total574100.00%4100.00%


static int ufs_qcom_link_startup_notify(struct ufs_hba *hba, enum ufs_notify_change_status status) { int err = 0; struct ufs_qcom_host *host = ufshcd_get_variant(hba); switch (status) { case PRE_CHANGE: if (ufs_qcom_cfg_timers(hba, UFS_PWM_G1, SLOWAUTO_MODE, 0, true)) { dev_err(hba->dev, "%s: ufs_qcom_cfg_timers() failed\n", __func__); err = -EINVAL; goto out; } if (ufs_qcom_cap_qunipro(host)) /* * set unipro core clock cycles to 150 & clear clock * divider */ err = ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(hba, 150); /* * Some UFS devices (and may be host) have issues if LCC is * enabled. So we are setting PA_Local_TX_LCC_Enable to 0 * before link startup which will make sure that both host * and device TX LCC are disabled once link startup is * completed. */ if (ufshcd_get_local_unipro_ver(hba) != UFS_UNIPRO_VER_1_41) err = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_LOCAL_TX_LCC_ENABLE), 0); break; case POST_CHANGE: ufs_qcom_link_startup_post_change(hba); break; default: break; } out: return err; }

Contributors

PersonTokensPropCommitsCommitProp
Yaniv Gardi133100.00%3100.00%
Total133100.00%3100.00%


/*
 * ufs_qcom_suspend - power down PHY resources according to link state
 * @hba: host controller instance
 * @pm_op: type of power management operation (unused here)
 *
 * Always returns zero.
 */
static int ufs_qcom_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
	struct phy *phy = host->generic_phy;

	if (ufs_qcom_is_link_off(hba)) {
		/*
		 * Disable the tx/rx lane symbol clocks before PHY is
		 * powered down as the PLL source should be disabled
		 * after downstream clocks are disabled.
		 */
		ufs_qcom_disable_lane_clks(host);
		phy_power_off(phy);

		/* Assert PHY soft reset */
		ufs_qcom_assert_reset(hba);
	} else if (!ufs_qcom_is_link_active(hba)) {
		/*
		 * If UniPro link is not active, PHY ref_clk, main PHY analog
		 * power rail and low noise analog power rail for PLL can be
		 * switched off.
		 */
		ufs_qcom_disable_lane_clks(host);
		phy_power_off(phy);
	}

	return 0;
}

Contributors

PersonTokensPropCommitsCommitProp
Yaniv Gardi94100.00%3100.00%
Total94100.00%3100.00%


/*
 * ufs_qcom_resume - power the PHY back on and re-enable lane clocks
 * @hba: host controller instance
 * @pm_op: type of power management operation (unused here)
 *
 * Returns zero on success, negative errno otherwise.
 */
static int ufs_qcom_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
	int err;

	err = phy_power_on(host->generic_phy);
	if (err) {
		dev_err(hba->dev, "%s: failed enabling regs, err = %d\n",
			__func__, err);
		return err;
	}

	err = ufs_qcom_enable_lane_clks(host);
	if (err)
		return err;

	hba->is_sys_suspended = false;
	return 0;
}

Contributors

PersonTokensPropCommitsCommitProp
Yaniv Gardi91100.00%3100.00%
Total91100.00%3100.00%

/* Vendor-preferred parameters used during power mode negotiation. */
struct ufs_qcom_dev_params {
	u32 pwm_rx_gear;	/* pwm rx gear to work in */
	u32 pwm_tx_gear;	/* pwm tx gear to work in */
	u32 hs_rx_gear;		/* hs rx gear to work in */
	u32 hs_tx_gear;		/* hs tx gear to work in */
	u32 rx_lanes;		/* number of rx lanes */
	u32 tx_lanes;		/* number of tx lanes */
	u32 rx_pwr_pwm;		/* rx pwm working pwr */
	u32 tx_pwr_pwm;		/* tx pwm working pwr */
	u32 rx_pwr_hs;		/* rx hs working pwr */
	u32 tx_pwr_hs;		/* tx hs working pwr */
	u32 hs_rate;		/* rate A/B to work in HS */
	u32 desired_working_mode;
};
static int ufs_qcom_get_pwr_dev_param(struct ufs_qcom_dev_params *qcom_param, struct ufs_pa_layer_attr *dev_max, struct ufs_pa_layer_attr *agreed_pwr) { int min_qcom_gear; int min_dev_gear; bool is_dev_sup_hs = false; bool is_qcom_max_hs = false; if (dev_max->pwr_rx == FAST_MODE) is_dev_sup_hs = true; if (qcom_param->desired_working_mode == FAST) { is_qcom_max_hs = true; min_qcom_gear = min_t(u32, qcom_param->hs_rx_gear, qcom_param->hs_tx_gear); } else { min_qcom_gear = min_t(u32, qcom_param->pwm_rx_gear, qcom_param->pwm_tx_gear); } /* * device doesn't support HS but qcom_param->desired_working_mode is * HS, thus device and qcom_param don't agree */ if (!is_dev_sup_hs && is_qcom_max_hs) { pr_err("%s: failed to agree on power mode (device doesn't support HS but requested power is HS)\n", __func__); return -ENOTSUPP; } else if (is_dev_sup_hs && is_qcom_max_hs) { /* * since device supports HS, it supports FAST_MODE. * since qcom_param->desired_working_mode is also HS * then final decision (FAST/FASTAUTO) is done according * to qcom_params as it is the restricting factor */ agreed_pwr->pwr_rx = agreed_pwr->pwr_tx = qcom_param->rx_pwr_hs; } else { /* * here qcom_param->desired_working_mode is PWM. * it doesn't matter whether device supports HS or PWM, * in both cases qcom_param->desired_working_mode will * determine the mode */ agreed_pwr->pwr_rx = agreed_pwr->pwr_tx = qcom_param->rx_pwr_pwm; } /* * we would like tx to work in the minimum number of lanes * between device capability and vendor preferences. 
* the same decision will be made for rx */ agreed_pwr->lane_tx = min_t(u32, dev_max->lane_tx, qcom_param->tx_lanes); agreed_pwr->lane_rx = min_t(u32, dev_max->lane_rx, qcom_param->rx_lanes); /* device maximum gear is the minimum between device rx and tx gears */ min_dev_gear = min_t(u32, dev_max->gear_rx, dev_max->gear_tx); /* * if both device capabilities and vendor pre-defined preferences are * both HS or both PWM then set the minimum gear to be the chosen * working gear. * if one is PWM and one is HS then the one that is PWM get to decide * what is the gear, as it is the one that also decided previously what * pwr the device will be configured to. */ if ((is_dev_sup_hs && is_qcom_max_hs) || (!is_dev_sup_hs && !is_qcom_max_hs)) agreed_pwr->gear_rx = agreed_pwr->gear_tx = min_t(u32, min_dev_gear, min_qcom_gear); else if (!is_dev_sup_hs) agreed_pwr->gear_rx = agreed_pwr->gear_tx = min_dev_gear; else agreed_pwr->gear_rx = agreed_pwr->gear_tx = min_qcom_gear; agreed_pwr->hs_rate = qcom_param->hs_rate; return 0; }

Contributors

PersonTokensPropCommitsCommitProp
Yaniv Gardi278100.00%1100.00%
Total278100.00%1100.00%

#ifdef CONFIG_MSM_BUS_SCALING
/*
 * ufs_qcom_get_bus_vote - resolve a speed-mode name to a bus vote index
 * @host: qcom host private data
 * @speed_mode: bus vector name, e.g. "MIN" or "HS_RB_G3_L2"
 *
 * Looks up @speed_mode in the "qcom,bus-vector-names" DT property; when
 * the max-bandwidth override is set and @speed_mode is not "MIN", "MAX"
 * is looked up instead.
 *
 * Returns the matched property index (>= 0) or a negative errno.
 */
static int ufs_qcom_get_bus_vote(struct ufs_qcom_host *host,
		const char *speed_mode)
{
	struct device *dev = host->hba->dev;
	struct device_node *np = dev->of_node;
	const char *key = "qcom,bus-vector-names";
	int err;

	if (!speed_mode) {
		err = -EINVAL;
		goto out;
	}

	if (host->bus_vote.is_max_bw_needed && strcmp(speed_mode, "MIN"))
		err = of_property_match_string(np, key, "MAX");
	else
		err = of_property_match_string(np, key, speed_mode);

out:
	if (err < 0)
		dev_err(dev, "%s: Invalid %s mode %d\n",
			__func__, speed_mode, err);
	return err;
}

Contributors

PersonTokensPropCommitsCommitProp
Yaniv Gardi125100.00%2100.00%
Total125100.00%2100.00%


static void ufs_qcom_get_speed_mode(struct ufs_pa_layer_attr *p, char *result) { int gear = max_t(u32, p->gear_rx, p->gear_tx); int lanes = max_t(u32, p->lane_rx, p->lane_tx); int pwr; /* default to PWM Gear 1, Lane 1 if power mode is not initialized */ if (!gear) gear = 1; if (!lanes) lanes = 1; if (!p->pwr_rx && !p->pwr_tx) { pwr = SLOWAUTO_MODE; snprintf(result, BUS_VECTOR_NAME_LEN, "MIN"); } else if (p->pwr_rx == FAST_MODE || p->pwr_rx == FASTAUTO_MODE || p->pwr_tx == FAST_MODE || p->pwr_tx == FASTAUTO_MODE) { pwr = FAST_MODE; snprintf(result, BUS_VECTOR_NAME_LEN, "%s_R%s_G%d_L%d", "HS", p->hs_rate == PA_HS_MODE_B ? "B" : "A", gear, lanes); } else { pwr = SLOW_MODE; snprintf(result, BUS_VECTOR_NAME_LEN, "%s_G%d_L%d", "PWM", gear, lanes); } }

Contributors

PersonTokensPropCommitsCommitProp
Yaniv Gardi176100.00%3100.00%
Total176100.00%3100.00%


static int ufs_qcom_set_bus_vote(struct ufs_qcom_host *host, int vote) { int err = 0; if (vote != host->bus_vote.curr_vote) {