Author | Tokens | Token Proportion | Commits | Commit Proportion |
---|---|---|---|---|
Stanley Chu | 1229 | 97.93% | 1 | 25.00% |
Anders Roxell | 20 | 1.59% | 1 | 25.00% |
YueHaibing | 3 | 0.24% | 1 | 25.00%
Nathan Chancellor | 3 | 0.24% | 1 | 25.00% |
Total | 1255 | 100.00% | 4 | 100.00%
```c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2019 MediaTek Inc.
 * Authors:
 *	Stanley Chu <stanley.chu@mediatek.com>
 *	Peter Wang <peter.wang@mediatek.com>
 */

#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>

#include "ufshcd.h"
#include "ufshcd-pltfrm.h"
#include "unipro.h"
#include "ufs-mediatek.h"

static void ufs_mtk_cfg_unipro_cg(struct ufs_hba *hba, bool enable)
{
	u32 tmp;

	if (enable) {
		ufshcd_dme_get(hba,
			       UIC_ARG_MIB(VS_SAVEPOWERCONTROL), &tmp);
		tmp = tmp |
		      (1 << RX_SYMBOL_CLK_GATE_EN) |
		      (1 << SYS_CLK_GATE_EN) |
		      (1 << TX_CLK_GATE_EN);
		ufshcd_dme_set(hba,
			       UIC_ARG_MIB(VS_SAVEPOWERCONTROL), tmp);

		ufshcd_dme_get(hba,
			       UIC_ARG_MIB(VS_DEBUGCLOCKENABLE), &tmp);
		tmp = tmp & ~(1 << TX_SYMBOL_CLK_REQ_FORCE);
		ufshcd_dme_set(hba,
			       UIC_ARG_MIB(VS_DEBUGCLOCKENABLE), tmp);
	} else {
		ufshcd_dme_get(hba,
			       UIC_ARG_MIB(VS_SAVEPOWERCONTROL), &tmp);
		tmp = tmp & ~((1 << RX_SYMBOL_CLK_GATE_EN) |
			      (1 << SYS_CLK_GATE_EN) |
			      (1 << TX_CLK_GATE_EN));
		ufshcd_dme_set(hba,
			       UIC_ARG_MIB(VS_SAVEPOWERCONTROL), tmp);

		ufshcd_dme_get(hba,
			       UIC_ARG_MIB(VS_DEBUGCLOCKENABLE), &tmp);
		tmp = tmp | (1 << TX_SYMBOL_CLK_REQ_FORCE);
		ufshcd_dme_set(hba,
			       UIC_ARG_MIB(VS_DEBUGCLOCKENABLE), tmp);
	}
}

static int ufs_mtk_bind_mphy(struct ufs_hba *hba)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
	struct device *dev = hba->dev;
	struct device_node *np = dev->of_node;
	int err = 0;

	host->mphy = devm_of_phy_get_by_index(dev, np, 0);

	if (host->mphy == ERR_PTR(-EPROBE_DEFER)) {
		/*
		 * UFS driver might be probed before the phy driver does.
		 * In that case we would like to return EPROBE_DEFER code.
		 */
		err = -EPROBE_DEFER;
		dev_info(dev,
			 "%s: required phy hasn't probed yet. err = %d\n",
			 __func__, err);
	} else if (IS_ERR(host->mphy)) {
		err = PTR_ERR(host->mphy);
		dev_info(dev, "%s: PHY get failed %d\n", __func__, err);
	}

	if (err)
		host->mphy = NULL;

	return err;
}

/**
 * ufs_mtk_setup_clocks - enables/disable clocks
 * @hba: host controller instance
 * @on: If true, enable clocks else disable them.
 * @status: PRE_CHANGE or POST_CHANGE notify
 *
 * Returns 0 on success, non-zero on failure.
 */
static int ufs_mtk_setup_clocks(struct ufs_hba *hba, bool on,
				enum ufs_notify_change_status status)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
	int ret = -EINVAL;

	/*
	 * In case ufs_mtk_init() is not yet done, simply ignore.
	 * This ufs_mtk_setup_clocks() shall be called from
	 * ufs_mtk_init() after init is done.
	 */
	if (!host)
		return 0;

	switch (status) {
	case PRE_CHANGE:
		if (!on)
			ret = phy_power_off(host->mphy);
		break;
	case POST_CHANGE:
		if (on)
			ret = phy_power_on(host->mphy);
		break;
	}

	return ret;
}

/**
 * ufs_mtk_init - find other essential mmio bases
 * @hba: host controller instance
 *
 * Binds PHY with controller and powers up PHY enabling clocks
 * and regulators.
 *
 * Returns -EPROBE_DEFER if binding fails, returns negative error
 * on phy power up failure and returns zero on success.
 */
static int ufs_mtk_init(struct ufs_hba *hba)
{
	struct ufs_mtk_host *host;
	struct device *dev = hba->dev;
	int err = 0;

	host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
	if (!host) {
		err = -ENOMEM;
		dev_info(dev, "%s: no memory for mtk ufs host\n", __func__);
		goto out;
	}

	host->hba = hba;
	ufshcd_set_variant(hba, host);

	err = ufs_mtk_bind_mphy(hba);
	if (err)
		goto out_variant_clear;

	/*
	 * ufshcd_vops_init() is invoked after
	 * ufshcd_setup_clock(true) in ufshcd_hba_init() thus
	 * phy clock setup is skipped.
	 *
	 * Enable phy clocks specifically here.
	 */
	ufs_mtk_setup_clocks(hba, true, POST_CHANGE);
	goto out;

out_variant_clear:
	ufshcd_set_variant(hba, NULL);
out:
	return err;
}

static int ufs_mtk_pre_pwr_change(struct ufs_hba *hba,
				  struct ufs_pa_layer_attr *dev_max_params,
				  struct ufs_pa_layer_attr *dev_req_params)
{
	struct ufs_dev_params host_cap;
	int ret;

	host_cap.tx_lanes = UFS_MTK_LIMIT_NUM_LANES_TX;
	host_cap.rx_lanes = UFS_MTK_LIMIT_NUM_LANES_RX;
	host_cap.hs_rx_gear = UFS_MTK_LIMIT_HSGEAR_RX;
	host_cap.hs_tx_gear = UFS_MTK_LIMIT_HSGEAR_TX;
	host_cap.pwm_rx_gear = UFS_MTK_LIMIT_PWMGEAR_RX;
	host_cap.pwm_tx_gear = UFS_MTK_LIMIT_PWMGEAR_TX;
	host_cap.rx_pwr_pwm = UFS_MTK_LIMIT_RX_PWR_PWM;
	host_cap.tx_pwr_pwm = UFS_MTK_LIMIT_TX_PWR_PWM;
	host_cap.rx_pwr_hs = UFS_MTK_LIMIT_RX_PWR_HS;
	host_cap.tx_pwr_hs = UFS_MTK_LIMIT_TX_PWR_HS;
	host_cap.hs_rate = UFS_MTK_LIMIT_HS_RATE;
	host_cap.desired_working_mode =
				UFS_MTK_LIMIT_DESIRED_MODE;

	ret = ufshcd_get_pwr_dev_param(&host_cap,
				       dev_max_params,
				       dev_req_params);
	if (ret) {
		pr_info("%s: failed to determine capabilities\n",
			__func__);
	}

	return ret;
}

static int ufs_mtk_pwr_change_notify(struct ufs_hba *hba,
				     enum ufs_notify_change_status stage,
				     struct ufs_pa_layer_attr *dev_max_params,
				     struct ufs_pa_layer_attr *dev_req_params)
{
	int ret = 0;

	switch (stage) {
	case PRE_CHANGE:
		ret = ufs_mtk_pre_pwr_change(hba, dev_max_params,
					     dev_req_params);
		break;
	case POST_CHANGE:
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

static int ufs_mtk_pre_link(struct ufs_hba *hba)
{
	int ret;
	u32 tmp;

	/* disable deep stall */
	ret = ufshcd_dme_get(hba, UIC_ARG_MIB(VS_SAVEPOWERCONTROL), &tmp);
	if (ret)
		return ret;

	tmp &= ~(1 << 6);

	ret = ufshcd_dme_set(hba, UIC_ARG_MIB(VS_SAVEPOWERCONTROL), tmp);

	return ret;
}

static int ufs_mtk_post_link(struct ufs_hba *hba)
{
	/* disable device LCC */
	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_LOCAL_TX_LCC_ENABLE), 0);

	/* enable unipro clock gating feature */
	ufs_mtk_cfg_unipro_cg(hba, true);

	return 0;
}

static int ufs_mtk_link_startup_notify(struct ufs_hba *hba,
				       enum ufs_notify_change_status stage)
{
	int ret = 0;

	switch (stage) {
	case PRE_CHANGE:
		ret = ufs_mtk_pre_link(hba);
		break;
	case POST_CHANGE:
		ret = ufs_mtk_post_link(hba);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

static int ufs_mtk_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);

	if (ufshcd_is_link_hibern8(hba))
		phy_power_off(host->mphy);

	return 0;
}

static int ufs_mtk_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);

	if (ufshcd_is_link_hibern8(hba))
		phy_power_on(host->mphy);

	return 0;
}

/**
 * struct ufs_hba_mtk_vops - UFS MTK specific variant operations
 *
 * The variant operations configure the necessary controller and PHY
 * handshake during initialization.
 */
static struct ufs_hba_variant_ops ufs_hba_mtk_vops = {
	.name                = "mediatek.ufshci",
	.init                = ufs_mtk_init,
	.setup_clocks        = ufs_mtk_setup_clocks,
	.link_startup_notify = ufs_mtk_link_startup_notify,
	.pwr_change_notify   = ufs_mtk_pwr_change_notify,
	.suspend             = ufs_mtk_suspend,
	.resume              = ufs_mtk_resume,
};

/**
 * ufs_mtk_probe - probe routine of the driver
 * @pdev: pointer to Platform device handle
 *
 * Return zero for success and non-zero for failure
 */
static int ufs_mtk_probe(struct platform_device *pdev)
{
	int err;
	struct device *dev = &pdev->dev;

	/* perform generic probe */
	err = ufshcd_pltfrm_init(pdev, &ufs_hba_mtk_vops);
	if (err)
		dev_info(dev, "probe failed %d\n", err);

	return err;
}

/**
 * ufs_mtk_remove - set driver_data of the device to NULL
 * @pdev: pointer to platform device handle
 *
 * Always return 0
 */
static int ufs_mtk_remove(struct platform_device *pdev)
{
	struct ufs_hba *hba = platform_get_drvdata(pdev);

	pm_runtime_get_sync(&(pdev)->dev);
	ufshcd_remove(hba);
	return 0;
}

static const struct of_device_id ufs_mtk_of_match[] = {
	{ .compatible = "mediatek,mt8183-ufshci"},
	{},
};

static const struct dev_pm_ops ufs_mtk_pm_ops = {
	.suspend         = ufshcd_pltfrm_suspend,
	.resume          = ufshcd_pltfrm_resume,
	.runtime_suspend = ufshcd_pltfrm_runtime_suspend,
	.runtime_resume  = ufshcd_pltfrm_runtime_resume,
	.runtime_idle    = ufshcd_pltfrm_runtime_idle,
};

static struct platform_driver ufs_mtk_pltform = {
	.probe      = ufs_mtk_probe,
	.remove     = ufs_mtk_remove,
	.shutdown   = ufshcd_pltfrm_shutdown,
	.driver = {
		.name   = "ufshcd-mtk",
		.pm     = &ufs_mtk_pm_ops,
		.of_match_table = ufs_mtk_of_match,
	},
};

MODULE_AUTHOR("Stanley Chu <stanley.chu@mediatek.com>");
MODULE_AUTHOR("Peter Wang <peter.wang@mediatek.com>");
MODULE_DESCRIPTION("MediaTek UFS Host Driver");
MODULE_LICENSE("GPL v2");

module_platform_driver(ufs_mtk_pltform);
```