# SPDX-License-Identifier: GPL-2.0
#
-# Phy drivers for Intel Lightning Mountain(LGM) platform
+# Phy drivers for Intel platforms
#
-config PHY_INTEL_COMBO
- bool "Intel ComboPHY driver"
+config PHY_INTEL_LGM_COMBO
+ bool "Intel Lightning Mountain ComboPHY driver"
depends on X86 || COMPILE_TEST
depends on OF && HAS_IOMEM
select MFD_SYSCON
chipsets which provides PHYs for various controllers, EMAC,
SATA and PCIe.
-config PHY_INTEL_EMMC
- tristate "Intel EMMC PHY driver"
+config PHY_INTEL_LGM_EMMC
+ tristate "Intel Lightning Mountain EMMC PHY driver"
depends on X86 || COMPILE_TEST
select GENERIC_PHY
help
# SPDX-License-Identifier: GPL-2.0
-obj-$(CONFIG_PHY_INTEL_COMBO) += phy-intel-combo.o
-obj-$(CONFIG_PHY_INTEL_EMMC) += phy-intel-emmc.o
+obj-$(CONFIG_PHY_INTEL_LGM_COMBO) += phy-intel-lgm-combo.o
+obj-$(CONFIG_PHY_INTEL_LGM_EMMC) += phy-intel-lgm-emmc.o
+++ /dev/null
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Intel Combo-PHY driver
- *
- * Copyright (C) 2019-2020 Intel Corporation.
- */
-
-#include <linux/bitfield.h>
-#include <linux/clk.h>
-#include <linux/iopoll.h>
-#include <linux/mfd/syscon.h>
-#include <linux/module.h>
-#include <linux/mutex.h>
-#include <linux/of.h>
-#include <linux/phy/phy.h>
-#include <linux/platform_device.h>
-#include <linux/regmap.h>
-#include <linux/reset.h>
-
-#include <dt-bindings/phy/phy.h>
-
-#define PCIE_PHY_GEN_CTRL 0x00
-#define PCIE_PHY_CLK_PAD BIT(17)
-
-#define PAD_DIS_CFG 0x174
-
-#define PCS_XF_ATE_OVRD_IN_2 0x3008
-#define ADAPT_REQ_MSK GENMASK(5, 4)
-
-#define PCS_XF_RX_ADAPT_ACK 0x3010
-#define RX_ADAPT_ACK_BIT BIT(0)
-
-#define CR_ADDR(addr, lane) (((addr) + (lane) * 0x100) << 2)
-#define REG_COMBO_MODE(x) ((x) * 0x200)
-#define REG_CLK_DISABLE(x) ((x) * 0x200 + 0x124)
-
-#define COMBO_PHY_ID(x) ((x)->parent->id)
-#define PHY_ID(x) ((x)->id)
-
-#define CLK_100MHZ 100000000
-#define CLK_156_25MHZ 156250000
-
-static const unsigned long intel_iphy_clk_rates[] = {
- CLK_100MHZ, CLK_156_25MHZ, CLK_100MHZ,
-};
-
-enum {
- PHY_0,
- PHY_1,
- PHY_MAX_NUM
-};
-
-/*
- * Clock Register bit fields to enable clocks
- * for ComboPhy according to the mode.
- */
-enum intel_phy_mode {
- PHY_PCIE_MODE = 0,
- PHY_XPCS_MODE,
- PHY_SATA_MODE,
-};
-
-/* ComboPhy mode Register values */
-enum intel_combo_mode {
- PCIE0_PCIE1_MODE = 0,
- PCIE_DL_MODE,
- RXAUI_MODE,
- XPCS0_XPCS1_MODE,
- SATA0_SATA1_MODE,
-};
-
-enum aggregated_mode {
- PHY_SL_MODE,
- PHY_DL_MODE,
-};
-
-struct intel_combo_phy;
-
-struct intel_cbphy_iphy {
- struct phy *phy;
- struct intel_combo_phy *parent;
- struct reset_control *app_rst;
- u32 id;
-};
-
-struct intel_combo_phy {
- struct device *dev;
- struct clk *core_clk;
- unsigned long clk_rate;
- void __iomem *app_base;
- void __iomem *cr_base;
- struct regmap *syscfg;
- struct regmap *hsiocfg;
- u32 id;
- u32 bid;
- struct reset_control *phy_rst;
- struct reset_control *core_rst;
- struct intel_cbphy_iphy iphy[PHY_MAX_NUM];
- enum intel_phy_mode phy_mode;
- enum aggregated_mode aggr_mode;
- u32 init_cnt;
- struct mutex lock;
-};
-
-static int intel_cbphy_iphy_enable(struct intel_cbphy_iphy *iphy, bool set)
-{
- struct intel_combo_phy *cbphy = iphy->parent;
- u32 mask = BIT(cbphy->phy_mode * 2 + iphy->id);
- u32 val;
-
- /* Register: 0 is enable, 1 is disable */
- val = set ? 0 : mask;
-
- return regmap_update_bits(cbphy->hsiocfg, REG_CLK_DISABLE(cbphy->bid),
- mask, val);
-}
-
-static int intel_cbphy_pcie_refclk_cfg(struct intel_cbphy_iphy *iphy, bool set)
-{
- struct intel_combo_phy *cbphy = iphy->parent;
- u32 mask = BIT(cbphy->id * 2 + iphy->id);
- u32 val;
-
- /* Register: 0 is enable, 1 is disable */
- val = set ? 0 : mask;
-
- return regmap_update_bits(cbphy->syscfg, PAD_DIS_CFG, mask, val);
-}
-
-static inline void combo_phy_w32_off_mask(void __iomem *base, unsigned int reg,
- u32 mask, u32 val)
-{
- u32 reg_val;
-
- reg_val = readl(base + reg);
- reg_val &= ~mask;
- reg_val |= val;
- writel(reg_val, base + reg);
-}
-
-static int intel_cbphy_iphy_cfg(struct intel_cbphy_iphy *iphy,
- int (*phy_cfg)(struct intel_cbphy_iphy *))
-{
- struct intel_combo_phy *cbphy = iphy->parent;
- int ret;
-
- ret = phy_cfg(iphy);
- if (ret)
- return ret;
-
- if (cbphy->aggr_mode != PHY_DL_MODE)
- return 0;
-
- return phy_cfg(&cbphy->iphy[PHY_1]);
-}
-
-static int intel_cbphy_pcie_en_pad_refclk(struct intel_cbphy_iphy *iphy)
-{
- struct intel_combo_phy *cbphy = iphy->parent;
- int ret;
-
- ret = intel_cbphy_pcie_refclk_cfg(iphy, true);
- if (ret) {
- dev_err(cbphy->dev, "Failed to enable PCIe pad refclk\n");
- return ret;
- }
-
- if (cbphy->init_cnt)
- return 0;
-
- combo_phy_w32_off_mask(cbphy->app_base, PCIE_PHY_GEN_CTRL,
- PCIE_PHY_CLK_PAD, FIELD_PREP(PCIE_PHY_CLK_PAD, 0));
-
- /* Delay for stable clock PLL */
- usleep_range(50, 100);
-
- return 0;
-}
-
-static int intel_cbphy_pcie_dis_pad_refclk(struct intel_cbphy_iphy *iphy)
-{
- struct intel_combo_phy *cbphy = iphy->parent;
- int ret;
-
- ret = intel_cbphy_pcie_refclk_cfg(iphy, false);
- if (ret) {
- dev_err(cbphy->dev, "Failed to disable PCIe pad refclk\n");
- return ret;
- }
-
- if (cbphy->init_cnt)
- return 0;
-
- combo_phy_w32_off_mask(cbphy->app_base, PCIE_PHY_GEN_CTRL,
- PCIE_PHY_CLK_PAD, FIELD_PREP(PCIE_PHY_CLK_PAD, 1));
-
- return 0;
-}
-
-static int intel_cbphy_set_mode(struct intel_combo_phy *cbphy)
-{
- enum intel_combo_mode cb_mode;
- enum aggregated_mode aggr = cbphy->aggr_mode;
- struct device *dev = cbphy->dev;
- enum intel_phy_mode mode;
- int ret;
-
- mode = cbphy->phy_mode;
-
- switch (mode) {
- case PHY_PCIE_MODE:
- cb_mode = (aggr == PHY_DL_MODE) ? PCIE_DL_MODE : PCIE0_PCIE1_MODE;
- break;
-
- case PHY_XPCS_MODE:
- cb_mode = (aggr == PHY_DL_MODE) ? RXAUI_MODE : XPCS0_XPCS1_MODE;
- break;
-
- case PHY_SATA_MODE:
- if (aggr == PHY_DL_MODE) {
- dev_err(dev, "Mode:%u not support dual lane!\n", mode);
- return -EINVAL;
- }
-
- cb_mode = SATA0_SATA1_MODE;
- break;
- default:
- return -EINVAL;
- }
-
- ret = regmap_write(cbphy->hsiocfg, REG_COMBO_MODE(cbphy->bid), cb_mode);
- if (ret)
- dev_err(dev, "Failed to set ComboPhy mode: %d\n", ret);
-
- return ret;
-}
-
-static void intel_cbphy_rst_assert(struct intel_combo_phy *cbphy)
-{
- reset_control_assert(cbphy->core_rst);
- reset_control_assert(cbphy->phy_rst);
-}
-
-static void intel_cbphy_rst_deassert(struct intel_combo_phy *cbphy)
-{
- reset_control_deassert(cbphy->core_rst);
- reset_control_deassert(cbphy->phy_rst);
- /* Delay to ensure reset process is done */
- usleep_range(10, 20);
-}
-
-static int intel_cbphy_iphy_power_on(struct intel_cbphy_iphy *iphy)
-{
- struct intel_combo_phy *cbphy = iphy->parent;
- int ret;
-
- if (!cbphy->init_cnt) {
- ret = clk_prepare_enable(cbphy->core_clk);
- if (ret) {
- dev_err(cbphy->dev, "Clock enable failed!\n");
- return ret;
- }
-
- ret = clk_set_rate(cbphy->core_clk, cbphy->clk_rate);
- if (ret) {
- dev_err(cbphy->dev, "Clock freq set to %lu failed!\n",
- cbphy->clk_rate);
- goto clk_err;
- }
-
- intel_cbphy_rst_assert(cbphy);
- intel_cbphy_rst_deassert(cbphy);
- ret = intel_cbphy_set_mode(cbphy);
- if (ret)
- goto clk_err;
- }
-
- ret = intel_cbphy_iphy_enable(iphy, true);
- if (ret) {
- dev_err(cbphy->dev, "Failed enabling PHY core\n");
- goto clk_err;
- }
-
- ret = reset_control_deassert(iphy->app_rst);
- if (ret) {
- dev_err(cbphy->dev, "PHY(%u:%u) reset deassert failed!\n",
- COMBO_PHY_ID(iphy), PHY_ID(iphy));
- goto clk_err;
- }
-
- /* Delay to ensure reset process is done */
- udelay(1);
-
- return 0;
-
-clk_err:
- clk_disable_unprepare(cbphy->core_clk);
-
- return ret;
-}
-
-static int intel_cbphy_iphy_power_off(struct intel_cbphy_iphy *iphy)
-{
- struct intel_combo_phy *cbphy = iphy->parent;
- int ret;
-
- ret = reset_control_assert(iphy->app_rst);
- if (ret) {
- dev_err(cbphy->dev, "PHY(%u:%u) reset assert failed!\n",
- COMBO_PHY_ID(iphy), PHY_ID(iphy));
- return ret;
- }
-
- ret = intel_cbphy_iphy_enable(iphy, false);
- if (ret) {
- dev_err(cbphy->dev, "Failed disabling PHY core\n");
- return ret;
- }
-
- if (cbphy->init_cnt)
- return 0;
-
- clk_disable_unprepare(cbphy->core_clk);
- intel_cbphy_rst_assert(cbphy);
-
- return 0;
-}
-
-static int intel_cbphy_init(struct phy *phy)
-{
- struct intel_cbphy_iphy *iphy = phy_get_drvdata(phy);
- struct intel_combo_phy *cbphy = iphy->parent;
- int ret;
-
- mutex_lock(&cbphy->lock);
- ret = intel_cbphy_iphy_cfg(iphy, intel_cbphy_iphy_power_on);
- if (ret)
- goto err;
-
- if (cbphy->phy_mode == PHY_PCIE_MODE) {
- ret = intel_cbphy_iphy_cfg(iphy, intel_cbphy_pcie_en_pad_refclk);
- if (ret)
- goto err;
- }
-
- cbphy->init_cnt++;
-
-err:
- mutex_unlock(&cbphy->lock);
-
- return ret;
-}
-
-static int intel_cbphy_exit(struct phy *phy)
-{
- struct intel_cbphy_iphy *iphy = phy_get_drvdata(phy);
- struct intel_combo_phy *cbphy = iphy->parent;
- int ret;
-
- mutex_lock(&cbphy->lock);
- cbphy->init_cnt--;
- if (cbphy->phy_mode == PHY_PCIE_MODE) {
- ret = intel_cbphy_iphy_cfg(iphy, intel_cbphy_pcie_dis_pad_refclk);
- if (ret)
- goto err;
- }
-
- ret = intel_cbphy_iphy_cfg(iphy, intel_cbphy_iphy_power_off);
-
-err:
- mutex_unlock(&cbphy->lock);
-
- return ret;
-}
-
-static int intel_cbphy_calibrate(struct phy *phy)
-{
- struct intel_cbphy_iphy *iphy = phy_get_drvdata(phy);
- struct intel_combo_phy *cbphy = iphy->parent;
- void __iomem *cr_base = cbphy->cr_base;
- int val, ret, id;
-
- if (cbphy->phy_mode != PHY_XPCS_MODE)
- return 0;
-
- id = PHY_ID(iphy);
-
- /* trigger auto RX adaptation */
- combo_phy_w32_off_mask(cr_base, CR_ADDR(PCS_XF_ATE_OVRD_IN_2, id),
- ADAPT_REQ_MSK, FIELD_PREP(ADAPT_REQ_MSK, 3));
- /* Wait RX adaptation to finish */
- ret = readl_poll_timeout(cr_base + CR_ADDR(PCS_XF_RX_ADAPT_ACK, id),
- val, val & RX_ADAPT_ACK_BIT, 10, 5000);
- if (ret)
- dev_err(cbphy->dev, "RX Adaptation failed!\n");
- else
- dev_dbg(cbphy->dev, "RX Adaptation success!\n");
-
- /* Stop RX adaptation */
- combo_phy_w32_off_mask(cr_base, CR_ADDR(PCS_XF_ATE_OVRD_IN_2, id),
- ADAPT_REQ_MSK, FIELD_PREP(ADAPT_REQ_MSK, 0));
-
- return ret;
-}
-
-static int intel_cbphy_fwnode_parse(struct intel_combo_phy *cbphy)
-{
- struct device *dev = cbphy->dev;
- struct platform_device *pdev = to_platform_device(dev);
- struct fwnode_handle *fwnode = dev_fwnode(dev);
- struct fwnode_reference_args ref;
- int ret;
- u32 val;
-
- cbphy->core_clk = devm_clk_get(dev, NULL);
- if (IS_ERR(cbphy->core_clk)) {
- ret = PTR_ERR(cbphy->core_clk);
- if (ret != -EPROBE_DEFER)
- dev_err(dev, "Get clk failed:%d!\n", ret);
- return ret;
- }
-
- cbphy->core_rst = devm_reset_control_get_optional(dev, "core");
- if (IS_ERR(cbphy->core_rst)) {
- ret = PTR_ERR(cbphy->core_rst);
- if (ret != -EPROBE_DEFER)
- dev_err(dev, "Get core reset control err: %d!\n", ret);
- return ret;
- }
-
- cbphy->phy_rst = devm_reset_control_get_optional(dev, "phy");
- if (IS_ERR(cbphy->phy_rst)) {
- ret = PTR_ERR(cbphy->phy_rst);
- if (ret != -EPROBE_DEFER)
- dev_err(dev, "Get PHY reset control err: %d!\n", ret);
- return ret;
- }
-
- cbphy->iphy[0].app_rst = devm_reset_control_get_optional(dev, "iphy0");
- if (IS_ERR(cbphy->iphy[0].app_rst)) {
- ret = PTR_ERR(cbphy->iphy[0].app_rst);
- if (ret != -EPROBE_DEFER)
- dev_err(dev, "Get phy0 reset control err: %d!\n", ret);
- return ret;
- }
-
- cbphy->iphy[1].app_rst = devm_reset_control_get_optional(dev, "iphy1");
- if (IS_ERR(cbphy->iphy[1].app_rst)) {
- ret = PTR_ERR(cbphy->iphy[1].app_rst);
- if (ret != -EPROBE_DEFER)
- dev_err(dev, "Get phy1 reset control err: %d!\n", ret);
- return ret;
- }
-
- cbphy->app_base = devm_platform_ioremap_resource_byname(pdev, "app");
- if (IS_ERR(cbphy->app_base))
- return PTR_ERR(cbphy->app_base);
-
- cbphy->cr_base = devm_platform_ioremap_resource_byname(pdev, "core");
- if (IS_ERR(cbphy->cr_base))
- return PTR_ERR(cbphy->cr_base);
-
- /*
- * syscfg and hsiocfg variables stores the handle of the registers set
- * in which ComboPhy subsytem specific registers are subset. Using
- * Register map framework to access the registers set.
- */
- ret = fwnode_property_get_reference_args(fwnode, "intel,syscfg", NULL,
- 1, 0, &ref);
- if (ret < 0)
- return ret;
-
- cbphy->id = ref.args[0];
- cbphy->syscfg = device_node_to_regmap(to_of_node(ref.fwnode));
- fwnode_handle_put(ref.fwnode);
-
- ret = fwnode_property_get_reference_args(fwnode, "intel,hsio", NULL, 1,
- 0, &ref);
- if (ret < 0)
- return ret;
-
- cbphy->bid = ref.args[0];
- cbphy->hsiocfg = device_node_to_regmap(to_of_node(ref.fwnode));
- fwnode_handle_put(ref.fwnode);
-
- ret = fwnode_property_read_u32_array(fwnode, "intel,phy-mode", &val, 1);
- if (ret)
- return ret;
-
- switch (val) {
- case PHY_TYPE_PCIE:
- cbphy->phy_mode = PHY_PCIE_MODE;
- break;
-
- case PHY_TYPE_SATA:
- cbphy->phy_mode = PHY_SATA_MODE;
- break;
-
- case PHY_TYPE_XPCS:
- cbphy->phy_mode = PHY_XPCS_MODE;
- break;
-
- default:
- dev_err(dev, "Invalid PHY mode: %u\n", val);
- return -EINVAL;
- }
-
- cbphy->clk_rate = intel_iphy_clk_rates[cbphy->phy_mode];
-
- if (fwnode_property_present(fwnode, "intel,aggregation"))
- cbphy->aggr_mode = PHY_DL_MODE;
- else
- cbphy->aggr_mode = PHY_SL_MODE;
-
- return 0;
-}
-
-static const struct phy_ops intel_cbphy_ops = {
- .init = intel_cbphy_init,
- .exit = intel_cbphy_exit,
- .calibrate = intel_cbphy_calibrate,
- .owner = THIS_MODULE,
-};
-
-static struct phy *intel_cbphy_xlate(struct device *dev,
- struct of_phandle_args *args)
-{
- struct intel_combo_phy *cbphy = dev_get_drvdata(dev);
- u32 iphy_id;
-
- if (args->args_count < 1) {
- dev_err(dev, "Invalid number of arguments\n");
- return ERR_PTR(-EINVAL);
- }
-
- iphy_id = args->args[0];
- if (iphy_id >= PHY_MAX_NUM) {
- dev_err(dev, "Invalid phy instance %d\n", iphy_id);
- return ERR_PTR(-EINVAL);
- }
-
- if (cbphy->aggr_mode == PHY_DL_MODE && iphy_id == PHY_1) {
- dev_err(dev, "Invalid. ComboPhy is in Dual lane mode %d\n", iphy_id);
- return ERR_PTR(-EINVAL);
- }
-
- return cbphy->iphy[iphy_id].phy;
-}
-
-static int intel_cbphy_create(struct intel_combo_phy *cbphy)
-{
- struct phy_provider *phy_provider;
- struct device *dev = cbphy->dev;
- struct intel_cbphy_iphy *iphy;
- int i;
-
- for (i = 0; i < PHY_MAX_NUM; i++) {
- iphy = &cbphy->iphy[i];
- iphy->parent = cbphy;
- iphy->id = i;
-
- /* In dual lane mode skip phy creation for the second phy */
- if (cbphy->aggr_mode == PHY_DL_MODE && iphy->id == PHY_1)
- continue;
-
- iphy->phy = devm_phy_create(dev, NULL, &intel_cbphy_ops);
- if (IS_ERR(iphy->phy)) {
- dev_err(dev, "PHY[%u:%u]: create PHY instance failed!\n",
- COMBO_PHY_ID(iphy), PHY_ID(iphy));
-
- return PTR_ERR(iphy->phy);
- }
-
- phy_set_drvdata(iphy->phy, iphy);
- }
-
- dev_set_drvdata(dev, cbphy);
- phy_provider = devm_of_phy_provider_register(dev, intel_cbphy_xlate);
- if (IS_ERR(phy_provider))
- dev_err(dev, "Register PHY provider failed!\n");
-
- return PTR_ERR_OR_ZERO(phy_provider);
-}
-
-static int intel_cbphy_probe(struct platform_device *pdev)
-{
- struct device *dev = &pdev->dev;
- struct intel_combo_phy *cbphy;
- int ret;
-
- cbphy = devm_kzalloc(dev, sizeof(*cbphy), GFP_KERNEL);
- if (!cbphy)
- return -ENOMEM;
-
- cbphy->dev = dev;
- cbphy->init_cnt = 0;
- mutex_init(&cbphy->lock);
- ret = intel_cbphy_fwnode_parse(cbphy);
- if (ret)
- return ret;
-
- platform_set_drvdata(pdev, cbphy);
-
- return intel_cbphy_create(cbphy);
-}
-
-static int intel_cbphy_remove(struct platform_device *pdev)
-{
- struct intel_combo_phy *cbphy = platform_get_drvdata(pdev);
-
- intel_cbphy_rst_assert(cbphy);
- clk_disable_unprepare(cbphy->core_clk);
- return 0;
-}
-
-static const struct of_device_id of_intel_cbphy_match[] = {
- { .compatible = "intel,combo-phy" },
- { .compatible = "intel,combophy-lgm" },
- {}
-};
-
-static struct platform_driver intel_cbphy_driver = {
- .probe = intel_cbphy_probe,
- .remove = intel_cbphy_remove,
- .driver = {
- .name = "intel-combo-phy",
- .of_match_table = of_intel_cbphy_match,
- }
-};
-
-module_platform_driver(intel_cbphy_driver);
-
-MODULE_DESCRIPTION("Intel Combo-phy driver");
-MODULE_LICENSE("GPL v2");
+++ /dev/null
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Intel eMMC PHY driver
- * Copyright (C) 2019 Intel, Corp.
- */
-
-#include <linux/bits.h>
-#include <linux/clk.h>
-#include <linux/delay.h>
-#include <linux/mfd/syscon.h>
-#include <linux/module.h>
-#include <linux/of.h>
-#include <linux/of_address.h>
-#include <linux/phy/phy.h>
-#include <linux/platform_device.h>
-#include <linux/regmap.h>
-
-/* eMMC phy register definitions */
-#define EMMC_PHYCTRL0_REG 0xa8
-#define DR_TY_MASK GENMASK(30, 28)
-#define DR_TY_SHIFT(x) (((x) << 28) & DR_TY_MASK)
-#define OTAPDLYENA BIT(14)
-#define OTAPDLYSEL_MASK GENMASK(13, 10)
-#define OTAPDLYSEL_SHIFT(x) (((x) << 10) & OTAPDLYSEL_MASK)
-
-#define EMMC_PHYCTRL1_REG 0xac
-#define PDB_MASK BIT(0)
-#define PDB_SHIFT(x) (((x) << 0) & PDB_MASK)
-#define ENDLL_MASK BIT(7)
-#define ENDLL_SHIFT(x) (((x) << 7) & ENDLL_MASK)
-
-#define EMMC_PHYCTRL2_REG 0xb0
-#define FRQSEL_25M 0
-#define FRQSEL_50M 1
-#define FRQSEL_100M 2
-#define FRQSEL_150M 3
-#define FRQSEL_MASK GENMASK(24, 22)
-#define FRQSEL_SHIFT(x) (((x) << 22) & FRQSEL_MASK)
-
-#define EMMC_PHYSTAT_REG 0xbc
-#define CALDONE_MASK BIT(9)
-#define DLLRDY_MASK BIT(8)
-#define IS_CALDONE(x) ((x) & CALDONE_MASK)
-#define IS_DLLRDY(x) ((x) & DLLRDY_MASK)
-
-struct intel_emmc_phy {
- struct regmap *syscfg;
- struct clk *emmcclk;
-};
-
-static int intel_emmc_phy_power(struct phy *phy, bool on_off)
-{
- struct intel_emmc_phy *priv = phy_get_drvdata(phy);
- unsigned int caldone;
- unsigned int dllrdy;
- unsigned int freqsel;
- unsigned long rate;
- int ret, quot;
-
- /*
- * Keep phyctrl_pdb and phyctrl_endll low to allow
- * initialization of CALIO state M/C DFFs
- */
- ret = regmap_update_bits(priv->syscfg, EMMC_PHYCTRL1_REG, PDB_MASK,
- PDB_SHIFT(0));
- if (ret) {
- dev_err(&phy->dev, "CALIO power down bar failed: %d\n", ret);
- return ret;
- }
-
- /* Already finish power_off above */
- if (!on_off)
- return 0;
-
- rate = clk_get_rate(priv->emmcclk);
- quot = DIV_ROUND_CLOSEST(rate, 50000000);
- if (quot > FRQSEL_150M)
- dev_warn(&phy->dev, "Unsupported rate: %lu\n", rate);
- freqsel = clamp_t(int, quot, FRQSEL_25M, FRQSEL_150M);
-
- /*
- * According to the user manual, calpad calibration
- * cycle takes more than 2us without the minimal recommended
- * value, so we may need a little margin here
- */
- udelay(5);
-
- ret = regmap_update_bits(priv->syscfg, EMMC_PHYCTRL1_REG, PDB_MASK,
- PDB_SHIFT(1));
- if (ret) {
- dev_err(&phy->dev, "CALIO power down bar failed: %d\n", ret);
- return ret;
- }
-
- /*
- * According to the user manual, it asks driver to wait 5us for
- * calpad busy trimming. However it is documented that this value is
- * PVT(A.K.A process,voltage and temperature) relevant, so some
- * failure cases are found which indicates we should be more tolerant
- * to calpad busy trimming.
- */
- ret = regmap_read_poll_timeout(priv->syscfg, EMMC_PHYSTAT_REG,
- caldone, IS_CALDONE(caldone),
- 0, 50);
- if (ret) {
- dev_err(&phy->dev, "caldone failed, ret=%d\n", ret);
- return ret;
- }
-
- /* Set the frequency of the DLL operation */
- ret = regmap_update_bits(priv->syscfg, EMMC_PHYCTRL2_REG, FRQSEL_MASK,
- FRQSEL_SHIFT(freqsel));
- if (ret) {
- dev_err(&phy->dev, "set the frequency of dll failed:%d\n", ret);
- return ret;
- }
-
- /* Turn on the DLL */
- ret = regmap_update_bits(priv->syscfg, EMMC_PHYCTRL1_REG, ENDLL_MASK,
- ENDLL_SHIFT(1));
- if (ret) {
- dev_err(&phy->dev, "turn on the dll failed: %d\n", ret);
- return ret;
- }
-
- /*
- * After enabling analog DLL circuits docs say that we need 10.2 us if
- * our source clock is at 50 MHz and that lock time scales linearly
- * with clock speed. If we are powering on the PHY and the card clock
- * is super slow (like 100 kHZ) this could take as long as 5.1 ms as
- * per the math: 10.2 us * (50000000 Hz / 100000 Hz) => 5.1 ms
- * Hopefully we won't be running at 100 kHz, but we should still make
- * sure we wait long enough.
- *
- * NOTE: There appear to be corner cases where the DLL seems to take
- * extra long to lock for reasons that aren't understood. In some
- * extreme cases we've seen it take up to over 10ms (!). We'll be
- * generous and give it 50ms.
- */
- ret = regmap_read_poll_timeout(priv->syscfg,
- EMMC_PHYSTAT_REG,
- dllrdy, IS_DLLRDY(dllrdy),
- 0, 50 * USEC_PER_MSEC);
- if (ret) {
- dev_err(&phy->dev, "dllrdy failed. ret=%d\n", ret);
- return ret;
- }
-
- return 0;
-}
-
-static int intel_emmc_phy_init(struct phy *phy)
-{
- struct intel_emmc_phy *priv = phy_get_drvdata(phy);
-
- /*
- * We purposely get the clock here and not in probe to avoid the
- * circular dependency problem. We expect:
- * - PHY driver to probe
- * - SDHCI driver to start probe
- * - SDHCI driver to register it's clock
- * - SDHCI driver to get the PHY
- * - SDHCI driver to init the PHY
- *
- * The clock is optional, so upon any error just return it like
- * any other error to user.
- *
- */
- priv->emmcclk = clk_get_optional(&phy->dev, "emmcclk");
- if (IS_ERR(priv->emmcclk)) {
- dev_err(&phy->dev, "ERROR: getting emmcclk\n");
- return PTR_ERR(priv->emmcclk);
- }
-
- return 0;
-}
-
-static int intel_emmc_phy_exit(struct phy *phy)
-{
- struct intel_emmc_phy *priv = phy_get_drvdata(phy);
-
- clk_put(priv->emmcclk);
-
- return 0;
-}
-
-static int intel_emmc_phy_power_on(struct phy *phy)
-{
- struct intel_emmc_phy *priv = phy_get_drvdata(phy);
- int ret;
-
- /* Drive impedance: 50 Ohm */
- ret = regmap_update_bits(priv->syscfg, EMMC_PHYCTRL0_REG, DR_TY_MASK,
- DR_TY_SHIFT(6));
- if (ret) {
- dev_err(&phy->dev, "ERROR set drive-impednce-50ohm: %d\n", ret);
- return ret;
- }
-
- /* Output tap delay: disable */
- ret = regmap_update_bits(priv->syscfg, EMMC_PHYCTRL0_REG, OTAPDLYENA,
- 0);
- if (ret) {
- dev_err(&phy->dev, "ERROR Set output tap delay : %d\n", ret);
- return ret;
- }
-
- /* Output tap delay */
- ret = regmap_update_bits(priv->syscfg, EMMC_PHYCTRL0_REG,
- OTAPDLYSEL_MASK, OTAPDLYSEL_SHIFT(4));
- if (ret) {
- dev_err(&phy->dev, "ERROR: output tap dly select: %d\n", ret);
- return ret;
- }
-
- /* Power up eMMC phy analog blocks */
- return intel_emmc_phy_power(phy, true);
-}
-
-static int intel_emmc_phy_power_off(struct phy *phy)
-{
- /* Power down eMMC phy analog blocks */
- return intel_emmc_phy_power(phy, false);
-}
-
-static const struct phy_ops ops = {
- .init = intel_emmc_phy_init,
- .exit = intel_emmc_phy_exit,
- .power_on = intel_emmc_phy_power_on,
- .power_off = intel_emmc_phy_power_off,
- .owner = THIS_MODULE,
-};
-
-static int intel_emmc_phy_probe(struct platform_device *pdev)
-{
- struct device *dev = &pdev->dev;
- struct device_node *np = dev->of_node;
- struct intel_emmc_phy *priv;
- struct phy *generic_phy;
- struct phy_provider *phy_provider;
-
- priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
-
- /* Get eMMC phy (accessed via chiptop) regmap */
- priv->syscfg = syscon_regmap_lookup_by_phandle(np, "intel,syscon");
- if (IS_ERR(priv->syscfg)) {
- dev_err(dev, "failed to find syscon\n");
- return PTR_ERR(priv->syscfg);
- }
-
- generic_phy = devm_phy_create(dev, np, &ops);
- if (IS_ERR(generic_phy)) {
- dev_err(dev, "failed to create PHY\n");
- return PTR_ERR(generic_phy);
- }
-
- phy_set_drvdata(generic_phy, priv);
- phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
-
- return PTR_ERR_OR_ZERO(phy_provider);
-}
-
-static const struct of_device_id intel_emmc_phy_dt_ids[] = {
- { .compatible = "intel,lgm-emmc-phy" },
- {}
-};
-
-MODULE_DEVICE_TABLE(of, intel_emmc_phy_dt_ids);
-
-static struct platform_driver intel_emmc_driver = {
- .probe = intel_emmc_phy_probe,
- .driver = {
- .name = "intel-emmc-phy",
- .of_match_table = intel_emmc_phy_dt_ids,
- },
-};
-
-module_platform_driver(intel_emmc_driver);
-
-MODULE_AUTHOR("Peter Harliman Liem <peter.harliman.liem@intel.com>");
-MODULE_DESCRIPTION("Intel eMMC PHY driver");
-MODULE_LICENSE("GPL v2");
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Intel Combo-PHY driver
+ *
+ * Copyright (C) 2019-2020 Intel Corporation.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/iopoll.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/reset.h>
+
+#include <dt-bindings/phy/phy.h>
+
+#define PCIE_PHY_GEN_CTRL 0x00
+#define PCIE_PHY_CLK_PAD BIT(17)
+
+#define PAD_DIS_CFG 0x174
+
+#define PCS_XF_ATE_OVRD_IN_2 0x3008
+#define ADAPT_REQ_MSK GENMASK(5, 4)
+
+#define PCS_XF_RX_ADAPT_ACK 0x3010
+#define RX_ADAPT_ACK_BIT BIT(0)
+
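+/*
+ * Per-bank register strides: each ComboPhy bank uses a 0x200 stride in
+ * the HSIO block, while CR register offsets are word indices (hence the
+ * << 2) with a 0x100 per-lane stride.
+ */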
+#define CR_ADDR(addr, lane) (((addr) + (lane) * 0x100) << 2)
+#define REG_COMBO_MODE(x) ((x) * 0x200)
+#define REG_CLK_DISABLE(x) ((x) * 0x200 + 0x124)
+
+#define COMBO_PHY_ID(x) ((x)->parent->id)
+#define PHY_ID(x) ((x)->id)
+
+#define CLK_100MHZ 100000000
+#define CLK_156_25MHZ 156250000
+
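+/* Core clock rate for each mode, indexed by enum intel_phy_mode */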
+static const unsigned long intel_iphy_clk_rates[] = {
+ CLK_100MHZ, CLK_156_25MHZ, CLK_100MHZ,
+};
+
+enum {
+ PHY_0,
+ PHY_1,
+ PHY_MAX_NUM
+};
+
+/*
+ * Clock Register bit fields to enable clocks
+ * for ComboPhy according to the mode.
+ */
+enum intel_phy_mode {
+ PHY_PCIE_MODE = 0,
+ PHY_XPCS_MODE,
+ PHY_SATA_MODE,
+};
+
+/* ComboPhy mode Register values */
+enum intel_combo_mode {
+ PCIE0_PCIE1_MODE = 0,
+ PCIE_DL_MODE,
+ RXAUI_MODE,
+ XPCS0_XPCS1_MODE,
+ SATA0_SATA1_MODE,
+};
+
+enum aggregated_mode {
+ PHY_SL_MODE,
+ PHY_DL_MODE,
+};
+
+struct intel_combo_phy;
+
+struct intel_cbphy_iphy {
+ struct phy *phy;
+ struct intel_combo_phy *parent;
+ struct reset_control *app_rst;
+ u32 id;
+};
+
+struct intel_combo_phy {
+ struct device *dev;
+ struct clk *core_clk;
+ unsigned long clk_rate;
+ void __iomem *app_base;
+ void __iomem *cr_base;
+ struct regmap *syscfg;
+ struct regmap *hsiocfg;
+ u32 id;
+ u32 bid;
+ struct reset_control *phy_rst;
+ struct reset_control *core_rst;
+ struct intel_cbphy_iphy iphy[PHY_MAX_NUM];
+ enum intel_phy_mode phy_mode;
+ enum aggregated_mode aggr_mode;
+ u32 init_cnt;
+ struct mutex lock;
+};
+
+static int intel_cbphy_iphy_enable(struct intel_cbphy_iphy *iphy, bool set)
+{
+ struct intel_combo_phy *cbphy = iphy->parent;
+ u32 mask = BIT(cbphy->phy_mode * 2 + iphy->id);
+ u32 val;
+
+ /* Register: 0 is enable, 1 is disable */
+ val = set ? 0 : mask;
+
+ return regmap_update_bits(cbphy->hsiocfg, REG_CLK_DISABLE(cbphy->bid),
+ mask, val);
+}
+
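+/*
+ * Each PCIe lane has one pad-disable bit in the syscon PAD_DIS_CFG
+ * register (bit = combo PHY id * 2 + lane id): clearing it enables the
+ * pad reference clock, setting it disables it.
+ */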
+static int intel_cbphy_pcie_refclk_cfg(struct intel_cbphy_iphy *iphy, bool set)
+{
+ struct intel_combo_phy *cbphy = iphy->parent;
+ u32 mask = BIT(cbphy->id * 2 + iphy->id);
+ u32 val;
+
+ /* Register: 0 is enable, 1 is disable */
+ val = set ? 0 : mask;
+
+ return regmap_update_bits(cbphy->syscfg, PAD_DIS_CFG, mask, val);
+}
+
+static inline void combo_phy_w32_off_mask(void __iomem *base, unsigned int reg,
+ u32 mask, u32 val)
+{
+ u32 reg_val;
+
+ reg_val = readl(base + reg);
+ reg_val &= ~mask;
+ reg_val |= val;
+ writel(reg_val, base + reg);
+}
+
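+/*
+ * Apply @phy_cfg to this PHY instance and, when the ComboPhy is
+ * aggregated into dual-lane mode, to the second lane (PHY_1) as well.
+ */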
+static int intel_cbphy_iphy_cfg(struct intel_cbphy_iphy *iphy,
+ int (*phy_cfg)(struct intel_cbphy_iphy *))
+{
+ struct intel_combo_phy *cbphy = iphy->parent;
+ int ret;
+
+ ret = phy_cfg(iphy);
+ if (ret)
+ return ret;
+
+ if (cbphy->aggr_mode != PHY_DL_MODE)
+ return 0;
+
+ return phy_cfg(&cbphy->iphy[PHY_1]);
+}
+
+static int intel_cbphy_pcie_en_pad_refclk(struct intel_cbphy_iphy *iphy)
+{
+ struct intel_combo_phy *cbphy = iphy->parent;
+ int ret;
+
+ ret = intel_cbphy_pcie_refclk_cfg(iphy, true);
+ if (ret) {
+ dev_err(cbphy->dev, "Failed to enable PCIe pad refclk\n");
+ return ret;
+ }
+
+ if (cbphy->init_cnt)
+ return 0;
+
+ combo_phy_w32_off_mask(cbphy->app_base, PCIE_PHY_GEN_CTRL,
+ PCIE_PHY_CLK_PAD, FIELD_PREP(PCIE_PHY_CLK_PAD, 0));
+
+ /* Delay for stable clock PLL */
+ usleep_range(50, 100);
+
+ return 0;
+}
+
+static int intel_cbphy_pcie_dis_pad_refclk(struct intel_cbphy_iphy *iphy)
+{
+ struct intel_combo_phy *cbphy = iphy->parent;
+ int ret;
+
+ ret = intel_cbphy_pcie_refclk_cfg(iphy, false);
+ if (ret) {
+ dev_err(cbphy->dev, "Failed to disable PCIe pad refclk\n");
+ return ret;
+ }
+
+ if (cbphy->init_cnt)
+ return 0;
+
+ combo_phy_w32_off_mask(cbphy->app_base, PCIE_PHY_GEN_CTRL,
+ PCIE_PHY_CLK_PAD, FIELD_PREP(PCIE_PHY_CLK_PAD, 1));
+
+ return 0;
+}
+
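+/*
+ * Translate the PHY mode and aggregation setting into the ComboPhy mode
+ * register value; SATA does not support dual-lane aggregation.
+ */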
+static int intel_cbphy_set_mode(struct intel_combo_phy *cbphy)
+{
+ enum intel_combo_mode cb_mode;
+ enum aggregated_mode aggr = cbphy->aggr_mode;
+ struct device *dev = cbphy->dev;
+ enum intel_phy_mode mode;
+ int ret;
+
+ mode = cbphy->phy_mode;
+
+ switch (mode) {
+ case PHY_PCIE_MODE:
+ cb_mode = (aggr == PHY_DL_MODE) ? PCIE_DL_MODE : PCIE0_PCIE1_MODE;
+ break;
+
+ case PHY_XPCS_MODE:
+ cb_mode = (aggr == PHY_DL_MODE) ? RXAUI_MODE : XPCS0_XPCS1_MODE;
+ break;
+
+ case PHY_SATA_MODE:
+ if (aggr == PHY_DL_MODE) {
+ dev_err(dev, "Mode:%u not support dual lane!\n", mode);
+ return -EINVAL;
+ }
+
+ cb_mode = SATA0_SATA1_MODE;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ret = regmap_write(cbphy->hsiocfg, REG_COMBO_MODE(cbphy->bid), cb_mode);
+ if (ret)
+ dev_err(dev, "Failed to set ComboPhy mode: %d\n", ret);
+
+ return ret;
+}
+
+static void intel_cbphy_rst_assert(struct intel_combo_phy *cbphy)
+{
+ reset_control_assert(cbphy->core_rst);
+ reset_control_assert(cbphy->phy_rst);
+}
+
+static void intel_cbphy_rst_deassert(struct intel_combo_phy *cbphy)
+{
+ reset_control_deassert(cbphy->core_rst);
+ reset_control_deassert(cbphy->phy_rst);
+ /* Delay to ensure reset process is done */
+ usleep_range(10, 20);
+}
+
+static int intel_cbphy_iphy_power_on(struct intel_cbphy_iphy *iphy)
+{
+ struct intel_combo_phy *cbphy = iphy->parent;
+ int ret;
+
+ if (!cbphy->init_cnt) {
+ ret = clk_prepare_enable(cbphy->core_clk);
+ if (ret) {
+ dev_err(cbphy->dev, "Clock enable failed!\n");
+ return ret;
+ }
+
+ ret = clk_set_rate(cbphy->core_clk, cbphy->clk_rate);
+ if (ret) {
+ dev_err(cbphy->dev, "Clock freq set to %lu failed!\n",
+ cbphy->clk_rate);
+ goto clk_err;
+ }
+
+ intel_cbphy_rst_assert(cbphy);
+ intel_cbphy_rst_deassert(cbphy);
+ ret = intel_cbphy_set_mode(cbphy);
+ if (ret)
+ goto clk_err;
+ }
+
+ ret = intel_cbphy_iphy_enable(iphy, true);
+ if (ret) {
+ dev_err(cbphy->dev, "Failed enabling PHY core\n");
+ goto clk_err;
+ }
+
+ ret = reset_control_deassert(iphy->app_rst);
+ if (ret) {
+ dev_err(cbphy->dev, "PHY(%u:%u) reset deassert failed!\n",
+ COMBO_PHY_ID(iphy), PHY_ID(iphy));
+ goto clk_err;
+ }
+
+ /* Delay to ensure reset process is done */
+ udelay(1);
+
+ return 0;
+
+clk_err:
+ clk_disable_unprepare(cbphy->core_clk);
+
+ return ret;
+}
+
+static int intel_cbphy_iphy_power_off(struct intel_cbphy_iphy *iphy)
+{
+ struct intel_combo_phy *cbphy = iphy->parent;
+ int ret;
+
+ ret = reset_control_assert(iphy->app_rst);
+ if (ret) {
+ dev_err(cbphy->dev, "PHY(%u:%u) reset assert failed!\n",
+ COMBO_PHY_ID(iphy), PHY_ID(iphy));
+ return ret;
+ }
+
+ ret = intel_cbphy_iphy_enable(iphy, false);
+ if (ret) {
+ dev_err(cbphy->dev, "Failed disabling PHY core\n");
+ return ret;
+ }
+
+ if (cbphy->init_cnt)
+ return 0;
+
+ clk_disable_unprepare(cbphy->core_clk);
+ intel_cbphy_rst_assert(cbphy);
+
+ return 0;
+}
+
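+/*
+ * init/exit are reference counted under cbphy->lock: the shared core
+ * clock, resets and mode register are only configured when the first
+ * PHY instance is initialized and released when the last one exits.
+ */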
+static int intel_cbphy_init(struct phy *phy)
+{
+ struct intel_cbphy_iphy *iphy = phy_get_drvdata(phy);
+ struct intel_combo_phy *cbphy = iphy->parent;
+ int ret;
+
+ mutex_lock(&cbphy->lock);
+ ret = intel_cbphy_iphy_cfg(iphy, intel_cbphy_iphy_power_on);
+ if (ret)
+ goto err;
+
+ if (cbphy->phy_mode == PHY_PCIE_MODE) {
+ ret = intel_cbphy_iphy_cfg(iphy, intel_cbphy_pcie_en_pad_refclk);
+ if (ret)
+ goto err;
+ }
+
+ cbphy->init_cnt++;
+
+err:
+ mutex_unlock(&cbphy->lock);
+
+ return ret;
+}
+
+static int intel_cbphy_exit(struct phy *phy)
+{
+ struct intel_cbphy_iphy *iphy = phy_get_drvdata(phy);
+ struct intel_combo_phy *cbphy = iphy->parent;
+ int ret;
+
+ mutex_lock(&cbphy->lock);
+ cbphy->init_cnt--;
+ if (cbphy->phy_mode == PHY_PCIE_MODE) {
+ ret = intel_cbphy_iphy_cfg(iphy, intel_cbphy_pcie_dis_pad_refclk);
+ if (ret)
+ goto err;
+ }
+
+ ret = intel_cbphy_iphy_cfg(iphy, intel_cbphy_iphy_power_off);
+
+err:
+ mutex_unlock(&cbphy->lock);
+
+ return ret;
+}
+
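+/*
+ * RX adaptation is only performed in XPCS mode; it is triggered and
+ * acknowledged through the PCS CR registers of the given lane.
+ */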
+static int intel_cbphy_calibrate(struct phy *phy)
+{
+ struct intel_cbphy_iphy *iphy = phy_get_drvdata(phy);
+ struct intel_combo_phy *cbphy = iphy->parent;
+ void __iomem *cr_base = cbphy->cr_base;
+ int val, ret, id;
+
+ if (cbphy->phy_mode != PHY_XPCS_MODE)
+ return 0;
+
+ id = PHY_ID(iphy);
+
+ /* trigger auto RX adaptation */
+ combo_phy_w32_off_mask(cr_base, CR_ADDR(PCS_XF_ATE_OVRD_IN_2, id),
+ ADAPT_REQ_MSK, FIELD_PREP(ADAPT_REQ_MSK, 3));
+ /* Wait for RX adaptation to finish */
+ ret = readl_poll_timeout(cr_base + CR_ADDR(PCS_XF_RX_ADAPT_ACK, id),
+ val, val & RX_ADAPT_ACK_BIT, 10, 5000);
+ if (ret)
+ dev_err(cbphy->dev, "RX Adaptation failed!\n");
+ else
+ dev_dbg(cbphy->dev, "RX Adaptation success!\n");
+
+ /* Stop RX adaptation */
+ combo_phy_w32_off_mask(cr_base, CR_ADDR(PCS_XF_ATE_OVRD_IN_2, id),
+ ADAPT_REQ_MSK, FIELD_PREP(ADAPT_REQ_MSK, 0));
+
+ return ret;
+}
+
+static int intel_cbphy_fwnode_parse(struct intel_combo_phy *cbphy)
+{
+ struct device *dev = cbphy->dev;
+ struct platform_device *pdev = to_platform_device(dev);
+ struct fwnode_handle *fwnode = dev_fwnode(dev);
+ struct fwnode_reference_args ref;
+ int ret;
+ u32 val;
+
+ cbphy->core_clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(cbphy->core_clk)) {
+ ret = PTR_ERR(cbphy->core_clk);
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "Get clk failed:%d!\n", ret);
+ return ret;
+ }
+
+ cbphy->core_rst = devm_reset_control_get_optional(dev, "core");
+ if (IS_ERR(cbphy->core_rst)) {
+ ret = PTR_ERR(cbphy->core_rst);
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "Get core reset control err: %d!\n", ret);
+ return ret;
+ }
+
+ cbphy->phy_rst = devm_reset_control_get_optional(dev, "phy");
+ if (IS_ERR(cbphy->phy_rst)) {
+ ret = PTR_ERR(cbphy->phy_rst);
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "Get PHY reset control err: %d!\n", ret);
+ return ret;
+ }
+
+ cbphy->iphy[0].app_rst = devm_reset_control_get_optional(dev, "iphy0");
+ if (IS_ERR(cbphy->iphy[0].app_rst)) {
+ ret = PTR_ERR(cbphy->iphy[0].app_rst);
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "Get phy0 reset control err: %d!\n", ret);
+ return ret;
+ }
+
+ cbphy->iphy[1].app_rst = devm_reset_control_get_optional(dev, "iphy1");
+ if (IS_ERR(cbphy->iphy[1].app_rst)) {
+ ret = PTR_ERR(cbphy->iphy[1].app_rst);
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "Get phy1 reset control err: %d!\n", ret);
+ return ret;
+ }
+
+ cbphy->app_base = devm_platform_ioremap_resource_byname(pdev, "app");
+ if (IS_ERR(cbphy->app_base))
+ return PTR_ERR(cbphy->app_base);
+
+ cbphy->cr_base = devm_platform_ioremap_resource_byname(pdev, "core");
+ if (IS_ERR(cbphy->cr_base))
+ return PTR_ERR(cbphy->cr_base);
+
+ /*
+ * syscfg and hsiocfg hold regmap handles for the register blocks in
+ * which the ComboPhy subsystem specific registers are a subset; the
+ * regmap framework is used to access them.
+ */
+ ret = fwnode_property_get_reference_args(fwnode, "intel,syscfg", NULL,
+ 1, 0, &ref);
+ if (ret < 0)
+ return ret;
+
+ cbphy->id = ref.args[0];
+ cbphy->syscfg = device_node_to_regmap(to_of_node(ref.fwnode));
+ fwnode_handle_put(ref.fwnode);
+
+ ret = fwnode_property_get_reference_args(fwnode, "intel,hsio", NULL, 1,
+ 0, &ref);
+ if (ret < 0)
+ return ret;
+
+ cbphy->bid = ref.args[0];
+ cbphy->hsiocfg = device_node_to_regmap(to_of_node(ref.fwnode));
+ fwnode_handle_put(ref.fwnode);
+
+ ret = fwnode_property_read_u32_array(fwnode, "intel,phy-mode", &val, 1);
+ if (ret)
+ return ret;
+
+ switch (val) {
+ case PHY_TYPE_PCIE:
+ cbphy->phy_mode = PHY_PCIE_MODE;
+ break;
+
+ case PHY_TYPE_SATA:
+ cbphy->phy_mode = PHY_SATA_MODE;
+ break;
+
+ case PHY_TYPE_XPCS:
+ cbphy->phy_mode = PHY_XPCS_MODE;
+ break;
+
+ default:
+ dev_err(dev, "Invalid PHY mode: %u\n", val);
+ return -EINVAL;
+ }
+
+ cbphy->clk_rate = intel_iphy_clk_rates[cbphy->phy_mode];
+
+ if (fwnode_property_present(fwnode, "intel,aggregation"))
+ cbphy->aggr_mode = PHY_DL_MODE;
+ else
+ cbphy->aggr_mode = PHY_SL_MODE;
+
+ return 0;
+}
+
+static const struct phy_ops intel_cbphy_ops = {
+ .init = intel_cbphy_init,
+ .exit = intel_cbphy_exit,
+ .calibrate = intel_cbphy_calibrate,
+ .owner = THIS_MODULE,
+};
+
+static struct phy *intel_cbphy_xlate(struct device *dev,
+ struct of_phandle_args *args)
+{
+ struct intel_combo_phy *cbphy = dev_get_drvdata(dev);
+ u32 iphy_id;
+
+ if (args->args_count < 1) {
+ dev_err(dev, "Invalid number of arguments\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ iphy_id = args->args[0];
+ if (iphy_id >= PHY_MAX_NUM) {
+ dev_err(dev, "Invalid phy instance %d\n", iphy_id);
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (cbphy->aggr_mode == PHY_DL_MODE && iphy_id == PHY_1) {
+ dev_err(dev, "Invalid. ComboPhy is in Dual lane mode %d\n", iphy_id);
+ return ERR_PTR(-EINVAL);
+ }
+
+ return cbphy->iphy[iphy_id].phy;
+}
+
+static int intel_cbphy_create(struct intel_combo_phy *cbphy)
+{
+ struct phy_provider *phy_provider;
+ struct device *dev = cbphy->dev;
+ struct intel_cbphy_iphy *iphy;
+ int i;
+
+ for (i = 0; i < PHY_MAX_NUM; i++) {
+ iphy = &cbphy->iphy[i];
+ iphy->parent = cbphy;
+ iphy->id = i;
+
+ /* In dual lane mode skip phy creation for the second phy */
+ if (cbphy->aggr_mode == PHY_DL_MODE && iphy->id == PHY_1)
+ continue;
+
+ iphy->phy = devm_phy_create(dev, NULL, &intel_cbphy_ops);
+ if (IS_ERR(iphy->phy)) {
+ dev_err(dev, "PHY[%u:%u]: create PHY instance failed!\n",
+ COMBO_PHY_ID(iphy), PHY_ID(iphy));
+
+ return PTR_ERR(iphy->phy);
+ }
+
+ phy_set_drvdata(iphy->phy, iphy);
+ }
+
+ dev_set_drvdata(dev, cbphy);
+ phy_provider = devm_of_phy_provider_register(dev, intel_cbphy_xlate);
+ if (IS_ERR(phy_provider))
+ dev_err(dev, "Register PHY provider failed!\n");
+
+ return PTR_ERR_OR_ZERO(phy_provider);
+}
+
+static int intel_cbphy_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct intel_combo_phy *cbphy;
+ int ret;
+
+ cbphy = devm_kzalloc(dev, sizeof(*cbphy), GFP_KERNEL);
+ if (!cbphy)
+ return -ENOMEM;
+
+ cbphy->dev = dev;
+ cbphy->init_cnt = 0;
+ mutex_init(&cbphy->lock);
+ ret = intel_cbphy_fwnode_parse(cbphy);
+ if (ret)
+ return ret;
+
+ platform_set_drvdata(pdev, cbphy);
+
+ return intel_cbphy_create(cbphy);
+}
+
+static int intel_cbphy_remove(struct platform_device *pdev)
+{
+ struct intel_combo_phy *cbphy = platform_get_drvdata(pdev);
+
+ intel_cbphy_rst_assert(cbphy);
+ clk_disable_unprepare(cbphy->core_clk);
+ return 0;
+}
+
+static const struct of_device_id of_intel_cbphy_match[] = {
+ { .compatible = "intel,combo-phy" },
+ { .compatible = "intel,combophy-lgm" },
+ {}
+};
+
+static struct platform_driver intel_cbphy_driver = {
+ .probe = intel_cbphy_probe,
+ .remove = intel_cbphy_remove,
+ .driver = {
+ .name = "intel-combo-phy",
+ .of_match_table = of_intel_cbphy_match,
+ }
+};
+
+module_platform_driver(intel_cbphy_driver);
+
+MODULE_DESCRIPTION("Intel Combo-phy driver");
+MODULE_LICENSE("GPL v2");
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Intel eMMC PHY driver
+ * Copyright (C) 2019 Intel, Corp.
+ */
+
+#include <linux/bits.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+/* eMMC phy register definitions */
+#define EMMC_PHYCTRL0_REG 0xa8
+#define DR_TY_MASK GENMASK(30, 28)
+#define DR_TY_SHIFT(x) (((x) << 28) & DR_TY_MASK)
+#define OTAPDLYENA BIT(14)
+#define OTAPDLYSEL_MASK GENMASK(13, 10)
+#define OTAPDLYSEL_SHIFT(x) (((x) << 10) & OTAPDLYSEL_MASK)
+
+#define EMMC_PHYCTRL1_REG 0xac
+#define PDB_MASK BIT(0)
+#define PDB_SHIFT(x) (((x) << 0) & PDB_MASK)
+#define ENDLL_MASK BIT(7)
+#define ENDLL_SHIFT(x) (((x) << 7) & ENDLL_MASK)
+
+#define EMMC_PHYCTRL2_REG 0xb0
+#define FRQSEL_25M 0
+#define FRQSEL_50M 1
+#define FRQSEL_100M 2
+#define FRQSEL_150M 3
+#define FRQSEL_MASK GENMASK(24, 22)
+#define FRQSEL_SHIFT(x) (((x) << 22) & FRQSEL_MASK)
+
+#define EMMC_PHYSTAT_REG 0xbc
+#define CALDONE_MASK BIT(9)
+#define DLLRDY_MASK BIT(8)
+#define IS_CALDONE(x) ((x) & CALDONE_MASK)
+#define IS_DLLRDY(x) ((x) & DLLRDY_MASK)
+
+struct intel_emmc_phy {
+ struct regmap *syscfg;
+ struct clk *emmcclk;
+};
+
+static int intel_emmc_phy_power(struct phy *phy, bool on_off)
+{
+ struct intel_emmc_phy *priv = phy_get_drvdata(phy);
+ unsigned int caldone;
+ unsigned int dllrdy;
+ unsigned int freqsel;
+ unsigned long rate;
+ int ret, quot;
+
+ /*
+ * Keep phyctrl_pdb and phyctrl_endll low to allow
+ * initialization of CALIO state M/C DFFs
+ */
+ ret = regmap_update_bits(priv->syscfg, EMMC_PHYCTRL1_REG, PDB_MASK,
+ PDB_SHIFT(0));
+ if (ret) {
+ dev_err(&phy->dev, "CALIO power down bar failed: %d\n", ret);
+ return ret;
+ }
+
+ /* Power off already finished above */
+ if (!on_off)
+ return 0;
+
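+ /*
+ * Map the card clock to the nearest FRQSEL_* setting in 50 MHz steps,
+ * e.g. a 100 MHz emmcclk gives quot = 2 (FRQSEL_100M); anything that
+ * rounds above FRQSEL_150M is warned about and clamped.
+ */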
+ rate = clk_get_rate(priv->emmcclk);
+ quot = DIV_ROUND_CLOSEST(rate, 50000000);
+ if (quot > FRQSEL_150M)
+ dev_warn(&phy->dev, "Unsupported rate: %lu\n", rate);
+ freqsel = clamp_t(int, quot, FRQSEL_25M, FRQSEL_150M);
+
+ /*
+ * According to the user manual, calpad calibration
+ * cycle takes more than 2us without the minimal recommended
+ * value, so we may need a little margin here
+ */
+ udelay(5);
+
+ ret = regmap_update_bits(priv->syscfg, EMMC_PHYCTRL1_REG, PDB_MASK,
+ PDB_SHIFT(1));
+ if (ret) {
+ dev_err(&phy->dev, "CALIO power down bar failed: %d\n", ret);
+ return ret;
+ }
+
+ /*
+ * According to the user manual, the driver should wait 5us for calpad
+ * busy trimming. However, this value is documented to be PVT (process,
+ * voltage and temperature) dependent, and failures have been observed
+ * in practice, so be more tolerant when waiting for calpad busy
+ * trimming.
+ */
+ ret = regmap_read_poll_timeout(priv->syscfg, EMMC_PHYSTAT_REG,
+ caldone, IS_CALDONE(caldone),
+ 0, 50);
+ if (ret) {
+ dev_err(&phy->dev, "caldone failed, ret=%d\n", ret);
+ return ret;
+ }
+
+ /* Set the frequency of the DLL operation */
+ ret = regmap_update_bits(priv->syscfg, EMMC_PHYCTRL2_REG, FRQSEL_MASK,
+ FRQSEL_SHIFT(freqsel));
+ if (ret) {
+ dev_err(&phy->dev, "set the frequency of dll failed:%d\n", ret);
+ return ret;
+ }
+
+ /* Turn on the DLL */
+ ret = regmap_update_bits(priv->syscfg, EMMC_PHYCTRL1_REG, ENDLL_MASK,
+ ENDLL_SHIFT(1));
+ if (ret) {
+ dev_err(&phy->dev, "turn on the dll failed: %d\n", ret);
+ return ret;
+ }
+
+ /*
+ * After enabling analog DLL circuits docs say that we need 10.2 us if
+ * our source clock is at 50 MHz and that lock time scales linearly
+ * with clock speed. If we are powering on the PHY and the card clock
+ * is super slow (like 100 kHz) this could take as long as 5.1 ms as
+ * per the math: 10.2 us * (50000000 Hz / 100000 Hz) => 5.1 ms
+ * Hopefully we won't be running at 100 kHz, but we should still make
+ * sure we wait long enough.
+ *
+ * NOTE: There appear to be corner cases where the DLL seems to take
+ * extra long to lock for reasons that aren't understood. In some
+ * extreme cases we've seen it take up to over 10ms (!). We'll be
+ * generous and give it 50ms.
+ */
+ ret = regmap_read_poll_timeout(priv->syscfg,
+ EMMC_PHYSTAT_REG,
+ dllrdy, IS_DLLRDY(dllrdy),
+ 0, 50 * USEC_PER_MSEC);
+ if (ret) {
+ dev_err(&phy->dev, "dllrdy failed. ret=%d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int intel_emmc_phy_init(struct phy *phy)
+{
+ struct intel_emmc_phy *priv = phy_get_drvdata(phy);
+
+ /*
+ * We purposely get the clock here and not in probe to avoid the
+ * circular dependency problem. We expect:
+ * - PHY driver to probe
+ * - SDHCI driver to start probe
+ * - SDHCI driver to register its clock
+ * - SDHCI driver to get the PHY
+ * - SDHCI driver to init the PHY
+ *
+ * The clock is optional, so on any error just return it to the user
+ * like any other error.
+ */
+ priv->emmcclk = clk_get_optional(&phy->dev, "emmcclk");
+ if (IS_ERR(priv->emmcclk)) {
+ dev_err(&phy->dev, "ERROR: getting emmcclk\n");
+ return PTR_ERR(priv->emmcclk);
+ }
+
+ return 0;
+}
+
+static int intel_emmc_phy_exit(struct phy *phy)
+{
+ struct intel_emmc_phy *priv = phy_get_drvdata(phy);
+
+ clk_put(priv->emmcclk);
+
+ return 0;
+}
+
+static int intel_emmc_phy_power_on(struct phy *phy)
+{
+ struct intel_emmc_phy *priv = phy_get_drvdata(phy);
+ int ret;
+
+ /* Drive impedance: 50 Ohm */
+ ret = regmap_update_bits(priv->syscfg, EMMC_PHYCTRL0_REG, DR_TY_MASK,
+ DR_TY_SHIFT(6));
+ if (ret) {
+ dev_err(&phy->dev, "ERROR set drive-impednce-50ohm: %d\n", ret);
+ return ret;
+ }
+
+ /* Output tap delay: disable */
+ ret = regmap_update_bits(priv->syscfg, EMMC_PHYCTRL0_REG, OTAPDLYENA,
+ 0);
+ if (ret) {
+ dev_err(&phy->dev, "ERROR Set output tap delay : %d\n", ret);
+ return ret;
+ }
+
+ /* Output tap delay */
+ ret = regmap_update_bits(priv->syscfg, EMMC_PHYCTRL0_REG,
+ OTAPDLYSEL_MASK, OTAPDLYSEL_SHIFT(4));
+ if (ret) {
+ dev_err(&phy->dev, "ERROR: output tap dly select: %d\n", ret);
+ return ret;
+ }
+
+ /* Power up eMMC phy analog blocks */
+ return intel_emmc_phy_power(phy, true);
+}
+
+static int intel_emmc_phy_power_off(struct phy *phy)
+{
+ /* Power down eMMC phy analog blocks */
+ return intel_emmc_phy_power(phy, false);
+}
+
+static const struct phy_ops ops = {
+ .init = intel_emmc_phy_init,
+ .exit = intel_emmc_phy_exit,
+ .power_on = intel_emmc_phy_power_on,
+ .power_off = intel_emmc_phy_power_off,
+ .owner = THIS_MODULE,
+};
+
+static int intel_emmc_phy_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ struct intel_emmc_phy *priv;
+ struct phy *generic_phy;
+ struct phy_provider *phy_provider;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ /* Get eMMC phy (accessed via chiptop) regmap */
+ priv->syscfg = syscon_regmap_lookup_by_phandle(np, "intel,syscon");
+ if (IS_ERR(priv->syscfg)) {
+ dev_err(dev, "failed to find syscon\n");
+ return PTR_ERR(priv->syscfg);
+ }
+
+ generic_phy = devm_phy_create(dev, np, &ops);
+ if (IS_ERR(generic_phy)) {
+ dev_err(dev, "failed to create PHY\n");
+ return PTR_ERR(generic_phy);
+ }
+
+ phy_set_drvdata(generic_phy, priv);
+ phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
+
+ return PTR_ERR_OR_ZERO(phy_provider);
+}
+
+static const struct of_device_id intel_emmc_phy_dt_ids[] = {
+ { .compatible = "intel,lgm-emmc-phy" },
+ {}
+};
+
+MODULE_DEVICE_TABLE(of, intel_emmc_phy_dt_ids);
+
+static struct platform_driver intel_emmc_driver = {
+ .probe = intel_emmc_phy_probe,
+ .driver = {
+ .name = "intel-emmc-phy",
+ .of_match_table = intel_emmc_phy_dt_ids,
+ },
+};
+
+module_platform_driver(intel_emmc_driver);
+
+MODULE_AUTHOR("Peter Harliman Liem <peter.harliman.liem@intel.com>");
+MODULE_DESCRIPTION("Intel eMMC PHY driver");
+MODULE_LICENSE("GPL v2");