/*
 * Copyright (c) 2016-2020, The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
 * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/debugfs.h>
#include <linux/phy.h>
#include <linux/of.h>
#include <linux/of_mdio.h>
#include <linux/of_gpio.h>
#include <linux/gpio.h>
#include <linux/reset.h>

#if defined(CONFIG_AVM_KERNEL)
#include
#include
#endif

#define MDIO_CTRL_0_REG				0x40
#define MDIO_CTRL_1_REG				0x44
#define MDIO_CTRL_2_REG				0x48
#define MDIO_CTRL_3_REG				0x4c
#define MDIO_CTRL_4_REG				0x50
#define MDIO_CTRL_4_ACCESS_BUSY			BIT(16)
#define MDIO_CTRL_4_ACCESS_START		BIT(8)
#define MDIO_CTRL_4_ACCESS_CODE_READ		0
#define MDIO_CTRL_4_ACCESS_CODE_WRITE		1
#define MDIO_CTRL_4_ACCESS_CODE_C45_ADDR	0
#define MDIO_CTRL_4_ACCESS_CODE_C45_WRITE	1
#define MDIO_CTRL_4_ACCESS_CODE_C45_READ	2

/* Both macros expect a local "struct qca_mdio_data *am" to be in scope. */
#define CTRL_0_REG_DEFAULT_VALUE	(0x1500F | am->speed_div)
#define CTRL_0_REG_C45_DEFAULT_VALUE	(0x1510F | am->speed_div)

#define QCA_MDIO_RETRY		1000
#define QCA_MDIO_DELAY		10
#define QCA_MAX_PHY_RESET	3
#define QCA_MDIO_CLK_RATE	100000000

#define TCSR_LDO_ADDR		0x19475C4
#define GCC_GEPHY_ADDR		0x1856004
#define REG_SIZE		4

struct qca_mdio_data {
	struct mii_bus *mii_bus;
	struct clk *mdio_clk;
	void __iomem *membase;
	int phy_irq[PHY_MAX_ADDR];
	unsigned int speed_div;
	struct dentry *speed_file;
};

static int qca_mdio_wait_busy(struct qca_mdio_data *am)
{
	int i;

	for (i = 0; i < QCA_MDIO_RETRY; i++) {
		unsigned int busy;

		busy = readl(am->membase + MDIO_CTRL_4_REG);
		busy &= MDIO_CTRL_4_ACCESS_BUSY;
		if (!busy)
			return 0;

		udelay(QCA_MDIO_DELAY);
	}

	pr_err("%s: MDIO operation timed out\n", am->mii_bus->name);

	return -ETIMEDOUT;
}

static int qca_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
{
	struct qca_mdio_data *am = bus->priv;
	int value = 0;
	unsigned int cmd = 0;

	if (qca_mdio_wait_busy(am))
		return 0xffff;

	if (regnum & MII_ADDR_C45) {
		unsigned int mmd = (regnum >> 16) & 0x1F;
		unsigned int reg = regnum & 0xFFFF;

		writel(CTRL_0_REG_C45_DEFAULT_VALUE,
		       am->membase + MDIO_CTRL_0_REG);

		/* issue the phy address and mmd */
		writel((mii_id << 8) | mmd, am->membase + MDIO_CTRL_1_REG);

		/* issue reg */
		writel(reg, am->membase + MDIO_CTRL_2_REG);

		cmd = MDIO_CTRL_4_ACCESS_START |
		      MDIO_CTRL_4_ACCESS_CODE_C45_ADDR;
	} else {
		writel(CTRL_0_REG_DEFAULT_VALUE,
		       am->membase + MDIO_CTRL_0_REG);

		/* issue the phy address and reg */
		writel((mii_id << 8) | regnum, am->membase + MDIO_CTRL_1_REG);

		cmd = MDIO_CTRL_4_ACCESS_START | MDIO_CTRL_4_ACCESS_CODE_READ;
	}

	/* issue command */
	writel(cmd, am->membase + MDIO_CTRL_4_REG);

	/* Wait complete */
	if (qca_mdio_wait_busy(am))
		return 0xffff;

	/* Clause 45: the command above was only the address phase,
	 * now issue the actual read.
	 */
	if (regnum & MII_ADDR_C45) {
		cmd = MDIO_CTRL_4_ACCESS_START |
		      MDIO_CTRL_4_ACCESS_CODE_C45_READ;
		writel(cmd, am->membase + MDIO_CTRL_4_REG);

		if (qca_mdio_wait_busy(am))
			return 0xffff;
	}

	/* Read data */
	value = readl(am->membase + MDIO_CTRL_3_REG);

	return value;
}
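/*
 * MDIO write accessor. A Clause 22 write is a single command; a Clause 45
 * write first issues an address phase (MMD via CTRL_1, register via CTRL_2)
 * and then the data phase through the same CTRL_2/CTRL_4 registers.
 */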
static int qca_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
			  u16 value)
{
	struct qca_mdio_data *am = bus->priv;
	unsigned int cmd = 0;

	if (qca_mdio_wait_busy(am))
		return -ETIMEDOUT;

	if (regnum & MII_ADDR_C45) {
		unsigned int mmd = (regnum >> 16) & 0x1F;
		unsigned int reg = regnum & 0xFFFF;

		writel(CTRL_0_REG_C45_DEFAULT_VALUE,
		       am->membase + MDIO_CTRL_0_REG);

		/* issue the phy address and mmd */
		writel((mii_id << 8) | mmd, am->membase + MDIO_CTRL_1_REG);

		/* issue reg */
		writel(reg, am->membase + MDIO_CTRL_2_REG);

		cmd = MDIO_CTRL_4_ACCESS_START |
		      MDIO_CTRL_4_ACCESS_CODE_C45_ADDR;
		writel(cmd, am->membase + MDIO_CTRL_4_REG);

		if (qca_mdio_wait_busy(am))
			return -ETIMEDOUT;
	} else {
		writel(CTRL_0_REG_DEFAULT_VALUE,
		       am->membase + MDIO_CTRL_0_REG);

		/* issue the phy address and reg */
		writel((mii_id << 8) | regnum, am->membase + MDIO_CTRL_1_REG);
	}

	/* issue write data */
	writel(value, am->membase + MDIO_CTRL_2_REG);

	/* issue write command */
	if (regnum & MII_ADDR_C45)
		cmd = MDIO_CTRL_4_ACCESS_START |
		      MDIO_CTRL_4_ACCESS_CODE_C45_WRITE;
	else
		cmd = MDIO_CTRL_4_ACCESS_START | MDIO_CTRL_4_ACCESS_CODE_WRITE;

	writel(cmd, am->membase + MDIO_CTRL_4_REG);

	/* Wait write complete */
	if (qca_mdio_wait_busy(am))
		return -ETIMEDOUT;

	return 0;
}

static int qca_phy_gpio_set(struct platform_device *pdev, int number)
{
	int ret;

	ret = gpio_request(number, "phy-reset-gpio");
	if (ret) {
		dev_err(&pdev->dev, "Can't get phy-reset-gpio %d\n", ret);
		return ret;
	}

	ret = gpio_direction_output(number, 0x0);
	if (ret) {
		dev_err(&pdev->dev,
			"Can't set direction for phy-reset-gpio %d\n", ret);
		goto phy_reset_out;
	}

	/* hold the PHY in reset for ~100 ms, then release it */
	usleep_range(100000, 110000);
	gpio_set_value(number, 0x01);
	usleep_range(100000, 110000);

phy_reset_out:
	gpio_free(number);

	return ret;
}
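/*
 * Optionally toggle the PHY reset GPIO(s) before the MDIO bus is registered.
 * Up to QCA_MAX_PHY_RESET "phy-reset-gpio" entries are handled; the early
 * reset is only performed when the device tree also sets
 * "avm,phy-early-reset". Returns 0 when no GPIO is found or no early reset
 * is requested.
 */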
static int qca_phy_reset(struct platform_device *pdev)
{
	struct device_node *mdio_node = pdev->dev.of_node;
	int phy_reset_gpio_number;
	int ret, i;

	for (i = 0; i < QCA_MAX_PHY_RESET; i++) {
		ret = of_get_named_gpio(mdio_node, "phy-reset-gpio", i);
		if (ret < 0) {
			dev_info(&pdev->dev,
				 "Could not find phy-reset-gpio, idx %d\n", i);
			return 0;
		}

		/* AVM/CMM: We only want to do an early PHY reset when it
		 * really is needed, to keep the count of autonegotiation
		 * attempts as low as possible. Some platform/switch
		 * combinations still require an early PHY reset during MDIO
		 * initialisation. This gets sorted out here.
		 */
		if (!of_property_read_bool(mdio_node, "avm,phy-early-reset")) {
			dev_info(&pdev->dev, "Skipping early reset\n");
			return 0;
		}

		dev_info(&pdev->dev,
			 "Early reset with phy-reset-gpio, idx %d\n", i);

		phy_reset_gpio_number = ret;
		ret = qca_phy_gpio_set(pdev, phy_reset_gpio_number);
		if (ret)
			return ret;
	}

	return 0;
}

static bool qca_mdio_clk_div_check(unsigned int val)
{
	return (val == 0 || val == 1 || val == 3 || val == 7 || val == 15);
}

static int qca_mdio_debug_speed_set(void *data, unsigned long long val)
{
	struct qca_mdio_data *am = data;

	if (val & ~0xf || !qca_mdio_clk_div_check(val)) {
		dev_warn(am->mii_bus->parent,
			 "Invalid value %llx: Must be in 0x0, 0x1, 0x3, 0x7, 0xf\n",
			 val);
		return -EINVAL;
	}

	/* The divider is stored pre-shifted into the CTRL_0 clock field. */
	am->speed_div = (unsigned int)val << 4;

	return 0;
}

static int qca_mdio_debug_speed_get(void *data, unsigned long long *val)
{
	struct qca_mdio_data *am = data;

	*val = am->speed_div >> 4;

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(qca_mdio_debug_speed_fops, qca_mdio_debug_speed_get,
			qca_mdio_debug_speed_set, "%llx\n");

static void qca_mdio_debugfs_init(struct qca_mdio_data *am)
{
	am->speed_file = debugfs_create_file("qca_mdio_clk_div",
					     0644, /* S_IRUGO | S_IWUSR */
					     NULL, am,
					     &qca_mdio_debug_speed_fops);
	if (IS_ERR_OR_NULL(am->speed_file))
		dev_err(am->mii_bus->parent,
			"Cannot create qca_mdio_clk_div debug entry\n");
}

static void qca_tcsr_ldo_rdy_set(bool ready)
{
	void __iomem *tcsr_base = NULL;
	u32 val;

	tcsr_base = ioremap_nocache(TCSR_LDO_ADDR, REG_SIZE);
	if (!tcsr_base)
		return;

	val = readl(tcsr_base);
	if (ready)
		val |= 1;
	else
		val &= ~1;
	writel(val, tcsr_base);

	usleep_range(100000, 110000);

	iounmap(tcsr_base);
}
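/*
 * Probe. Optional properties consumed from the device tree node are
 * "phy-reset-gpio" and "avm,phy-early-reset" (early PHY reset, see above)
 * and "avm,speed_div" (MDC clock divider, one of 0x0/0x1/0x3/0x7/0xf), plus
 * the optional "gcc_mdio_ahb_clk" clock. On IPQ5018 the "gephy_mdc_rst"
 * reset and the TCSR LDO bit are additionally toggled around bus
 * registration.
 */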
static int qca_mdio_probe(struct platform_device *pdev)
{
	struct qca_mdio_data *am;
	struct resource *res;
	int ret, i;
	struct reset_control *rst = ERR_PTR(-EINVAL);

	if (of_machine_is_compatible("qcom,ipq5018")) {
		qca_tcsr_ldo_rdy_set(true);

		rst = of_reset_control_get(pdev->dev.of_node, "gephy_mdc_rst");
		if (!IS_ERR(rst)) {
			reset_control_deassert(rst);
			usleep_range(100000, 110000);
		}
	}

	ret = qca_phy_reset(pdev);
	/* AVM/AMY: We don't exit when we did not find a reset GPIO, since
	 * resetting the PHY is not mandatory and for some products the PHY
	 * reset is not connected at all.
	 */
	if (ret)
		dev_info(&pdev->dev, "Could not find reset gpio\n");

	am = devm_kzalloc(&pdev->dev, sizeof(*am), GFP_KERNEL);
	if (!am)
		return -ENOMEM;

	am->mdio_clk = devm_clk_get(&pdev->dev, "gcc_mdio_ahb_clk");
	if (!IS_ERR(am->mdio_clk)) {
		ret = clk_set_rate(am->mdio_clk, QCA_MDIO_CLK_RATE);
		if (ret)
			goto err_out;

		ret = clk_prepare_enable(am->mdio_clk);
		if (ret)
			goto err_out;
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(&pdev->dev, "no iomem resource found\n");
		ret = -ENXIO;
		goto err_disable_clk;
	}

	am->membase = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(am->membase)) {
		dev_err(&pdev->dev, "unable to ioremap registers\n");
		ret = PTR_ERR(am->membase);
		goto err_disable_clk;
	}

	am->mii_bus = mdiobus_alloc();
	if (!am->mii_bus) {
		ret = -ENOMEM;
		goto err_disable_clk;
	}

	writel(CTRL_0_REG_DEFAULT_VALUE, am->membase + MDIO_CTRL_0_REG);

	am->mii_bus->name = "qca_mdio";
	am->mii_bus->read = &qca_mdio_read;
	am->mii_bus->write = &qca_mdio_write;
	am->mii_bus->irq = am->phy_irq;
	am->mii_bus->priv = am;
	am->mii_bus->parent = &pdev->dev;
	snprintf(am->mii_bus->id, MII_BUS_ID_SIZE, "%s", dev_name(&pdev->dev));

	for (i = 0; i < PHY_MAX_ADDR; i++)
		am->phy_irq[i] = PHY_POLL;

	platform_set_drvdata(pdev, am);

	of_property_read_u32(pdev->dev.of_node, "avm,speed_div",
			     &am->speed_div);
	if (!qca_mdio_clk_div_check(am->speed_div)) {
		dev_warn(&pdev->dev,
			 "avm,speed_div=0x%x Invalid value: Must be in 0x0, 0x1, 0x3, 0x7, 0xf. Default to 0.\n",
			 am->speed_div);
		am->speed_div = 0;
	} else {
		am->speed_div <<= 4;
	}

	/* AVM: Get PLC out of reset. */
#if defined(CONFIG_AVM_KERNEL)
	/* AVM - For Powerline with a PHY that goes into reset with the PLC
	 * reset line, get it out of reset for initialization. It doesn't
	 * matter if this starts sending single packets: if something gets
	 * through, it gets deleted during edma initialization, or it gets
	 * ignored. avm_net_event is responsible for putting the PLC back
	 * into reset once driver initialization is completed.
	 */
	ret = avm_get_hw_config(AVM_HW_CONFIG_VERSION, "gpio_avm_reset_plc",
				&i, NULL);
	if (ret >= 0)
		avm_gpio_out_bit(i, 1);
#endif

	ret = of_mdiobus_register(am->mii_bus, pdev->dev.of_node);
	if (ret)
		goto err_free_bus;

	qca_mdio_debugfs_init(am);

	dev_info(&pdev->dev, "qca-mdio driver was registered\n");

	if (of_machine_is_compatible("qcom,ipq5018")) {
		qca_tcsr_ldo_rdy_set(false);
		if (!IS_ERR(rst))
			reset_control_assert(rst);
	}

	return 0;

err_free_bus:
	mdiobus_free(am->mii_bus);
err_disable_clk:
	if (!IS_ERR(am->mdio_clk))
		clk_disable_unprepare(am->mdio_clk);
err_out:
	if (of_machine_is_compatible("qcom,ipq5018")) {
		qca_tcsr_ldo_rdy_set(false);
		if (!IS_ERR(rst))
			reset_control_assert(rst);
	}

	return ret;
}

static int qca_mdio_remove(struct platform_device *pdev)
{
	struct qca_mdio_data *am = platform_get_drvdata(pdev);

	if (am) {
		debugfs_remove(am->speed_file);
		if (!IS_ERR(am->mdio_clk))
			clk_disable_unprepare(am->mdio_clk);
		mdiobus_unregister(am->mii_bus);
		mdiobus_free(am->mii_bus);
		platform_set_drvdata(pdev, NULL);
	}

	return 0;
}

static const struct of_device_id qca_mdio_dt_ids[] = {
	{ .compatible = "qcom,ipq40xx-mdio" },
	{ .compatible = "qcom,qca-mdio" },
	{ }
};
MODULE_DEVICE_TABLE(of, qca_mdio_dt_ids);

static struct platform_driver qca_mdio_driver = {
	.probe = qca_mdio_probe,
	.remove = qca_mdio_remove,
	.driver = {
		.name = "qca-mdio",
		.of_match_table = qca_mdio_dt_ids,
	},
};

module_platform_driver(qca_mdio_driver);

MODULE_DESCRIPTION("QCA MDIO interface driver");
MODULE_LICENSE("Dual BSD/GPL");