Diffstat (limited to 'drivers'):

 drivers/bus/Kconfig                  |   16
 drivers/bus/Makefile                 |    3
 drivers/bus/da8xx-mstpri.c           |  267
 drivers/bus/tegra-gmi.c              |  284
 drivers/firmware/Kconfig             |   27
 drivers/firmware/Makefile            |    3
 drivers/firmware/arm_scpi.c          |  276
 drivers/firmware/psci.c              |    2
 drivers/firmware/psci_checker.c      |  490
 drivers/firmware/qcom_scm.c          |   53
 drivers/firmware/tegra/Kconfig       |   25
 drivers/firmware/tegra/Makefile      |    2
 drivers/firmware/tegra/bpmp.c        |  868
 drivers/firmware/tegra/ivc.c         |  695
 drivers/firmware/ti_sci.c            | 1991
 drivers/firmware/ti_sci.h            |  492
 drivers/mailbox/Kconfig              |    9
 drivers/mailbox/Makefile             |    2
 drivers/mailbox/tegra-hsp.c          |  479
 drivers/memory/Kconfig               |    8
 drivers/memory/Makefile              |    1
 drivers/memory/atmel-ebi.c           |    2
 drivers/memory/atmel-sdramc.c        |    6
 drivers/memory/da8xx-ddrctl.c        |  173
 drivers/misc/sram.c                  |   42
 drivers/reset/Kconfig                |    1
 drivers/reset/Makefile               |    1
 drivers/reset/core.c                 |   43
 drivers/reset/reset-berlin.c         |   12
 drivers/reset/reset-lpc18xx.c        |   32
 drivers/reset/reset-oxnas.c          |    1
 drivers/reset/reset-socfpga.c        |   10
 drivers/reset/reset-sunxi.c          |    9
 drivers/reset/reset-zynq.c           |   10
 drivers/reset/sti/Kconfig            |    8
 drivers/reset/sti/Makefile           |    2
 drivers/reset/sti/reset-stih415.c    |  112
 drivers/reset/sti/reset-stih416.c    |  143
 drivers/reset/tegra/Kconfig          |    2
 drivers/reset/tegra/Makefile         |    1
 drivers/reset/tegra/reset-bpmp.c     |   71
 drivers/soc/mediatek/Kconfig         |    2
 drivers/soc/mediatek/mtk-scpsys.c    |  465
 drivers/soc/renesas/Makefile         |    4
 drivers/soc/renesas/r8a7743-sysc.c   |   32
 drivers/soc/renesas/r8a7745-sysc.c   |   32
 drivers/soc/renesas/rcar-sysc.c      |    6
 drivers/soc/renesas/rcar-sysc.h      |    2
 drivers/soc/renesas/renesas-soc.c    |  257
 drivers/soc/rockchip/pm_domains.c    |   81
 drivers/soc/tegra/Kconfig            |   14
 drivers/soc/tegra/pmc.c              |  398
 52 files changed, 7331 insertions(+), 636 deletions(-)
diff --git a/drivers/bus/Kconfig b/drivers/bus/Kconfig
index 78751057164a..b9e8cfc93c7e 100644
--- a/drivers/bus/Kconfig
+++ b/drivers/bus/Kconfig
@@ -150,6 +150,13 @@ config TEGRA_ACONNECT
Driver for the Tegra ACONNECT bus which is used to interface with
the devices inside the Audio Processing Engine (APE) for Tegra210.
+config TEGRA_GMI
+ tristate "Tegra Generic Memory Interface bus driver"
+ depends on ARCH_TEGRA
+ help
+ Driver for the Tegra Generic Memory Interface bus which can be used
+ to attach devices such as NOR, UART, FPGA and more.
+
config UNIPHIER_SYSTEM_BUS
tristate "UniPhier System Bus driver"
depends on ARCH_UNIPHIER && OF
@@ -167,4 +174,13 @@ config VEXPRESS_CONFIG
help
Platform configuration infrastructure for the ARM Ltd.
Versatile Express.
+
+config DA8XX_MSTPRI
+ bool "TI da8xx master peripheral priority driver"
+ depends on ARCH_DAVINCI_DA8XX
+ help
+ Driver for Texas Instruments da8xx master peripheral priority
+ configuration. Allows adjusting the priorities of all master
+ peripherals.
+
endmenu
diff --git a/drivers/bus/Makefile b/drivers/bus/Makefile
index c6cfa6b2606e..cc6364bec054 100644
--- a/drivers/bus/Makefile
+++ b/drivers/bus/Makefile
@@ -19,5 +19,8 @@ obj-$(CONFIG_QCOM_EBI2) += qcom-ebi2.o
obj-$(CONFIG_SUNXI_RSB) += sunxi-rsb.o
obj-$(CONFIG_SIMPLE_PM_BUS) += simple-pm-bus.o
obj-$(CONFIG_TEGRA_ACONNECT) += tegra-aconnect.o
+obj-$(CONFIG_TEGRA_GMI) += tegra-gmi.o
obj-$(CONFIG_UNIPHIER_SYSTEM_BUS) += uniphier-system-bus.o
obj-$(CONFIG_VEXPRESS_CONFIG) += vexpress-config.o
+
+obj-$(CONFIG_DA8XX_MSTPRI) += da8xx-mstpri.o
diff --git a/drivers/bus/da8xx-mstpri.c b/drivers/bus/da8xx-mstpri.c
new file mode 100644
index 000000000000..063397f2c0db
--- /dev/null
+++ b/drivers/bus/da8xx-mstpri.c
@@ -0,0 +1,267 @@
+/*
+ * TI da8xx master peripheral priority driver
+ *
+ * Copyright (C) 2016 BayLibre SAS
+ *
+ * Author:
+ * Bartosz Golaszewski <bgolaszewski@baylibre.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/regmap.h>
+
+/*
+ * REVISIT: Linux doesn't have a good framework for the kind of performance
+ * knobs this driver controls. We can't use device tree properties as it deals
+ * with hardware configuration rather than description. We also don't want to
+ * commit to maintaining some random sysfs attributes.
+ *
+ * For now we just hardcode the register values for the boards that need
+ * some changes (as is the case for the LCD controller on da850-lcdk - the
+ * first board we support here). When Linux gets an appropriate framework,
+ * we'll easily convert the driver to it.
+ */
+
+#define DA8XX_MSTPRI0_OFFSET 0
+#define DA8XX_MSTPRI1_OFFSET 4
+#define DA8XX_MSTPRI2_OFFSET 8
+
+enum {
+ DA8XX_MSTPRI_ARM_I = 0,
+ DA8XX_MSTPRI_ARM_D,
+ DA8XX_MSTPRI_UPP,
+ DA8XX_MSTPRI_SATA,
+ DA8XX_MSTPRI_PRU0,
+ DA8XX_MSTPRI_PRU1,
+ DA8XX_MSTPRI_EDMA30TC0,
+ DA8XX_MSTPRI_EDMA30TC1,
+ DA8XX_MSTPRI_EDMA31TC0,
+ DA8XX_MSTPRI_VPIF_DMA_0,
+ DA8XX_MSTPRI_VPIF_DMA_1,
+ DA8XX_MSTPRI_EMAC,
+ DA8XX_MSTPRI_USB0CFG,
+ DA8XX_MSTPRI_USB0CDMA,
+ DA8XX_MSTPRI_UHPI,
+ DA8XX_MSTPRI_USB1,
+ DA8XX_MSTPRI_LCDC,
+};
+
+struct da8xx_mstpri_descr {
+ int reg;
+ int shift;
+ int mask;
+};
+
+static const struct da8xx_mstpri_descr da8xx_mstpri_priority_list[] = {
+ [DA8XX_MSTPRI_ARM_I] = {
+ .reg = DA8XX_MSTPRI0_OFFSET,
+ .shift = 0,
+ .mask = 0x0000000f,
+ },
+ [DA8XX_MSTPRI_ARM_D] = {
+ .reg = DA8XX_MSTPRI0_OFFSET,
+ .shift = 4,
+ .mask = 0x000000f0,
+ },
+ [DA8XX_MSTPRI_UPP] = {
+ .reg = DA8XX_MSTPRI0_OFFSET,
+ .shift = 16,
+ .mask = 0x000f0000,
+ },
+ [DA8XX_MSTPRI_SATA] = {
+ .reg = DA8XX_MSTPRI0_OFFSET,
+ .shift = 20,
+ .mask = 0x00f00000,
+ },
+ [DA8XX_MSTPRI_PRU0] = {
+ .reg = DA8XX_MSTPRI1_OFFSET,
+ .shift = 0,
+ .mask = 0x0000000f,
+ },
+ [DA8XX_MSTPRI_PRU1] = {
+ .reg = DA8XX_MSTPRI1_OFFSET,
+ .shift = 4,
+ .mask = 0x000000f0,
+ },
+ [DA8XX_MSTPRI_EDMA30TC0] = {
+ .reg = DA8XX_MSTPRI1_OFFSET,
+ .shift = 8,
+ .mask = 0x00000f00,
+ },
+ [DA8XX_MSTPRI_EDMA30TC1] = {
+ .reg = DA8XX_MSTPRI1_OFFSET,
+ .shift = 12,
+ .mask = 0x0000f000,
+ },
+ [DA8XX_MSTPRI_EDMA31TC0] = {
+ .reg = DA8XX_MSTPRI1_OFFSET,
+ .shift = 16,
+ .mask = 0x000f0000,
+ },
+ [DA8XX_MSTPRI_VPIF_DMA_0] = {
+ .reg = DA8XX_MSTPRI1_OFFSET,
+ .shift = 24,
+ .mask = 0x0f000000,
+ },
+ [DA8XX_MSTPRI_VPIF_DMA_1] = {
+ .reg = DA8XX_MSTPRI1_OFFSET,
+ .shift = 28,
+ .mask = 0xf0000000,
+ },
+ [DA8XX_MSTPRI_EMAC] = {
+ .reg = DA8XX_MSTPRI2_OFFSET,
+ .shift = 0,
+ .mask = 0x0000000f,
+ },
+ [DA8XX_MSTPRI_USB0CFG] = {
+ .reg = DA8XX_MSTPRI2_OFFSET,
+ .shift = 8,
+ .mask = 0x00000f00,
+ },
+ [DA8XX_MSTPRI_USB0CDMA] = {
+ .reg = DA8XX_MSTPRI2_OFFSET,
+ .shift = 12,
+ .mask = 0x0000f000,
+ },
+ [DA8XX_MSTPRI_UHPI] = {
+ .reg = DA8XX_MSTPRI2_OFFSET,
+ .shift = 20,
+ .mask = 0x00f00000,
+ },
+ [DA8XX_MSTPRI_USB1] = {
+ .reg = DA8XX_MSTPRI2_OFFSET,
+ .shift = 24,
+ .mask = 0x0f000000,
+ },
+ [DA8XX_MSTPRI_LCDC] = {
+ .reg = DA8XX_MSTPRI2_OFFSET,
+ .shift = 28,
+ .mask = 0xf0000000,
+ },
+};
+
+struct da8xx_mstpri_priority {
+ int which;
+ u32 val;
+};
+
+struct da8xx_mstpri_board_priorities {
+ const char *board;
+ const struct da8xx_mstpri_priority *priorities;
+ size_t numprio;
+};
+
+/*
+ * Default memory settings of da850 do not meet the throughput/latency
+ * requirements of tilcdc. This results in the image displayed being
+ * incorrect and the following warning being displayed by the LCDC
+ * drm driver:
+ *
+ * tilcdc da8xx_lcdc.0: tilcdc_crtc_irq(0x00000020): FIFO underfow
+ */
+static const struct da8xx_mstpri_priority da850_lcdk_priorities[] = {
+ {
+ .which = DA8XX_MSTPRI_LCDC,
+ .val = 0,
+ },
+ {
+ .which = DA8XX_MSTPRI_EDMA30TC1,
+ .val = 0,
+ },
+ {
+ .which = DA8XX_MSTPRI_EDMA30TC0,
+ .val = 1,
+ },
+};
+
+static const struct da8xx_mstpri_board_priorities da8xx_mstpri_board_confs[] = {
+ {
+ .board = "ti,da850-lcdk",
+ .priorities = da850_lcdk_priorities,
+ .numprio = ARRAY_SIZE(da850_lcdk_priorities),
+ },
+};
+
+static const struct da8xx_mstpri_board_priorities *
+da8xx_mstpri_get_board_prio(void)
+{
+ const struct da8xx_mstpri_board_priorities *board_prio;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(da8xx_mstpri_board_confs); i++) {
+ board_prio = &da8xx_mstpri_board_confs[i];
+
+ if (of_machine_is_compatible(board_prio->board))
+ return board_prio;
+ }
+
+ return NULL;
+}
+
+static int da8xx_mstpri_probe(struct platform_device *pdev)
+{
+ const struct da8xx_mstpri_board_priorities *prio_list;
+ const struct da8xx_mstpri_descr *prio_descr;
+ const struct da8xx_mstpri_priority *prio;
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+ void __iomem *mstpri;
+ u32 reg;
+ int i;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ mstpri = devm_ioremap_resource(dev, res);
+ if (IS_ERR(mstpri)) {
+ dev_err(dev, "unable to map MSTPRI registers\n");
+ return PTR_ERR(mstpri);
+ }
+
+ prio_list = da8xx_mstpri_get_board_prio();
+ if (!prio_list) {
+ dev_err(dev, "no master priorities defined for this board\n");
+ return -EINVAL;
+ }
+
+ for (i = 0; i < prio_list->numprio; i++) {
+ prio = &prio_list->priorities[i];
+ prio_descr = &da8xx_mstpri_priority_list[prio->which];
+
+ if (prio_descr->reg + sizeof(u32) > resource_size(res)) {
+ dev_warn(dev, "register offset out of range\n");
+ continue;
+ }
+
+ reg = readl(mstpri + prio_descr->reg);
+ reg &= ~prio_descr->mask;
+ reg |= prio->val << prio_descr->shift;
+
+ writel(reg, mstpri + prio_descr->reg);
+ }
+
+ return 0;
+}
+
+static const struct of_device_id da8xx_mstpri_of_match[] = {
+ { .compatible = "ti,da850-mstpri", },
+ { },
+};
+
+static struct platform_driver da8xx_mstpri_driver = {
+ .probe = da8xx_mstpri_probe,
+ .driver = {
+ .name = "da8xx-mstpri",
+ .of_match_table = da8xx_mstpri_of_match,
+ },
+};
+module_platform_driver(da8xx_mstpri_driver);
+
+MODULE_AUTHOR("Bartosz Golaszewski <bgolaszewski@baylibre.com>");
+MODULE_DESCRIPTION("TI da8xx master peripheral priority driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/bus/tegra-gmi.c b/drivers/bus/tegra-gmi.c
new file mode 100644
index 000000000000..a6570789f7af
--- /dev/null
+++ b/drivers/bus/tegra-gmi.c
@@ -0,0 +1,284 @@
+/*
+ * Driver for NVIDIA Generic Memory Interface
+ *
+ * Copyright (C) 2016 Host Mobility AB. All rights reserved.
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/reset.h>
+
+#define TEGRA_GMI_CONFIG 0x00
+#define TEGRA_GMI_CONFIG_GO BIT(31)
+#define TEGRA_GMI_BUS_WIDTH_32BIT BIT(30)
+#define TEGRA_GMI_MUX_MODE BIT(28)
+#define TEGRA_GMI_RDY_BEFORE_DATA BIT(24)
+#define TEGRA_GMI_RDY_ACTIVE_HIGH BIT(23)
+#define TEGRA_GMI_ADV_ACTIVE_HIGH BIT(22)
+#define TEGRA_GMI_OE_ACTIVE_HIGH BIT(21)
+#define TEGRA_GMI_CS_ACTIVE_HIGH BIT(20)
+#define TEGRA_GMI_CS_SELECT(x) ((x & 0x7) << 4)
+
+#define TEGRA_GMI_TIMING0 0x10
+#define TEGRA_GMI_MUXED_WIDTH(x) ((x & 0xf) << 12)
+#define TEGRA_GMI_HOLD_WIDTH(x) ((x & 0xf) << 8)
+#define TEGRA_GMI_ADV_WIDTH(x) ((x & 0xf) << 4)
+#define TEGRA_GMI_CE_WIDTH(x) (x & 0xf)
+
+#define TEGRA_GMI_TIMING1 0x14
+#define TEGRA_GMI_WE_WIDTH(x) ((x & 0xff) << 16)
+#define TEGRA_GMI_OE_WIDTH(x) ((x & 0xff) << 8)
+#define TEGRA_GMI_WAIT_WIDTH(x) (x & 0xff)
+
+#define TEGRA_GMI_MAX_CHIP_SELECT 8
+
+struct tegra_gmi {
+ struct device *dev;
+ void __iomem *base;
+ struct clk *clk;
+ struct reset_control *rst;
+
+ u32 snor_config;
+ u32 snor_timing0;
+ u32 snor_timing1;
+};
+
+static int tegra_gmi_enable(struct tegra_gmi *gmi)
+{
+ int err;
+
+ err = clk_prepare_enable(gmi->clk);
+ if (err < 0) {
+ dev_err(gmi->dev, "failed to enable clock: %d\n", err);
+ return err;
+ }
+
+ reset_control_assert(gmi->rst);
+ usleep_range(2000, 4000);
+ reset_control_deassert(gmi->rst);
+
+ writel(gmi->snor_timing0, gmi->base + TEGRA_GMI_TIMING0);
+ writel(gmi->snor_timing1, gmi->base + TEGRA_GMI_TIMING1);
+
+ gmi->snor_config |= TEGRA_GMI_CONFIG_GO;
+ writel(gmi->snor_config, gmi->base + TEGRA_GMI_CONFIG);
+
+ return 0;
+}
+
+static void tegra_gmi_disable(struct tegra_gmi *gmi)
+{
+ u32 config;
+
+ /* stop GMI operation */
+ config = readl(gmi->base + TEGRA_GMI_CONFIG);
+ config &= ~TEGRA_GMI_CONFIG_GO;
+ writel(config, gmi->base + TEGRA_GMI_CONFIG);
+
+ reset_control_assert(gmi->rst);
+ clk_disable_unprepare(gmi->clk);
+}
+
+static int tegra_gmi_parse_dt(struct tegra_gmi *gmi)
+{
+ struct device_node *child;
+ u32 property, ranges[4];
+ int err;
+
+ child = of_get_next_available_child(gmi->dev->of_node, NULL);
+ if (!child) {
+ dev_err(gmi->dev, "no child nodes found\n");
+ return -ENODEV;
+ }
+
+ /*
+ * We currently only support one child device due to lack of
+ * chip-select address decoding, which means that we only have one
+ * chip-select line from the GMI controller.
+ */
+ if (of_get_child_count(gmi->dev->of_node) > 1)
+ dev_warn(gmi->dev, "only one child device is supported\n");
+
+ if (of_property_read_bool(child, "nvidia,snor-data-width-32bit"))
+ gmi->snor_config |= TEGRA_GMI_BUS_WIDTH_32BIT;
+
+ if (of_property_read_bool(child, "nvidia,snor-mux-mode"))
+ gmi->snor_config |= TEGRA_GMI_MUX_MODE;
+
+ if (of_property_read_bool(child, "nvidia,snor-rdy-active-before-data"))
+ gmi->snor_config |= TEGRA_GMI_RDY_BEFORE_DATA;
+
+ if (of_property_read_bool(child, "nvidia,snor-rdy-active-high"))
+ gmi->snor_config |= TEGRA_GMI_RDY_ACTIVE_HIGH;
+
+ if (of_property_read_bool(child, "nvidia,snor-adv-active-high"))
+ gmi->snor_config |= TEGRA_GMI_ADV_ACTIVE_HIGH;
+
+ if (of_property_read_bool(child, "nvidia,snor-oe-active-high"))
+ gmi->snor_config |= TEGRA_GMI_OE_ACTIVE_HIGH;
+
+ if (of_property_read_bool(child, "nvidia,snor-cs-active-high"))
+ gmi->snor_config |= TEGRA_GMI_CS_ACTIVE_HIGH;
+
+ /* Decode the CS# */
+ err = of_property_read_u32_array(child, "ranges", ranges, 4);
+ if (err < 0) {
+ /* Invalid binding */
+ if (err == -EOVERFLOW) {
+ dev_err(gmi->dev,
+ "failed to decode CS: invalid ranges length\n");
+ goto error_cs;
+ }
+
+ /*
+ * If we reach here it means that the child node has an empty
+ * ranges or it does not exist at all. Attempt to decode the
+ * CS# from the reg property instead.
+ */
+ err = of_property_read_u32(child, "reg", &property);
+ if (err < 0) {
+ dev_err(gmi->dev,
+ "failed to decode CS: no reg property found\n");
+ goto error_cs;
+ }
+ } else {
+ property = ranges[1];
+ }
+
+ /* Valid chip selects are CS0-CS7 */
+ if (property >= TEGRA_GMI_MAX_CHIP_SELECT) {
+ dev_err(gmi->dev, "invalid chip select: %d", property);
+ err = -EINVAL;
+ goto error_cs;
+ }
+
+ gmi->snor_config |= TEGRA_GMI_CS_SELECT(property);
+
+ /* The default values that are provided below are reset values */
+ if (!of_property_read_u32(child, "nvidia,snor-muxed-width", &property))
+ gmi->snor_timing0 |= TEGRA_GMI_MUXED_WIDTH(property);
+ else
+ gmi->snor_timing0 |= TEGRA_GMI_MUXED_WIDTH(1);
+
+ if (!of_property_read_u32(child, "nvidia,snor-hold-width", &property))
+ gmi->snor_timing0 |= TEGRA_GMI_HOLD_WIDTH(property);
+ else
+ gmi->snor_timing0 |= TEGRA_GMI_HOLD_WIDTH(1);
+
+ if (!of_property_read_u32(child, "nvidia,snor-adv-width", &property))
+ gmi->snor_timing0 |= TEGRA_GMI_ADV_WIDTH(property);
+ else
+ gmi->snor_timing0 |= TEGRA_GMI_ADV_WIDTH(1);
+
+ if (!of_property_read_u32(child, "nvidia,snor-ce-width", &property))
+ gmi->snor_timing0 |= TEGRA_GMI_CE_WIDTH(property);
+ else
+ gmi->snor_timing0 |= TEGRA_GMI_CE_WIDTH(4);
+
+ if (!of_property_read_u32(child, "nvidia,snor-we-width", &property))
+ gmi->snor_timing1 |= TEGRA_GMI_WE_WIDTH(property);
+ else
+ gmi->snor_timing1 |= TEGRA_GMI_WE_WIDTH(1);
+
+ if (!of_property_read_u32(child, "nvidia,snor-oe-width", &property))
+ gmi->snor_timing1 |= TEGRA_GMI_OE_WIDTH(property);
+ else
+ gmi->snor_timing1 |= TEGRA_GMI_OE_WIDTH(1);
+
+ if (!of_property_read_u32(child, "nvidia,snor-wait-width", &property))
+ gmi->snor_timing1 |= TEGRA_GMI_WAIT_WIDTH(property);
+ else
+ gmi->snor_timing1 |= TEGRA_GMI_WAIT_WIDTH(3);
+
+error_cs:
+ of_node_put(child);
+ return err;
+}
+
+static int tegra_gmi_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct tegra_gmi *gmi;
+ struct resource *res;
+ int err;
+
+ gmi = devm_kzalloc(dev, sizeof(*gmi), GFP_KERNEL);
+ if (!gmi)
+ return -ENOMEM;
+
+ gmi->dev = dev;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ gmi->base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(gmi->base))
+ return PTR_ERR(gmi->base);
+
+ gmi->clk = devm_clk_get(dev, "gmi");
+ if (IS_ERR(gmi->clk)) {
+ dev_err(dev, "can not get clock\n");
+ return PTR_ERR(gmi->clk);
+ }
+
+ gmi->rst = devm_reset_control_get(dev, "gmi");
+ if (IS_ERR(gmi->rst)) {
+ dev_err(dev, "can not get reset\n");
+ return PTR_ERR(gmi->rst);
+ }
+
+ err = tegra_gmi_parse_dt(gmi);
+ if (err)
+ return err;
+
+ err = tegra_gmi_enable(gmi);
+ if (err < 0)
+ return err;
+
+ err = of_platform_default_populate(dev->of_node, NULL, dev);
+ if (err < 0) {
+ dev_err(dev, "fail to create devices.\n");
+ tegra_gmi_disable(gmi);
+ return err;
+ }
+
+ platform_set_drvdata(pdev, gmi);
+
+ return 0;
+}
+
+static int tegra_gmi_remove(struct platform_device *pdev)
+{
+ struct tegra_gmi *gmi = platform_get_drvdata(pdev);
+
+ of_platform_depopulate(gmi->dev);
+ tegra_gmi_disable(gmi);
+
+ return 0;
+}
+
+static const struct of_device_id tegra_gmi_id_table[] = {
+ { .compatible = "nvidia,tegra20-gmi", },
+ { .compatible = "nvidia,tegra30-gmi", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, tegra_gmi_id_table);
+
+static struct platform_driver tegra_gmi_driver = {
+ .probe = tegra_gmi_probe,
+ .remove = tegra_gmi_remove,
+ .driver = {
+ .name = "tegra-gmi",
+ .of_match_table = tegra_gmi_id_table,
+ },
+};
+module_platform_driver(tegra_gmi_driver);
+
+MODULE_AUTHOR("Mirza Krak <mirza.krak@gmail.com");
+MODULE_DESCRIPTION("NVIDIA Tegra GMI Bus Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/firmware/Kconfig b/drivers/firmware/Kconfig
index bca172d42c74..1867f0d1389b 100644
--- a/drivers/firmware/Kconfig
+++ b/drivers/firmware/Kconfig
@@ -8,6 +8,17 @@ menu "Firmware Drivers"
config ARM_PSCI_FW
bool
+config ARM_PSCI_CHECKER
+ bool "ARM PSCI checker"
+ depends on ARM_PSCI_FW && HOTPLUG_CPU && !TORTURE_TEST
+ help
+ Run the PSCI checker during startup. This checks that hotplug and
+ suspend operations work correctly when using PSCI.
+
+ The torture tests may interfere with the PSCI checker by turning CPUs
+ on and off through hotplug, so for now torture tests and PSCI checker
+ are mutually exclusive.
+
config ARM_SCPI_PROTOCOL
tristate "ARM System Control and Power Interface (SCPI) Message Protocol"
depends on MAILBOX
@@ -203,6 +214,21 @@ config QCOM_SCM_64
def_bool y
depends on QCOM_SCM && ARM64
+config TI_SCI_PROTOCOL
+ tristate "TI System Control Interface (TISCI) Message Protocol"
+ depends on TI_MESSAGE_MANAGER
+ help
+ TI System Control Interface (TISCI) Message Protocol is used to manage
+ compute systems such as ARM, DSP, etc. with the system controller in
+ complex System on Chip (SoC) designs such as those found on certain
+ Keystone-generation SoCs from TI.
+
+ The system controller provides various facilities, including power
+ management support.
+
+ This protocol library is used by client drivers to use the features
+ provided by the system controller.
+
config HAVE_ARM_SMCCC
bool
@@ -210,5 +236,6 @@ source "drivers/firmware/broadcom/Kconfig"
source "drivers/firmware/google/Kconfig"
source "drivers/firmware/efi/Kconfig"
source "drivers/firmware/meson/Kconfig"
+source "drivers/firmware/tegra/Kconfig"
endmenu
diff --git a/drivers/firmware/Makefile b/drivers/firmware/Makefile
index 898ac41fa8b3..a37f12e8d137 100644
--- a/drivers/firmware/Makefile
+++ b/drivers/firmware/Makefile
@@ -2,6 +2,7 @@
# Makefile for the linux kernel.
#
obj-$(CONFIG_ARM_PSCI_FW) += psci.o
+obj-$(CONFIG_ARM_PSCI_CHECKER) += psci_checker.o
obj-$(CONFIG_ARM_SCPI_PROTOCOL) += arm_scpi.o
obj-$(CONFIG_ARM_SCPI_POWER_DOMAIN) += scpi_pm_domain.o
obj-$(CONFIG_DMI) += dmi_scan.o
@@ -20,9 +21,11 @@ obj-$(CONFIG_QCOM_SCM) += qcom_scm.o
obj-$(CONFIG_QCOM_SCM_64) += qcom_scm-64.o
obj-$(CONFIG_QCOM_SCM_32) += qcom_scm-32.o
CFLAGS_qcom_scm-32.o :=$(call as-instr,.arch armv7-a\n.arch_extension sec,-DREQUIRES_SEC=1) -march=armv7-a
+obj-$(CONFIG_TI_SCI_PROTOCOL) += ti_sci.o
obj-y += broadcom/
obj-y += meson/
obj-$(CONFIG_GOOGLE_FIRMWARE) += google/
obj-$(CONFIG_EFI) += efi/
obj-$(CONFIG_UEFI_CPER) += efi/
+obj-y += tegra/
diff --git a/drivers/firmware/arm_scpi.c b/drivers/firmware/arm_scpi.c
index ce2bc2a38101..70e13230d8db 100644
--- a/drivers/firmware/arm_scpi.c
+++ b/drivers/firmware/arm_scpi.c
@@ -50,20 +50,27 @@
#define CMD_TOKEN_ID_MASK 0xff
#define CMD_DATA_SIZE_SHIFT 16
#define CMD_DATA_SIZE_MASK 0x1ff
+#define CMD_LEGACY_DATA_SIZE_SHIFT 20
+#define CMD_LEGACY_DATA_SIZE_MASK 0x1ff
#define PACK_SCPI_CMD(cmd_id, tx_sz) \
((((cmd_id) & CMD_ID_MASK) << CMD_ID_SHIFT) | \
(((tx_sz) & CMD_DATA_SIZE_MASK) << CMD_DATA_SIZE_SHIFT))
#define ADD_SCPI_TOKEN(cmd, token) \
((cmd) |= (((token) & CMD_TOKEN_ID_MASK) << CMD_TOKEN_ID_SHIFT))
+#define PACK_LEGACY_SCPI_CMD(cmd_id, tx_sz) \
+ ((((cmd_id) & CMD_ID_MASK) << CMD_ID_SHIFT) | \
+ (((tx_sz) & CMD_LEGACY_DATA_SIZE_MASK) << CMD_LEGACY_DATA_SIZE_SHIFT))
#define CMD_SIZE(cmd) (((cmd) >> CMD_DATA_SIZE_SHIFT) & CMD_DATA_SIZE_MASK)
+#define CMD_LEGACY_SIZE(cmd) (((cmd) >> CMD_LEGACY_DATA_SIZE_SHIFT) & \
+ CMD_LEGACY_DATA_SIZE_MASK)
#define CMD_UNIQ_MASK (CMD_TOKEN_ID_MASK << CMD_TOKEN_ID_SHIFT | CMD_ID_MASK)
#define CMD_XTRACT_UNIQ(cmd) ((cmd) & CMD_UNIQ_MASK)
#define SCPI_SLOT 0
#define MAX_DVFS_DOMAINS 8
-#define MAX_DVFS_OPPS 8
+#define MAX_DVFS_OPPS 16
#define DVFS_LATENCY(hdr) (le32_to_cpu(hdr) >> 16)
#define DVFS_OPP_COUNT(hdr) ((le32_to_cpu(hdr) >> 8) & 0xff)
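
A worked example of the two packing layouts above, taking a hypothetical
command ID of 0x15 with an 8-byte payload (the command ID stays in the
low bits; only the payload-size field moves from bit 16 to bit 20):

    u32 std = PACK_SCPI_CMD(0x15, 8);        /* 0x15 | (8 << 16) == 0x00080015 */
    u32 leg = PACK_LEGACY_SCPI_CMD(0x15, 8); /* 0x15 | (8 << 20) == 0x00800015 */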
@@ -99,6 +106,7 @@ enum scpi_error_codes {
SCPI_ERR_MAX
};
+/* SCPI Standard commands */
enum scpi_std_cmd {
SCPI_CMD_INVALID = 0x00,
SCPI_CMD_SCPI_READY = 0x01,
@@ -132,6 +140,108 @@ enum scpi_std_cmd {
SCPI_CMD_COUNT
};
+/* SCPI Legacy Commands */
+enum legacy_scpi_std_cmd {
+ LEGACY_SCPI_CMD_INVALID = 0x00,
+ LEGACY_SCPI_CMD_SCPI_READY = 0x01,
+ LEGACY_SCPI_CMD_SCPI_CAPABILITIES = 0x02,
+ LEGACY_SCPI_CMD_EVENT = 0x03,
+ LEGACY_SCPI_CMD_SET_CSS_PWR_STATE = 0x04,
+ LEGACY_SCPI_CMD_GET_CSS_PWR_STATE = 0x05,
+ LEGACY_SCPI_CMD_CFG_PWR_STATE_STAT = 0x06,
+ LEGACY_SCPI_CMD_GET_PWR_STATE_STAT = 0x07,
+ LEGACY_SCPI_CMD_SYS_PWR_STATE = 0x08,
+ LEGACY_SCPI_CMD_L2_READY = 0x09,
+ LEGACY_SCPI_CMD_SET_AP_TIMER = 0x0a,
+ LEGACY_SCPI_CMD_CANCEL_AP_TIME = 0x0b,
+ LEGACY_SCPI_CMD_DVFS_CAPABILITIES = 0x0c,
+ LEGACY_SCPI_CMD_GET_DVFS_INFO = 0x0d,
+ LEGACY_SCPI_CMD_SET_DVFS = 0x0e,
+ LEGACY_SCPI_CMD_GET_DVFS = 0x0f,
+ LEGACY_SCPI_CMD_GET_DVFS_STAT = 0x10,
+ LEGACY_SCPI_CMD_SET_RTC = 0x11,
+ LEGACY_SCPI_CMD_GET_RTC = 0x12,
+ LEGACY_SCPI_CMD_CLOCK_CAPABILITIES = 0x13,
+ LEGACY_SCPI_CMD_SET_CLOCK_INDEX = 0x14,
+ LEGACY_SCPI_CMD_SET_CLOCK_VALUE = 0x15,
+ LEGACY_SCPI_CMD_GET_CLOCK_VALUE = 0x16,
+ LEGACY_SCPI_CMD_PSU_CAPABILITIES = 0x17,
+ LEGACY_SCPI_CMD_SET_PSU = 0x18,
+ LEGACY_SCPI_CMD_GET_PSU = 0x19,
+ LEGACY_SCPI_CMD_SENSOR_CAPABILITIES = 0x1a,
+ LEGACY_SCPI_CMD_SENSOR_INFO = 0x1b,
+ LEGACY_SCPI_CMD_SENSOR_VALUE = 0x1c,
+ LEGACY_SCPI_CMD_SENSOR_CFG_PERIODIC = 0x1d,
+ LEGACY_SCPI_CMD_SENSOR_CFG_BOUNDS = 0x1e,
+ LEGACY_SCPI_CMD_SENSOR_ASYNC_VALUE = 0x1f,
+ LEGACY_SCPI_CMD_COUNT
+};
+
+/* List all commands that are required to go through the high priority link */
+static int legacy_hpriority_cmds[] = {
+ LEGACY_SCPI_CMD_GET_CSS_PWR_STATE,
+ LEGACY_SCPI_CMD_CFG_PWR_STATE_STAT,
+ LEGACY_SCPI_CMD_GET_PWR_STATE_STAT,
+ LEGACY_SCPI_CMD_SET_DVFS,
+ LEGACY_SCPI_CMD_GET_DVFS,
+ LEGACY_SCPI_CMD_SET_RTC,
+ LEGACY_SCPI_CMD_GET_RTC,
+ LEGACY_SCPI_CMD_SET_CLOCK_INDEX,
+ LEGACY_SCPI_CMD_SET_CLOCK_VALUE,
+ LEGACY_SCPI_CMD_GET_CLOCK_VALUE,
+ LEGACY_SCPI_CMD_SET_PSU,
+ LEGACY_SCPI_CMD_GET_PSU,
+ LEGACY_SCPI_CMD_SENSOR_CFG_PERIODIC,
+ LEGACY_SCPI_CMD_SENSOR_CFG_BOUNDS,
+};
+
+/* List all commands used by this driver, used as indexes */
+enum scpi_drv_cmds {
+ CMD_SCPI_CAPABILITIES = 0,
+ CMD_GET_CLOCK_INFO,
+ CMD_GET_CLOCK_VALUE,
+ CMD_SET_CLOCK_VALUE,
+ CMD_GET_DVFS,
+ CMD_SET_DVFS,
+ CMD_GET_DVFS_INFO,
+ CMD_SENSOR_CAPABILITIES,
+ CMD_SENSOR_INFO,
+ CMD_SENSOR_VALUE,
+ CMD_SET_DEVICE_PWR_STATE,
+ CMD_GET_DEVICE_PWR_STATE,
+ CMD_MAX_COUNT,
+};
+
+static int scpi_std_commands[CMD_MAX_COUNT] = {
+ SCPI_CMD_SCPI_CAPABILITIES,
+ SCPI_CMD_GET_CLOCK_INFO,
+ SCPI_CMD_GET_CLOCK_VALUE,
+ SCPI_CMD_SET_CLOCK_VALUE,
+ SCPI_CMD_GET_DVFS,
+ SCPI_CMD_SET_DVFS,
+ SCPI_CMD_GET_DVFS_INFO,
+ SCPI_CMD_SENSOR_CAPABILITIES,
+ SCPI_CMD_SENSOR_INFO,
+ SCPI_CMD_SENSOR_VALUE,
+ SCPI_CMD_SET_DEVICE_PWR_STATE,
+ SCPI_CMD_GET_DEVICE_PWR_STATE,
+};
+
+static int scpi_legacy_commands[CMD_MAX_COUNT] = {
+ LEGACY_SCPI_CMD_SCPI_CAPABILITIES,
+ -1, /* GET_CLOCK_INFO */
+ LEGACY_SCPI_CMD_GET_CLOCK_VALUE,
+ LEGACY_SCPI_CMD_SET_CLOCK_VALUE,
+ LEGACY_SCPI_CMD_GET_DVFS,
+ LEGACY_SCPI_CMD_SET_DVFS,
+ LEGACY_SCPI_CMD_GET_DVFS_INFO,
+ LEGACY_SCPI_CMD_SENSOR_CAPABILITIES,
+ LEGACY_SCPI_CMD_SENSOR_INFO,
+ LEGACY_SCPI_CMD_SENSOR_VALUE,
+ -1, /* SET_DEVICE_PWR_STATE */
+ -1, /* GET_DEVICE_PWR_STATE */
+};
+
struct scpi_xfer {
u32 slot; /* has to be first element */
u32 cmd;
@@ -160,7 +270,10 @@ struct scpi_chan {
struct scpi_drvinfo {
u32 protocol_version;
u32 firmware_version;
+ bool is_legacy;
int num_chans;
+ int *commands;
+ DECLARE_BITMAP(cmd_priority, LEGACY_SCPI_CMD_COUNT);
atomic_t next_chan;
struct scpi_ops *scpi_ops;
struct scpi_chan *channels;
@@ -177,6 +290,11 @@ struct scpi_shared_mem {
u8 payload[0];
} __packed;
+struct legacy_scpi_shared_mem {
+ __le32 status;
+ u8 payload[0];
+} __packed;
+
struct scp_capabilities {
__le32 protocol_version;
__le32 event_version;
@@ -202,6 +320,12 @@ struct clk_set_value {
__le32 rate;
} __packed;
+struct legacy_clk_set_value {
+ __le32 rate;
+ __le16 id;
+ __le16 reserved;
+} __packed;
+
struct dvfs_info {
__le32 header;
struct {
@@ -273,19 +397,43 @@ static void scpi_process_cmd(struct scpi_chan *ch, u32 cmd)
return;
}
- list_for_each_entry(t, &ch->rx_pending, node)
- if (CMD_XTRACT_UNIQ(t->cmd) == CMD_XTRACT_UNIQ(cmd)) {
- list_del(&t->node);
- match = t;
- break;
- }
+ /* The command type is not included in replies from the SCP firmware in
+ * legacy mode, so consider the head of the pending RX list to be the
+ * matching command if the list is not empty. In TX-only mode, the list
+ * would be empty.
+ */
+ if (scpi_info->is_legacy) {
+ match = list_first_entry(&ch->rx_pending, struct scpi_xfer,
+ node);
+ list_del(&match->node);
+ } else {
+ list_for_each_entry(t, &ch->rx_pending, node)
+ if (CMD_XTRACT_UNIQ(t->cmd) == CMD_XTRACT_UNIQ(cmd)) {
+ list_del(&t->node);
+ match = t;
+ break;
+ }
+ }
/* check if wait_for_completion is in progress or timed-out */
if (match && !completion_done(&match->done)) {
- struct scpi_shared_mem *mem = ch->rx_payload;
- unsigned int len = min(match->rx_len, CMD_SIZE(cmd));
+ unsigned int len;
+
+ if (scpi_info->is_legacy) {
+ struct legacy_scpi_shared_mem *mem = ch->rx_payload;
+
+ /* The RX length is not included in legacy firmware replies */
+ len = match->rx_len;
+
+ match->status = le32_to_cpu(mem->status);
+ memcpy_fromio(match->rx_buf, mem->payload, len);
+ } else {
+ struct scpi_shared_mem *mem = ch->rx_payload;
+
+ len = min(match->rx_len, CMD_SIZE(cmd));
+
+ match->status = le32_to_cpu(mem->status);
+ memcpy_fromio(match->rx_buf, mem->payload, len);
+ }
- match->status = le32_to_cpu(mem->status);
- memcpy_fromio(match->rx_buf, mem->payload, len);
if (match->rx_len > len)
memset(match->rx_buf + len, 0, match->rx_len - len);
complete(&match->done);
@@ -297,7 +445,10 @@ static void scpi_handle_remote_msg(struct mbox_client *c, void *msg)
{
struct scpi_chan *ch = container_of(c, struct scpi_chan, cl);
struct scpi_shared_mem *mem = ch->rx_payload;
- u32 cmd = le32_to_cpu(mem->command);
+ u32 cmd = 0;
+
+ if (!scpi_info->is_legacy)
+ cmd = le32_to_cpu(mem->command);
scpi_process_cmd(ch, cmd);
}
@@ -309,8 +460,13 @@ static void scpi_tx_prepare(struct mbox_client *c, void *msg)
struct scpi_chan *ch = container_of(c, struct scpi_chan, cl);
struct scpi_shared_mem *mem = (struct scpi_shared_mem *)ch->tx_payload;
- if (t->tx_buf)
- memcpy_toio(mem->payload, t->tx_buf, t->tx_len);
+ if (t->tx_buf) {
+ if (scpi_info->is_legacy)
+ memcpy_toio(ch->tx_payload, t->tx_buf, t->tx_len);
+ else
+ memcpy_toio(mem->payload, t->tx_buf, t->tx_len);
+ }
+
if (t->rx_buf) {
if (!(++ch->token))
++ch->token;
@@ -319,7 +475,9 @@ static void scpi_tx_prepare(struct mbox_client *c, void *msg)
list_add_tail(&t->node, &ch->rx_pending);
spin_unlock_irqrestore(&ch->rx_lock, flags);
}
- mem->command = cpu_to_le32(t->cmd);
+
+ if (!scpi_info->is_legacy)
+ mem->command = cpu_to_le32(t->cmd);
}
static struct scpi_xfer *get_scpi_xfer(struct scpi_chan *ch)
@@ -344,23 +502,38 @@ static void put_scpi_xfer(struct scpi_xfer *t, struct scpi_chan *ch)
mutex_unlock(&ch->xfers_lock);
}
-static int scpi_send_message(u8 cmd, void *tx_buf, unsigned int tx_len,
+static int scpi_send_message(u8 idx, void *tx_buf, unsigned int tx_len,
void *rx_buf, unsigned int rx_len)
{
int ret;
u8 chan;
+ u8 cmd;
struct scpi_xfer *msg;
struct scpi_chan *scpi_chan;
- chan = atomic_inc_return(&scpi_info->next_chan) % scpi_info->num_chans;
+ if (scpi_info->commands[idx] < 0)
+ return -EOPNOTSUPP;
+
+ cmd = scpi_info->commands[idx];
+
+ if (scpi_info->is_legacy)
+ chan = test_bit(cmd, scpi_info->cmd_priority) ? 1 : 0;
+ else
+ chan = atomic_inc_return(&scpi_info->next_chan) %
+ scpi_info->num_chans;
scpi_chan = scpi_info->channels + chan;
msg = get_scpi_xfer(scpi_chan);
if (!msg)
return -ENOMEM;
- msg->slot = BIT(SCPI_SLOT);
- msg->cmd = PACK_SCPI_CMD(cmd, tx_len);
+ if (scpi_info->is_legacy) {
+ msg->cmd = PACK_LEGACY_SCPI_CMD(cmd, tx_len);
+ msg->slot = msg->cmd;
+ } else {
+ msg->slot = BIT(SCPI_SLOT);
+ msg->cmd = PACK_SCPI_CMD(cmd, tx_len);
+ }
msg->tx_buf = tx_buf;
msg->tx_len = tx_len;
msg->rx_buf = rx_buf;
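
Read together with the legacy_hpriority_cmds table earlier in this patch,
the legacy branch above routes commands by priority rather than
round-robin. For example (illustrative, assuming legacy mode):

    /* listed in legacy_hpriority_cmds -> high-priority channel 1 */
    chan = test_bit(LEGACY_SCPI_CMD_SET_CLOCK_VALUE, scpi_info->cmd_priority) ? 1 : 0;
    /* not listed -> low-priority channel 0 */
    chan = test_bit(LEGACY_SCPI_CMD_SENSOR_VALUE, scpi_info->cmd_priority) ? 1 : 0;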
@@ -397,7 +570,7 @@ scpi_clk_get_range(u16 clk_id, unsigned long *min, unsigned long *max)
struct clk_get_info clk;
__le16 le_clk_id = cpu_to_le16(clk_id);
- ret = scpi_send_message(SCPI_CMD_GET_CLOCK_INFO, &le_clk_id,
+ ret = scpi_send_message(CMD_GET_CLOCK_INFO, &le_clk_id,
sizeof(le_clk_id), &clk, sizeof(clk));
if (!ret) {
*min = le32_to_cpu(clk.min_rate);
@@ -412,8 +585,9 @@ static unsigned long scpi_clk_get_val(u16 clk_id)
struct clk_get_value clk;
__le16 le_clk_id = cpu_to_le16(clk_id);
- ret = scpi_send_message(SCPI_CMD_GET_CLOCK_VALUE, &le_clk_id,
+ ret = scpi_send_message(CMD_GET_CLOCK_VALUE, &le_clk_id,
sizeof(le_clk_id), &clk, sizeof(clk));
+
return ret ? ret : le32_to_cpu(clk.rate);
}
@@ -425,7 +599,19 @@ static int scpi_clk_set_val(u16 clk_id, unsigned long rate)
.rate = cpu_to_le32(rate)
};
- return scpi_send_message(SCPI_CMD_SET_CLOCK_VALUE, &clk, sizeof(clk),
+ return scpi_send_message(CMD_SET_CLOCK_VALUE, &clk, sizeof(clk),
+ &stat, sizeof(stat));
+}
+
+static int legacy_scpi_clk_set_val(u16 clk_id, unsigned long rate)
+{
+ int stat;
+ struct legacy_clk_set_value clk = {
+ .id = cpu_to_le16(clk_id),
+ .rate = cpu_to_le32(rate)
+ };
+
+ return scpi_send_message(CMD_SET_CLOCK_VALUE, &clk, sizeof(clk),
&stat, sizeof(stat));
}
@@ -434,8 +620,9 @@ static int scpi_dvfs_get_idx(u8 domain)
int ret;
u8 dvfs_idx;
- ret = scpi_send_message(SCPI_CMD_GET_DVFS, &domain, sizeof(domain),
+ ret = scpi_send_message(CMD_GET_DVFS, &domain, sizeof(domain),
&dvfs_idx, sizeof(dvfs_idx));
+
return ret ? ret : dvfs_idx;
}
@@ -444,7 +631,7 @@ static int scpi_dvfs_set_idx(u8 domain, u8 index)
int stat;
struct dvfs_set dvfs = {domain, index};
- return scpi_send_message(SCPI_CMD_SET_DVFS, &dvfs, sizeof(dvfs),
+ return scpi_send_message(CMD_SET_DVFS, &dvfs, sizeof(dvfs),
&stat, sizeof(stat));
}
@@ -468,9 +655,8 @@ static struct scpi_dvfs_info *scpi_dvfs_get_info(u8 domain)
if (scpi_info->dvfs[domain]) /* data already populated */
return scpi_info->dvfs[domain];
- ret = scpi_send_message(SCPI_CMD_GET_DVFS_INFO, &domain, sizeof(domain),
+ ret = scpi_send_message(CMD_GET_DVFS_INFO, &domain, sizeof(domain),
&buf, sizeof(buf));
-
if (ret)
return ERR_PTR(ret);
@@ -503,7 +689,7 @@ static int scpi_sensor_get_capability(u16 *sensors)
struct sensor_capabilities cap_buf;
int ret;
- ret = scpi_send_message(SCPI_CMD_SENSOR_CAPABILITIES, NULL, 0, &cap_buf,
+ ret = scpi_send_message(CMD_SENSOR_CAPABILITIES, NULL, 0, &cap_buf,
sizeof(cap_buf));
if (!ret)
*sensors = le16_to_cpu(cap_buf.sensors);
@@ -517,7 +703,7 @@ static int scpi_sensor_get_info(u16 sensor_id, struct scpi_sensor_info *info)
struct _scpi_sensor_info _info;
int ret;
- ret = scpi_send_message(SCPI_CMD_SENSOR_INFO, &id, sizeof(id),
+ ret = scpi_send_message(CMD_SENSOR_INFO, &id, sizeof(id),
&_info, sizeof(_info));
if (!ret) {
memcpy(info, &_info, sizeof(*info));
@@ -533,7 +719,7 @@ static int scpi_sensor_get_value(u16 sensor, u64 *val)
struct sensor_value buf;
int ret;
- ret = scpi_send_message(SCPI_CMD_SENSOR_VALUE, &id, sizeof(id),
+ ret = scpi_send_message(CMD_SENSOR_VALUE, &id, sizeof(id),
&buf, sizeof(buf));
if (!ret)
*val = (u64)le32_to_cpu(buf.hi_val) << 32 |
@@ -548,7 +734,7 @@ static int scpi_device_get_power_state(u16 dev_id)
u8 pstate;
__le16 id = cpu_to_le16(dev_id);
- ret = scpi_send_message(SCPI_CMD_GET_DEVICE_PWR_STATE, &id,
+ ret = scpi_send_message(CMD_GET_DEVICE_PWR_STATE, &id,
sizeof(id), &pstate, sizeof(pstate));
return ret ? ret : pstate;
}
@@ -561,7 +747,7 @@ static int scpi_device_set_power_state(u16 dev_id, u8 pstate)
.pstate = pstate,
};
- return scpi_send_message(SCPI_CMD_SET_DEVICE_PWR_STATE, &dev_set,
+ return scpi_send_message(CMD_SET_DEVICE_PWR_STATE, &dev_set,
sizeof(dev_set), &stat, sizeof(stat));
}
@@ -591,12 +777,16 @@ static int scpi_init_versions(struct scpi_drvinfo *info)
int ret;
struct scp_capabilities caps;
- ret = scpi_send_message(SCPI_CMD_SCPI_CAPABILITIES, NULL, 0,
+ ret = scpi_send_message(CMD_SCPI_CAPABILITIES, NULL, 0,
&caps, sizeof(caps));
if (!ret) {
info->protocol_version = le32_to_cpu(caps.protocol_version);
info->firmware_version = le32_to_cpu(caps.platform_version);
}
+ /* Ignore error if not implemented */
+ if (scpi_info->is_legacy && ret == -EOPNOTSUPP)
+ return 0;
+
return ret;
}
@@ -681,6 +871,11 @@ static int scpi_alloc_xfer_list(struct device *dev, struct scpi_chan *ch)
return 0;
}
+static const struct of_device_id legacy_scpi_of_match[] = {
+ {.compatible = "arm,scpi-pre-1.0"},
+ {},
+};
+
static int scpi_probe(struct platform_device *pdev)
{
int count, idx, ret;
@@ -693,6 +888,9 @@ static int scpi_probe(struct platform_device *pdev)
if (!scpi_info)
return -ENOMEM;
+ if (of_match_device(legacy_scpi_of_match, &pdev->dev))
+ scpi_info->is_legacy = true;
+
count = of_count_phandle_with_args(np, "mboxes", "#mbox-cells");
if (count < 0) {
dev_err(dev, "no mboxes property in '%s'\n", np->full_name);
@@ -755,8 +953,21 @@ err:
scpi_info->channels = scpi_chan;
scpi_info->num_chans = count;
+ scpi_info->commands = scpi_std_commands;
+
platform_set_drvdata(pdev, scpi_info);
+ if (scpi_info->is_legacy) {
+ /* Replace with legacy variants */
+ scpi_ops.clk_set_val = legacy_scpi_clk_set_val;
+ scpi_info->commands = scpi_legacy_commands;
+
+ /* Fill priority bitmap */
+ for (idx = 0; idx < ARRAY_SIZE(legacy_hpriority_cmds); idx++)
+ set_bit(legacy_hpriority_cmds[idx],
+ scpi_info->cmd_priority);
+ }
+
ret = scpi_init_versions(scpi_info);
if (ret) {
dev_err(dev, "incorrect or no SCP firmware found\n");
@@ -781,6 +992,7 @@ err:
static const struct of_device_id scpi_of_match[] = {
{.compatible = "arm,scpi"},
+ {.compatible = "arm,scpi-pre-1.0"},
{},
};
diff --git a/drivers/firmware/psci.c b/drivers/firmware/psci.c
index 8263429e21b8..6c60a5087caf 100644
--- a/drivers/firmware/psci.c
+++ b/drivers/firmware/psci.c
@@ -630,7 +630,7 @@ int __init psci_dt_init(void)
np = of_find_matching_node_and_match(NULL, psci_of_match, &matched_np);
- if (!np)
+ if (!np || !of_device_is_available(np))
return -ENODEV;
init_fn = (psci_initcall_t)matched_np->data;
diff --git a/drivers/firmware/psci_checker.c b/drivers/firmware/psci_checker.c
new file mode 100644
index 000000000000..44bdb78f837b
--- /dev/null
+++ b/drivers/firmware/psci_checker.c
@@ -0,0 +1,490 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Copyright (C) 2016 ARM Limited
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/atomic.h>
+#include <linux/completion.h>
+#include <linux/cpu.h>
+#include <linux/cpuidle.h>
+#include <linux/cpu_pm.h>
+#include <linux/kernel.h>
+#include <linux/kthread.h>
+#include <linux/module.h>
+#include <linux/preempt.h>
+#include <linux/psci.h>
+#include <linux/slab.h>
+#include <linux/tick.h>
+#include <linux/topology.h>
+
+#include <asm/cpuidle.h>
+
+#include <uapi/linux/psci.h>
+
+#define NUM_SUSPEND_CYCLE (10)
+
+static unsigned int nb_available_cpus;
+static int tos_resident_cpu = -1;
+
+static atomic_t nb_active_threads;
+static struct completion suspend_threads_started =
+ COMPLETION_INITIALIZER(suspend_threads_started);
+static struct completion suspend_threads_done =
+ COMPLETION_INITIALIZER(suspend_threads_done);
+
+/*
+ * We assume that PSCI operations are used if they are available. This is not
+ * necessarily true on arm64, since the decision is based on the
+ * "enable-method" property of each CPU in the DT, but given that there is no
+ * arch-specific way to check this, we assume that the DT is sensible.
+ */
+static int psci_ops_check(void)
+{
+ int migrate_type = -1;
+ int cpu;
+
+ if (!(psci_ops.cpu_off && psci_ops.cpu_on && psci_ops.cpu_suspend)) {
+ pr_warn("Missing PSCI operations, aborting tests\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (psci_ops.migrate_info_type)
+ migrate_type = psci_ops.migrate_info_type();
+
+ if (migrate_type == PSCI_0_2_TOS_UP_MIGRATE ||
+ migrate_type == PSCI_0_2_TOS_UP_NO_MIGRATE) {
+ /* There is a UP Trusted OS, find on which core it resides. */
+ for_each_online_cpu(cpu)
+ if (psci_tos_resident_on(cpu)) {
+ tos_resident_cpu = cpu;
+ break;
+ }
+ if (tos_resident_cpu == -1)
+ pr_warn("UP Trusted OS resides on no online CPU\n");
+ }
+
+ return 0;
+}
+
+static int find_clusters(const struct cpumask *cpus,
+ const struct cpumask **clusters)
+{
+ unsigned int nb = 0;
+ cpumask_var_t tmp;
+
+ if (!alloc_cpumask_var(&tmp, GFP_KERNEL))
+ return -ENOMEM;
+ cpumask_copy(tmp, cpus);
+
+ while (!cpumask_empty(tmp)) {
+ const struct cpumask *cluster =
+ topology_core_cpumask(cpumask_any(tmp));
+
+ clusters[nb++] = cluster;
+ cpumask_andnot(tmp, tmp, cluster);
+ }
+
+ free_cpumask_var(tmp);
+ return nb;
+}
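
A usage sketch for find_clusters(), assuming a hypothetical 8-CPU system
made of two 4-CPU clusters:

    const struct cpumask *clusters[8];      /* up to nb_available_cpus entries */
    int nb = find_clusters(cpu_online_mask, clusters);
    /* nb == 2; clusters[0] covers CPUs 0-3, clusters[1] covers CPUs 4-7 */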
+
+/*
+ * offlined_cpus is a temporary array but passing it as an argument avoids
+ * multiple allocations.
+ */
+static unsigned int down_and_up_cpus(const struct cpumask *cpus,
+ struct cpumask *offlined_cpus)
+{
+ int cpu;
+ int err = 0;
+
+ cpumask_clear(offlined_cpus);
+
+ /* Try to power down all CPUs in the mask. */
+ for_each_cpu(cpu, cpus) {
+ int ret = cpu_down(cpu);
+
+ /*
+ * cpu_down() checks the number of online CPUs before the TOS
+ * resident CPU.
+ */
+ if (cpumask_weight(offlined_cpus) + 1 == nb_available_cpus) {
+ if (ret != -EBUSY) {
+ pr_err("Unexpected return code %d while trying "
+ "to power down last online CPU %d\n",
+ ret, cpu);
+ ++err;
+ }
+ } else if (cpu == tos_resident_cpu) {
+ if (ret != -EPERM) {
+ pr_err("Unexpected return code %d while trying "
+ "to power down TOS resident CPU %d\n",
+ ret, cpu);
+ ++err;
+ }
+ } else if (ret != 0) {
+ pr_err("Error occurred (%d) while trying "
+ "to power down CPU %d\n", ret, cpu);
+ ++err;
+ }
+
+ if (ret == 0)
+ cpumask_set_cpu(cpu, offlined_cpus);
+ }
+
+ /* Try to power up all the CPUs that have been offlined. */
+ for_each_cpu(cpu, offlined_cpus) {
+ int ret = cpu_up(cpu);
+
+ if (ret != 0) {
+ pr_err("Error occurred (%d) while trying "
+ "to power up CPU %d\n", ret, cpu);
+ ++err;
+ } else {
+ cpumask_clear_cpu(cpu, offlined_cpus);
+ }
+ }
+
+ /*
+ * Something went bad at some point and some CPUs could not be turned
+ * back on.
+ */
+ WARN_ON(!cpumask_empty(offlined_cpus) ||
+ num_online_cpus() != nb_available_cpus);
+
+ return err;
+}
+
+static int hotplug_tests(void)
+{
+ int err;
+ cpumask_var_t offlined_cpus;
+ int i, nb_cluster;
+ const struct cpumask **clusters;
+ char *page_buf;
+
+ err = -ENOMEM;
+ if (!alloc_cpumask_var(&offlined_cpus, GFP_KERNEL))
+ return err;
+ /* We may have up to nb_available_cpus clusters. */
+ clusters = kmalloc_array(nb_available_cpus, sizeof(*clusters),
+ GFP_KERNEL);
+ if (!clusters)
+ goto out_free_cpus;
+ page_buf = (char *)__get_free_page(GFP_KERNEL);
+ if (!page_buf)
+ goto out_free_clusters;
+
+ err = 0;
+ nb_cluster = find_clusters(cpu_online_mask, clusters);
+
+ /*
+ * Of course the last CPU cannot be powered down and cpu_down() should
+ * refuse doing that.
+ */
+ pr_info("Trying to turn off and on again all CPUs\n");
+ err += down_and_up_cpus(cpu_online_mask, offlined_cpus);
+
+ /*
+ * Take down CPUs by cluster this time. When the last CPU is turned
+ * off, the cluster itself should shut down.
+ */
+ for (i = 0; i < nb_cluster; ++i) {
+ int cluster_id =
+ topology_physical_package_id(cpumask_any(clusters[i]));
+ ssize_t len = cpumap_print_to_pagebuf(true, page_buf,
+ clusters[i]);
+ /* Remove trailing newline. */
+ page_buf[len - 1] = '\0';
+ pr_info("Trying to turn off and on again cluster %d "
+ "(CPUs %s)\n", cluster_id, page_buf);
+ err += down_and_up_cpus(clusters[i], offlined_cpus);
+ }
+
+ free_page((unsigned long)page_buf);
+out_free_clusters:
+ kfree(clusters);
+out_free_cpus:
+ free_cpumask_var(offlined_cpus);
+ return err;
+}
+
+static void dummy_callback(unsigned long ignored) {}
+
+static int suspend_cpu(int index, bool broadcast)
+{
+ int ret;
+
+ arch_cpu_idle_enter();
+
+ if (broadcast) {
+ /*
+ * The local timer will be shut down, we need to enter tick
+ * broadcast.
+ */
+ ret = tick_broadcast_enter();
+ if (ret) {
+ /*
+ * In the absence of hardware broadcast mechanism,
+ * this CPU might be used to broadcast wakeups, which
+ * may be why entering tick broadcast has failed.
+ * There is little the kernel can do to work around
+ * that, so enter WFI instead (idle state 0).
+ */
+ cpu_do_idle();
+ ret = 0;
+ goto out_arch_exit;
+ }
+ }
+
+ /*
+ * Replicate the common ARM cpuidle enter function
+ * (arm_enter_idle_state).
+ */
+ ret = CPU_PM_CPU_IDLE_ENTER(arm_cpuidle_suspend, index);
+
+ if (broadcast)
+ tick_broadcast_exit();
+
+out_arch_exit:
+ arch_cpu_idle_exit();
+
+ return ret;
+}
+
+static int suspend_test_thread(void *arg)
+{
+ int cpu = (long)arg;
+ int i, nb_suspend = 0, nb_shallow_sleep = 0, nb_err = 0;
+ struct sched_param sched_priority = { .sched_priority = MAX_RT_PRIO-1 };
+ struct cpuidle_device *dev;
+ struct cpuidle_driver *drv;
+ /* No need for an actual callback, we just want to wake up the CPU. */
+ struct timer_list wakeup_timer =
+ TIMER_INITIALIZER(dummy_callback, 0, 0);
+
+ /* Wait for the main thread to give the start signal. */
+ wait_for_completion(&suspend_threads_started);
+
+ /* Set maximum priority to preempt all other threads on this CPU. */
+ if (sched_setscheduler_nocheck(current, SCHED_FIFO, &sched_priority))
+ pr_warn("Failed to set suspend thread scheduler on CPU %d\n",
+ cpu);
+
+ dev = this_cpu_read(cpuidle_devices);
+ drv = cpuidle_get_cpu_driver(dev);
+
+ pr_info("CPU %d entering suspend cycles, states 1 through %d\n",
+ cpu, drv->state_count - 1);
+
+ for (i = 0; i < NUM_SUSPEND_CYCLE; ++i) {
+ int index;
+ /*
+ * Test all possible states, except 0 (which is usually WFI and
+ * doesn't use PSCI).
+ */
+ for (index = 1; index < drv->state_count; ++index) {
+ struct cpuidle_state *state = &drv->states[index];
+ bool broadcast = state->flags & CPUIDLE_FLAG_TIMER_STOP;
+ int ret;
+
+ /*
+ * Set the timer to wake this CPU up in some time (which
+ * should be largely sufficient for entering suspend).
+ * If the local tick is disabled when entering suspend,
+ * suspend_cpu() takes care of switching to a broadcast
+ * tick, so the timer will still wake us up.
+ */
+ mod_timer(&wakeup_timer, jiffies +
+ usecs_to_jiffies(state->target_residency));
+
+ /* IRQs must be disabled during suspend operations. */
+ local_irq_disable();
+
+ ret = suspend_cpu(index, broadcast);
+
+ /*
+ * We have woken up. Re-enable IRQs to handle any
+ * pending interrupt, do not wait until the end of the
+ * loop.
+ */
+ local_irq_enable();
+
+ if (ret == index) {
+ ++nb_suspend;
+ } else if (ret >= 0) {
+ /* We did not enter the expected state. */
+ ++nb_shallow_sleep;
+ } else {
+ pr_err("Failed to suspend CPU %d: error %d "
+ "(requested state %d, cycle %d)\n",
+ cpu, ret, index, i);
+ ++nb_err;
+ }
+ }
+ }
+
+ /*
+ * Disable the timer to make sure that the timer will not trigger
+ * later.
+ */
+ del_timer(&wakeup_timer);
+
+ if (atomic_dec_return_relaxed(&nb_active_threads) == 0)
+ complete(&suspend_threads_done);
+
+ /* Give up on RT scheduling and wait for termination. */
+ sched_priority.sched_priority = 0;
+ if (sched_setscheduler_nocheck(current, SCHED_NORMAL, &sched_priority))
+ pr_warn("Failed to set suspend thread scheduler on CPU %d\n",
+ cpu);
+ for (;;) {
+ /* Needs to be set first to avoid missing a wakeup. */
+ set_current_state(TASK_INTERRUPTIBLE);
+ if (kthread_should_stop()) {
+ __set_current_state(TASK_RUNNING);
+ break;
+ }
+ schedule();
+ }
+
+ pr_info("CPU %d suspend test results: success %d, shallow states %d, errors %d\n",
+ cpu, nb_suspend, nb_shallow_sleep, nb_err);
+
+ return nb_err;
+}
+
+static int suspend_tests(void)
+{
+ int i, cpu, err = 0;
+ struct task_struct **threads;
+ int nb_threads = 0;
+
+ threads = kmalloc_array(nb_available_cpus, sizeof(*threads),
+ GFP_KERNEL);
+ if (!threads)
+ return -ENOMEM;
+
+ /*
+ * Stop cpuidle to prevent the idle tasks from entering a deep sleep
+ * mode, as it might interfere with the suspend threads on other CPUs.
+ * This does not prevent the suspend threads from using cpuidle (only
+ * the idle tasks check this status). Take the idle lock so that
+ * the cpuidle driver and device look-up can be carried out safely.
+ */
+ cpuidle_pause_and_lock();
+
+ for_each_online_cpu(cpu) {
+ struct task_struct *thread;
+ /* Check that cpuidle is available on that CPU. */
+ struct cpuidle_device *dev = per_cpu(cpuidle_devices, cpu);
+ struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);
+
+ if (!dev || !drv) {
+ pr_warn("cpuidle not available on CPU %d, ignoring\n",
+ cpu);
+ continue;
+ }
+
+ thread = kthread_create_on_cpu(suspend_test_thread,
+ (void *)(long)cpu, cpu,
+ "psci_suspend_test");
+ if (IS_ERR(thread))
+ pr_err("Failed to create kthread on CPU %d\n", cpu);
+ else
+ threads[nb_threads++] = thread;
+ }
+
+ if (nb_threads < 1) {
+ err = -ENODEV;
+ goto out;
+ }
+
+ atomic_set(&nb_active_threads, nb_threads);
+
+ /*
+ * Wake up the suspend threads. To avoid the main thread being preempted
+ * before all the threads have been unparked, the suspend threads will
+ * wait for the completion of suspend_threads_started.
+ */
+ for (i = 0; i < nb_threads; ++i)
+ wake_up_process(threads[i]);
+ complete_all(&suspend_threads_started);
+
+ wait_for_completion(&suspend_threads_done);
+
+
+ /* Stop and destroy all threads, get return status. */
+ for (i = 0; i < nb_threads; ++i)
+ err += kthread_stop(threads[i]);
+ out:
+ cpuidle_resume_and_unlock();
+ kfree(threads);
+ return err;
+}
+
+static int __init psci_checker(void)
+{
+ int ret;
+
+ /*
+ * Since we're in an initcall, we assume that all the CPUs that can
+ * be onlined have been onlined.
+ *
+ * The tests assume that hotplug is enabled but nobody else is using it,
+ * otherwise the results will be unpredictable. However, since there
+ * is no userspace yet in initcalls, that should be fine, as long as
+ * no torture test is running at the same time (see Kconfig).
+ */
+ nb_available_cpus = num_online_cpus();
+
+ /* Check PSCI operations are set up and working. */
+ ret = psci_ops_check();
+ if (ret)
+ return ret;
+
+ pr_info("PSCI checker started using %u CPUs\n", nb_available_cpus);
+
+ pr_info("Starting hotplug tests\n");
+ ret = hotplug_tests();
+ if (ret == 0)
+ pr_info("Hotplug tests passed OK\n");
+ else if (ret > 0)
+ pr_err("%d error(s) encountered in hotplug tests\n", ret);
+ else {
+ pr_err("Out of memory\n");
+ return ret;
+ }
+
+ pr_info("Starting suspend tests (%d cycles per state)\n",
+ NUM_SUSPEND_CYCLE);
+ ret = suspend_tests();
+ if (ret == 0)
+ pr_info("Suspend tests passed OK\n");
+ else if (ret > 0)
+ pr_err("%d error(s) encountered in suspend tests\n", ret);
+ else {
+ switch (ret) {
+ case -ENOMEM:
+ pr_err("Out of memory\n");
+ break;
+ case -ENODEV:
+ pr_warn("Could not start suspend tests on any CPU\n");
+ break;
+ }
+ }
+
+ pr_info("PSCI checker completed\n");
+ return ret < 0 ? ret : 0;
+}
+late_initcall(psci_checker);
diff --git a/drivers/firmware/qcom_scm.c b/drivers/firmware/qcom_scm.c
index d95c70227c05..893f953eaccf 100644
--- a/drivers/firmware/qcom_scm.c
+++ b/drivers/firmware/qcom_scm.c
@@ -28,6 +28,10 @@
#include "qcom_scm.h"
+#define SCM_HAS_CORE_CLK BIT(0)
+#define SCM_HAS_IFACE_CLK BIT(1)
+#define SCM_HAS_BUS_CLK BIT(2)
+
struct qcom_scm {
struct device *dev;
struct clk *core_clk;
@@ -323,32 +327,40 @@ EXPORT_SYMBOL(qcom_scm_is_available);
static int qcom_scm_probe(struct platform_device *pdev)
{
struct qcom_scm *scm;
+ unsigned long clks;
int ret;
scm = devm_kzalloc(&pdev->dev, sizeof(*scm), GFP_KERNEL);
if (!scm)
return -ENOMEM;
- scm->core_clk = devm_clk_get(&pdev->dev, "core");
- if (IS_ERR(scm->core_clk)) {
- if (PTR_ERR(scm->core_clk) == -EPROBE_DEFER)
+ clks = (unsigned long)of_device_get_match_data(&pdev->dev);
+ if (clks & SCM_HAS_CORE_CLK) {
+ scm->core_clk = devm_clk_get(&pdev->dev, "core");
+ if (IS_ERR(scm->core_clk)) {
+ if (PTR_ERR(scm->core_clk) != -EPROBE_DEFER)
+ dev_err(&pdev->dev,
+ "failed to acquire core clk\n");
return PTR_ERR(scm->core_clk);
-
- scm->core_clk = NULL;
+ }
}
- if (of_device_is_compatible(pdev->dev.of_node, "qcom,scm")) {
+ if (clks & SCM_HAS_IFACE_CLK) {
scm->iface_clk = devm_clk_get(&pdev->dev, "iface");
if (IS_ERR(scm->iface_clk)) {
if (PTR_ERR(scm->iface_clk) != -EPROBE_DEFER)
- dev_err(&pdev->dev, "failed to acquire iface clk\n");
+ dev_err(&pdev->dev,
+ "failed to acquire iface clk\n");
return PTR_ERR(scm->iface_clk);
}
+ }
+ if (clks & SCM_HAS_BUS_CLK) {
scm->bus_clk = devm_clk_get(&pdev->dev, "bus");
if (IS_ERR(scm->bus_clk)) {
if (PTR_ERR(scm->bus_clk) != -EPROBE_DEFER)
- dev_err(&pdev->dev, "failed to acquire bus clk\n");
+ dev_err(&pdev->dev,
+ "failed to acquire bus clk\n");
return PTR_ERR(scm->bus_clk);
}
}
@@ -356,7 +368,9 @@ static int qcom_scm_probe(struct platform_device *pdev)
scm->reset.ops = &qcom_scm_pas_reset_ops;
scm->reset.nr_resets = 1;
scm->reset.of_node = pdev->dev.of_node;
- reset_controller_register(&scm->reset);
+ ret = devm_reset_controller_register(&pdev->dev, &scm->reset);
+ if (ret)
+ return ret;
/* vote for max clk rate for highest performance */
ret = clk_set_rate(scm->core_clk, INT_MAX);
@@ -372,10 +386,23 @@ static int qcom_scm_probe(struct platform_device *pdev)
}
static const struct of_device_id qcom_scm_dt_match[] = {
- { .compatible = "qcom,scm-apq8064",},
- { .compatible = "qcom,scm-msm8660",},
- { .compatible = "qcom,scm-msm8960",},
- { .compatible = "qcom,scm",},
+ { .compatible = "qcom,scm-apq8064",
+ .data = (void *) SCM_HAS_CORE_CLK,
+ },
+ { .compatible = "qcom,scm-msm8660",
+ .data = (void *) SCM_HAS_CORE_CLK,
+ },
+ { .compatible = "qcom,scm-msm8960",
+ .data = (void *) SCM_HAS_CORE_CLK,
+ },
+ { .compatible = "qcom,scm-msm8996",
+ .data = NULL, /* no clocks */
+ },
+ { .compatible = "qcom,scm",
+ .data = (void *)(SCM_HAS_CORE_CLK
+ | SCM_HAS_IFACE_CLK
+ | SCM_HAS_BUS_CLK),
+ },
{}
};
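
The bitmask stored in .data above is recovered in probe; as an
illustration of the mapping:

    clks = (unsigned long)of_device_get_match_data(&pdev->dev);
    /* "qcom,scm"         -> SCM_HAS_CORE_CLK | SCM_HAS_IFACE_CLK | SCM_HAS_BUS_CLK */
    /* "qcom,scm-msm8996" -> 0, so no clocks are requested */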
diff --git a/drivers/firmware/tegra/Kconfig b/drivers/firmware/tegra/Kconfig
new file mode 100644
index 000000000000..ff2730d5c468
--- /dev/null
+++ b/drivers/firmware/tegra/Kconfig
@@ -0,0 +1,25 @@
+menu "Tegra firmware driver"
+
+config TEGRA_IVC
+ bool "Tegra IVC protocol"
+ depends on ARCH_TEGRA
+ help
+ IVC (Inter-VM Communication) protocol is part of the IPC
+ (Inter Processor Communication) framework on Tegra. It maintains the
+ data and the different communication channels in SysRAM or RAM and
+ keeps the content in synchronization between the host CPU and remote
+ processors.
+
+config TEGRA_BPMP
+ bool "Tegra BPMP driver"
+ depends on ARCH_TEGRA && TEGRA_HSP_MBOX && TEGRA_IVC
+ help
+ BPMP (Boot and Power Management Processor) is designed to off-load
+ PM functions, including clock/DVFS/thermal/power, from the CPU. It
+ needs HSP as the HW synchronization and notification module and the
+ IVC module as the message communication protocol.
+
+ This driver manages the IPC interface between host CPU and the
+ firmware running on BPMP.
+
+endmenu
diff --git a/drivers/firmware/tegra/Makefile b/drivers/firmware/tegra/Makefile
new file mode 100644
index 000000000000..e34a2f79e1ad
--- /dev/null
+++ b/drivers/firmware/tegra/Makefile
@@ -0,0 +1,2 @@
+obj-$(CONFIG_TEGRA_BPMP) += bpmp.o
+obj-$(CONFIG_TEGRA_IVC) += ivc.o
diff --git a/drivers/firmware/tegra/bpmp.c b/drivers/firmware/tegra/bpmp.c
new file mode 100644
index 000000000000..4ff02d310868
--- /dev/null
+++ b/drivers/firmware/tegra/bpmp.c
@@ -0,0 +1,868 @@
+/*
+ * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include <linux/clk/tegra.h>
+#include <linux/genalloc.h>
+#include <linux/mailbox_client.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/semaphore.h>
+
+#include <soc/tegra/bpmp.h>
+#include <soc/tegra/bpmp-abi.h>
+#include <soc/tegra/ivc.h>
+
+#define MSG_ACK BIT(0)
+#define MSG_RING BIT(1)
+
+static inline struct tegra_bpmp *
+mbox_client_to_bpmp(struct mbox_client *client)
+{
+ return container_of(client, struct tegra_bpmp, mbox.client);
+}
+
+struct tegra_bpmp *tegra_bpmp_get(struct device *dev)
+{
+ struct platform_device *pdev;
+ struct tegra_bpmp *bpmp;
+ struct device_node *np;
+
+ np = of_parse_phandle(dev->of_node, "nvidia,bpmp", 0);
+ if (!np)
+ return ERR_PTR(-ENOENT);
+
+ pdev = of_find_device_by_node(np);
+ if (!pdev) {
+ bpmp = ERR_PTR(-ENODEV);
+ goto put;
+ }
+
+ bpmp = platform_get_drvdata(pdev);
+ if (!bpmp) {
+ bpmp = ERR_PTR(-EPROBE_DEFER);
+ put_device(&pdev->dev);
+ goto put;
+ }
+
+put:
+ of_node_put(np);
+ return bpmp;
+}
+EXPORT_SYMBOL_GPL(tegra_bpmp_get);
+
+void tegra_bpmp_put(struct tegra_bpmp *bpmp)
+{
+ if (bpmp)
+ put_device(bpmp->dev);
+}
+EXPORT_SYMBOL_GPL(tegra_bpmp_put);
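
A minimal consumer sketch (hypothetical client driver) for the get/put
pair above, resolving the BPMP through a "nvidia,bpmp" phandle in the
client's device tree node:

    struct tegra_bpmp *bpmp;

    bpmp = tegra_bpmp_get(&pdev->dev);
    if (IS_ERR(bpmp))
            return PTR_ERR(bpmp);   /* -EPROBE_DEFER until the BPMP has probed */

    /* ... exchange messages with the firmware ... */

    tegra_bpmp_put(bpmp);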
+
+static int tegra_bpmp_channel_get_index(struct tegra_bpmp_channel *channel)
+{
+ return channel - channel->bpmp->channels;
+}
+
+static int
+tegra_bpmp_channel_get_thread_index(struct tegra_bpmp_channel *channel)
+{
+ struct tegra_bpmp *bpmp = channel->bpmp;
+ unsigned int offset, count;
+ int index;
+
+ offset = bpmp->soc->channels.thread.offset;
+ count = bpmp->soc->channels.thread.count;
+
+ index = tegra_bpmp_channel_get_index(channel);
+ if (index < 0)
+ return index;
+
+ if (index < offset || index >= offset + count)
+ return -EINVAL;
+
+ return index - offset;
+}
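
A worked example of the index math, using illustrative per-SoC values of
thread.offset == 6 and thread.count == 7:

    tegra_bpmp_channel_get_thread_index(&bpmp->channels[8]);  /* 8 - 6 == 2 */
    tegra_bpmp_channel_get_thread_index(&bpmp->channels[3]);  /* -EINVAL: below offset */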
+
+static struct tegra_bpmp_channel *
+tegra_bpmp_channel_get_thread(struct tegra_bpmp *bpmp, unsigned int index)
+{
+ unsigned int offset = bpmp->soc->channels.thread.offset;
+ unsigned int count = bpmp->soc->channels.thread.count;
+
+ if (index >= count)
+ return NULL;
+
+ return &bpmp->channels[offset + index];
+}
+
+static struct tegra_bpmp_channel *
+tegra_bpmp_channel_get_tx(struct tegra_bpmp *bpmp)
+{
+ unsigned int offset = bpmp->soc->channels.cpu_tx.offset;
+
+ return &bpmp->channels[offset + smp_processor_id()];
+}
+
+static struct tegra_bpmp_channel *
+tegra_bpmp_channel_get_rx(struct tegra_bpmp *bpmp)
+{
+ unsigned int offset = bpmp->soc->channels.cpu_rx.offset;
+
+ return &bpmp->channels[offset];
+}
+
+static bool tegra_bpmp_message_valid(const struct tegra_bpmp_message *msg)
+{
+ return (msg->tx.size <= MSG_DATA_MIN_SZ) &&
+ (msg->rx.size <= MSG_DATA_MIN_SZ) &&
+ (msg->tx.size == 0 || msg->tx.data) &&
+ (msg->rx.size == 0 || msg->rx.data);
+}
+
+static bool tegra_bpmp_master_acked(struct tegra_bpmp_channel *channel)
+{
+ void *frame;
+
+ frame = tegra_ivc_read_get_next_frame(channel->ivc);
+ if (IS_ERR(frame)) {
+ channel->ib = NULL;
+ return false;
+ }
+
+ channel->ib = frame;
+
+ return true;
+}
+
+static int tegra_bpmp_wait_ack(struct tegra_bpmp_channel *channel)
+{
+ unsigned long timeout = channel->bpmp->soc->channels.cpu_tx.timeout;
+ ktime_t end;
+
+ end = ktime_add_us(ktime_get(), timeout);
+
+ do {
+ if (tegra_bpmp_master_acked(channel))
+ return 0;
+ } while (ktime_before(ktime_get(), end));
+
+ return -ETIMEDOUT;
+}
+
+static bool tegra_bpmp_master_free(struct tegra_bpmp_channel *channel)
+{
+ void *frame;
+
+ frame = tegra_ivc_write_get_next_frame(channel->ivc);
+ if (IS_ERR(frame)) {
+ channel->ob = NULL;
+ return false;
+ }
+
+ channel->ob = frame;
+
+ return true;
+}
+
+static int tegra_bpmp_wait_master_free(struct tegra_bpmp_channel *channel)
+{
+ unsigned long timeout = channel->bpmp->soc->channels.cpu_tx.timeout;
+ ktime_t start, now;
+
+ start = ns_to_ktime(local_clock());
+
+ do {
+ if (tegra_bpmp_master_free(channel))
+ return 0;
+
+ now = ns_to_ktime(local_clock());
+ } while (ktime_us_delta(now, start) < timeout);
+
+ return -ETIMEDOUT;
+}
+
+static ssize_t __tegra_bpmp_channel_read(struct tegra_bpmp_channel *channel,
+ void *data, size_t size)
+{
+ if (data && size > 0)
+ memcpy(data, channel->ib->data, size);
+
+ return tegra_ivc_read_advance(channel->ivc);
+}
+
+static ssize_t tegra_bpmp_channel_read(struct tegra_bpmp_channel *channel,
+ void *data, size_t size)
+{
+ struct tegra_bpmp *bpmp = channel->bpmp;
+ unsigned long flags;
+ ssize_t err;
+ int index;
+
+ index = tegra_bpmp_channel_get_thread_index(channel);
+ if (index < 0)
+ return index;
+
+ spin_lock_irqsave(&bpmp->lock, flags);
+ err = __tegra_bpmp_channel_read(channel, data, size);
+ clear_bit(index, bpmp->threaded.allocated);
+ spin_unlock_irqrestore(&bpmp->lock, flags);
+
+ up(&bpmp->threaded.lock);
+
+ return err;
+}
+
+static ssize_t __tegra_bpmp_channel_write(struct tegra_bpmp_channel *channel,
+ unsigned int mrq, unsigned long flags,
+ const void *data, size_t size)
+{
+ channel->ob->code = mrq;
+ channel->ob->flags = flags;
+
+ if (data && size > 0)
+ memcpy(channel->ob->data, data, size);
+
+ return tegra_ivc_write_advance(channel->ivc);
+}
+
+static struct tegra_bpmp_channel *
+tegra_bpmp_write_threaded(struct tegra_bpmp *bpmp, unsigned int mrq,
+ const void *data, size_t size)
+{
+ unsigned long timeout = bpmp->soc->channels.thread.timeout;
+ unsigned int count = bpmp->soc->channels.thread.count;
+ struct tegra_bpmp_channel *channel;
+ unsigned long flags;
+ unsigned int index;
+ int err;
+
+ err = down_timeout(&bpmp->threaded.lock, usecs_to_jiffies(timeout));
+ if (err < 0)
+ return ERR_PTR(err);
+
+ spin_lock_irqsave(&bpmp->lock, flags);
+
+ index = find_first_zero_bit(bpmp->threaded.allocated, count);
+ if (index == count) {
+ channel = ERR_PTR(-EBUSY);
+ goto unlock;
+ }
+
+ channel = tegra_bpmp_channel_get_thread(bpmp, index);
+ if (!channel) {
+ channel = ERR_PTR(-EINVAL);
+ goto unlock;
+ }
+
+ if (!tegra_bpmp_master_free(channel)) {
+ channel = ERR_PTR(-EBUSY);
+ goto unlock;
+ }
+
+ set_bit(index, bpmp->threaded.allocated);
+
+ err = __tegra_bpmp_channel_write(channel, mrq, MSG_ACK | MSG_RING,
+ data, size);
+ if (err < 0) {
+ clear_bit(index, bpmp->threaded.allocated);
+ goto unlock;
+ }
+
+ set_bit(index, bpmp->threaded.busy);
+
+unlock:
+ spin_unlock_irqrestore(&bpmp->lock, flags);
+ return channel;
+}
+
+static ssize_t tegra_bpmp_channel_write(struct tegra_bpmp_channel *channel,
+ unsigned int mrq, unsigned long flags,
+ const void *data, size_t size)
+{
+ int err;
+
+ err = tegra_bpmp_wait_master_free(channel);
+ if (err < 0)
+ return err;
+
+ return __tegra_bpmp_channel_write(channel, mrq, flags, data, size);
+}
+
+int tegra_bpmp_transfer_atomic(struct tegra_bpmp *bpmp,
+ struct tegra_bpmp_message *msg)
+{
+ struct tegra_bpmp_channel *channel;
+ int err;
+
+ if (WARN_ON(!irqs_disabled()))
+ return -EPERM;
+
+ if (!tegra_bpmp_message_valid(msg))
+ return -EINVAL;
+
+ channel = tegra_bpmp_channel_get_tx(bpmp);
+
+ err = tegra_bpmp_channel_write(channel, msg->mrq, MSG_ACK,
+ msg->tx.data, msg->tx.size);
+ if (err < 0)
+ return err;
+
+ err = mbox_send_message(bpmp->mbox.channel, NULL);
+ if (err < 0)
+ return err;
+
+ mbox_client_txdone(bpmp->mbox.channel, 0);
+
+ err = tegra_bpmp_wait_ack(channel);
+ if (err < 0)
+ return err;
+
+ return __tegra_bpmp_channel_read(channel, msg->rx.data, msg->rx.size);
+}
+EXPORT_SYMBOL_GPL(tegra_bpmp_transfer_atomic);
+
+int tegra_bpmp_transfer(struct tegra_bpmp *bpmp,
+ struct tegra_bpmp_message *msg)
+{
+ struct tegra_bpmp_channel *channel;
+ unsigned long timeout;
+ int err;
+
+ if (WARN_ON(irqs_disabled()))
+ return -EPERM;
+
+ if (!tegra_bpmp_message_valid(msg))
+ return -EINVAL;
+
+ channel = tegra_bpmp_write_threaded(bpmp, msg->mrq, msg->tx.data,
+ msg->tx.size);
+ if (IS_ERR(channel))
+ return PTR_ERR(channel);
+
+ err = mbox_send_message(bpmp->mbox.channel, NULL);
+ if (err < 0)
+ return err;
+
+ mbox_client_txdone(bpmp->mbox.channel, 0);
+
+ timeout = usecs_to_jiffies(bpmp->soc->channels.thread.timeout);
+
+ err = wait_for_completion_timeout(&channel->completion, timeout);
+ if (err == 0)
+ return -ETIMEDOUT;
+
+ return tegra_bpmp_channel_read(channel, msg->rx.data, msg->rx.size);
+}
+EXPORT_SYMBOL_GPL(tegra_bpmp_transfer);
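+
+/*
+ * A minimal usage sketch for tegra_bpmp_transfer() (illustrative only;
+ * mrq_ping_request/mrq_ping_response come from the BPMP ABI header, and
+ * the bpmp pointer is assumed to have been obtained via tegra_bpmp_get()):
+ *
+ *	struct mrq_ping_request request = { .challenge = 1 };
+ *	struct mrq_ping_response response;
+ *	struct tegra_bpmp_message msg = {
+ *		.mrq = MRQ_PING,
+ *		.tx = { .data = &request, .size = sizeof(request) },
+ *		.rx = { .data = &response, .size = sizeof(response) },
+ *	};
+ *	int err = tegra_bpmp_transfer(bpmp, &msg);
+ *
+ * On success, the response buffer holds the firmware's reply; see
+ * tegra_bpmp_ping() below for the atomic variant of the same exchange.
+ */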
+
+static struct tegra_bpmp_mrq *tegra_bpmp_find_mrq(struct tegra_bpmp *bpmp,
+ unsigned int mrq)
+{
+ struct tegra_bpmp_mrq *entry;
+
+ list_for_each_entry(entry, &bpmp->mrqs, list)
+ if (entry->mrq == mrq)
+ return entry;
+
+ return NULL;
+}
+
+static void tegra_bpmp_mrq_return(struct tegra_bpmp_channel *channel,
+ int code, const void *data, size_t size)
+{
+ unsigned long flags = channel->ib->flags;
+ struct tegra_bpmp *bpmp = channel->bpmp;
+ struct tegra_bpmp_mb_data *frame;
+ int err;
+
+ if (WARN_ON(size > MSG_DATA_MIN_SZ))
+ return;
+
+ err = tegra_ivc_read_advance(channel->ivc);
+ if (WARN_ON(err < 0))
+ return;
+
+ if ((flags & MSG_ACK) == 0)
+ return;
+
+ frame = tegra_ivc_write_get_next_frame(channel->ivc);
+ if (WARN_ON(IS_ERR(frame)))
+ return;
+
+ frame->code = code;
+
+ if (data && size > 0)
+ memcpy(frame->data, data, size);
+
+ err = tegra_ivc_write_advance(channel->ivc);
+ if (WARN_ON(err < 0))
+ return;
+
+ if (flags & MSG_RING) {
+ err = mbox_send_message(bpmp->mbox.channel, NULL);
+ if (WARN_ON(err < 0))
+ return;
+
+ mbox_client_txdone(bpmp->mbox.channel, 0);
+ }
+}
+
+static void tegra_bpmp_handle_mrq(struct tegra_bpmp *bpmp,
+ unsigned int mrq,
+ struct tegra_bpmp_channel *channel)
+{
+ struct tegra_bpmp_mrq *entry;
+ u32 zero = 0;
+
+ spin_lock(&bpmp->lock);
+
+ entry = tegra_bpmp_find_mrq(bpmp, mrq);
+ if (!entry) {
+ spin_unlock(&bpmp->lock);
+ tegra_bpmp_mrq_return(channel, -EINVAL, &zero, sizeof(zero));
+ return;
+ }
+
+ entry->handler(mrq, channel, entry->data);
+
+ spin_unlock(&bpmp->lock);
+}
+
+int tegra_bpmp_request_mrq(struct tegra_bpmp *bpmp, unsigned int mrq,
+ tegra_bpmp_mrq_handler_t handler, void *data)
+{
+ struct tegra_bpmp_mrq *entry;
+ unsigned long flags;
+
+ if (!handler)
+ return -EINVAL;
+
+ entry = devm_kzalloc(bpmp->dev, sizeof(*entry), GFP_KERNEL);
+ if (!entry)
+ return -ENOMEM;
+
+ spin_lock_irqsave(&bpmp->lock, flags);
+
+ entry->mrq = mrq;
+ entry->handler = handler;
+ entry->data = data;
+ list_add(&entry->list, &bpmp->mrqs);
+
+ spin_unlock_irqrestore(&bpmp->lock, flags);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(tegra_bpmp_request_mrq);
+
+void tegra_bpmp_free_mrq(struct tegra_bpmp *bpmp, unsigned int mrq, void *data)
+{
+ struct tegra_bpmp_mrq *entry;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bpmp->lock, flags);
+
+ entry = tegra_bpmp_find_mrq(bpmp, mrq);
+ if (!entry)
+ goto unlock;
+
+ list_del(&entry->list);
+ devm_kfree(bpmp->dev, entry);
+
+unlock:
+ spin_unlock_irqrestore(&bpmp->lock, flags);
+}
+EXPORT_SYMBOL_GPL(tegra_bpmp_free_mrq);
+
+static void tegra_bpmp_mrq_handle_ping(unsigned int mrq,
+ struct tegra_bpmp_channel *channel,
+ void *data)
+{
+ struct mrq_ping_request *request;
+ struct mrq_ping_response response;
+
+ request = (struct mrq_ping_request *)channel->ib->data;
+
+ memset(&response, 0, sizeof(response));
+ response.reply = request->challenge << 1;
+
+ tegra_bpmp_mrq_return(channel, 0, &response, sizeof(response));
+}
+
+static int tegra_bpmp_ping(struct tegra_bpmp *bpmp)
+{
+ struct mrq_ping_response response;
+ struct mrq_ping_request request;
+ struct tegra_bpmp_message msg;
+ unsigned long flags;
+ ktime_t start, end;
+ int err;
+
+ memset(&request, 0, sizeof(request));
+ request.challenge = 1;
+
+ memset(&response, 0, sizeof(response));
+
+ memset(&msg, 0, sizeof(msg));
+ msg.mrq = MRQ_PING;
+ msg.tx.data = &request;
+ msg.tx.size = sizeof(request);
+ msg.rx.data = &response;
+ msg.rx.size = sizeof(response);
+
+ local_irq_save(flags);
+ start = ktime_get();
+ err = tegra_bpmp_transfer_atomic(bpmp, &msg);
+ end = ktime_get();
+ local_irq_restore(flags);
+
+ if (!err)
+ dev_dbg(bpmp->dev,
+ "ping ok: challenge: %u, response: %u, time: %lld\n",
+ request.challenge, response.reply,
+ ktime_to_us(ktime_sub(end, start)));
+
+ return err;
+}
+
+static int tegra_bpmp_get_firmware_tag(struct tegra_bpmp *bpmp, char *tag,
+ size_t size)
+{
+ struct mrq_query_tag_request request;
+ struct tegra_bpmp_message msg;
+ unsigned long flags;
+ dma_addr_t phys;
+ void *virt;
+ int err;
+
+ virt = dma_alloc_coherent(bpmp->dev, MSG_DATA_MIN_SZ, &phys,
+ GFP_KERNEL | GFP_DMA32);
+ if (!virt)
+ return -ENOMEM;
+
+ memset(&request, 0, sizeof(request));
+ request.addr = phys;
+
+ memset(&msg, 0, sizeof(msg));
+ msg.mrq = MRQ_QUERY_TAG;
+ msg.tx.data = &request;
+ msg.tx.size = sizeof(request);
+
+ local_irq_save(flags);
+ err = tegra_bpmp_transfer_atomic(bpmp, &msg);
+ local_irq_restore(flags);
+
+ if (err == 0)
+ strlcpy(tag, virt, size);
+
+ dma_free_coherent(bpmp->dev, MSG_DATA_MIN_SZ, virt, phys);
+
+ return err;
+}
+
+static void tegra_bpmp_channel_signal(struct tegra_bpmp_channel *channel)
+{
+ unsigned long flags = channel->ob->flags;
+
+ if ((flags & MSG_RING) == 0)
+ return;
+
+ complete(&channel->completion);
+}
+
+static void tegra_bpmp_handle_rx(struct mbox_client *client, void *data)
+{
+ struct tegra_bpmp *bpmp = mbox_client_to_bpmp(client);
+ struct tegra_bpmp_channel *channel;
+ unsigned int i, count;
+ unsigned long *busy;
+
+ channel = tegra_bpmp_channel_get_rx(bpmp);
+ count = bpmp->soc->channels.thread.count;
+ busy = bpmp->threaded.busy;
+
+ if (tegra_bpmp_master_acked(channel))
+ tegra_bpmp_handle_mrq(bpmp, channel->ib->code, channel);
+
+ spin_lock(&bpmp->lock);
+
+ for_each_set_bit(i, busy, count) {
+ struct tegra_bpmp_channel *channel;
+
+ channel = tegra_bpmp_channel_get_thread(bpmp, i);
+ if (!channel)
+ continue;
+
+ if (tegra_bpmp_master_acked(channel)) {
+ tegra_bpmp_channel_signal(channel);
+ clear_bit(i, busy);
+ }
+ }
+
+ spin_unlock(&bpmp->lock);
+}
+
+static void tegra_bpmp_ivc_notify(struct tegra_ivc *ivc, void *data)
+{
+ struct tegra_bpmp *bpmp = data;
+ int err;
+
+ if (WARN_ON(bpmp->mbox.channel == NULL))
+ return;
+
+ err = mbox_send_message(bpmp->mbox.channel, NULL);
+ if (err < 0)
+ return;
+
+ mbox_client_txdone(bpmp->mbox.channel, 0);
+}
+
+static int tegra_bpmp_channel_init(struct tegra_bpmp_channel *channel,
+ struct tegra_bpmp *bpmp,
+ unsigned int index)
+{
+ size_t message_size, queue_size;
+ unsigned int offset;
+ int err;
+
+ channel->ivc = devm_kzalloc(bpmp->dev, sizeof(*channel->ivc),
+ GFP_KERNEL);
+ if (!channel->ivc)
+ return -ENOMEM;
+
+ message_size = tegra_ivc_align(MSG_MIN_SZ);
+ queue_size = tegra_ivc_total_queue_size(message_size);
+ offset = queue_size * index;
+
+ err = tegra_ivc_init(channel->ivc, NULL,
+ bpmp->rx.virt + offset, bpmp->rx.phys + offset,
+ bpmp->tx.virt + offset, bpmp->tx.phys + offset,
+ 1, message_size, tegra_bpmp_ivc_notify,
+ bpmp);
+ if (err < 0) {
+ dev_err(bpmp->dev, "failed to setup IVC for channel %u: %d\n",
+ index, err);
+ return err;
+ }
+
+ init_completion(&channel->completion);
+ channel->bpmp = bpmp;
+
+ return 0;
+}
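+
+/*
+ * Worked example of the layout computed above (assuming MSG_MIN_SZ is 128
+ * bytes, which is already 64-byte aligned): message_size = 128 and
+ * queue_size = 128 + sizeof(struct tegra_ivc_header) = 256, so channel N
+ * occupies bytes [256 * N, 256 * (N + 1)) of both the TX and RX regions.
+ * The 14 channels used by Tegra186 (6 CPU TX + 7 thread + 1 CPU RX) thus
+ * need 14 * 256 = 3584 bytes, which fits in the 4096-byte allocations
+ * made from the "shmem" pools in tegra_bpmp_probe().
+ */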
+
+static void tegra_bpmp_channel_reset(struct tegra_bpmp_channel *channel)
+{
+ /* reset the channel state */
+ tegra_ivc_reset(channel->ivc);
+
+ /* sync the channel state with BPMP */
+ while (tegra_ivc_notified(channel->ivc))
+ ;
+}
+
+static void tegra_bpmp_channel_cleanup(struct tegra_bpmp_channel *channel)
+{
+ tegra_ivc_cleanup(channel->ivc);
+}
+
+static int tegra_bpmp_probe(struct platform_device *pdev)
+{
+ struct tegra_bpmp_channel *channel;
+ struct tegra_bpmp *bpmp;
+ unsigned int i;
+ char tag[32];
+ size_t size;
+ int err;
+
+ bpmp = devm_kzalloc(&pdev->dev, sizeof(*bpmp), GFP_KERNEL);
+ if (!bpmp)
+ return -ENOMEM;
+
+ bpmp->soc = of_device_get_match_data(&pdev->dev);
+ bpmp->dev = &pdev->dev;
+
+ bpmp->tx.pool = of_gen_pool_get(pdev->dev.of_node, "shmem", 0);
+ if (!bpmp->tx.pool) {
+ dev_err(&pdev->dev, "TX shmem pool not found\n");
+ return -ENOMEM;
+ }
+
+ bpmp->tx.virt = gen_pool_dma_alloc(bpmp->tx.pool, 4096, &bpmp->tx.phys);
+ if (!bpmp->tx.virt) {
+ dev_err(&pdev->dev, "failed to allocate from TX pool\n");
+ return -ENOMEM;
+ }
+
+ bpmp->rx.pool = of_gen_pool_get(pdev->dev.of_node, "shmem", 1);
+ if (!bpmp->rx.pool) {
+ dev_err(&pdev->dev, "RX shmem pool not found\n");
+ err = -ENOMEM;
+ goto free_tx;
+ }
+
+ bpmp->rx.virt = gen_pool_dma_alloc(bpmp->rx.pool, 4096, &bpmp->rx.phys);
+ if (!bpmp->rx.virt) {
+ dev_err(&pdev->dev, "failed to allocate from RX pool\n");
+ err = -ENOMEM;
+ goto free_tx;
+ }
+
+ INIT_LIST_HEAD(&bpmp->mrqs);
+ spin_lock_init(&bpmp->lock);
+
+ bpmp->threaded.count = bpmp->soc->channels.thread.count;
+ sema_init(&bpmp->threaded.lock, bpmp->threaded.count);
+
+ size = BITS_TO_LONGS(bpmp->threaded.count) * sizeof(long);
+
+ bpmp->threaded.allocated = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
+ if (!bpmp->threaded.allocated) {
+ err = -ENOMEM;
+ goto free_rx;
+ }
+
+ bpmp->threaded.busy = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
+ if (!bpmp->threaded.busy) {
+ err = -ENOMEM;
+ goto free_rx;
+ }
+
+ bpmp->num_channels = bpmp->soc->channels.cpu_tx.count +
+ bpmp->soc->channels.thread.count +
+ bpmp->soc->channels.cpu_rx.count;
+
+ bpmp->channels = devm_kcalloc(&pdev->dev, bpmp->num_channels,
+ sizeof(*channel), GFP_KERNEL);
+ if (!bpmp->channels) {
+ err = -ENOMEM;
+ goto free_rx;
+ }
+
+ /* message channel initialization */
+ for (i = 0; i < bpmp->num_channels; i++) {
+ struct tegra_bpmp_channel *channel = &bpmp->channels[i];
+
+ err = tegra_bpmp_channel_init(channel, bpmp, i);
+ if (err < 0)
+ goto cleanup_channels;
+ }
+
+ /* mbox registration */
+ bpmp->mbox.client.dev = &pdev->dev;
+ bpmp->mbox.client.rx_callback = tegra_bpmp_handle_rx;
+ bpmp->mbox.client.tx_block = false;
+ bpmp->mbox.client.knows_txdone = false;
+
+ bpmp->mbox.channel = mbox_request_channel(&bpmp->mbox.client, 0);
+ if (IS_ERR(bpmp->mbox.channel)) {
+ err = PTR_ERR(bpmp->mbox.channel);
+ dev_err(&pdev->dev, "failed to get HSP mailbox: %d\n", err);
+ goto cleanup_channels;
+ }
+
+ /* reset message channels */
+ for (i = 0; i < bpmp->num_channels; i++) {
+ struct tegra_bpmp_channel *channel = &bpmp->channels[i];
+
+ tegra_bpmp_channel_reset(channel);
+ }
+
+ err = tegra_bpmp_request_mrq(bpmp, MRQ_PING,
+ tegra_bpmp_mrq_handle_ping, bpmp);
+ if (err < 0)
+ goto free_mbox;
+
+ err = tegra_bpmp_ping(bpmp);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to ping BPMP: %d\n", err);
+ goto free_mrq;
+ }
+
+ err = tegra_bpmp_get_firmware_tag(bpmp, tag, sizeof(tag) - 1);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to get firmware tag: %d\n", err);
+ goto free_mrq;
+ }
+
+ dev_info(&pdev->dev, "firmware: %s\n", tag);
+
+ err = of_platform_default_populate(pdev->dev.of_node, NULL, &pdev->dev);
+ if (err < 0)
+ goto free_mrq;
+
+ err = tegra_bpmp_init_clocks(bpmp);
+ if (err < 0)
+ goto free_mrq;
+
+ err = tegra_bpmp_init_resets(bpmp);
+ if (err < 0)
+ goto free_mrq;
+
+ platform_set_drvdata(pdev, bpmp);
+
+ return 0;
+
+free_mrq:
+ tegra_bpmp_free_mrq(bpmp, MRQ_PING, bpmp);
+free_mbox:
+ mbox_free_channel(bpmp->mbox.channel);
+cleanup_channels:
+ while (i--)
+ tegra_bpmp_channel_cleanup(&bpmp->channels[i]);
+free_rx:
+ gen_pool_free(bpmp->rx.pool, (unsigned long)bpmp->rx.virt, 4096);
+free_tx:
+ gen_pool_free(bpmp->tx.pool, (unsigned long)bpmp->tx.virt, 4096);
+ return err;
+}
+
+static const struct tegra_bpmp_soc tegra186_soc = {
+ .channels = {
+ .cpu_tx = {
+ .offset = 0,
+ .count = 6,
+ .timeout = 60 * USEC_PER_SEC,
+ },
+ .thread = {
+ .offset = 6,
+ .count = 7,
+ .timeout = 600 * USEC_PER_SEC,
+ },
+ .cpu_rx = {
+ .offset = 13,
+ .count = 1,
+ .timeout = 0,
+ },
+ },
+ .num_resets = 193,
+};
+
+static const struct of_device_id tegra_bpmp_match[] = {
+ { .compatible = "nvidia,tegra186-bpmp", .data = &tegra186_soc },
+ { }
+};
+
+static struct platform_driver tegra_bpmp_driver = {
+ .driver = {
+ .name = "tegra-bpmp",
+ .of_match_table = tegra_bpmp_match,
+ },
+ .probe = tegra_bpmp_probe,
+};
+
+static int __init tegra_bpmp_init(void)
+{
+ return platform_driver_register(&tegra_bpmp_driver);
+}
+core_initcall(tegra_bpmp_init);
diff --git a/drivers/firmware/tegra/ivc.c b/drivers/firmware/tegra/ivc.c
new file mode 100644
index 000000000000..29ecfd815320
--- /dev/null
+++ b/drivers/firmware/tegra/ivc.c
@@ -0,0 +1,695 @@
+/*
+ * Copyright (c) 2014-2016, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include <soc/tegra/ivc.h>
+
+#define TEGRA_IVC_ALIGN 64
+
+/*
+ * IVC channel reset protocol.
+ *
+ * Each end uses its tx_channel.state to indicate its synchronization state.
+ */
+enum tegra_ivc_state {
+ /*
+ * This value is zero for backwards compatibility with services that
+ * assume channels to be initially zeroed. Such channels are in an
+ * initially valid state, but cannot be asynchronously reset, and must
+ * maintain a valid state at all times.
+ *
+ * The transmitting end can enter the established state from the sync
+ * or ack state when it observes the receiving endpoint in the ack or
+ * established state, indicating that it has cleared the counters in
+ * our rx.channel.
+ */
+ TEGRA_IVC_STATE_ESTABLISHED = 0,
+
+ /*
+ * If an endpoint is observed in the sync state, the remote endpoint is
+ * allowed to clear the counters it owns asynchronously with respect to
+ * the current endpoint. Therefore, the current endpoint is no longer
+ * allowed to communicate.
+ */
+ TEGRA_IVC_STATE_SYNC,
+
+ /*
+ * When the transmitting end observes the receiving end in the sync
+ * state, it can clear the w_count and r_count and transition to the ack
+ * state. If the remote endpoint observes us in the ack state, it can
+ * return to the established state once it has cleared its counters.
+ */
+ TEGRA_IVC_STATE_ACK
+};
+
+/*
+ * This structure is divided into two-cache aligned parts, the first is only
+ * written through the tx.channel pointer, while the second is only written
+ * through the rx.channel pointer. This delineates ownership of the cache
+ * lines, which is critical to performance and necessary in non-cache coherent
+ * implementations.
+ */
+struct tegra_ivc_header {
+ union {
+ struct {
+ /* fields owned by the transmitting end */
+ u32 count;
+ u32 state;
+ };
+
+ u8 pad[TEGRA_IVC_ALIGN];
+ } tx;
+
+ union {
+ /* fields owned by the receiving end */
+ u32 count;
+ u8 pad[TEGRA_IVC_ALIGN];
+ } rx;
+};
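+
+/*
+ * A sketch of the resulting layout (TEGRA_IVC_ALIGN is 64, so the header
+ * is 128 bytes): tx.count and tx.state share the cache line at offset 0,
+ * rx.count sits alone in the cache line at offset 64, and frame 0 starts
+ * right after the header at offset 128 (see tegra_ivc_frame_virt() and
+ * tegra_ivc_frame_phys() below). Each end therefore only ever writes the
+ * cache line it owns.
+ */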
+
+static inline void tegra_ivc_invalidate(struct tegra_ivc *ivc, dma_addr_t phys)
+{
+ if (!ivc->peer)
+ return;
+
+ dma_sync_single_for_cpu(ivc->peer, phys, TEGRA_IVC_ALIGN,
+ DMA_FROM_DEVICE);
+}
+
+static inline void tegra_ivc_flush(struct tegra_ivc *ivc, dma_addr_t phys)
+{
+ if (!ivc->peer)
+ return;
+
+ dma_sync_single_for_device(ivc->peer, phys, TEGRA_IVC_ALIGN,
+ DMA_TO_DEVICE);
+}
+
+static inline bool tegra_ivc_empty(struct tegra_ivc *ivc,
+ struct tegra_ivc_header *header)
+{
+ /*
+ * This function performs multiple checks on the same values with
+ * security implications, so create snapshots with ACCESS_ONCE() to
+ * ensure that these checks use the same values.
+ */
+ u32 tx = ACCESS_ONCE(header->tx.count);
+ u32 rx = ACCESS_ONCE(header->rx.count);
+
+ /*
+ * Perform an over-full check to prevent denial of service attacks
+ * where a server could be easily fooled into believing that there's
+ * an extremely large number of frames ready, since receivers are not
+ * expected to check for full or over-full conditions.
+ *
+ * Although the channel isn't empty, this is an invalid case caused by
+ * a potentially malicious peer, so returning empty is safer, because
+ * it gives the impression that the channel has gone silent.
+ */
+ if (tx - rx > ivc->num_frames)
+ return true;
+
+ return tx == rx;
+}
+
+static inline bool tegra_ivc_full(struct tegra_ivc *ivc,
+ struct tegra_ivc_header *header)
+{
+ u32 tx = ACCESS_ONCE(header->tx.count);
+ u32 rx = ACCESS_ONCE(header->rx.count);
+
+ /*
+ * Invalid cases where the counters indicate that the queue is over
+ * capacity also appear full.
+ */
+ return tx - rx >= ivc->num_frames;
+}
+
+static inline u32 tegra_ivc_available(struct tegra_ivc *ivc,
+ struct tegra_ivc_header *header)
+{
+ u32 tx = ACCESS_ONCE(header->tx.count);
+ u32 rx = ACCESS_ONCE(header->rx.count);
+
+ /*
+ * This function isn't expected to be used in scenarios where an
+ * over-full situation can lead to denial of service attacks. See the
+ * comment in tegra_ivc_empty() for an explanation about special
+ * over-full considerations.
+ */
+ return tx - rx;
+}
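+
+/*
+ * Note that the counters are free-running and their difference is taken
+ * in unsigned 32-bit arithmetic, so wraparound needs no special casing.
+ * For example, with num_frames = 4, tx = 0x00000001 and rx = 0xffffffff
+ * give tx - rx = 2: two frames are available even though tx has wrapped.
+ */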
+
+static inline void tegra_ivc_advance_tx(struct tegra_ivc *ivc)
+{
+ ACCESS_ONCE(ivc->tx.channel->tx.count) =
+ ACCESS_ONCE(ivc->tx.channel->tx.count) + 1;
+
+ if (ivc->tx.position == ivc->num_frames - 1)
+ ivc->tx.position = 0;
+ else
+ ivc->tx.position++;
+}
+
+static inline void tegra_ivc_advance_rx(struct tegra_ivc *ivc)
+{
+ ACCESS_ONCE(ivc->rx.channel->rx.count) =
+ ACCESS_ONCE(ivc->rx.channel->rx.count) + 1;
+
+ if (ivc->rx.position == ivc->num_frames - 1)
+ ivc->rx.position = 0;
+ else
+ ivc->rx.position++;
+}
+
+static inline int tegra_ivc_check_read(struct tegra_ivc *ivc)
+{
+ unsigned int offset = offsetof(struct tegra_ivc_header, tx.count);
+
+ /*
+ * tx.channel->state is set locally, so it is not synchronized with
+ * state from the remote peer. The remote peer cannot reset its
+ * transmit counters until we've acknowledged its synchronization
+ * request, so no additional synchronization is required because an
+ * asynchronous transition of rx.channel->state to
+ * TEGRA_IVC_STATE_ACK is not allowed.
+ */
+ if (ivc->tx.channel->tx.state != TEGRA_IVC_STATE_ESTABLISHED)
+ return -ECONNRESET;
+
+ /*
+ * Avoid unnecessary invalidations when performing repeated accesses
+ * to an IVC channel by checking the old queue pointers first.
+ *
+ * Synchronization is only necessary when these pointers indicate
+ * empty or full.
+ */
+ if (!tegra_ivc_empty(ivc, ivc->rx.channel))
+ return 0;
+
+ tegra_ivc_invalidate(ivc, ivc->rx.phys + offset);
+
+ if (tegra_ivc_empty(ivc, ivc->rx.channel))
+ return -ENOSPC;
+
+ return 0;
+}
+
+static inline int tegra_ivc_check_write(struct tegra_ivc *ivc)
+{
+ unsigned int offset = offsetof(struct tegra_ivc_header, rx.count);
+
+ if (ivc->tx.channel->tx.state != TEGRA_IVC_STATE_ESTABLISHED)
+ return -ECONNRESET;
+
+ if (!tegra_ivc_full(ivc, ivc->tx.channel))
+ return 0;
+
+ tegra_ivc_invalidate(ivc, ivc->tx.phys + offset);
+
+ if (tegra_ivc_full(ivc, ivc->tx.channel))
+ return -ENOSPC;
+
+ return 0;
+}
+
+static void *tegra_ivc_frame_virt(struct tegra_ivc *ivc,
+ struct tegra_ivc_header *header,
+ unsigned int frame)
+{
+ if (WARN_ON(frame >= ivc->num_frames))
+ return ERR_PTR(-EINVAL);
+
+ return (void *)(header + 1) + ivc->frame_size * frame;
+}
+
+static inline dma_addr_t tegra_ivc_frame_phys(struct tegra_ivc *ivc,
+ dma_addr_t phys,
+ unsigned int frame)
+{
+ unsigned long offset;
+
+ offset = sizeof(struct tegra_ivc_header) + ivc->frame_size * frame;
+
+ return phys + offset;
+}
+
+static inline void tegra_ivc_invalidate_frame(struct tegra_ivc *ivc,
+ dma_addr_t phys,
+ unsigned int frame,
+ unsigned int offset,
+ size_t size)
+{
+ if (!ivc->peer || WARN_ON(frame >= ivc->num_frames))
+ return;
+
+ phys = tegra_ivc_frame_phys(ivc, phys, frame) + offset;
+
+ dma_sync_single_for_cpu(ivc->peer, phys, size, DMA_FROM_DEVICE);
+}
+
+static inline void tegra_ivc_flush_frame(struct tegra_ivc *ivc,
+ dma_addr_t phys,
+ unsigned int frame,
+ unsigned int offset,
+ size_t size)
+{
+ if (!ivc->peer || WARN_ON(frame >= ivc->num_frames))
+ return;
+
+ phys = tegra_ivc_frame_phys(ivc, phys, frame) + offset;
+
+ dma_sync_single_for_device(ivc->peer, phys, size, DMA_TO_DEVICE);
+}
+
+/* directly peek at the next frame rx'ed */
+void *tegra_ivc_read_get_next_frame(struct tegra_ivc *ivc)
+{
+ int err;
+
+ if (WARN_ON(ivc == NULL))
+ return ERR_PTR(-EINVAL);
+
+ err = tegra_ivc_check_read(ivc);
+ if (err < 0)
+ return ERR_PTR(err);
+
+ /*
+ * Order observation of ivc->rx.position potentially indicating new
+ * data before data read.
+ */
+ smp_rmb();
+
+ tegra_ivc_invalidate_frame(ivc, ivc->rx.phys, ivc->rx.position, 0,
+ ivc->frame_size);
+
+ return tegra_ivc_frame_virt(ivc, ivc->rx.channel, ivc->rx.position);
+}
+EXPORT_SYMBOL(tegra_ivc_read_get_next_frame);
+
+int tegra_ivc_read_advance(struct tegra_ivc *ivc)
+{
+ unsigned int rx = offsetof(struct tegra_ivc_header, rx.count);
+ unsigned int tx = offsetof(struct tegra_ivc_header, tx.count);
+ int err;
+
+ /*
+ * No read barriers or synchronization here: the caller is expected to
+ * have already observed the channel non-empty. This check is just to
+ * catch programming errors.
+ */
+ err = tegra_ivc_check_read(ivc);
+ if (err < 0)
+ return err;
+
+ tegra_ivc_advance_rx(ivc);
+
+ tegra_ivc_flush(ivc, ivc->rx.phys + rx);
+
+ /*
+ * Ensure our write to ivc->rx.position occurs before our read from
+ * ivc->tx.position.
+ */
+ smp_mb();
+
+ /*
+ * Notify only upon transition from full to non-full. The available
+ * count can only asynchronously increase, so the worst possible
+ * side-effect will be a spurious notification.
+ */
+ tegra_ivc_invalidate(ivc, ivc->rx.phys + tx);
+
+ if (tegra_ivc_available(ivc, ivc->rx.channel) == ivc->num_frames - 1)
+ ivc->notify(ivc, ivc->notify_data);
+
+ return 0;
+}
+EXPORT_SYMBOL(tegra_ivc_read_advance);
+
+/* directly poke at the next frame to be tx'ed */
+void *tegra_ivc_write_get_next_frame(struct tegra_ivc *ivc)
+{
+ int err;
+
+ err = tegra_ivc_check_write(ivc);
+ if (err < 0)
+ return ERR_PTR(err);
+
+ return tegra_ivc_frame_virt(ivc, ivc->tx.channel, ivc->tx.position);
+}
+EXPORT_SYMBOL(tegra_ivc_write_get_next_frame);
+
+/* advance the tx buffer */
+int tegra_ivc_write_advance(struct tegra_ivc *ivc)
+{
+ unsigned int tx = offsetof(struct tegra_ivc_header, tx.count);
+ unsigned int rx = offsetof(struct tegra_ivc_header, rx.count);
+ int err;
+
+ err = tegra_ivc_check_write(ivc);
+ if (err < 0)
+ return err;
+
+ tegra_ivc_flush_frame(ivc, ivc->tx.phys, ivc->tx.position, 0,
+ ivc->frame_size);
+
+ /*
+ * Order any possible stores to the frame before update of
+ * ivc->tx.position.
+ */
+ smp_wmb();
+
+ tegra_ivc_advance_tx(ivc);
+ tegra_ivc_flush(ivc, ivc->tx.phys + tx);
+
+ /*
+ * Ensure our write to ivc->tx.position occurs before our read from
+ * ivc->rx.position.
+ */
+ smp_mb();
+
+ /*
+ * Notify only upon transition from empty to non-empty. The available
+ * count can only asynchronously decrease, so the worst possible
+ * side-effect will be a spurious notification.
+ */
+ tegra_ivc_invalidate(ivc, ivc->tx.phys + rx);
+
+ if (tegra_ivc_available(ivc, ivc->tx.channel) == 1)
+ ivc->notify(ivc, ivc->notify_data);
+
+ return 0;
+}
+EXPORT_SYMBOL(tegra_ivc_write_advance);
+
+void tegra_ivc_reset(struct tegra_ivc *ivc)
+{
+ unsigned int offset = offsetof(struct tegra_ivc_header, tx.count);
+
+ ivc->tx.channel->tx.state = TEGRA_IVC_STATE_SYNC;
+ tegra_ivc_flush(ivc, ivc->tx.phys + offset);
+ ivc->notify(ivc, ivc->notify_data);
+}
+EXPORT_SYMBOL(tegra_ivc_reset);
+
+/*
+ * =======================================================
+ * IVC State Transition Table - see tegra_ivc_notified()
+ * =======================================================
+ *
+ * local remote action
+ * ----- ------ -----------------------------------
+ * SYNC EST <none>
+ * SYNC ACK reset counters; move to EST; notify
+ * SYNC SYNC reset counters; move to ACK; notify
+ * ACK EST move to EST; notify
+ * ACK ACK move to EST; notify
+ * ACK SYNC reset counters; move to ACK; notify
+ * EST EST <none>
+ * EST ACK <none>
+ * EST SYNC reset counters; move to ACK; notify
+ *
+ * ===============================================================
+ */
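+
+/*
+ * Example handshake, reading the table above with both ends initially
+ * established: end A calls tegra_ivc_reset() and moves to SYNC; end B is
+ * notified, observes the remote SYNC, clears its counters and moves to
+ * ACK; A is notified, observes the remote ACK while local is SYNC, clears
+ * its counters and moves to EST; B is notified, observes the remote EST
+ * while local is ACK and moves to EST. Both ends are now established
+ * with zeroed counters.
+ */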
+
+int tegra_ivc_notified(struct tegra_ivc *ivc)
+{
+ unsigned int offset = offsetof(struct tegra_ivc_header, tx.count);
+ enum tegra_ivc_state state;
+
+ /* Copy the receiver's state out of shared memory. */
+ tegra_ivc_invalidate(ivc, ivc->rx.phys + offset);
+ state = ACCESS_ONCE(ivc->rx.channel->tx.state);
+
+ if (state == TEGRA_IVC_STATE_SYNC) {
+ offset = offsetof(struct tegra_ivc_header, tx.count);
+
+ /*
+ * Order observation of TEGRA_IVC_STATE_SYNC before stores
+ * clearing tx.channel.
+ */
+ smp_rmb();
+
+ /*
+ * Reset tx.channel counters. The remote end is in the SYNC
+ * state and won't make progress until we change our state,
+ * so the counters are not in use at this time.
+ */
+ ivc->tx.channel->tx.count = 0;
+ ivc->rx.channel->rx.count = 0;
+
+ ivc->tx.position = 0;
+ ivc->rx.position = 0;
+
+ /*
+ * Ensure that counters appear cleared before new state can be
+ * observed.
+ */
+ smp_wmb();
+
+ /*
+ * Move to ACK state. We have just cleared our counters, so it
+ * is now safe for the remote end to start using these values.
+ */
+ ivc->tx.channel->tx.state = TEGRA_IVC_STATE_ACK;
+ tegra_ivc_flush(ivc, ivc->tx.phys + offset);
+
+ /*
+ * Notify remote end to observe state transition.
+ */
+ ivc->notify(ivc, ivc->notify_data);
+
+ } else if (ivc->tx.channel->tx.state == TEGRA_IVC_STATE_SYNC &&
+ state == TEGRA_IVC_STATE_ACK) {
+ offset = offsetof(struct tegra_ivc_header, tx.count);
+
+ /*
+ * Order observation of TEGRA_IVC_STATE_ACK before stores clearing
+ * tx.channel.
+ */
+ smp_rmb();
+
+ /*
+ * Reset tx.channel counters. The remote end is in the ACK
+ * state and won't make progress until we change our state,
+ * so the counters are not in use at this time.
+ */
+ ivc->tx.channel->tx.count = 0;
+ ivc->rx.channel->rx.count = 0;
+
+ ivc->tx.position = 0;
+ ivc->rx.position = 0;
+
+ /*
+ * Ensure that counters appear cleared before new state can be
+ * observed.
+ */
+ smp_wmb();
+
+ /*
+ * Move to ESTABLISHED state. We know that the remote end has
+ * already cleared its counters, so it is safe to start
+ * writing/reading on this channel.
+ */
+ ivc->tx.channel->tx.state = TEGRA_IVC_STATE_ESTABLISHED;
+ tegra_ivc_flush(ivc, ivc->tx.phys + offset);
+
+ /*
+ * Notify remote end to observe state transition.
+ */
+ ivc->notify(ivc, ivc->notify_data);
+
+ } else if (ivc->tx.channel->tx.state == TEGRA_IVC_STATE_ACK) {
+ offset = offsetof(struct tegra_ivc_header, tx.count);
+
+ /*
+ * At this point, we have observed the peer to be in either
+ * the ACK or ESTABLISHED state. Next, order observation of
+ * peer state before storing to tx.channel.
+ */
+ smp_rmb();
+
+ /*
+ * Move to ESTABLISHED state. We know that we have previously
+ * cleared our counters, and we know that the remote end has
+ * cleared its counters, so it is safe to start writing/reading
+ * on this channel.
+ */
+ ivc->tx.channel->tx.state = TEGRA_IVC_STATE_ESTABLISHED;
+ tegra_ivc_flush(ivc, ivc->tx.phys + offset);
+
+ /*
+ * Notify remote end to observe state transition.
+ */
+ ivc->notify(ivc, ivc->notify_data);
+
+ } else {
+ /*
+ * There is no need to handle any further action. Either the
+ * channel is already fully established, or we are waiting for
+ * the remote end to catch up with our current state. Refer
+ * to the diagram in "IVC State Transition Table" above.
+ */
+ }
+
+ if (ivc->tx.channel->tx.state != TEGRA_IVC_STATE_ESTABLISHED)
+ return -EAGAIN;
+
+ return 0;
+}
+EXPORT_SYMBOL(tegra_ivc_notified);
+
+size_t tegra_ivc_align(size_t size)
+{
+ return ALIGN(size, TEGRA_IVC_ALIGN);
+}
+EXPORT_SYMBOL(tegra_ivc_align);
+
+unsigned tegra_ivc_total_queue_size(unsigned queue_size)
+{
+ if (!IS_ALIGNED(queue_size, TEGRA_IVC_ALIGN)) {
+ pr_err("%s: queue_size (%u) must be %u-byte aligned\n",
+ __func__, queue_size, TEGRA_IVC_ALIGN);
+ return 0;
+ }
+
+ return queue_size + sizeof(struct tegra_ivc_header);
+}
+EXPORT_SYMBOL(tegra_ivc_total_queue_size);
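+
+/*
+ * For example, a queue of four 128-byte frames needs
+ * tegra_ivc_total_queue_size(4 * 128) = 512 + 128 = 640 bytes, the extra
+ * 128 bytes being the two cache-line-sized halves of the header.
+ */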
+
+static int tegra_ivc_check_params(unsigned long rx, unsigned long tx,
+ unsigned int num_frames, size_t frame_size)
+{
+ BUILD_BUG_ON(!IS_ALIGNED(offsetof(struct tegra_ivc_header, tx.count),
+ TEGRA_IVC_ALIGN));
+ BUILD_BUG_ON(!IS_ALIGNED(offsetof(struct tegra_ivc_header, rx.count),
+ TEGRA_IVC_ALIGN));
+ BUILD_BUG_ON(!IS_ALIGNED(sizeof(struct tegra_ivc_header),
+ TEGRA_IVC_ALIGN));
+
+ if ((uint64_t)num_frames * (uint64_t)frame_size >= 0x100000000UL) {
+ pr_err("num_frames * frame_size overflows\n");
+ return -EINVAL;
+ }
+
+ if (!IS_ALIGNED(frame_size, TEGRA_IVC_ALIGN)) {
+ pr_err("frame size not adequately aligned: %zu\n", frame_size);
+ return -EINVAL;
+ }
+
+ /*
+ * The headers must at least be aligned enough for counters
+ * to be accessed atomically.
+ */
+ if (!IS_ALIGNED(rx, TEGRA_IVC_ALIGN)) {
+ pr_err("IVC channel start not aligned: %#lx\n", rx);
+ return -EINVAL;
+ }
+
+ if (!IS_ALIGNED(tx, TEGRA_IVC_ALIGN)) {
+ pr_err("IVC channel start not aligned: %#lx\n", tx);
+ return -EINVAL;
+ }
+
+ if (rx < tx) {
+ if (rx + frame_size * num_frames > tx) {
+ pr_err("queue regions overlap: %#lx + %zx > %#lx\n",
+ rx, frame_size * num_frames, tx);
+ return -EINVAL;
+ }
+ } else {
+ if (tx + frame_size * num_frames > rx) {
+ pr_err("queue regions overlap: %#lx + %zx > %#lx\n",
+ tx, frame_size * num_frames, rx);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
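+
+/*
+ * A quick example of the overlap check above: with rx = 0x1000, tx =
+ * 0x2000 and four 128-byte frames, rx + 4 * 128 = 0x1200 <= 0x2000, so
+ * the regions are accepted; eight 1024-byte frames would give 0x1000 +
+ * 0x2000 = 0x3000 > 0x2000 and be rejected with -EINVAL.
+ */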
+
+int tegra_ivc_init(struct tegra_ivc *ivc, struct device *peer, void *rx,
+ dma_addr_t rx_phys, void *tx, dma_addr_t tx_phys,
+ unsigned int num_frames, size_t frame_size,
+ void (*notify)(struct tegra_ivc *ivc, void *data),
+ void *data)
+{
+ size_t queue_size;
+ int err;
+
+ if (WARN_ON(!ivc || !notify))
+ return -EINVAL;
+
+ /*
+ * All sizes that can be returned by communication functions should
+ * fit in an int.
+ */
+ if (frame_size > INT_MAX)
+ return -E2BIG;
+
+ err = tegra_ivc_check_params((unsigned long)rx, (unsigned long)tx,
+ num_frames, frame_size);
+ if (err < 0)
+ return err;
+
+ queue_size = tegra_ivc_total_queue_size(num_frames * frame_size);
+
+ if (peer) {
+ ivc->rx.phys = dma_map_single(peer, rx, queue_size,
+ DMA_BIDIRECTIONAL);
+ if (ivc->rx.phys == DMA_ERROR_CODE)
+ return -ENOMEM;
+
+ ivc->tx.phys = dma_map_single(peer, tx, queue_size,
+ DMA_BIDIRECTIONAL);
+ if (ivc->tx.phys == DMA_ERROR_CODE) {
+ dma_unmap_single(peer, ivc->rx.phys, queue_size,
+ DMA_BIDIRECTIONAL);
+ return -ENOMEM;
+ }
+ } else {
+ ivc->rx.phys = rx_phys;
+ ivc->tx.phys = tx_phys;
+ }
+
+ ivc->rx.channel = rx;
+ ivc->tx.channel = tx;
+ ivc->peer = peer;
+ ivc->notify = notify;
+ ivc->notify_data = data;
+ ivc->frame_size = frame_size;
+ ivc->num_frames = num_frames;
+
+ /*
+ * These values aren't necessarily correct until the channel has been
+ * reset.
+ */
+ ivc->tx.position = 0;
+ ivc->rx.position = 0;
+
+ return 0;
+}
+EXPORT_SYMBOL(tegra_ivc_init);
+
+void tegra_ivc_cleanup(struct tegra_ivc *ivc)
+{
+ if (ivc->peer) {
+ size_t size = tegra_ivc_total_queue_size(ivc->num_frames *
+ ivc->frame_size);
+
+ dma_unmap_single(ivc->peer, ivc->rx.phys, size,
+ DMA_BIDIRECTIONAL);
+ dma_unmap_single(ivc->peer, ivc->tx.phys, size,
+ DMA_BIDIRECTIONAL);
+ }
+}
+EXPORT_SYMBOL(tegra_ivc_cleanup);
diff --git a/drivers/firmware/ti_sci.c b/drivers/firmware/ti_sci.c
new file mode 100644
index 000000000000..874ff32db366
--- /dev/null
+++ b/drivers/firmware/ti_sci.c
@@ -0,0 +1,1991 @@
+/*
+ * Texas Instruments System Control Interface Protocol Driver
+ *
+ * Copyright (C) 2015-2016 Texas Instruments Incorporated - http://www.ti.com/
+ * Nishanth Menon
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/bitmap.h>
+#include <linux/debugfs.h>
+#include <linux/export.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/mailbox_client.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/semaphore.h>
+#include <linux/slab.h>
+#include <linux/soc/ti/ti-msgmgr.h>
+#include <linux/soc/ti/ti_sci_protocol.h>
+#include <linux/reboot.h>
+
+#include "ti_sci.h"
+
+/* List of all TI SCI devices active in system */
+static LIST_HEAD(ti_sci_list);
+/* Protection for the entire list */
+static DEFINE_MUTEX(ti_sci_list_mutex);
+
+/**
+ * struct ti_sci_xfer - Structure representing a message flow
+ * @tx_message: Transmit message
+ * @rx_len: Receive message length
+ * @xfer_buf: Preallocated buffer to store receive message
+ * Since we work with request-ACK protocol, we can
+ * reuse the same buffer for the rx path as we
+ * use for the tx path.
+ * @done: completion event
+ */
+struct ti_sci_xfer {
+ struct ti_msgmgr_message tx_message;
+ u8 rx_len;
+ u8 *xfer_buf;
+ struct completion done;
+};
+
+/**
+ * struct ti_sci_xfers_info - Structure to manage transfer information
+ * @sem_xfer_count:	Counting semaphore for managing the maximum number
+ *			of simultaneous messages.
+ * @xfer_block: Preallocated Message array
+ * @xfer_alloc_table: Bitmap table for allocated messages.
+ * Index of this bitmap table is also used for message
+ * sequence identifier.
+ * @xfer_lock: Protection for message allocation
+ */
+struct ti_sci_xfers_info {
+ struct semaphore sem_xfer_count;
+ struct ti_sci_xfer *xfer_block;
+ unsigned long *xfer_alloc_table;
+ /* protect transfer allocation */
+ spinlock_t xfer_lock;
+};
+
+/**
+ * struct ti_sci_desc - Description of SoC integration
+ * @host_id: Host identifier representing the compute entity
+ * @max_rx_timeout_ms: Timeout for communication with SoC (in Milliseconds)
+ * @max_msgs: Maximum number of messages that can be pending
+ * simultaneously in the system
+ * @max_msg_size: Maximum size of data per message that can be handled.
+ */
+struct ti_sci_desc {
+ u8 host_id;
+ int max_rx_timeout_ms;
+ int max_msgs;
+ int max_msg_size;
+};
+
+/**
+ * struct ti_sci_info - Structure representing a TI SCI instance
+ * @dev: Device pointer
+ * @desc: SoC description for this instance
+ * @nb: Reboot Notifier block
+ * @d: Debugfs file entry
+ * @debug_region: Memory region where the debug message are available
+ * @debug_region_size: Debug region size
+ * @debug_buffer: Buffer allocated to copy debug messages.
+ * @handle: Instance of TI SCI handle to send to clients.
+ * @cl: Mailbox Client
+ * @chan_tx: Transmit mailbox channel
+ * @chan_rx: Receive mailbox channel
+ * @minfo: Message info
+ * @node: list head
+ * @users: Number of users of this instance
+ */
+struct ti_sci_info {
+ struct device *dev;
+ struct notifier_block nb;
+ const struct ti_sci_desc *desc;
+ struct dentry *d;
+ void __iomem *debug_region;
+ char *debug_buffer;
+ size_t debug_region_size;
+ struct ti_sci_handle handle;
+ struct mbox_client cl;
+ struct mbox_chan *chan_tx;
+ struct mbox_chan *chan_rx;
+ struct ti_sci_xfers_info minfo;
+ struct list_head node;
+ /* protected by ti_sci_list_mutex */
+ int users;
+
+};
+
+#define cl_to_ti_sci_info(c) container_of(c, struct ti_sci_info, cl)
+#define handle_to_ti_sci_info(h) container_of(h, struct ti_sci_info, handle)
+#define reboot_to_ti_sci_info(n) container_of(n, struct ti_sci_info, nb)
+
+#ifdef CONFIG_DEBUG_FS
+
+/**
+ * ti_sci_debug_show() - Helper to dump the debug log
+ * @s: sequence file pointer
+ * @unused: unused.
+ *
+ * Return: 0
+ */
+static int ti_sci_debug_show(struct seq_file *s, void *unused)
+{
+ struct ti_sci_info *info = s->private;
+
+ memcpy_fromio(info->debug_buffer, info->debug_region,
+ info->debug_region_size);
+ /*
+ * We don't trust the firmware to NUL-terminate the last byte (hence
+ * we have allocated 1 extra 0 byte). Since we cannot guarantee any
+ * specific data format for debug messages, we just present the data
+ * in the buffer as is - we expect the messages to be self-explanatory.
+ */
+ seq_puts(s, info->debug_buffer);
+ return 0;
+}
+
+/**
+ * ti_sci_debug_open() - debug file open
+ * @inode: inode pointer
+ * @file: file pointer
+ *
+ * Return: result of single_open
+ */
+static int ti_sci_debug_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, ti_sci_debug_show, inode->i_private);
+}
+
+/* log file operations */
+static const struct file_operations ti_sci_debug_fops = {
+ .open = ti_sci_debug_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+/**
+ * ti_sci_debugfs_create() - Create log debug file
+ * @pdev: platform device pointer
+ * @info: Pointer to SCI entity information
+ *
+ * Return: 0 if all went fine, else corresponding error.
+ */
+static int ti_sci_debugfs_create(struct platform_device *pdev,
+ struct ti_sci_info *info)
+{
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+ char debug_name[50] = "ti_sci_debug@";
+
+ /* Debug region is optional */
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "debug_messages");
+ info->debug_region = devm_ioremap_resource(dev, res);
+ if (IS_ERR(info->debug_region))
+ return 0;
+ info->debug_region_size = resource_size(res);
+
+ info->debug_buffer = devm_kcalloc(dev, info->debug_region_size + 1,
+ sizeof(char), GFP_KERNEL);
+ if (!info->debug_buffer)
+ return -ENOMEM;
+ /* Setup NULL termination */
+ info->debug_buffer[info->debug_region_size] = 0;
+
+ /* strncat() must be bounded by the space remaining, not the total size */
+ strncat(debug_name, dev_name(dev),
+ sizeof(debug_name) - sizeof("ti_sci_debug@"));
+ info->d = debugfs_create_file(debug_name, 0444, NULL, info,
+ &ti_sci_debug_fops);
+ if (IS_ERR(info->d))
+ return PTR_ERR(info->d);
+
+ dev_dbg(dev, "Debug region => %p, size = %zu bytes, resource: %pr\n",
+ info->debug_region, info->debug_region_size, res);
+ return 0;
+}
+
+/**
+ * ti_sci_debugfs_destroy() - clean up log debug file
+ * @pdev: platform device pointer
+ * @info: Pointer to SCI entity information
+ */
+static void ti_sci_debugfs_destroy(struct platform_device *pdev,
+ struct ti_sci_info *info)
+{
+ if (IS_ERR(info->debug_region))
+ return;
+
+ debugfs_remove(info->d);
+}
+#else /* CONFIG_DEBUG_FS */
+static inline int ti_sci_debugfs_create(struct platform_device *dev,
+ struct ti_sci_info *info)
+{
+ return 0;
+}
+
+static inline void ti_sci_debugfs_destroy(struct platform_device *dev,
+ struct ti_sci_info *info)
+{
+}
+#endif /* CONFIG_DEBUG_FS */
+
+/**
+ * ti_sci_dump_header_dbg() - Helper to dump a message header.
+ * @dev: Device pointer corresponding to the SCI entity
+ * @hdr: pointer to header.
+ */
+static inline void ti_sci_dump_header_dbg(struct device *dev,
+ struct ti_sci_msg_hdr *hdr)
+{
+ dev_dbg(dev, "MSGHDR:type=0x%04x host=0x%02x seq=0x%02x flags=0x%08x\n",
+ hdr->type, hdr->host, hdr->seq, hdr->flags);
+}
+
+/**
+ * ti_sci_rx_callback() - mailbox client callback for receive messages
+ * @cl: client pointer
+ * @m: mailbox message
+ *
+ * Processes one received message to appropriate transfer information and
+ * signals completion of the transfer.
+ *
+ * NOTE: This function will be invoked in IRQ context, hence should be
+ * as optimal as possible.
+ */
+static void ti_sci_rx_callback(struct mbox_client *cl, void *m)
+{
+ struct ti_sci_info *info = cl_to_ti_sci_info(cl);
+ struct device *dev = info->dev;
+ struct ti_sci_xfers_info *minfo = &info->minfo;
+ struct ti_msgmgr_message *mbox_msg = m;
+ struct ti_sci_msg_hdr *hdr = (struct ti_sci_msg_hdr *)mbox_msg->buf;
+ struct ti_sci_xfer *xfer;
+ u8 xfer_id;
+
+ xfer_id = hdr->seq;
+
+ /*
+ * Are we even expecting this?
+ * NOTE: barriers were implicit in locks used for modifying the bitmap
+ */
+ if (!test_bit(xfer_id, minfo->xfer_alloc_table)) {
+ dev_err(dev, "Message for %d is not expected!\n", xfer_id);
+ return;
+ }
+
+ xfer = &minfo->xfer_block[xfer_id];
+
+ /* Is the message of valid length? */
+ if (mbox_msg->len > info->desc->max_msg_size) {
+ dev_err(dev, "Unable to handle %d xfer(max %d)\n",
+ mbox_msg->len, info->desc->max_msg_size);
+ ti_sci_dump_header_dbg(dev, hdr);
+ return;
+ }
+ if (mbox_msg->len < xfer->rx_len) {
+ dev_err(dev, "Recv xfer %d < expected %d length\n",
+ mbox_msg->len, xfer->rx_len);
+ ti_sci_dump_header_dbg(dev, hdr);
+ return;
+ }
+
+ ti_sci_dump_header_dbg(dev, hdr);
+ /* Take a copy to the rx buffer.. */
+ memcpy(xfer->xfer_buf, mbox_msg->buf, xfer->rx_len);
+ complete(&xfer->done);
+}
+
+/**
+ * ti_sci_get_one_xfer() - Allocate one message
+ * @info: Pointer to SCI entity information
+ * @msg_type: Message type
+ * @msg_flags: Flag to set for the message
+ * @tx_message_size: transmit message size
+ * @rx_message_size: receive message size
+ *
+ * Helper function which is used by various command functions that are
+ * exposed to clients of this driver for allocating a message traffic event.
+ *
+ * This function can sleep depending on pending requests already in the system
+ * for the SCI entity. Further, this also holds a spinlock to maintain integrity
+ * of internal data structures.
+ *
+ * Return: Valid xfer pointer if all went fine, else corresponding ERR_PTR().
+ */
+static struct ti_sci_xfer *ti_sci_get_one_xfer(struct ti_sci_info *info,
+ u16 msg_type, u32 msg_flags,
+ size_t tx_message_size,
+ size_t rx_message_size)
+{
+ struct ti_sci_xfers_info *minfo = &info->minfo;
+ struct ti_sci_xfer *xfer;
+ struct ti_sci_msg_hdr *hdr;
+ unsigned long flags;
+ unsigned long bit_pos;
+ u8 xfer_id;
+ int ret;
+ int timeout;
+
+ /* Ensure we have sane transfer sizes */
+ if (rx_message_size > info->desc->max_msg_size ||
+ tx_message_size > info->desc->max_msg_size ||
+ rx_message_size < sizeof(*hdr) || tx_message_size < sizeof(*hdr))
+ return ERR_PTR(-ERANGE);
+
+ /*
+ * Ensure we have only controlled number of pending messages.
+ * Ideally, we might just have to wait for a single message, but be
+ * conservative and wait 5 times that.
+ */
+ timeout = msecs_to_jiffies(info->desc->max_rx_timeout_ms) * 5;
+ ret = down_timeout(&minfo->sem_xfer_count, timeout);
+ if (ret < 0)
+ return ERR_PTR(ret);
+
+ /* Keep the locked section as small as possible */
+ spin_lock_irqsave(&minfo->xfer_lock, flags);
+ bit_pos = find_first_zero_bit(minfo->xfer_alloc_table,
+ info->desc->max_msgs);
+ set_bit(bit_pos, minfo->xfer_alloc_table);
+ spin_unlock_irqrestore(&minfo->xfer_lock, flags);
+
+ /*
+ * We already ensured in probe that we can have max messages that can
+ * fit in hdr.seq - NOTE: this improves access latencies
+ * to predictable O(1) access, BUT, it opens us to risk if
+ * remote misbehaves with corrupted message sequence responses.
+ * If that happens, we are going to be messed up anyways..
+ */
+ xfer_id = (u8)bit_pos;
+
+ xfer = &minfo->xfer_block[xfer_id];
+
+ hdr = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
+ xfer->tx_message.len = tx_message_size;
+ xfer->rx_len = (u8)rx_message_size;
+
+ reinit_completion(&xfer->done);
+
+ hdr->seq = xfer_id;
+ hdr->type = msg_type;
+ hdr->host = info->desc->host_id;
+ hdr->flags = msg_flags;
+
+ return xfer;
+}
+
+/**
+ * ti_sci_put_one_xfer() - Release a message
+ * @minfo: transfer info pointer
+ * @xfer: message that was reserved by ti_sci_get_one_xfer
+ *
+ * This holds a spinlock to maintain integrity of internal data structures.
+ */
+static void ti_sci_put_one_xfer(struct ti_sci_xfers_info *minfo,
+ struct ti_sci_xfer *xfer)
+{
+ unsigned long flags;
+ struct ti_sci_msg_hdr *hdr;
+ u8 xfer_id;
+
+ hdr = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
+ xfer_id = hdr->seq;
+
+ /*
+ * Keep the locked section as small as possible
+ * NOTE: we might escape with smp_mb and no lock here..
+ * but just be conservative and symmetric.
+ */
+ spin_lock_irqsave(&minfo->xfer_lock, flags);
+ clear_bit(xfer_id, minfo->xfer_alloc_table);
+ spin_unlock_irqrestore(&minfo->xfer_lock, flags);
+
+ /* Increment the count for the next user to get through */
+ up(&minfo->sem_xfer_count);
+}
+
+/**
+ * ti_sci_do_xfer() - Do one transfer
+ * @info: Pointer to SCI entity information
+ * @xfer: Transfer to initiate and wait for response
+ *
+ * Return: -ETIMEDOUT in case of no response, if transmit error,
+ * return corresponding error, else if all goes well,
+ * return 0.
+ */
+static inline int ti_sci_do_xfer(struct ti_sci_info *info,
+ struct ti_sci_xfer *xfer)
+{
+ int ret;
+ int timeout;
+ struct device *dev = info->dev;
+
+ ret = mbox_send_message(info->chan_tx, &xfer->tx_message);
+ if (ret < 0)
+ return ret;
+
+ ret = 0;
+
+ /* And we wait for the response. */
+ timeout = msecs_to_jiffies(info->desc->max_rx_timeout_ms);
+ if (!wait_for_completion_timeout(&xfer->done, timeout)) {
+ dev_err(dev, "Mbox timedout in resp(caller: %pF)\n",
+ (void *)_RET_IP_);
+ ret = -ETIMEDOUT;
+ }
+ /*
+ * NOTE: we might prefer not to need the mailbox ticker to manage the
+ * transfer queueing since the protocol layer queues things by itself.
+ * Unfortunately, we have to kick the mailbox framework after we have
+ * received our message.
+ */
+ mbox_client_txdone(info->chan_tx, ret);
+
+ return ret;
+}
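+
+/*
+ * All of the command helpers below follow the same pattern, sketched here
+ * for orientation (ti_sci_cmd_get_revision() is the first concrete
+ * instance; "some_req"/"some_resp" stand in for the per-command message
+ * structures from ti_sci.h):
+ *
+ *	xfer = ti_sci_get_one_xfer(info, type, flags, tx_size, rx_size);
+ *	req = (struct some_req *)xfer->xfer_buf;	fill the request
+ *	ret = ti_sci_do_xfer(info, xfer);		send, wait for reply
+ *	resp = (struct some_resp *)xfer->xfer_buf;	parse the response
+ *	ti_sci_put_one_xfer(&info->minfo, xfer);	release the slot
+ */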
+
+/**
+ * ti_sci_cmd_get_revision() - command to get the revision of the SCI entity
+ * @info: Pointer to SCI entity information
+ *
+ * Updates the SCI information in the internal data structure.
+ *
+ * Return: 0 if all went fine, else return appropriate error.
+ */
+static int ti_sci_cmd_get_revision(struct ti_sci_info *info)
+{
+ struct device *dev = info->dev;
+ struct ti_sci_handle *handle = &info->handle;
+ struct ti_sci_version_info *ver = &handle->version;
+ struct ti_sci_msg_resp_version *rev_info;
+ struct ti_sci_xfer *xfer;
+ int ret;
+
+ /* No need to setup flags since it is expected to respond */
+ xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_VERSION,
+ 0x0, sizeof(struct ti_sci_msg_hdr),
+ sizeof(*rev_info));
+ if (IS_ERR(xfer)) {
+ ret = PTR_ERR(xfer);
+ dev_err(dev, "Message alloc failed(%d)\n", ret);
+ return ret;
+ }
+
+ rev_info = (struct ti_sci_msg_resp_version *)xfer->xfer_buf;
+
+ ret = ti_sci_do_xfer(info, xfer);
+ if (ret) {
+ dev_err(dev, "Mbox send fail %d\n", ret);
+ goto fail;
+ }
+
+ ver->abi_major = rev_info->abi_major;
+ ver->abi_minor = rev_info->abi_minor;
+ ver->firmware_revision = rev_info->firmware_revision;
+ strncpy(ver->firmware_description, rev_info->firmware_description,
+ sizeof(ver->firmware_description));
+
+fail:
+ ti_sci_put_one_xfer(&info->minfo, xfer);
+ return ret;
+}
+
+/**
+ * ti_sci_is_response_ack() - Generic ACK/NACK message checkup
+ * @r: pointer to response buffer
+ *
+ * Return: true if the response was an ACK, else returns false.
+ */
+static inline bool ti_sci_is_response_ack(void *r)
+{
+ struct ti_sci_msg_hdr *hdr = r;
+
+ return hdr->flags & TI_SCI_FLAG_RESP_GENERIC_ACK ? true : false;
+}
+
+/**
+ * ti_sci_set_device_state() - Set device state helper
+ * @handle: pointer to TI SCI handle
+ * @id: Device identifier
+ * @flags: flags to setup for the device
+ * @state: State to move the device to
+ *
+ * Return: 0 if all went well, else returns appropriate error value.
+ */
+static int ti_sci_set_device_state(const struct ti_sci_handle *handle,
+ u32 id, u32 flags, u8 state)
+{
+ struct ti_sci_info *info;
+ struct ti_sci_msg_req_set_device_state *req;
+ struct ti_sci_msg_hdr *resp;
+ struct ti_sci_xfer *xfer;
+ struct device *dev;
+ int ret = 0;
+
+ if (IS_ERR(handle))
+ return PTR_ERR(handle);
+ if (!handle)
+ return -EINVAL;
+
+ info = handle_to_ti_sci_info(handle);
+ dev = info->dev;
+
+ xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SET_DEVICE_STATE,
+ flags | TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
+ sizeof(*req), sizeof(*resp));
+ if (IS_ERR(xfer)) {
+ ret = PTR_ERR(xfer);
+ dev_err(dev, "Message alloc failed(%d)\n", ret);
+ return ret;
+ }
+ req = (struct ti_sci_msg_req_set_device_state *)xfer->xfer_buf;
+ req->id = id;
+ req->state = state;
+
+ ret = ti_sci_do_xfer(info, xfer);
+ if (ret) {
+ dev_err(dev, "Mbox send fail %d\n", ret);
+ goto fail;
+ }
+
+ resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
+
+ ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;
+
+fail:
+ ti_sci_put_one_xfer(&info->minfo, xfer);
+
+ return ret;
+}
+
+/**
+ * ti_sci_get_device_state() - Get device state helper
+ * @handle: Handle to the device
+ * @id: Device Identifier
+ * @clcnt: Pointer to Context Loss Count
+ * @resets: pointer to resets
+ * @p_state: pointer to p_state
+ * @c_state: pointer to c_state
+ *
+ * Return: 0 if all went fine, else return appropriate error.
+ */
+static int ti_sci_get_device_state(const struct ti_sci_handle *handle,
+ u32 id, u32 *clcnt, u32 *resets,
+ u8 *p_state, u8 *c_state)
+{
+ struct ti_sci_info *info;
+ struct ti_sci_msg_req_get_device_state *req;
+ struct ti_sci_msg_resp_get_device_state *resp;
+ struct ti_sci_xfer *xfer;
+ struct device *dev;
+ int ret = 0;
+
+ if (IS_ERR(handle))
+ return PTR_ERR(handle);
+ if (!handle)
+ return -EINVAL;
+
+ if (!clcnt && !resets && !p_state && !c_state)
+ return -EINVAL;
+
+ info = handle_to_ti_sci_info(handle);
+ dev = info->dev;
+
+ /* Response is expected, so no need to set any flags */
+ xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_GET_DEVICE_STATE,
+ 0, sizeof(*req), sizeof(*resp));
+ if (IS_ERR(xfer)) {
+ ret = PTR_ERR(xfer);
+ dev_err(dev, "Message alloc failed(%d)\n", ret);
+ return ret;
+ }
+ req = (struct ti_sci_msg_req_get_device_state *)xfer->xfer_buf;
+ req->id = id;
+
+ ret = ti_sci_do_xfer(info, xfer);
+ if (ret) {
+ dev_err(dev, "Mbox send fail %d\n", ret);
+ goto fail;
+ }
+
+ resp = (struct ti_sci_msg_resp_get_device_state *)xfer->xfer_buf;
+ if (!ti_sci_is_response_ack(resp)) {
+ ret = -ENODEV;
+ goto fail;
+ }
+
+ if (clcnt)
+ *clcnt = resp->context_loss_count;
+ if (resets)
+ *resets = resp->resets;
+ if (p_state)
+ *p_state = resp->programmed_state;
+ if (c_state)
+ *c_state = resp->current_state;
+fail:
+ ti_sci_put_one_xfer(&info->minfo, xfer);
+
+ return ret;
+}
+
+/**
+ * ti_sci_cmd_get_device() - command to request a device managed by TISCI
+ * @handle: Pointer to TISCI handle as retrieved by *ti_sci_get_handle
+ * @id: Device Identifier
+ *
+ * Request for the device - NOTE: the client MUST maintain integrity of
+ * usage count by balancing get_device with put_device. No refcounting is
+ * managed by the driver for that purpose.
+ *
+ * NOTE: The request is for exclusive access for the processor.
+ *
+ * Return: 0 if all went fine, else return appropriate error.
+ */
+static int ti_sci_cmd_get_device(const struct ti_sci_handle *handle, u32 id)
+{
+ return ti_sci_set_device_state(handle, id,
+ MSG_FLAG_DEVICE_EXCLUSIVE,
+ MSG_DEVICE_SW_STATE_ON);
+}
+
+/**
+ * ti_sci_cmd_idle_device() - Command to idle a device managed by TISCI
+ * @handle: Pointer to TISCI handle as retrieved by *ti_sci_get_handle
+ * @id: Device Identifier
+ *
+ * Request for the device - NOTE: the client MUST maintain integrity of
+ * usage count by balancing get_device with put_device. No refcounting is
+ * managed by the driver for that purpose.
+ *
+ * Return: 0 if all went fine, else return appropriate error.
+ */
+static int ti_sci_cmd_idle_device(const struct ti_sci_handle *handle, u32 id)
+{
+ return ti_sci_set_device_state(handle, id,
+ MSG_FLAG_DEVICE_EXCLUSIVE,
+ MSG_DEVICE_SW_STATE_RETENTION);
+}
+
+/**
+ * ti_sci_cmd_put_device() - command to release a device managed by TISCI
+ * @handle: Pointer to TISCI handle as retrieved by *ti_sci_get_handle
+ * @id: Device Identifier
+ *
+ * Request to release the device - NOTE: the client MUST maintain integrity
+ * of usage count by balancing get_device with put_device. No refcounting is
+ * managed by the driver for that purpose.
+ *
+ * Return: 0 if all went fine, else return appropriate error.
+ */
+static int ti_sci_cmd_put_device(const struct ti_sci_handle *handle, u32 id)
+{
+ return ti_sci_set_device_state(handle, id,
+ 0, MSG_DEVICE_SW_STATE_AUTO_OFF);
+}
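+
+/*
+ * Illustrative sketch (not part of the driver): a client holding a TISCI
+ * handle is expected to balance the two calls above through the ops table
+ * wired up in ti_sci_setup_ops() below; "dev_id" is a hypothetical device
+ * identifier:
+ *
+ *	ret = handle->ops.dev_ops.get_device(handle, dev_id);
+ *	if (!ret) {
+ *		...use the device...
+ *		handle->ops.dev_ops.put_device(handle, dev_id);
+ *	}
+ */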
+
+/**
+ * ti_sci_cmd_dev_is_valid() - Is the device valid
+ * @handle: Pointer to TISCI handle as retrieved by *ti_sci_get_handle
+ * @id: Device Identifier
+ *
+ * Return: 0 if all went fine and the device ID is valid, else return
+ * appropriate error.
+ */
+static int ti_sci_cmd_dev_is_valid(const struct ti_sci_handle *handle, u32 id)
+{
+ u8 unused;
+
+ /* check the device state which will also tell us if the ID is valid */
+ return ti_sci_get_device_state(handle, id, NULL, NULL, NULL, &unused);
+}
+
+/**
+ * ti_sci_cmd_dev_get_clcnt() - Get context loss counter
+ * @handle: Pointer to TISCI handle
+ * @id: Device Identifier
+ * @count: Pointer to Context Loss counter to populate
+ *
+ * Return: 0 if all went fine, else return appropriate error.
+ */
+static int ti_sci_cmd_dev_get_clcnt(const struct ti_sci_handle *handle, u32 id,
+ u32 *count)
+{
+ return ti_sci_get_device_state(handle, id, count, NULL, NULL, NULL);
+}
+
+/**
+ * ti_sci_cmd_dev_is_idle() - Check if the device is requested to be idle
+ * @handle: Pointer to TISCI handle
+ * @id: Device Identifier
+ * @r_state: true if requested to be idle
+ *
+ * Return: 0 if all went fine, else return appropriate error.
+ */
+static int ti_sci_cmd_dev_is_idle(const struct ti_sci_handle *handle, u32 id,
+ bool *r_state)
+{
+ int ret;
+ u8 state;
+
+ if (!r_state)
+ return -EINVAL;
+
+ ret = ti_sci_get_device_state(handle, id, NULL, NULL, &state, NULL);
+ if (ret)
+ return ret;
+
+ *r_state = (state == MSG_DEVICE_SW_STATE_RETENTION);
+
+ return 0;
+}
+
+/**
+ * ti_sci_cmd_dev_is_stop() - Check if the device is requested to be stopped
+ * @handle: Pointer to TISCI handle
+ * @id: Device Identifier
+ * @r_state: true if requested to be stopped
+ * @curr_state: true if currently stopped.
+ *
+ * Return: 0 if all went fine, else return appropriate error.
+ */
+static int ti_sci_cmd_dev_is_stop(const struct ti_sci_handle *handle, u32 id,
+ bool *r_state, bool *curr_state)
+{
+ int ret;
+ u8 p_state, c_state;
+
+ if (!r_state && !curr_state)
+ return -EINVAL;
+
+ ret = ti_sci_get_device_state(handle, id, NULL, NULL,
+ &p_state, &c_state);
+ if (ret)
+ return ret;
+
+ if (r_state)
+ *r_state = (p_state == MSG_DEVICE_SW_STATE_AUTO_OFF);
+ if (curr_state)
+ *curr_state = (c_state == MSG_DEVICE_HW_STATE_OFF);
+
+ return 0;
+}
+
+/**
+ * ti_sci_cmd_dev_is_on() - Check if the device is requested to be ON
+ * @handle: Pointer to TISCI handle
+ * @id: Device Identifier
+ * @r_state: true if requested to be ON
+ * @curr_state: true if currently ON and active
+ *
+ * Return: 0 if all went fine, else return appropriate error.
+ */
+static int ti_sci_cmd_dev_is_on(const struct ti_sci_handle *handle, u32 id,
+ bool *r_state, bool *curr_state)
+{
+ int ret;
+ u8 p_state, c_state;
+
+ if (!r_state && !curr_state)
+ return -EINVAL;
+
+ ret = ti_sci_get_device_state(handle, id, NULL, NULL,
+ &p_state, &c_state);
+ if (ret)
+ return ret;
+
+ if (r_state)
+ *r_state = (p_state == MSG_DEVICE_SW_STATE_ON);
+ if (curr_state)
+ *curr_state = (c_state == MSG_DEVICE_HW_STATE_ON);
+
+ return 0;
+}
+
+/**
+ * ti_sci_cmd_dev_is_trans() - Check if the device is currently transitioning
+ * @handle: Pointer to TISCI handle
+ * @id: Device Identifier
+ * @curr_state: true if currently transitioning.
+ *
+ * Return: 0 if all went fine, else return appropriate error.
+ */
+static int ti_sci_cmd_dev_is_trans(const struct ti_sci_handle *handle, u32 id,
+ bool *curr_state)
+{
+ int ret;
+ u8 state;
+
+ if (!curr_state)
+ return -EINVAL;
+
+ ret = ti_sci_get_device_state(handle, id, NULL, NULL, NULL, &state);
+ if (ret)
+ return ret;
+
+ *curr_state = (state == MSG_DEVICE_HW_STATE_TRANS);
+
+ return 0;
+}
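+
+/*
+ * Illustrative sketch: the is_* queries above let a caller poll for a
+ * stable state. A hypothetical (unbounded, so not production-ready) wait
+ * for a transition to complete could look like:
+ *
+ *	bool trans = true;
+ *
+ *	while (trans)
+ *		if (handle->ops.dev_ops.is_transitioning(handle, id, &trans))
+ *			break;
+ */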
+
+/**
+ * ti_sci_cmd_set_device_resets() - command to set resets for device managed
+ * by TISCI
+ * @handle: Pointer to TISCI handle as retrieved by *ti_sci_get_handle
+ * @id: Device Identifier
+ * @reset_state: Device specific reset bit field
+ *
+ * Return: 0 if all went fine, else return appropriate error.
+ */
+static int ti_sci_cmd_set_device_resets(const struct ti_sci_handle *handle,
+ u32 id, u32 reset_state)
+{
+ struct ti_sci_info *info;
+ struct ti_sci_msg_req_set_device_resets *req;
+ struct ti_sci_msg_hdr *resp;
+ struct ti_sci_xfer *xfer;
+ struct device *dev;
+ int ret = 0;
+
+ if (IS_ERR(handle))
+ return PTR_ERR(handle);
+ if (!handle)
+ return -EINVAL;
+
+ info = handle_to_ti_sci_info(handle);
+ dev = info->dev;
+
+ xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SET_DEVICE_RESETS,
+ TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
+ sizeof(*req), sizeof(*resp));
+ if (IS_ERR(xfer)) {
+ ret = PTR_ERR(xfer);
+ dev_err(dev, "Message alloc failed(%d)\n", ret);
+ return ret;
+ }
+ req = (struct ti_sci_msg_req_set_device_resets *)xfer->xfer_buf;
+ req->id = id;
+ req->resets = reset_state;
+
+ ret = ti_sci_do_xfer(info, xfer);
+ if (ret) {
+ dev_err(dev, "Mbox send fail %d\n", ret);
+ goto fail;
+ }
+
+ resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
+
+ ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;
+
+fail:
+ ti_sci_put_one_xfer(&info->minfo, xfer);
+
+ return ret;
+}
+
+/**
+ * ti_sci_cmd_get_device_resets() - Get reset state for device managed
+ * by TISCI
+ * @handle: Pointer to TISCI handle
+ * @id: Device Identifier
+ * @reset_state: Pointer to reset state to populate
+ *
+ * Return: 0 if all went fine, else return appropriate error.
+ */
+static int ti_sci_cmd_get_device_resets(const struct ti_sci_handle *handle,
+ u32 id, u32 *reset_state)
+{
+ return ti_sci_get_device_state(handle, id, NULL, reset_state, NULL,
+ NULL);
+}
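+
+/*
+ * Illustrative sketch: a client could assert a device-specific reset line
+ * and read the programmed value back via the ops wired up below; the bit
+ * meaning is device specific, so BIT(0) here is purely an assumption:
+ *
+ *	u32 resets;
+ *
+ *	ret = handle->ops.dev_ops.set_device_resets(handle, dev_id, BIT(0));
+ *	if (!ret)
+ *		ret = handle->ops.dev_ops.get_device_resets(handle, dev_id,
+ *							    &resets);
+ */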
+
+/**
+ * ti_sci_set_clock_state() - Set clock state helper
+ * @handle: pointer to TI SCI handle
+ * @dev_id: Device identifier this request is for
+ * @clk_id: Clock identifier for the device for this request.
+ * Each device has its own set of clock inputs. This indexes
+ * which clock input to modify.
+ * @flags: Header flags as needed
+ * @state: State to request for the clock.
+ *
+ * Return: 0 if all went well, else returns appropriate error value.
+ */
+static int ti_sci_set_clock_state(const struct ti_sci_handle *handle,
+ u32 dev_id, u8 clk_id,
+ u32 flags, u8 state)
+{
+ struct ti_sci_info *info;
+ struct ti_sci_msg_req_set_clock_state *req;
+ struct ti_sci_msg_hdr *resp;
+ struct ti_sci_xfer *xfer;
+ struct device *dev;
+ int ret = 0;
+
+ if (IS_ERR(handle))
+ return PTR_ERR(handle);
+ if (!handle)
+ return -EINVAL;
+
+ info = handle_to_ti_sci_info(handle);
+ dev = info->dev;
+
+ xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SET_CLOCK_STATE,
+ flags | TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
+ sizeof(*req), sizeof(*resp));
+ if (IS_ERR(xfer)) {
+ ret = PTR_ERR(xfer);
+ dev_err(dev, "Message alloc failed(%d)\n", ret);
+ return ret;
+ }
+ req = (struct ti_sci_msg_req_set_clock_state *)xfer->xfer_buf;
+ req->dev_id = dev_id;
+ req->clk_id = clk_id;
+ req->request_state = state;
+
+ ret = ti_sci_do_xfer(info, xfer);
+ if (ret) {
+ dev_err(dev, "Mbox send fail %d\n", ret);
+ goto fail;
+ }
+
+ resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
+
+ ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;
+
+fail:
+ ti_sci_put_one_xfer(&info->minfo, xfer);
+
+ return ret;
+}
+
+/**
+ * ti_sci_cmd_get_clock_state() - Get clock state helper
+ * @handle: pointer to TI SCI handle
+ * @dev_id: Device identifier this request is for
+ * @clk_id: Clock identifier for the device for this request.
+ * Each device has its own set of clock inputs. This indexes
+ * which clock input to modify.
+ * @programmed_state: State requested for clock to move to
+ * @current_state: State that the clock is currently in
+ *
+ * Return: 0 if all went well, else returns appropriate error value.
+ */
+static int ti_sci_cmd_get_clock_state(const struct ti_sci_handle *handle,
+ u32 dev_id, u8 clk_id,
+ u8 *programmed_state, u8 *current_state)
+{
+ struct ti_sci_info *info;
+ struct ti_sci_msg_req_get_clock_state *req;
+ struct ti_sci_msg_resp_get_clock_state *resp;
+ struct ti_sci_xfer *xfer;
+ struct device *dev;
+ int ret = 0;
+
+ if (IS_ERR(handle))
+ return PTR_ERR(handle);
+ if (!handle)
+ return -EINVAL;
+
+ if (!programmed_state && !current_state)
+ return -EINVAL;
+
+ info = handle_to_ti_sci_info(handle);
+ dev = info->dev;
+
+ xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_GET_CLOCK_STATE,
+ TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
+ sizeof(*req), sizeof(*resp));
+ if (IS_ERR(xfer)) {
+ ret = PTR_ERR(xfer);
+ dev_err(dev, "Message alloc failed(%d)\n", ret);
+ return ret;
+ }
+ req = (struct ti_sci_msg_req_get_clock_state *)xfer->xfer_buf;
+ req->dev_id = dev_id;
+ req->clk_id = clk_id;
+
+ ret = ti_sci_do_xfer(info, xfer);
+ if (ret) {
+ dev_err(dev, "Mbox send fail %d\n", ret);
+ goto fail;
+ }
+
+ resp = (struct ti_sci_msg_resp_get_clock_state *)xfer->xfer_buf;
+
+ if (!ti_sci_is_response_ack(resp)) {
+ ret = -ENODEV;
+ goto fail;
+ }
+
+ if (programmed_state)
+ *programmed_state = resp->programmed_state;
+ if (current_state)
+ *current_state = resp->current_state;
+
+fail:
+ ti_sci_put_one_xfer(&info->minfo, xfer);
+
+ return ret;
+}
+
+/**
+ * ti_sci_cmd_get_clock() - Get control of a clock from TI SCI
+ * @handle: pointer to TI SCI handle
+ * @dev_id: Device identifier this request is for
+ * @clk_id: Clock identifier for the device for this request.
+ * Each device has its own set of clock inputs. This indexes
+ * which clock input to modify.
+ * @needs_ssc: 'true' if Spread Spectrum clock is desired, else 'false'
+ * @can_change_freq: 'true' if frequency change is desired, else 'false'
+ * @enable_input_term: 'true' if input termination is desired, else 'false'
+ *
+ * Return: 0 if all went well, else returns appropriate error value.
+ */
+static int ti_sci_cmd_get_clock(const struct ti_sci_handle *handle, u32 dev_id,
+ u8 clk_id, bool needs_ssc, bool can_change_freq,
+ bool enable_input_term)
+{
+ u32 flags = 0;
+
+ flags |= needs_ssc ? MSG_FLAG_CLOCK_ALLOW_SSC : 0;
+ flags |= can_change_freq ? MSG_FLAG_CLOCK_ALLOW_FREQ_CHANGE : 0;
+ flags |= enable_input_term ? MSG_FLAG_CLOCK_INPUT_TERM : 0;
+
+ return ti_sci_set_clock_state(handle, dev_id, clk_id, flags,
+ MSG_CLOCK_SW_STATE_REQ);
+}
+
+/**
+ * ti_sci_cmd_idle_clock() - Idle a clock which is in our control
+ * @handle: pointer to TI SCI handle
+ * @dev_id: Device identifier this request is for
+ * @clk_id: Clock identifier for the device for this request.
+ * Each device has its own set of clock inputs. This indexes
+ * which clock input to modify.
+ *
+ * NOTE: This clock must have been requested by get_clock previously.
+ *
+ * Return: 0 if all went well, else returns appropriate error value.
+ */
+static int ti_sci_cmd_idle_clock(const struct ti_sci_handle *handle,
+ u32 dev_id, u8 clk_id)
+{
+ return ti_sci_set_clock_state(handle, dev_id, clk_id, 0,
+ MSG_CLOCK_SW_STATE_UNREQ);
+}
+
+/**
+ * ti_sci_cmd_put_clock() - Release a clock from our control back to TISCI
+ * @handle: pointer to TI SCI handle
+ * @dev_id: Device identifier this request is for
+ * @clk_id: Clock identifier for the device for this request.
+ * Each device has its own set of clock inputs. This indexes
+ * which clock input to modify.
+ *
+ * NOTE: This clock must have been requested by get_clock previously.
+ *
+ * Return: 0 if all went well, else returns appropriate error value.
+ */
+static int ti_sci_cmd_put_clock(const struct ti_sci_handle *handle,
+ u32 dev_id, u8 clk_id)
+{
+ return ti_sci_set_clock_state(handle, dev_id, clk_id, 0,
+ MSG_CLOCK_SW_STATE_AUTO);
+}
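+
+/*
+ * Illustrative sketch: the clock lifecycle mirrors the device one. A
+ * client requests a clock, optionally idles it, and finally returns it
+ * to automatic management ("dev_id" and "clk_id" are hypothetical):
+ *
+ *	ret = handle->ops.clk_ops.get_clock(handle, dev_id, clk_id,
+ *					    false, false, false);
+ *	...
+ *	handle->ops.clk_ops.idle_clock(handle, dev_id, clk_id);
+ *	handle->ops.clk_ops.put_clock(handle, dev_id, clk_id);
+ */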
+
+/**
+ * ti_sci_cmd_clk_is_auto() - Is the clock being auto managed
+ * @handle: pointer to TI SCI handle
+ * @dev_id: Device identifier this request is for
+ * @clk_id: Clock identifier for the device for this request.
+ * Each device has its own set of clock inputs. This indexes
+ * which clock input to modify.
+ * @req_state: state indicating if the clock is auto managed
+ *
+ * Return: 0 if all went well, else returns appropriate error value.
+ */
+static int ti_sci_cmd_clk_is_auto(const struct ti_sci_handle *handle,
+ u32 dev_id, u8 clk_id, bool *req_state)
+{
+ u8 state = 0;
+ int ret;
+
+ if (!req_state)
+ return -EINVAL;
+
+ ret = ti_sci_cmd_get_clock_state(handle, dev_id, clk_id, &state, NULL);
+ if (ret)
+ return ret;
+
+ *req_state = (state == MSG_CLOCK_SW_STATE_AUTO);
+ return 0;
+}
+
+/**
+ * ti_sci_cmd_clk_is_on() - Is the clock ON
+ * @handle: pointer to TI SCI handle
+ * @dev_id: Device identifier this request is for
+ * @clk_id: Clock identifier for the device for this request.
+ * Each device has its own set of clock inputs. This indexes
+ * which clock input to modify.
+ * @req_state: state indicating if the clock is managed by us and enabled
+ * @curr_state: state indicating if the clock is ready for operation
+ *
+ * Return: 0 if all went well, else returns appropriate error value.
+ */
+static int ti_sci_cmd_clk_is_on(const struct ti_sci_handle *handle, u32 dev_id,
+ u8 clk_id, bool *req_state, bool *curr_state)
+{
+ u8 c_state = 0, r_state = 0;
+ int ret;
+
+ if (!req_state && !curr_state)
+ return -EINVAL;
+
+ ret = ti_sci_cmd_get_clock_state(handle, dev_id, clk_id,
+ &r_state, &c_state);
+ if (ret)
+ return ret;
+
+ if (req_state)
+ *req_state = (r_state == MSG_CLOCK_SW_STATE_REQ);
+ if (curr_state)
+ *curr_state = (c_state == MSG_CLOCK_HW_STATE_READY);
+ return 0;
+}
+
+/**
+ * ti_sci_cmd_clk_is_off() - Is the clock OFF
+ * @handle: pointer to TI SCI handle
+ * @dev_id: Device identifier this request is for
+ * @clk_id: Clock identifier for the device for this request.
+ * Each device has its own set of clock inputs. This indexes
+ * which clock input to modify.
+ * @req_state: state indicating if the clock is managed by us and disabled
+ * @curr_state: state indicating if the clock is NOT ready for operation
+ *
+ * Return: 0 if all went well, else returns appropriate error value.
+ */
+static int ti_sci_cmd_clk_is_off(const struct ti_sci_handle *handle, u32 dev_id,
+ u8 clk_id, bool *req_state, bool *curr_state)
+{
+ u8 c_state = 0, r_state = 0;
+ int ret;
+
+ if (!req_state && !curr_state)
+ return -EINVAL;
+
+ ret = ti_sci_cmd_get_clock_state(handle, dev_id, clk_id,
+ &r_state, &c_state);
+ if (ret)
+ return ret;
+
+ if (req_state)
+ *req_state = (r_state == MSG_CLOCK_SW_STATE_UNREQ);
+ if (curr_state)
+ *curr_state = (c_state == MSG_CLOCK_HW_STATE_NOT_READY);
+ return 0;
+}
+
+/**
+ * ti_sci_cmd_clk_set_parent() - Set the clock source of a specific device clock
+ * @handle: pointer to TI SCI handle
+ * @dev_id: Device identifier this request is for
+ * @clk_id: Clock identifier for the device for this request.
+ * Each device has its own set of clock inputs. This indexes
+ * which clock input to modify.
+ * @parent_id: Parent clock identifier to set
+ *
+ * Return: 0 if all went well, else returns appropriate error value.
+ */
+static int ti_sci_cmd_clk_set_parent(const struct ti_sci_handle *handle,
+ u32 dev_id, u8 clk_id, u8 parent_id)
+{
+ struct ti_sci_info *info;
+ struct ti_sci_msg_req_set_clock_parent *req;
+ struct ti_sci_msg_hdr *resp;
+ struct ti_sci_xfer *xfer;
+ struct device *dev;
+ int ret = 0;
+
+ if (IS_ERR(handle))
+ return PTR_ERR(handle);
+ if (!handle)
+ return -EINVAL;
+
+ info = handle_to_ti_sci_info(handle);
+ dev = info->dev;
+
+ xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SET_CLOCK_PARENT,
+ TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
+ sizeof(*req), sizeof(*resp));
+ if (IS_ERR(xfer)) {
+ ret = PTR_ERR(xfer);
+ dev_err(dev, "Message alloc failed(%d)\n", ret);
+ return ret;
+ }
+ req = (struct ti_sci_msg_req_set_clock_parent *)xfer->xfer_buf;
+ req->dev_id = dev_id;
+ req->clk_id = clk_id;
+ req->parent_id = parent_id;
+
+ ret = ti_sci_do_xfer(info, xfer);
+ if (ret) {
+ dev_err(dev, "Mbox send fail %d\n", ret);
+ goto fail;
+ }
+
+ resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
+
+ ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;
+
+fail:
+ ti_sci_put_one_xfer(&info->minfo, xfer);
+
+ return ret;
+}
+
+/**
+ * ti_sci_cmd_clk_get_parent() - Get current parent clock source
+ * @handle: pointer to TI SCI handle
+ * @dev_id: Device identifier this request is for
+ * @clk_id: Clock identifier for the device for this request.
+ * Each device has its own set of clock inputs. This indexes
+ * which clock input to modify.
+ * @parent_id: Current clock parent
+ *
+ * Return: 0 if all went well, else returns appropriate error value.
+ */
+static int ti_sci_cmd_clk_get_parent(const struct ti_sci_handle *handle,
+ u32 dev_id, u8 clk_id, u8 *parent_id)
+{
+ struct ti_sci_info *info;
+ struct ti_sci_msg_req_get_clock_parent *req;
+ struct ti_sci_msg_resp_get_clock_parent *resp;
+ struct ti_sci_xfer *xfer;
+ struct device *dev;
+ int ret = 0;
+
+ if (IS_ERR(handle))
+ return PTR_ERR(handle);
+ if (!handle || !parent_id)
+ return -EINVAL;
+
+ info = handle_to_ti_sci_info(handle);
+ dev = info->dev;
+
+ xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_GET_CLOCK_PARENT,
+ TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
+ sizeof(*req), sizeof(*resp));
+ if (IS_ERR(xfer)) {
+ ret = PTR_ERR(xfer);
+ dev_err(dev, "Message alloc failed(%d)\n", ret);
+ return ret;
+ }
+ req = (struct ti_sci_msg_req_get_clock_parent *)xfer->xfer_buf;
+ req->dev_id = dev_id;
+ req->clk_id = clk_id;
+
+ ret = ti_sci_do_xfer(info, xfer);
+ if (ret) {
+ dev_err(dev, "Mbox send fail %d\n", ret);
+ goto fail;
+ }
+
+ resp = (struct ti_sci_msg_resp_get_clock_parent *)xfer->xfer_buf;
+
+ if (!ti_sci_is_response_ack(resp))
+ ret = -ENODEV;
+ else
+ *parent_id = resp->parent_id;
+
+fail:
+ ti_sci_put_one_xfer(&info->minfo, xfer);
+
+ return ret;
+}
+
+/**
+ * ti_sci_cmd_clk_get_num_parents() - Get num parents of the current clk source
+ * @handle: pointer to TI SCI handle
+ * @dev_id: Device identifier this request is for
+ * @clk_id: Clock identifier for the device for this request.
+ * Each device has its own set of clock inputs. This indexes
+ * which clock input to modify.
+ * @num_parents: Returns the number of parents of the current clock.
+ *
+ * Return: 0 if all went well, else returns appropriate error value.
+ */
+static int ti_sci_cmd_clk_get_num_parents(const struct ti_sci_handle *handle,
+ u32 dev_id, u8 clk_id,
+ u8 *num_parents)
+{
+ struct ti_sci_info *info;
+ struct ti_sci_msg_req_get_clock_num_parents *req;
+ struct ti_sci_msg_resp_get_clock_num_parents *resp;
+ struct ti_sci_xfer *xfer;
+ struct device *dev;
+ int ret = 0;
+
+ if (IS_ERR(handle))
+ return PTR_ERR(handle);
+ if (!handle || !num_parents)
+ return -EINVAL;
+
+ info = handle_to_ti_sci_info(handle);
+ dev = info->dev;
+
+ xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_GET_NUM_CLOCK_PARENTS,
+ TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
+ sizeof(*req), sizeof(*resp));
+ if (IS_ERR(xfer)) {
+ ret = PTR_ERR(xfer);
+ dev_err(dev, "Message alloc failed(%d)\n", ret);
+ return ret;
+ }
+ req = (struct ti_sci_msg_req_get_clock_num_parents *)xfer->xfer_buf;
+ req->dev_id = dev_id;
+ req->clk_id = clk_id;
+
+ ret = ti_sci_do_xfer(info, xfer);
+ if (ret) {
+ dev_err(dev, "Mbox send fail %d\n", ret);
+ goto fail;
+ }
+
+ resp = (struct ti_sci_msg_resp_get_clock_num_parents *)xfer->xfer_buf;
+
+ if (!ti_sci_is_response_ack(resp))
+ ret = -ENODEV;
+ else
+ *num_parents = resp->num_parents;
+
+fail:
+ ti_sci_put_one_xfer(&info->minfo, xfer);
+
+ return ret;
+}
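+
+/*
+ * Illustrative sketch: the parent query helpers above combine naturally
+ * with set_parent (hypothetical IDs, error handling elided):
+ *
+ *	u8 num_parents, parent;
+ *
+ *	handle->ops.clk_ops.get_num_parents(handle, dev_id, clk_id,
+ *					    &num_parents);
+ *	handle->ops.clk_ops.get_parent(handle, dev_id, clk_id, &parent);
+ *	if (parent + 1 < num_parents)
+ *		handle->ops.clk_ops.set_parent(handle, dev_id, clk_id,
+ *					       parent + 1);
+ */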
+
+/**
+ * ti_sci_cmd_clk_get_match_freq() - Find a good match for frequency
+ * @handle: pointer to TI SCI handle
+ * @dev_id: Device identifier this request is for
+ * @clk_id: Clock identifier for the device for this request.
+ * Each device has its own set of clock inputs. This indexes
+ * which clock input to modify.
+ * @min_freq: The minimum allowable frequency in Hz. This is the minimum
+ * allowable programmed frequency and does not account for clock
+ * tolerances and jitter.
+ * @target_freq: The target clock frequency in Hz. A frequency match will
+ * be found as close to this target frequency as possible.
+ * @max_freq: The maximum allowable frequency in Hz. This is the maximum
+ * allowable programmed frequency and does not account for clock
+ * tolerances and jitter.
+ * @match_freq: Frequency match in Hz response.
+ *
+ * Return: 0 if all went well, else returns appropriate error value.
+ */
+static int ti_sci_cmd_clk_get_match_freq(const struct ti_sci_handle *handle,
+ u32 dev_id, u8 clk_id, u64 min_freq,
+ u64 target_freq, u64 max_freq,
+ u64 *match_freq)
+{
+ struct ti_sci_info *info;
+ struct ti_sci_msg_req_query_clock_freq *req;
+ struct ti_sci_msg_resp_query_clock_freq *resp;
+ struct ti_sci_xfer *xfer;
+ struct device *dev;
+ int ret = 0;
+
+ if (IS_ERR(handle))
+ return PTR_ERR(handle);
+ if (!handle || !match_freq)
+ return -EINVAL;
+
+ info = handle_to_ti_sci_info(handle);
+ dev = info->dev;
+
+ xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_QUERY_CLOCK_FREQ,
+ TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
+ sizeof(*req), sizeof(*resp));
+ if (IS_ERR(xfer)) {
+ ret = PTR_ERR(xfer);
+ dev_err(dev, "Message alloc failed(%d)\n", ret);
+ return ret;
+ }
+ req = (struct ti_sci_msg_req_query_clock_freq *)xfer->xfer_buf;
+ req->dev_id = dev_id;
+ req->clk_id = clk_id;
+ req->min_freq_hz = min_freq;
+ req->target_freq_hz = target_freq;
+ req->max_freq_hz = max_freq;
+
+ ret = ti_sci_do_xfer(info, xfer);
+ if (ret) {
+ dev_err(dev, "Mbox send fail %d\n", ret);
+ goto fail;
+ }
+
+ resp = (struct ti_sci_msg_resp_query_clock_freq *)xfer->xfer_buf;
+
+ if (!ti_sci_is_response_ack(resp))
+ ret = -ENODEV;
+ else
+ *match_freq = resp->freq_hz;
+
+fail:
+ ti_sci_put_one_xfer(&info->minfo, xfer);
+
+ return ret;
+}
+
+/**
+ * ti_sci_cmd_clk_set_freq() - Set a frequency for clock
+ * @handle: pointer to TI SCI handle
+ * @dev_id: Device identifier this request is for
+ * @clk_id: Clock identifier for the device for this request.
+ * Each device has its own set of clock inputs. This indexes
+ * which clock input to modify.
+ * @min_freq: The minimum allowable frequency in Hz. This is the minimum
+ * allowable programmed frequency and does not account for clock
+ * tolerances and jitter.
+ * @target_freq: The target clock frequency in Hz. The clock will be
+ * programmed at a rate as close to this target frequency as possible.
+ * @max_freq: The maximum allowable frequency in Hz. This is the maximum
+ * allowable programmed frequency and does not account for clock
+ * tolerances and jitter.
+ *
+ * Return: 0 if all went well, else returns appropriate error value.
+ */
+static int ti_sci_cmd_clk_set_freq(const struct ti_sci_handle *handle,
+ u32 dev_id, u8 clk_id, u64 min_freq,
+ u64 target_freq, u64 max_freq)
+{
+ struct ti_sci_info *info;
+ struct ti_sci_msg_req_set_clock_freq *req;
+ struct ti_sci_msg_hdr *resp;
+ struct ti_sci_xfer *xfer;
+ struct device *dev;
+ int ret = 0;
+
+ if (IS_ERR(handle))
+ return PTR_ERR(handle);
+ if (!handle)
+ return -EINVAL;
+
+ info = handle_to_ti_sci_info(handle);
+ dev = info->dev;
+
+ xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SET_CLOCK_FREQ,
+ TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
+ sizeof(*req), sizeof(*resp));
+ if (IS_ERR(xfer)) {
+ ret = PTR_ERR(xfer);
+ dev_err(dev, "Message alloc failed(%d)\n", ret);
+ return ret;
+ }
+ req = (struct ti_sci_msg_req_set_clock_freq *)xfer->xfer_buf;
+ req->dev_id = dev_id;
+ req->clk_id = clk_id;
+ req->min_freq_hz = min_freq;
+ req->target_freq_hz = target_freq;
+ req->max_freq_hz = max_freq;
+
+ ret = ti_sci_do_xfer(info, xfer);
+ if (ret) {
+ dev_err(dev, "Mbox send fail %d\n", ret);
+ goto fail;
+ }
+
+ resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
+
+ ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;
+
+fail:
+ ti_sci_put_one_xfer(&info->minfo, xfer);
+
+ return ret;
+}
+
+/**
+ * ti_sci_cmd_clk_get_freq() - Get current frequency
+ * @handle: pointer to TI SCI handle
+ * @dev_id: Device identifier this request is for
+ * @clk_id: Clock identifier for the device for this request.
+ * Each device has its own set of clock inputs. This indexes
+ * which clock input to modify.
+ * @freq: Current frequency in Hz
+ *
+ * Return: 0 if all went well, else returns appropriate error value.
+ */
+static int ti_sci_cmd_clk_get_freq(const struct ti_sci_handle *handle,
+ u32 dev_id, u8 clk_id, u64 *freq)
+{
+ struct ti_sci_info *info;
+ struct ti_sci_msg_req_get_clock_freq *req;
+ struct ti_sci_msg_resp_get_clock_freq *resp;
+ struct ti_sci_xfer *xfer;
+ struct device *dev;
+ int ret = 0;
+
+ if (IS_ERR(handle))
+ return PTR_ERR(handle);
+ if (!handle || !freq)
+ return -EINVAL;
+
+ info = handle_to_ti_sci_info(handle);
+ dev = info->dev;
+
+ xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_GET_CLOCK_FREQ,
+ TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
+ sizeof(*req), sizeof(*resp));
+ if (IS_ERR(xfer)) {
+ ret = PTR_ERR(xfer);
+ dev_err(dev, "Message alloc failed(%d)\n", ret);
+ return ret;
+ }
+ req = (struct ti_sci_msg_req_get_clock_freq *)xfer->xfer_buf;
+ req->dev_id = dev_id;
+ req->clk_id = clk_id;
+
+ ret = ti_sci_do_xfer(info, xfer);
+ if (ret) {
+ dev_err(dev, "Mbox send fail %d\n", ret);
+ goto fail;
+ }
+
+ resp = (struct ti_sci_msg_resp_get_clock_freq *)xfer->xfer_buf;
+
+ if (!ti_sci_is_response_ack(resp))
+ ret = -ENODEV;
+ else
+ *freq = resp->freq_hz;
+
+fail:
+ ti_sci_put_one_xfer(&info->minfo, xfer);
+
+ return ret;
+}
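+
+/*
+ * Illustrative sketch: a client wanting roughly 100 MHz within a +/-10%
+ * window (numbers purely hypothetical) can first query for the best match
+ * and then program it:
+ *
+ *	u64 match;
+ *
+ *	ret = handle->ops.clk_ops.get_best_match_freq(handle, dev_id, clk_id,
+ *						      90000000, 100000000,
+ *						      110000000, &match);
+ *	if (!ret)
+ *		ret = handle->ops.clk_ops.set_freq(handle, dev_id, clk_id,
+ *						   90000000, match, 110000000);
+ */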
+
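+/**
+ * ti_sci_cmd_core_reboot() - Command to request a reset of the SoC
+ * @handle: Pointer to TI SCI handle
+ *
+ * Return: 0 if all went well, else returns appropriate error value.
+ */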
+static int ti_sci_cmd_core_reboot(const struct ti_sci_handle *handle)
+{
+ struct ti_sci_info *info;
+ struct ti_sci_msg_req_reboot *req;
+ struct ti_sci_msg_hdr *resp;
+ struct ti_sci_xfer *xfer;
+ struct device *dev;
+ int ret = 0;
+
+ if (IS_ERR(handle))
+ return PTR_ERR(handle);
+ if (!handle)
+ return -EINVAL;
+
+ info = handle_to_ti_sci_info(handle);
+ dev = info->dev;
+
+ xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SYS_RESET,
+ TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
+ sizeof(*req), sizeof(*resp));
+ if (IS_ERR(xfer)) {
+ ret = PTR_ERR(xfer);
+ dev_err(dev, "Message alloc failed(%d)\n", ret);
+ return ret;
+ }
+ req = (struct ti_sci_msg_req_reboot *)xfer->xfer_buf;
+
+ ret = ti_sci_do_xfer(info, xfer);
+ if (ret) {
+ dev_err(dev, "Mbox send fail %d\n", ret);
+ goto fail;
+ }
+
+ resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
+
+ ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;
+
+fail:
+ ti_sci_put_one_xfer(&info->minfo, xfer);
+
+ return ret;
+}
+
+/**
+ * ti_sci_setup_ops() - Setup the operations structures
+ * @info: pointer to TISCI info structure
+ */
+static void ti_sci_setup_ops(struct ti_sci_info *info)
+{
+ struct ti_sci_ops *ops = &info->handle.ops;
+ struct ti_sci_core_ops *core_ops = &ops->core_ops;
+ struct ti_sci_dev_ops *dops = &ops->dev_ops;
+ struct ti_sci_clk_ops *cops = &ops->clk_ops;
+
+ core_ops->reboot_device = ti_sci_cmd_core_reboot;
+
+ dops->get_device = ti_sci_cmd_get_device;
+ dops->idle_device = ti_sci_cmd_idle_device;
+ dops->put_device = ti_sci_cmd_put_device;
+
+ dops->is_valid = ti_sci_cmd_dev_is_valid;
+ dops->get_context_loss_count = ti_sci_cmd_dev_get_clcnt;
+ dops->is_idle = ti_sci_cmd_dev_is_idle;
+ dops->is_stop = ti_sci_cmd_dev_is_stop;
+ dops->is_on = ti_sci_cmd_dev_is_on;
+ dops->is_transitioning = ti_sci_cmd_dev_is_trans;
+ dops->set_device_resets = ti_sci_cmd_set_device_resets;
+ dops->get_device_resets = ti_sci_cmd_get_device_resets;
+
+ cops->get_clock = ti_sci_cmd_get_clock;
+ cops->idle_clock = ti_sci_cmd_idle_clock;
+ cops->put_clock = ti_sci_cmd_put_clock;
+ cops->is_auto = ti_sci_cmd_clk_is_auto;
+ cops->is_on = ti_sci_cmd_clk_is_on;
+ cops->is_off = ti_sci_cmd_clk_is_off;
+
+ cops->set_parent = ti_sci_cmd_clk_set_parent;
+ cops->get_parent = ti_sci_cmd_clk_get_parent;
+ cops->get_num_parents = ti_sci_cmd_clk_get_num_parents;
+
+ cops->get_best_match_freq = ti_sci_cmd_clk_get_match_freq;
+ cops->set_freq = ti_sci_cmd_clk_set_freq;
+ cops->get_freq = ti_sci_cmd_clk_get_freq;
+}
+
+/**
+ * ti_sci_get_handle() - Get the TI SCI handle for a device
+ * @dev: Pointer to device for which we want SCI handle
+ *
+ * NOTE: The function does not track individual clients of the framework;
+ * usage tracking is expected to be maintained by the caller of the TI SCI
+ * protocol library. Each successful ti_sci_get_handle must be balanced by
+ * a corresponding ti_sci_put_handle.
+ * Return: pointer to handle if successful, else:
+ * -EPROBE_DEFER if the instance is not ready
+ * -ENODEV if the required device node is missing
+ * -EINVAL if invalid conditions are encountered.
+ */
+const struct ti_sci_handle *ti_sci_get_handle(struct device *dev)
+{
+ struct device_node *ti_sci_np;
+ struct list_head *p;
+ struct ti_sci_handle *handle = NULL;
+ struct ti_sci_info *info;
+
+ if (!dev) {
+ pr_err("I need a device pointer\n");
+ return ERR_PTR(-EINVAL);
+ }
+ ti_sci_np = of_get_parent(dev->of_node);
+ if (!ti_sci_np) {
+ dev_err(dev, "No OF information\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ mutex_lock(&ti_sci_list_mutex);
+ list_for_each(p, &ti_sci_list) {
+ info = list_entry(p, struct ti_sci_info, node);
+ if (ti_sci_np == info->dev->of_node) {
+ handle = &info->handle;
+ info->users++;
+ break;
+ }
+ }
+ mutex_unlock(&ti_sci_list_mutex);
+ of_node_put(ti_sci_np);
+
+ if (!handle)
+ return ERR_PTR(-EPROBE_DEFER);
+
+ return handle;
+}
+EXPORT_SYMBOL_GPL(ti_sci_get_handle);
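+
+/*
+ * Illustrative sketch: a client device whose DT node sits under the TISCI
+ * node can obtain and release the handle as follows (hedged example, not
+ * taken from an in-tree user):
+ *
+ *	const struct ti_sci_handle *sci = ti_sci_get_handle(dev);
+ *
+ *	if (IS_ERR(sci))
+ *		return PTR_ERR(sci);
+ *	...
+ *	ti_sci_put_handle(sci);
+ */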
+
+/**
+ * ti_sci_put_handle() - Release the handle acquired by ti_sci_get_handle
+ * @handle: Handle acquired by ti_sci_get_handle
+ *
+ * NOTE: The function does not track individual clients of the framework;
+ * usage tracking is expected to be maintained by the caller of the TI SCI
+ * protocol library. Each successful ti_sci_get_handle must be balanced by
+ * a corresponding ti_sci_put_handle.
+ *
+ * Return: 0 if successfully released;
+ * if an error pointer was passed, it returns the error value back;
+ * if NULL was passed, it returns -EINVAL.
+ */
+int ti_sci_put_handle(const struct ti_sci_handle *handle)
+{
+ struct ti_sci_info *info;
+
+ if (IS_ERR(handle))
+ return PTR_ERR(handle);
+ if (!handle)
+ return -EINVAL;
+
+ info = handle_to_ti_sci_info(handle);
+ mutex_lock(&ti_sci_list_mutex);
+ if (!WARN_ON(!info->users))
+ info->users--;
+ mutex_unlock(&ti_sci_list_mutex);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ti_sci_put_handle);
+
+static void devm_ti_sci_release(struct device *dev, void *res)
+{
+ const struct ti_sci_handle **ptr = res;
+ const struct ti_sci_handle *handle = *ptr;
+ int ret;
+
+ ret = ti_sci_put_handle(handle);
+ if (ret)
+ dev_err(dev, "failed to put handle %d\n", ret);
+}
+
+/**
+ * devm_ti_sci_get_handle() - Managed get handle
+ * @dev: device for which we want SCI handle for.
+ *
+ * NOTE: This releases the handle once the device resources are
+ * no longer needed. It MUST NOT be released with ti_sci_put_handle.
+ * The function does not track individual clients of the framework;
+ * usage tracking is expected to be maintained by the caller of the
+ * TI SCI protocol library.
+ *
+ * Return: pointer to handle if successful, else corresponding error
+ * pointer (same error semantics as ti_sci_get_handle).
+ */
+const struct ti_sci_handle *devm_ti_sci_get_handle(struct device *dev)
+{
+ const struct ti_sci_handle **ptr;
+ const struct ti_sci_handle *handle;
+
+ ptr = devres_alloc(devm_ti_sci_release, sizeof(*ptr), GFP_KERNEL);
+ if (!ptr)
+ return ERR_PTR(-ENOMEM);
+ handle = ti_sci_get_handle(dev);
+
+ if (!IS_ERR(handle)) {
+ *ptr = handle;
+ devres_add(dev, ptr);
+ } else {
+ devres_free(ptr);
+ }
+
+ return handle;
+}
+EXPORT_SYMBOL_GPL(devm_ti_sci_get_handle);
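+
+/*
+ * Illustrative sketch: with the managed variant the release happens
+ * automatically on driver detach, so a probe routine only needs:
+ *
+ *	const struct ti_sci_handle *sci = devm_ti_sci_get_handle(dev);
+ *
+ *	if (IS_ERR(sci))
+ *		return PTR_ERR(sci);
+ */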
+
+static int tisci_reboot_handler(struct notifier_block *nb, unsigned long mode,
+ void *cmd)
+{
+ struct ti_sci_info *info = reboot_to_ti_sci_info(nb);
+ const struct ti_sci_handle *handle = &info->handle;
+
+ ti_sci_cmd_core_reboot(handle);
+
+ /* Whether the call failed or passed, we should not have reached here */
+ return NOTIFY_BAD;
+}
+
+/* Description for K2G */
+static const struct ti_sci_desc ti_sci_pmmc_k2g_desc = {
+ .host_id = 2,
+ /* Conservative duration */
+ .max_rx_timeout_ms = 1000,
+ /* Limited by MBOX_TX_QUEUE_LEN. K2G can handle up to 128 messages! */
+ .max_msgs = 20,
+ .max_msg_size = 64,
+};
+
+static const struct of_device_id ti_sci_of_match[] = {
+ {.compatible = "ti,k2g-sci", .data = &ti_sci_pmmc_k2g_desc},
+ { /* Sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, ti_sci_of_match);
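+
+/*
+ * Illustrative sketch of a matching devicetree node: the mailbox specifier
+ * values are hypothetical, but the "rx"/"tx" channel names match what the
+ * probe routine below requests:
+ *
+ *	pmmc: pmmc {
+ *		compatible = "ti,k2g-sci";
+ *		mbox-names = "rx", "tx";
+ *		mboxes = <&msgmgr 5 2>, <&msgmgr 0 0>;
+ *	};
+ */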
+
+static int ti_sci_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ const struct of_device_id *of_id;
+ const struct ti_sci_desc *desc;
+ struct ti_sci_xfer *xfer;
+ struct ti_sci_info *info = NULL;
+ struct ti_sci_xfers_info *minfo;
+ struct mbox_client *cl;
+ int ret = -EINVAL;
+ int i;
+ int reboot = 0;
+
+ of_id = of_match_device(ti_sci_of_match, dev);
+ if (!of_id) {
+ dev_err(dev, "OF data missing\n");
+ return -EINVAL;
+ }
+ desc = of_id->data;
+
+ info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
+ info->dev = dev;
+ info->desc = desc;
+ reboot = of_property_read_bool(dev->of_node,
+ "ti,system-reboot-controller");
+ INIT_LIST_HEAD(&info->node);
+ minfo = &info->minfo;
+
+ /*
+ * Pre-allocate messages.
+ * NEVER allocate more than we can indicate in hdr.seq;
+ * if we have a data description bug, force a fix.
+ */
+ if (WARN_ON(desc->max_msgs >=
+ 1 << 8 * sizeof(((struct ti_sci_msg_hdr *)0)->seq)))
+ return -EINVAL;
+
+ minfo->xfer_block = devm_kcalloc(dev,
+ desc->max_msgs,
+ sizeof(*minfo->xfer_block),
+ GFP_KERNEL);
+ if (!minfo->xfer_block)
+ return -ENOMEM;
+
+ minfo->xfer_alloc_table = devm_kzalloc(dev,
+ BITS_TO_LONGS(desc->max_msgs)
+ * sizeof(unsigned long),
+ GFP_KERNEL);
+ if (!minfo->xfer_alloc_table)
+ return -ENOMEM;
+ bitmap_zero(minfo->xfer_alloc_table, desc->max_msgs);
+
+ /* Pre-initialize the buffer pointer to pre-allocated buffers */
+ for (i = 0, xfer = minfo->xfer_block; i < desc->max_msgs; i++, xfer++) {
+ xfer->xfer_buf = devm_kcalloc(dev, 1, desc->max_msg_size,
+ GFP_KERNEL);
+ if (!xfer->xfer_buf)
+ return -ENOMEM;
+
+ xfer->tx_message.buf = xfer->xfer_buf;
+ init_completion(&xfer->done);
+ }
+
+ ret = ti_sci_debugfs_create(pdev, info);
+ if (ret)
+ dev_warn(dev, "Failed to create debug file\n");
+
+ platform_set_drvdata(pdev, info);
+
+ cl = &info->cl;
+ cl->dev = dev;
+ cl->tx_block = false;
+ cl->rx_callback = ti_sci_rx_callback;
+ cl->knows_txdone = true;
+
+ spin_lock_init(&minfo->xfer_lock);
+ sema_init(&minfo->sem_xfer_count, desc->max_msgs);
+
+ info->chan_rx = mbox_request_channel_byname(cl, "rx");
+ if (IS_ERR(info->chan_rx)) {
+ ret = PTR_ERR(info->chan_rx);
+ goto out;
+ }
+
+ info->chan_tx = mbox_request_channel_byname(cl, "tx");
+ if (IS_ERR(info->chan_tx)) {
+ ret = PTR_ERR(info->chan_tx);
+ goto out;
+ }
+ ret = ti_sci_cmd_get_revision(info);
+ if (ret) {
+ dev_err(dev, "Unable to communicate with TISCI(%d)\n", ret);
+ goto out;
+ }
+
+ ti_sci_setup_ops(info);
+
+ if (reboot) {
+ info->nb.notifier_call = tisci_reboot_handler;
+ info->nb.priority = 128;
+
+ ret = register_restart_handler(&info->nb);
+ if (ret) {
+ dev_err(dev, "reboot registration fail(%d)\n", ret);
+ return ret;
+ }
+ }
+
+ dev_info(dev, "ABI: %d.%d (firmware rev 0x%04x '%s')\n",
+ info->handle.version.abi_major, info->handle.version.abi_minor,
+ info->handle.version.firmware_revision,
+ info->handle.version.firmware_description);
+
+ mutex_lock(&ti_sci_list_mutex);
+ list_add_tail(&info->node, &ti_sci_list);
+ mutex_unlock(&ti_sci_list_mutex);
+
+ return of_platform_populate(dev->of_node, NULL, NULL, dev);
+out:
+ if (!IS_ERR(info->chan_tx))
+ mbox_free_channel(info->chan_tx);
+ if (!IS_ERR(info->chan_rx))
+ mbox_free_channel(info->chan_rx);
+ debugfs_remove(info->d);
+ return ret;
+}
+
+static int ti_sci_remove(struct platform_device *pdev)
+{
+ struct ti_sci_info *info;
+ struct device *dev = &pdev->dev;
+ int ret = 0;
+
+ of_platform_depopulate(dev);
+
+ info = platform_get_drvdata(pdev);
+
+ if (info->nb.notifier_call)
+ unregister_restart_handler(&info->nb);
+
+ mutex_lock(&ti_sci_list_mutex);
+ if (info->users)
+ ret = -EBUSY;
+ else
+ list_del(&info->node);
+ mutex_unlock(&ti_sci_list_mutex);
+
+ if (!ret) {
+ ti_sci_debugfs_destroy(pdev, info);
+
+ /* Safe to free channels since no more users */
+ mbox_free_channel(info->chan_tx);
+ mbox_free_channel(info->chan_rx);
+ }
+
+ return ret;
+}
+
+static struct platform_driver ti_sci_driver = {
+ .probe = ti_sci_probe,
+ .remove = ti_sci_remove,
+ .driver = {
+ .name = "ti-sci",
+ .of_match_table = of_match_ptr(ti_sci_of_match),
+ },
+};
+module_platform_driver(ti_sci_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("TI System Control Interface(SCI) driver");
+MODULE_AUTHOR("Nishanth Menon");
+MODULE_ALIAS("platform:ti-sci");
diff --git a/drivers/firmware/ti_sci.h b/drivers/firmware/ti_sci.h
new file mode 100644
index 000000000000..9b611e9e6f6d
--- /dev/null
+++ b/drivers/firmware/ti_sci.h
@@ -0,0 +1,492 @@
+/*
+ * Texas Instruments System Control Interface (TISCI) Protocol
+ *
+ * Communication protocol with TI SCI hardware
+ * The system works in a message response protocol
+ * See: http://processors.wiki.ti.com/index.php/TISCI for details
+ *
+ * Copyright (C) 2015-2016 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the
+ * distribution.
+ *
+ * Neither the name of Texas Instruments Incorporated nor the names of
+ * its contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef __TI_SCI_H
+#define __TI_SCI_H
+
+/* Generic Messages */
+#define TI_SCI_MSG_ENABLE_WDT 0x0000
+#define TI_SCI_MSG_WAKE_RESET 0x0001
+#define TI_SCI_MSG_VERSION 0x0002
+#define TI_SCI_MSG_WAKE_REASON 0x0003
+#define TI_SCI_MSG_GOODBYE 0x0004
+#define TI_SCI_MSG_SYS_RESET 0x0005
+
+/* Device requests */
+#define TI_SCI_MSG_SET_DEVICE_STATE 0x0200
+#define TI_SCI_MSG_GET_DEVICE_STATE 0x0201
+#define TI_SCI_MSG_SET_DEVICE_RESETS 0x0202
+
+/* Clock requests */
+#define TI_SCI_MSG_SET_CLOCK_STATE 0x0100
+#define TI_SCI_MSG_GET_CLOCK_STATE 0x0101
+#define TI_SCI_MSG_SET_CLOCK_PARENT 0x0102
+#define TI_SCI_MSG_GET_CLOCK_PARENT 0x0103
+#define TI_SCI_MSG_GET_NUM_CLOCK_PARENTS 0x0104
+#define TI_SCI_MSG_SET_CLOCK_FREQ 0x010c
+#define TI_SCI_MSG_QUERY_CLOCK_FREQ 0x010d
+#define TI_SCI_MSG_GET_CLOCK_FREQ 0x010e
+
+/**
+ * struct ti_sci_msg_hdr - Generic Message Header for all messages and responses
+ * @type: Type of messages: One of TI_SCI_MSG* values
+ * @host: Host of the message
+ * @seq: Message identifier indicating a transfer sequence
+ * @flags: Flag for the message
+ */
+struct ti_sci_msg_hdr {
+ u16 type;
+ u8 host;
+ u8 seq;
+#define TI_SCI_MSG_FLAG(val) (1 << (val))
+#define TI_SCI_FLAG_REQ_GENERIC_NORESPONSE 0x0
+#define TI_SCI_FLAG_REQ_ACK_ON_RECEIVED TI_SCI_MSG_FLAG(0)
+#define TI_SCI_FLAG_REQ_ACK_ON_PROCESSED TI_SCI_MSG_FLAG(1)
+#define TI_SCI_FLAG_RESP_GENERIC_NACK 0x0
+#define TI_SCI_FLAG_RESP_GENERIC_ACK TI_SCI_MSG_FLAG(1)
+ /* Additional Flags */
+ u32 flags;
+} __packed;
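+
+/*
+ * Illustrative sketch: given the flag definitions above, a response is
+ * considered acknowledged when the generic ACK bit is set in the header,
+ * roughly:
+ *
+ *	static inline bool is_ack(struct ti_sci_msg_hdr *hdr)
+ *	{
+ *		return hdr->flags & TI_SCI_FLAG_RESP_GENERIC_ACK;
+ *	}
+ *
+ * (the driver's ti_sci_is_response_ack() helper plays this role).
+ */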
+
+/**
+ * struct ti_sci_msg_resp_version - Response for a message
+ * @hdr: Generic header
+ * @firmware_description: String describing the firmware
+ * @firmware_revision: Firmware revision
+ * @abi_major: Major version of the ABI that firmware supports
+ * @abi_minor: Minor version of the ABI that firmware supports
+ *
+ * In general, ABI version changes follow the rule that minor version increments
+ * are backward compatible. Major revision changes in ABI may not be
+ * backward compatible.
+ *
+ * Response to a generic message with message type TI_SCI_MSG_VERSION
+ */
+struct ti_sci_msg_resp_version {
+ struct ti_sci_msg_hdr hdr;
+ char firmware_description[32];
+ u16 firmware_revision;
+ u8 abi_major;
+ u8 abi_minor;
+} __packed;
+
+/**
+ * struct ti_sci_msg_req_reboot - Reboot the SoC
+ * @hdr: Generic Header
+ *
+ * Request type is TI_SCI_MSG_SYS_RESET, responded with a generic
+ * ACK/NACK message.
+ */
+struct ti_sci_msg_req_reboot {
+ struct ti_sci_msg_hdr hdr;
+} __packed;
+
+/**
+ * struct ti_sci_msg_req_set_device_state - Set the desired state of the device
+ * @hdr: Generic header
+ * @id: Indicates which device to modify
+ * @reserved: Reserved space in message, must be 0 for backward compatibility
+ * @state: The desired state of the device.
+ *
+ * Certain flags can also be set to alter the device state:
+ * + MSG_FLAG_DEVICE_WAKE_ENABLED - Configure the device to be a wake source.
+ * The meaning of this flag will vary slightly from device to device and from
+ * SoC to SoC but it generally allows the device to wake the SoC out of deep
+ * suspend states.
+ * + MSG_FLAG_DEVICE_RESET_ISO - Enable reset isolation for this device.
+ * + MSG_FLAG_DEVICE_EXCLUSIVE - Claim this device exclusively. When passed
+ * with STATE_RETENTION or STATE_ON, it will claim the device exclusively.
+ * If another host already has this device set to STATE_RETENTION or STATE_ON,
+ * the message will fail. Once successful, other hosts attempting to set
+ * STATE_RETENTION or STATE_ON will fail.
+ *
+ * Request type is TI_SCI_MSG_SET_DEVICE_STATE, responded with a generic
+ * ACK/NACK message.
+ */
+struct ti_sci_msg_req_set_device_state {
+ /* Additional hdr->flags options */
+#define MSG_FLAG_DEVICE_WAKE_ENABLED TI_SCI_MSG_FLAG(8)
+#define MSG_FLAG_DEVICE_RESET_ISO TI_SCI_MSG_FLAG(9)
+#define MSG_FLAG_DEVICE_EXCLUSIVE TI_SCI_MSG_FLAG(10)
+ struct ti_sci_msg_hdr hdr;
+ u32 id;
+ u32 reserved;
+
+#define MSG_DEVICE_SW_STATE_AUTO_OFF 0
+#define MSG_DEVICE_SW_STATE_RETENTION 1
+#define MSG_DEVICE_SW_STATE_ON 2
+ u8 state;
+} __packed;
+
+/**
+ * struct ti_sci_msg_req_get_device_state - Request to get device state.
+ * @hdr: Generic header
+ * @id: Device Identifier
+ *
+ * Request type is TI_SCI_MSG_GET_DEVICE_STATE, responded with device state
+ * information.
+ */
+struct ti_sci_msg_req_get_device_state {
+ struct ti_sci_msg_hdr hdr;
+ u32 id;
+} __packed;
+
+/**
+ * struct ti_sci_msg_resp_get_device_state - Response to get device request.
+ * @hdr: Generic header
+ * @context_loss_count: Indicates how many times the device has lost context. A
+ * driver can use this monotonic counter to determine if the device has
+ * lost context since the last time this message was exchanged.
+ * @resets: Programmed state of the reset lines.
+ * @programmed_state: The state as programmed by set_device.
+ * - Uses the MSG_DEVICE_SW_* macros
+ * @current_state: The actual state of the hardware.
+ *
+ * Response to request TI_SCI_MSG_GET_DEVICE_STATE.
+ */
+struct ti_sci_msg_resp_get_device_state {
+ struct ti_sci_msg_hdr hdr;
+ u32 context_loss_count;
+ u32 resets;
+ u8 programmed_state;
+#define MSG_DEVICE_HW_STATE_OFF 0
+#define MSG_DEVICE_HW_STATE_ON 1
+#define MSG_DEVICE_HW_STATE_TRANS 2
+ u8 current_state;
+} __packed;
+
+/**
+ * struct ti_sci_msg_req_set_device_resets - Set the desired resets
+ * configuration of the device
+ * @hdr: Generic header
+ * @id: Indicates which device to modify
+ * @resets: A bit field of resets for the device. The meaning, behavior,
+ * and usage of the reset flags are device specific. 0 for a bit
+ * indicates releasing the reset represented by that bit while 1
+ * indicates keeping it held.
+ *
+ * Request type is TI_SCI_MSG_SET_DEVICE_RESETS, responded with a generic
+ * ACK/NACK message.
+ */
+struct ti_sci_msg_req_set_device_resets {
+ struct ti_sci_msg_hdr hdr;
+ u32 id;
+ u32 resets;
+} __packed;
+
+/**
+ * struct ti_sci_msg_req_set_clock_state - Request to setup a Clock state
+ * @hdr: Generic Header, Certain flags can be set specific to the clocks:
+ * MSG_FLAG_CLOCK_ALLOW_SSC: Allow this clock to be modified
+ * via spread spectrum clocking.
+ * MSG_FLAG_CLOCK_ALLOW_FREQ_CHANGE: Allow this clock's
+ * frequency to be changed while it is running so long as it
+ * is within the min/max limits.
+ * MSG_FLAG_CLOCK_INPUT_TERM: Enable input termination, this
+ * is only applicable to clock inputs on the SoC pseudo-device.
+ * @dev_id: Device identifier this request is for
+ * @clk_id: Clock identifier for the device for this request.
+ * Each device has its own set of clock inputs. This indexes
+ * which clock input to modify.
+ * @request_state: Request the state for the clock to be set to.
+ * MSG_CLOCK_SW_STATE_UNREQ: The IP does not require this clock,
+ * it can be disabled, regardless of the state of the device
+ * MSG_CLOCK_SW_STATE_AUTO: Allow the System Controller to
+ * automatically manage the state of this clock. If the device
+ * is enabled, then the clock is enabled. If the device is set
+ * to off or retention, then the clock is internally set as not
+ * being required by the device (default).
+ * MSG_CLOCK_SW_STATE_REQ: Configure the clock to be enabled,
+ * regardless of the state of the device.
+ *
+ * Normally, all required clocks are managed by the TISCI entity; this is
+ * used only for specific control *IF* required. The auto managed state is
+ * MSG_CLOCK_SW_STATE_AUTO; in the other states, the TISCI entity assumes
+ * that the remote host will explicitly control the clock.
+ *
+ * Request type is TI_SCI_MSG_SET_CLOCK_STATE, response is a generic
+ * ACK or NACK message.
+ */
+struct ti_sci_msg_req_set_clock_state {
+ /* Additional hdr->flags options */
+#define MSG_FLAG_CLOCK_ALLOW_SSC TI_SCI_MSG_FLAG(8)
+#define MSG_FLAG_CLOCK_ALLOW_FREQ_CHANGE TI_SCI_MSG_FLAG(9)
+#define MSG_FLAG_CLOCK_INPUT_TERM TI_SCI_MSG_FLAG(10)
+ struct ti_sci_msg_hdr hdr;
+ u32 dev_id;
+ u8 clk_id;
+#define MSG_CLOCK_SW_STATE_UNREQ 0
+#define MSG_CLOCK_SW_STATE_AUTO 1
+#define MSG_CLOCK_SW_STATE_REQ 2
+ u8 request_state;
+} __packed;
+
+/**
+ * struct ti_sci_msg_req_get_clock_state - Request for clock state
+ * @hdr: Generic Header
+ * @dev_id: Device identifier this request is for
+ * @clk_id: Clock identifier for the device for this request.
+ * Each device has its own set of clock inputs. This indexes
+ * which clock input to get state of.
+ *
+ * Request type is TI_SCI_MSG_GET_CLOCK_STATE, response is state
+ * of the clock
+ */
+struct ti_sci_msg_req_get_clock_state {
+ struct ti_sci_msg_hdr hdr;
+ u32 dev_id;
+ u8 clk_id;
+} __packed;
+
+/**
+ * struct ti_sci_msg_resp_get_clock_state - Response to get clock state
+ * @hdr: Generic Header
+ * @programmed_state: Any programmed state of the clock. This is one of
+ * MSG_CLOCK_SW_STATE* values.
+ * @current_state: Current state of the clock. This is one of:
+ * MSG_CLOCK_HW_STATE_NOT_READY: Clock is not ready
+ * MSG_CLOCK_HW_STATE_READY: Clock is ready
+ *
+ * Response to TI_SCI_MSG_GET_CLOCK_STATE.
+ */
+struct ti_sci_msg_resp_get_clock_state {
+ struct ti_sci_msg_hdr hdr;
+ u8 programmed_state;
+#define MSG_CLOCK_HW_STATE_NOT_READY 0
+#define MSG_CLOCK_HW_STATE_READY 1
+ u8 current_state;
+} __packed;
+
+/**
+ * struct ti_sci_msg_req_set_clock_parent - Set the clock parent
+ * @hdr: Generic Header
+ * @dev_id: Device identifier this request is for
+ * @clk_id: Clock identifier for the device for this request.
+ * Each device has its own set of clock inputs. This indexes
+ * which clock input to modify.
+ * @parent_id: The new clock parent is selectable by an index via this
+ * parameter.
+ *
+ * Request type is TI_SCI_MSG_SET_CLOCK_PARENT, response is generic
+ * ACK / NACK message.
+ */
+struct ti_sci_msg_req_set_clock_parent {
+ struct ti_sci_msg_hdr hdr;
+ u32 dev_id;
+ u8 clk_id;
+ u8 parent_id;
+} __packed;
+
+/**
+ * struct ti_sci_msg_req_get_clock_parent - Get the clock parent
+ * @hdr: Generic Header
+ * @dev_id: Device identifier this request is for
+ * @clk_id: Clock identifier for the device for this request.
+ * Each device has its own set of clock inputs. This indexes
+ * which clock input to get the parent for.
+ *
+ * Request type is TI_SCI_MSG_GET_CLOCK_PARENT, response is parent information
+ */
+struct ti_sci_msg_req_get_clock_parent {
+ struct ti_sci_msg_hdr hdr;
+ u32 dev_id;
+ u8 clk_id;
+} __packed;
+
+/**
+ * struct ti_sci_msg_resp_get_clock_parent - Response with clock parent
+ * @hdr: Generic Header
+ * @parent_id: The current clock parent
+ *
+ * Response to TI_SCI_MSG_GET_CLOCK_PARENT.
+ */
+struct ti_sci_msg_resp_get_clock_parent {
+ struct ti_sci_msg_hdr hdr;
+ u8 parent_id;
+} __packed;
+
+/**
+ * struct ti_sci_msg_req_get_clock_num_parents - Request to get clock parents
+ * @hdr: Generic header
+ * @dev_id: Device identifier this request is for
+ * @clk_id: Clock identifier for the device for this request.
+ *
+ * This request provides information about how many clock parent options
+ * are available for a given clock to a device. This is typically used
+ * for input clocks.
+ *
+ * Request type is TI_SCI_MSG_GET_NUM_CLOCK_PARENTS, response is appropriate
+ * message, or NACK in case of inability to satisfy request.
+ */
+struct ti_sci_msg_req_get_clock_num_parents {
+ struct ti_sci_msg_hdr hdr;
+ u32 dev_id;
+ u8 clk_id;
+} __packed;
+
+/**
+ * struct ti_sci_msg_resp_get_clock_num_parents - Response for get clk parents
+ * @hdr: Generic header
+ * @num_parents: Number of clock parents
+ *
+ * Response to TI_SCI_MSG_GET_NUM_CLOCK_PARENTS
+ */
+struct ti_sci_msg_resp_get_clock_num_parents {
+ struct ti_sci_msg_hdr hdr;
+ u8 num_parents;
+} __packed;
+
+/**
+ * struct ti_sci_msg_req_query_clock_freq - Request to query a frequency
+ * @hdr: Generic Header
+ * @dev_id: Device identifier this request is for
+ * @min_freq_hz: The minimum allowable frequency in Hz. This is the minimum
+ * allowable programmed frequency and does not account for clock
+ * tolerances and jitter.
+ * @target_freq_hz: The target clock frequency. A frequency will be found
+ * as close to this target frequency as possible.
+ * @max_freq_hz: The maximum allowable frequency in Hz. This is the maximum
+ * allowable programmed frequency and does not account for clock
+ * tolerances and jitter.
+ * @clk_id: Clock identifier for the device for this request.
+ *
+ * NOTE: Normally clock frequency management is automatically done by TISCI
+ * entity. In case of specific requests, TISCI evaluates capability to achieve
+ * requested frequency within provided range and responds with
+ * result message.
+ *
+ * Request type is TI_SCI_MSG_QUERY_CLOCK_FREQ, response is appropriate message,
+ * or NACK in case of inability to satisfy request.
+ */
+struct ti_sci_msg_req_query_clock_freq {
+ struct ti_sci_msg_hdr hdr;
+ u32 dev_id;
+ u64 min_freq_hz;
+ u64 target_freq_hz;
+ u64 max_freq_hz;
+ u8 clk_id;
+} __packed;
+
+/**
+ * struct ti_sci_msg_resp_query_clock_freq - Response to a clock frequency query
+ * @hdr: Generic Header
+ * @freq_hz: Frequency that is the best match in Hz.
+ *
+ * Response to request type TI_SCI_MSG_QUERY_CLOCK_FREQ. NOTE: if the request
+ * cannot be satisfied, the message will be of type NACK.
+ */
+struct ti_sci_msg_resp_query_clock_freq {
+ struct ti_sci_msg_hdr hdr;
+ u64 freq_hz;
+} __packed;
+
+/**
+ * struct ti_sci_msg_req_set_clock_freq - Request to setup a clock frequency
+ * @hdr: Generic Header
+ * @dev_id: Device identifier this request is for
+ * @min_freq_hz: The minimum allowable frequency in Hz. This is the minimum
+ * allowable programmed frequency and does not account for clock
+ * tolerances and jitter.
+ * @target_freq_hz: The target clock frequency. The clock will be programmed
+ * at a rate as close to this target frequency as possible.
+ * @max_freq_hz: The maximum allowable frequency in Hz. This is the maximum
+ * allowable programmed frequency and does not account for clock
+ * tolerances and jitter.
+ * @clk_id: Clock identifier for the device for this request.
+ *
+ * NOTE: Normally clock frequency management is automatically done by TISCI
+ * entity. In case of specific requests, TISCI evaluates capability to achieve
+ * requested range and responds with success/failure message.
+ *
+ * This sets the desired frequency for a clock within an allowable
+ * range. This message will fail on an enabled clock unless
+ * MSG_FLAG_CLOCK_ALLOW_FREQ_CHANGE is set for the clock. Additionally,
+ * if other clocks have their frequency modified due to this message,
+ * they also must have the MSG_FLAG_CLOCK_ALLOW_FREQ_CHANGE or be disabled.
+ *
+ * Calling set frequency on a clock input to the SoC pseudo-device will
+ * inform the PMMC of that clock's frequency. Setting a frequency of
+ * zero will indicate the clock is disabled.
+ *
+ * Calling set frequency on clock outputs from the SoC pseudo-device will
+ * function similarly to setting the clock frequency on a device.
+ *
+ * Request type is TI_SCI_MSG_SET_CLOCK_FREQ, response is a generic ACK/NACK
+ * message.
+ */
+struct ti_sci_msg_req_set_clock_freq {
+ struct ti_sci_msg_hdr hdr;
+ u32 dev_id;
+ u64 min_freq_hz;
+ u64 target_freq_hz;
+ u64 max_freq_hz;
+ u8 clk_id;
+} __packed;
+
+/**
+ * struct ti_sci_msg_req_get_clock_freq - Request to get the clock frequency
+ * @hdr: Generic Header
+ * @dev_id: Device identifier this request is for
+ * @clk_id: Clock identifier for the device for this request.
+ *
+ * NOTE: Normally clock frequency management is automatically done by TISCI
+ * entity. In some cases, clock frequencies are configured by the host.
+ *
+ * Request type is TI_SCI_MSG_GET_CLOCK_FREQ, responded with the frequency
+ * that the clock is currently running at.
+ */
+struct ti_sci_msg_req_get_clock_freq {
+ struct ti_sci_msg_hdr hdr;
+ u32 dev_id;
+ u8 clk_id;
+} __packed;
+
+/**
+ * struct ti_sci_msg_resp_get_clock_freq - Response of clock frequency request
+ * @hdr: Generic Header
+ * @freq_hz: Frequency that the clock is currently running at, in Hz.
+ *
+ * Response to request type TI_SCI_MSG_GET_CLOCK_FREQ.
+ */
+struct ti_sci_msg_resp_get_clock_freq {
+ struct ti_sci_msg_hdr hdr;
+ u64 freq_hz;
+} __packed;
+
+#endif /* __TI_SCI_H */
diff --git a/drivers/mailbox/Kconfig b/drivers/mailbox/Kconfig
index 11eebfe8a4cb..ceff415f201c 100644
--- a/drivers/mailbox/Kconfig
+++ b/drivers/mailbox/Kconfig
@@ -124,6 +124,15 @@ config MAILBOX_TEST
Test client to help with testing new Controller driver
implementations.
+config TEGRA_HSP_MBOX
+ bool "Tegra HSP (Hardware Synchronization Primitives) Driver"
+ depends on ARCH_TEGRA_186_SOC
+ help
+ The Tegra HSP driver is used for the interprocessor communication
+ between different remote processors and host processors on Tegra186
+ and later SoCs. Say Y here if you want to have this support.
+ If unsure say N.
+
config XGENE_SLIMPRO_MBOX
tristate "APM SoC X-Gene SLIMpro Mailbox Controller"
depends on ARCH_XGENE
diff --git a/drivers/mailbox/Makefile b/drivers/mailbox/Makefile
index ace6fed8fea9..7dde4f609ae8 100644
--- a/drivers/mailbox/Makefile
+++ b/drivers/mailbox/Makefile
@@ -29,3 +29,5 @@ obj-$(CONFIG_XGENE_SLIMPRO_MBOX) += mailbox-xgene-slimpro.o
obj-$(CONFIG_HI6220_MBOX) += hi6220-mailbox.o
obj-$(CONFIG_BCM_PDC_MBOX) += bcm-pdc-mailbox.o
+
+obj-$(CONFIG_TEGRA_HSP_MBOX) += tegra-hsp.o
diff --git a/drivers/mailbox/tegra-hsp.c b/drivers/mailbox/tegra-hsp.c
new file mode 100644
index 000000000000..0cde356c11ab
--- /dev/null
+++ b/drivers/mailbox/tegra-hsp.c
@@ -0,0 +1,479 @@
+/*
+ * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/mailbox_controller.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include <dt-bindings/mailbox/tegra186-hsp.h>
+
+#define HSP_INT_DIMENSIONING 0x380
+#define HSP_nSM_SHIFT 0
+#define HSP_nSS_SHIFT 4
+#define HSP_nAS_SHIFT 8
+#define HSP_nDB_SHIFT 12
+#define HSP_nSI_SHIFT 16
+#define HSP_nINT_MASK 0xf
+
+#define HSP_DB_TRIGGER 0x0
+#define HSP_DB_ENABLE 0x4
+#define HSP_DB_RAW 0x8
+#define HSP_DB_PENDING 0xc
+
+#define HSP_DB_CCPLEX 1
+#define HSP_DB_BPMP 3
+#define HSP_DB_MAX 7
+
+struct tegra_hsp_channel;
+struct tegra_hsp;
+
+struct tegra_hsp_channel {
+ struct tegra_hsp *hsp;
+ struct mbox_chan *chan;
+ void __iomem *regs;
+};
+
+struct tegra_hsp_doorbell {
+ struct tegra_hsp_channel channel;
+ struct list_head list;
+ const char *name;
+ unsigned int master;
+ unsigned int index;
+};
+
+struct tegra_hsp_db_map {
+ const char *name;
+ unsigned int master;
+ unsigned int index;
+};
+
+struct tegra_hsp_soc {
+ const struct tegra_hsp_db_map *map;
+};
+
+struct tegra_hsp {
+ const struct tegra_hsp_soc *soc;
+ struct mbox_controller mbox;
+ void __iomem *regs;
+ unsigned int irq;
+ unsigned int num_sm;
+ unsigned int num_as;
+ unsigned int num_ss;
+ unsigned int num_db;
+ unsigned int num_si;
+ spinlock_t lock;
+
+ struct list_head doorbells;
+};
+
+static inline struct tegra_hsp *
+to_tegra_hsp(struct mbox_controller *mbox)
+{
+ return container_of(mbox, struct tegra_hsp, mbox);
+}
+
+static inline u32 tegra_hsp_readl(struct tegra_hsp *hsp, unsigned int offset)
+{
+ return readl(hsp->regs + offset);
+}
+
+static inline void tegra_hsp_writel(struct tegra_hsp *hsp, u32 value,
+ unsigned int offset)
+{
+ writel(value, hsp->regs + offset);
+}
+
+static inline u32 tegra_hsp_channel_readl(struct tegra_hsp_channel *channel,
+ unsigned int offset)
+{
+ return readl(channel->regs + offset);
+}
+
+static inline void tegra_hsp_channel_writel(struct tegra_hsp_channel *channel,
+ u32 value, unsigned int offset)
+{
+ writel(value, channel->regs + offset);
+}
+
+static bool tegra_hsp_doorbell_can_ring(struct tegra_hsp_doorbell *db)
+{
+ u32 value;
+
+ value = tegra_hsp_channel_readl(&db->channel, HSP_DB_ENABLE);
+
+ return (value & BIT(TEGRA_HSP_DB_MASTER_CCPLEX)) != 0;
+}
+
+static struct tegra_hsp_doorbell *
+__tegra_hsp_doorbell_get(struct tegra_hsp *hsp, unsigned int master)
+{
+ struct tegra_hsp_doorbell *entry;
+
+ list_for_each_entry(entry, &hsp->doorbells, list)
+ if (entry->master == master)
+ return entry;
+
+ return NULL;
+}
+
+static struct tegra_hsp_doorbell *
+tegra_hsp_doorbell_get(struct tegra_hsp *hsp, unsigned int master)
+{
+ struct tegra_hsp_doorbell *db;
+ unsigned long flags;
+
+ spin_lock_irqsave(&hsp->lock, flags);
+ db = __tegra_hsp_doorbell_get(hsp, master);
+ spin_unlock_irqrestore(&hsp->lock, flags);
+
+ return db;
+}
+
+static irqreturn_t tegra_hsp_doorbell_irq(int irq, void *data)
+{
+ struct tegra_hsp *hsp = data;
+ struct tegra_hsp_doorbell *db;
+ unsigned long master, value;
+
+ db = tegra_hsp_doorbell_get(hsp, TEGRA_HSP_DB_MASTER_CCPLEX);
+ if (!db)
+ return IRQ_NONE;
+
+ value = tegra_hsp_channel_readl(&db->channel, HSP_DB_PENDING);
+ tegra_hsp_channel_writel(&db->channel, value, HSP_DB_PENDING);
+
+ spin_lock(&hsp->lock);
+
+ for_each_set_bit(master, &value, hsp->mbox.num_chans) {
+ struct tegra_hsp_doorbell *db;
+
+ db = __tegra_hsp_doorbell_get(hsp, master);
+		/*
+		 * Depending on the bootloader chain, some doorbells may
+		 * already be enabled by the time this interrupt is first
+		 * requested, so it can fire immediately.
+		 *
+		 * In that case, db->channel.chan will still be NULL here and
+		 * cause a crash if not properly guarded.
+		 *
+		 * It remains to be seen whether ignoring the doorbell in that
+		 * case is the correct solution.
+		 */
+ if (db && db->channel.chan)
+ mbox_chan_received_data(db->channel.chan, NULL);
+ }
+
+ spin_unlock(&hsp->lock);
+
+ return IRQ_HANDLED;
+}
+
+static struct tegra_hsp_channel *
+tegra_hsp_doorbell_create(struct tegra_hsp *hsp, const char *name,
+ unsigned int master, unsigned int index)
+{
+ struct tegra_hsp_doorbell *db;
+ unsigned int offset;
+ unsigned long flags;
+
+ db = kzalloc(sizeof(*db), GFP_KERNEL);
+ if (!db)
+ return ERR_PTR(-ENOMEM);
+
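+	/*
+	 * Address map note: the doorbell region sits past the common page,
+	 * the shared mailboxes (apparently two per 64 KiB page, hence
+	 * num_sm / 2), the shared semaphores and the arbitrated semaphores;
+	 * each of those blocks spans a 64 KiB page (the << 16) and each
+	 * doorbell instance is 0x100 bytes wide.
+	 */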
+ offset = (1 + (hsp->num_sm / 2) + hsp->num_ss + hsp->num_as) << 16;
+ offset += index * 0x100;
+
+ db->channel.regs = hsp->regs + offset;
+ db->channel.hsp = hsp;
+
+ db->name = kstrdup_const(name, GFP_KERNEL);
+ db->master = master;
+ db->index = index;
+
+ spin_lock_irqsave(&hsp->lock, flags);
+ list_add_tail(&db->list, &hsp->doorbells);
+ spin_unlock_irqrestore(&hsp->lock, flags);
+
+ return &db->channel;
+}
+
+static void __tegra_hsp_doorbell_destroy(struct tegra_hsp_doorbell *db)
+{
+ list_del(&db->list);
+ kfree_const(db->name);
+ kfree(db);
+}
+
+static int tegra_hsp_doorbell_send_data(struct mbox_chan *chan, void *data)
+{
+ struct tegra_hsp_doorbell *db = chan->con_priv;
+
+ tegra_hsp_channel_writel(&db->channel, 1, HSP_DB_TRIGGER);
+
+ return 0;
+}
+
+static int tegra_hsp_doorbell_startup(struct mbox_chan *chan)
+{
+ struct tegra_hsp_doorbell *db = chan->con_priv;
+ struct tegra_hsp *hsp = db->channel.hsp;
+ struct tegra_hsp_doorbell *ccplex;
+ unsigned long flags;
+ u32 value;
+
+ if (db->master >= hsp->mbox.num_chans) {
+ dev_err(hsp->mbox.dev,
+ "invalid master ID %u for HSP channel\n",
+ db->master);
+ return -EINVAL;
+ }
+
+ ccplex = tegra_hsp_doorbell_get(hsp, TEGRA_HSP_DB_MASTER_CCPLEX);
+ if (!ccplex)
+ return -ENODEV;
+
+ if (!tegra_hsp_doorbell_can_ring(db))
+ return -ENODEV;
+
+ spin_lock_irqsave(&hsp->lock, flags);
+
+ value = tegra_hsp_channel_readl(&ccplex->channel, HSP_DB_ENABLE);
+ value |= BIT(db->master);
+ tegra_hsp_channel_writel(&ccplex->channel, value, HSP_DB_ENABLE);
+
+ spin_unlock_irqrestore(&hsp->lock, flags);
+
+ return 0;
+}
+
+static void tegra_hsp_doorbell_shutdown(struct mbox_chan *chan)
+{
+ struct tegra_hsp_doorbell *db = chan->con_priv;
+ struct tegra_hsp *hsp = db->channel.hsp;
+ struct tegra_hsp_doorbell *ccplex;
+ unsigned long flags;
+ u32 value;
+
+ ccplex = tegra_hsp_doorbell_get(hsp, TEGRA_HSP_DB_MASTER_CCPLEX);
+ if (!ccplex)
+ return;
+
+ spin_lock_irqsave(&hsp->lock, flags);
+
+ value = tegra_hsp_channel_readl(&ccplex->channel, HSP_DB_ENABLE);
+ value &= ~BIT(db->master);
+ tegra_hsp_channel_writel(&ccplex->channel, value, HSP_DB_ENABLE);
+
+ spin_unlock_irqrestore(&hsp->lock, flags);
+}
+
+static const struct mbox_chan_ops tegra_hsp_doorbell_ops = {
+ .send_data = tegra_hsp_doorbell_send_data,
+ .startup = tegra_hsp_doorbell_startup,
+ .shutdown = tegra_hsp_doorbell_shutdown,
+};
+
+static struct mbox_chan *of_tegra_hsp_xlate(struct mbox_controller *mbox,
+ const struct of_phandle_args *args)
+{
+ struct tegra_hsp_channel *channel = ERR_PTR(-ENODEV);
+ struct tegra_hsp *hsp = to_tegra_hsp(mbox);
+ unsigned int type = args->args[0];
+ unsigned int master = args->args[1];
+ struct tegra_hsp_doorbell *db;
+ struct mbox_chan *chan;
+ unsigned long flags;
+ unsigned int i;
+
+ switch (type) {
+ case TEGRA_HSP_MBOX_TYPE_DB:
+ db = tegra_hsp_doorbell_get(hsp, master);
+ if (db)
+ channel = &db->channel;
+
+ break;
+
+ default:
+ break;
+ }
+
+ if (IS_ERR(channel))
+ return ERR_CAST(channel);
+
+ spin_lock_irqsave(&hsp->lock, flags);
+
+ for (i = 0; i < hsp->mbox.num_chans; i++) {
+ chan = &hsp->mbox.chans[i];
+ if (!chan->con_priv) {
+ chan->con_priv = channel;
+ channel->chan = chan;
+ break;
+ }
+
+ chan = NULL;
+ }
+
+ spin_unlock_irqrestore(&hsp->lock, flags);
+
+ return chan ?: ERR_PTR(-EBUSY);
+}
+
+static void tegra_hsp_remove_doorbells(struct tegra_hsp *hsp)
+{
+ struct tegra_hsp_doorbell *db, *tmp;
+ unsigned long flags;
+
+ spin_lock_irqsave(&hsp->lock, flags);
+
+ list_for_each_entry_safe(db, tmp, &hsp->doorbells, list)
+ __tegra_hsp_doorbell_destroy(db);
+
+ spin_unlock_irqrestore(&hsp->lock, flags);
+}
+
+static int tegra_hsp_add_doorbells(struct tegra_hsp *hsp)
+{
+ const struct tegra_hsp_db_map *map = hsp->soc->map;
+ struct tegra_hsp_channel *channel;
+
+ while (map->name) {
+ channel = tegra_hsp_doorbell_create(hsp, map->name,
+ map->master, map->index);
+ if (IS_ERR(channel)) {
+ tegra_hsp_remove_doorbells(hsp);
+ return PTR_ERR(channel);
+ }
+
+ map++;
+ }
+
+ return 0;
+}
+
+static int tegra_hsp_probe(struct platform_device *pdev)
+{
+ struct tegra_hsp *hsp;
+ struct resource *res;
+ u32 value;
+ int err;
+
+ hsp = devm_kzalloc(&pdev->dev, sizeof(*hsp), GFP_KERNEL);
+ if (!hsp)
+ return -ENOMEM;
+
+ hsp->soc = of_device_get_match_data(&pdev->dev);
+ INIT_LIST_HEAD(&hsp->doorbells);
+ spin_lock_init(&hsp->lock);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ hsp->regs = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(hsp->regs))
+ return PTR_ERR(hsp->regs);
+
+ value = tegra_hsp_readl(hsp, HSP_INT_DIMENSIONING);
+ hsp->num_sm = (value >> HSP_nSM_SHIFT) & HSP_nINT_MASK;
+ hsp->num_ss = (value >> HSP_nSS_SHIFT) & HSP_nINT_MASK;
+ hsp->num_as = (value >> HSP_nAS_SHIFT) & HSP_nINT_MASK;
+ hsp->num_db = (value >> HSP_nDB_SHIFT) & HSP_nINT_MASK;
+ hsp->num_si = (value >> HSP_nSI_SHIFT) & HSP_nINT_MASK;
+
+ err = platform_get_irq_byname(pdev, "doorbell");
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to get doorbell IRQ: %d\n", err);
+ return err;
+ }
+
+ hsp->irq = err;
+
+ hsp->mbox.of_xlate = of_tegra_hsp_xlate;
+ hsp->mbox.num_chans = 32;
+ hsp->mbox.dev = &pdev->dev;
+ hsp->mbox.txdone_irq = false;
+ hsp->mbox.txdone_poll = false;
+ hsp->mbox.ops = &tegra_hsp_doorbell_ops;
+
+ hsp->mbox.chans = devm_kcalloc(&pdev->dev, hsp->mbox.num_chans,
+ sizeof(*hsp->mbox.chans),
+ GFP_KERNEL);
+ if (!hsp->mbox.chans)
+ return -ENOMEM;
+
+ err = tegra_hsp_add_doorbells(hsp);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to add doorbells: %d\n", err);
+ return err;
+ }
+
+ platform_set_drvdata(pdev, hsp);
+
+ err = mbox_controller_register(&hsp->mbox);
+ if (err) {
+ dev_err(&pdev->dev, "failed to register mailbox: %d\n", err);
+ tegra_hsp_remove_doorbells(hsp);
+ return err;
+ }
+
+ err = devm_request_irq(&pdev->dev, hsp->irq, tegra_hsp_doorbell_irq,
+ IRQF_NO_SUSPEND, dev_name(&pdev->dev), hsp);
+	if (err < 0) {
+		dev_err(&pdev->dev, "failed to request IRQ#%u: %d\n",
+			hsp->irq, err);
+		mbox_controller_unregister(&hsp->mbox);
+		tegra_hsp_remove_doorbells(hsp);
+		return err;
+	}
+
+ return 0;
+}
+
+static int tegra_hsp_remove(struct platform_device *pdev)
+{
+ struct tegra_hsp *hsp = platform_get_drvdata(pdev);
+
+ mbox_controller_unregister(&hsp->mbox);
+ tegra_hsp_remove_doorbells(hsp);
+
+ return 0;
+}
+
+static const struct tegra_hsp_db_map tegra186_hsp_db_map[] = {
+ { "ccplex", TEGRA_HSP_DB_MASTER_CCPLEX, HSP_DB_CCPLEX, },
+ { "bpmp", TEGRA_HSP_DB_MASTER_BPMP, HSP_DB_BPMP, },
+ { /* sentinel */ }
+};
+
+static const struct tegra_hsp_soc tegra186_hsp_soc = {
+ .map = tegra186_hsp_db_map,
+};
+
+static const struct of_device_id tegra_hsp_match[] = {
+ { .compatible = "nvidia,tegra186-hsp", .data = &tegra186_hsp_soc },
+ { }
+};
+
+static struct platform_driver tegra_hsp_driver = {
+ .driver = {
+ .name = "tegra-hsp",
+ .of_match_table = tegra_hsp_match,
+ },
+ .probe = tegra_hsp_probe,
+ .remove = tegra_hsp_remove,
+};
+
+static int __init tegra_hsp_init(void)
+{
+ return platform_driver_register(&tegra_hsp_driver);
+}
+core_initcall(tegra_hsp_init);
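
Consumers reach this controller through the generic mailbox client API. Below is a minimal sketch, assuming a device tree node whose mboxes property carries the <type master> specifier decoded by of_tegra_hsp_xlate() above; the client and callback names are hypothetical:

#include <linux/mailbox_client.h>

static void bpmp_rx_callback(struct mbox_client *cl, void *data)
{
	/* Doorbells carry no payload: data is NULL, the ring itself is
	 * the event (see tegra_hsp_doorbell_irq() above). */
}

static struct mbox_chan *request_bpmp_doorbell(struct device *dev)
{
	struct mbox_client *cl;

	cl = devm_kzalloc(dev, sizeof(*cl), GFP_KERNEL);
	if (!cl)
		return ERR_PTR(-ENOMEM);

	cl->dev = dev;
	cl->rx_callback = bpmp_rx_callback;

	/* Resolves via the "mboxes" DT property, e.g.
	 * mboxes = <&hsp_top TEGRA_HSP_MBOX_TYPE_DB TEGRA_HSP_DB_MASTER_BPMP>; */
	return mbox_request_channel(cl, 0);
}

Ringing the doorbell is then mbox_send_message(chan, NULL), which lands in tegra_hsp_doorbell_send_data() above.
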
diff --git a/drivers/memory/Kconfig b/drivers/memory/Kconfig
index 4b4c0c3c3d2f..ec80e35c8dfe 100644
--- a/drivers/memory/Kconfig
+++ b/drivers/memory/Kconfig
@@ -134,6 +134,14 @@ config MTK_SMI
mainly help enable/disable iommu and control the power domain and
clocks for each local arbiter.
+config DA8XX_DDRCTL
+ bool "Texas Instruments da8xx DDR2/mDDR driver"
+ depends on ARCH_DAVINCI_DA8XX
+ help
+ This driver is for the DDR2/mDDR Memory Controller present on
+ Texas Instruments da8xx SoCs. It's used to tweak various memory
+ controller configuration options.
+
source "drivers/memory/samsung/Kconfig"
source "drivers/memory/tegra/Kconfig"
diff --git a/drivers/memory/Makefile b/drivers/memory/Makefile
index b20ae38b5bfb..e88097fbc085 100644
--- a/drivers/memory/Makefile
+++ b/drivers/memory/Makefile
@@ -17,6 +17,7 @@ obj-$(CONFIG_MVEBU_DEVBUS) += mvebu-devbus.o
obj-$(CONFIG_TEGRA20_MC) += tegra20-mc.o
obj-$(CONFIG_JZ4780_NEMC) += jz4780-nemc.o
obj-$(CONFIG_MTK_SMI) += mtk-smi.o
+obj-$(CONFIG_DA8XX_DDRCTL) += da8xx-ddrctl.o
obj-$(CONFIG_SAMSUNG_MC) += samsung/
obj-$(CONFIG_TEGRA_MC) += tegra/
diff --git a/drivers/memory/atmel-ebi.c b/drivers/memory/atmel-ebi.c
index b5ed3bd082b5..047d6fcdcec2 100644
--- a/drivers/memory/atmel-ebi.c
+++ b/drivers/memory/atmel-ebi.c
@@ -657,7 +657,7 @@ static int at91_ebi_dev_disable(struct at91_ebi *ebi, struct device_node *np)
return -ENOMEM;
newprop->value = devm_kstrdup(dev, "disabled", GFP_KERNEL);
- if (!newprop->name)
+ if (!newprop->value)
return -ENOMEM;
newprop->length = sizeof("disabled");
diff --git a/drivers/memory/atmel-sdramc.c b/drivers/memory/atmel-sdramc.c
index 12080b05e3e6..b418b39af180 100644
--- a/drivers/memory/atmel-sdramc.c
+++ b/drivers/memory/atmel-sdramc.c
@@ -85,8 +85,4 @@ static struct platform_driver atmel_ramc_driver = {
},
};
-static int __init atmel_ramc_init(void)
-{
- return platform_driver_register(&atmel_ramc_driver);
-}
-device_initcall(atmel_ramc_init);
+builtin_platform_driver(atmel_ramc_driver);
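
The builtin_platform_driver() conversions in this series (here and in several reset drivers below) are purely mechanical for built-in code; to a first approximation the macro expands back into the boilerplate it replaces:

/* Roughly what builtin_platform_driver(atmel_ramc_driver) expands to
 * (see include/linux/platform_device.h and include/linux/device.h): */
static int __init atmel_ramc_driver_init(void)
{
	return platform_driver_register(&atmel_ramc_driver);
}
device_initcall(atmel_ramc_driver_init);
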
diff --git a/drivers/memory/da8xx-ddrctl.c b/drivers/memory/da8xx-ddrctl.c
new file mode 100644
index 000000000000..030afbe29d0c
--- /dev/null
+++ b/drivers/memory/da8xx-ddrctl.c
@@ -0,0 +1,173 @@
+/*
+ * TI da8xx DDR2/mDDR controller driver
+ *
+ * Copyright (C) 2016 BayLibre SAS
+ *
+ * Author:
+ * Bartosz Golaszewski <bgolaszewski@baylibre.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+
+/*
+ * REVISIT: Linux doesn't have a good framework for the kind of performance
+ * knobs this driver controls. We can't use device tree properties, since
+ * this is hardware configuration rather than hardware description. We also
+ * don't want to commit to maintaining some random sysfs attributes.
+ *
+ * For now we just hardcode the register values for the boards that need
+ * some changes (as is the case for the LCD controller on da850-lcdk - the
+ * first board we support here). Once Linux gains an appropriate framework,
+ * converting the driver to it will be straightforward.
+ */
+
+struct da8xx_ddrctl_config_knob {
+ const char *name;
+ u32 reg;
+ u32 mask;
+ u32 shift;
+};
+
+static const struct da8xx_ddrctl_config_knob da8xx_ddrctl_knobs[] = {
+ {
+ .name = "da850-pbbpr",
+ .reg = 0x20,
+ .mask = 0xffffff00,
+ .shift = 0,
+ },
+};
+
+struct da8xx_ddrctl_setting {
+ const char *name;
+ u32 val;
+};
+
+struct da8xx_ddrctl_board_settings {
+ const char *board;
+ const struct da8xx_ddrctl_setting *settings;
+};
+
+static const struct da8xx_ddrctl_setting da850_lcdk_ddrctl_settings[] = {
+ {
+ .name = "da850-pbbpr",
+ .val = 0x20,
+ },
+ { }
+};
+
+static const struct da8xx_ddrctl_board_settings da8xx_ddrctl_board_confs[] = {
+ {
+ .board = "ti,da850-lcdk",
+ .settings = da850_lcdk_ddrctl_settings,
+ },
+};
+
+static const struct da8xx_ddrctl_config_knob *
+da8xx_ddrctl_match_knob(const struct da8xx_ddrctl_setting *setting)
+{
+ const struct da8xx_ddrctl_config_knob *knob;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(da8xx_ddrctl_knobs); i++) {
+ knob = &da8xx_ddrctl_knobs[i];
+
+ if (strcmp(knob->name, setting->name) == 0)
+ return knob;
+ }
+
+ return NULL;
+}
+
+static const struct da8xx_ddrctl_setting *da8xx_ddrctl_get_board_settings(void)
+{
+ const struct da8xx_ddrctl_board_settings *board_settings;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(da8xx_ddrctl_board_confs); i++) {
+ board_settings = &da8xx_ddrctl_board_confs[i];
+
+ if (of_machine_is_compatible(board_settings->board))
+ return board_settings->settings;
+ }
+
+ return NULL;
+}
+
+static int da8xx_ddrctl_probe(struct platform_device *pdev)
+{
+ const struct da8xx_ddrctl_config_knob *knob;
+ const struct da8xx_ddrctl_setting *setting;
+ struct device_node *node;
+ struct resource *res;
+ void __iomem *ddrctl;
+ struct device *dev;
+ u32 reg;
+
+ dev = &pdev->dev;
+ node = dev->of_node;
+
+ setting = da8xx_ddrctl_get_board_settings();
+ if (!setting) {
+ dev_err(dev, "no settings defined for this board\n");
+ return -EINVAL;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ ddrctl = devm_ioremap_resource(dev, res);
+ if (IS_ERR(ddrctl)) {
+ dev_err(dev, "unable to map memory controller registers\n");
+ return PTR_ERR(ddrctl);
+ }
+
+ for (; setting->name; setting++) {
+ knob = da8xx_ddrctl_match_knob(setting);
+ if (!knob) {
+ dev_warn(dev,
+ "no such config option: %s\n", setting->name);
+ continue;
+ }
+
+ if (knob->reg + sizeof(u32) > resource_size(res)) {
+ dev_warn(dev,
+ "register offset of '%s' exceeds mapped memory size\n",
+ knob->name);
+ continue;
+ }
+
+ reg = readl(ddrctl + knob->reg);
+ reg &= knob->mask;
+ reg |= setting->val << knob->shift;
+
+ dev_dbg(dev, "writing 0x%08x to %s\n", reg, setting->name);
+
+ writel(reg, ddrctl + knob->reg);
+ }
+
+ return 0;
+}
+
+static const struct of_device_id da8xx_ddrctl_of_match[] = {
+ { .compatible = "ti,da850-ddr-controller", },
+ { },
+};
+
+static struct platform_driver da8xx_ddrctl_driver = {
+ .probe = da8xx_ddrctl_probe,
+ .driver = {
+ .name = "da850-ddr-controller",
+ .of_match_table = da8xx_ddrctl_of_match,
+ },
+};
+module_platform_driver(da8xx_ddrctl_driver);
+
+MODULE_AUTHOR("Bartosz Golaszewski <bgolaszewski@baylibre.com>");
+MODULE_DESCRIPTION("TI da8xx DDR2/mDDR controller driver");
+MODULE_LICENSE("GPL v2");
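
To make the knob mechanics concrete, this is the read-modify-write the probe loop above performs for the single knob currently defined ("da850-pbbpr": reg 0x20, mask 0xffffff00, shift 0) with the da850-lcdk value of 0x20 — a worked sketch of one iteration, not additional driver code:

u32 reg;

reg = readl(ddrctl + 0x20);	/* knob->reg: the PBBPR register   */
reg &= 0xffffff00;		/* knob->mask: preserve bits 31:8  */
reg |= 0x20 << 0;		/* setting->val << knob->shift     */
writel(reg, ddrctl + 0x20);	/* PBBPR[7:0] is now 0x20          */
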
diff --git a/drivers/misc/sram.c b/drivers/misc/sram.c
index f84b53d6ce50..b33ab8ce47ab 100644
--- a/drivers/misc/sram.c
+++ b/drivers/misc/sram.c
@@ -19,12 +19,17 @@
*/
#include <linux/clk.h>
+#include <linux/delay.h>
#include <linux/genalloc.h>
#include <linux/io.h>
#include <linux/list_sort.h>
#include <linux/of_address.h>
+#include <linux/of_device.h>
#include <linux/platform_device.h>
+#include <linux/regmap.h>
#include <linux/slab.h>
+#include <linux/mfd/syscon.h>
+#include <soc/at91/atmel-secumod.h>
#define SRAM_GRANULARITY 32
@@ -334,12 +339,33 @@ static int sram_reserve_regions(struct sram_dev *sram, struct resource *res)
return ret;
}
+static int atmel_securam_wait(void)
+{
+ struct regmap *regmap;
+ u32 val;
+
+ regmap = syscon_regmap_lookup_by_compatible("atmel,sama5d2-secumod");
+ if (IS_ERR(regmap))
+ return -ENODEV;
+
+ return regmap_read_poll_timeout(regmap, AT91_SECUMOD_RAMRDY, val,
+ val & AT91_SECUMOD_RAMRDY_READY,
+ 10000, 500000);
+}
+
+static const struct of_device_id sram_dt_ids[] = {
+ { .compatible = "mmio-sram" },
+ { .compatible = "atmel,sama5d2-securam", .data = atmel_securam_wait },
+ {}
+};
+
static int sram_probe(struct platform_device *pdev)
{
struct sram_dev *sram;
struct resource *res;
size_t size;
int ret;
+ int (*init_func)(void);
sram = devm_kzalloc(&pdev->dev, sizeof(*sram), GFP_KERNEL);
if (!sram)
@@ -384,6 +410,13 @@ static int sram_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, sram);
+ init_func = of_device_get_match_data(&pdev->dev);
+ if (init_func) {
+ ret = init_func();
+ if (ret)
+ return ret;
+ }
+
dev_dbg(sram->dev, "SRAM pool: %zu KiB @ 0x%p\n",
gen_pool_size(sram->pool) / 1024, sram->virt_base);
@@ -405,17 +438,10 @@ static int sram_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_OF
-static const struct of_device_id sram_dt_ids[] = {
- { .compatible = "mmio-sram" },
- {}
-};
-#endif
-
static struct platform_driver sram_driver = {
.driver = {
.name = "sram",
- .of_match_table = of_match_ptr(sram_dt_ids),
+ .of_match_table = sram_dt_ids,
},
.probe = sram_probe,
.remove = sram_remove,
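
atmel_securam_wait() above relies on regmap_read_poll_timeout(); as a rough sketch of its semantics (not the macro's exact implementation), the call amounts to polling the RAMRDY bit about every 10 ms for up to 500 ms:

/* Approximate open-coded equivalent of the regmap_read_poll_timeout()
 * call in atmel_securam_wait() above. */
static int atmel_securam_wait_open_coded(struct regmap *regmap)
{
	unsigned long timeout = jiffies + usecs_to_jiffies(500000);
	u32 val;
	int ret;

	for (;;) {
		ret = regmap_read(regmap, AT91_SECUMOD_RAMRDY, &val);
		if (ret)
			return ret;
		if (val & AT91_SECUMOD_RAMRDY_READY)
			return 0;
		if (time_after(jiffies, timeout))
			return -ETIMEDOUT;
		usleep_range(10000, 11000);
	}
}
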
diff --git a/drivers/reset/Kconfig b/drivers/reset/Kconfig
index 06d9fa2f3bc0..172dc966a01f 100644
--- a/drivers/reset/Kconfig
+++ b/drivers/reset/Kconfig
@@ -94,5 +94,6 @@ config RESET_ZYNQ
source "drivers/reset/sti/Kconfig"
source "drivers/reset/hisilicon/Kconfig"
+source "drivers/reset/tegra/Kconfig"
endif
diff --git a/drivers/reset/Makefile b/drivers/reset/Makefile
index bbe7026617fc..13b346e03d84 100644
--- a/drivers/reset/Makefile
+++ b/drivers/reset/Makefile
@@ -1,6 +1,7 @@
obj-y += core.o
obj-y += hisilicon/
obj-$(CONFIG_ARCH_STI) += sti/
+obj-$(CONFIG_ARCH_TEGRA) += tegra/
obj-$(CONFIG_RESET_ATH79) += reset-ath79.o
obj-$(CONFIG_RESET_BERLIN) += reset-berlin.o
obj-$(CONFIG_RESET_LPC18XX) += reset-lpc18xx.o
diff --git a/drivers/reset/core.c b/drivers/reset/core.c
index b8ae1dbd4c17..10368ed8fd13 100644
--- a/drivers/reset/core.c
+++ b/drivers/reset/core.c
@@ -32,6 +32,9 @@ static LIST_HEAD(reset_controller_list);
* @refcnt: Number of gets of this reset_control
* @shared: Is this a shared (1), or an exclusive (0) reset_control?
* @deassert_cnt: Number of times this reset line has been deasserted
+ * @triggered_count: Number of times this reset line has been reset. Currently
+ * only used for shared resets, which means that the value
+ * will be either 0 or 1.
*/
struct reset_control {
struct reset_controller_dev *rcdev;
@@ -40,6 +43,7 @@ struct reset_control {
unsigned int refcnt;
int shared;
atomic_t deassert_count;
+ atomic_t triggered_count;
};
/**
@@ -134,18 +138,35 @@ EXPORT_SYMBOL_GPL(devm_reset_controller_register);
* reset_control_reset - reset the controlled device
* @rstc: reset controller
*
- * Calling this on a shared reset controller is an error.
+ * On a shared reset line, the actual reset pulse is triggered only once for
+ * the lifetime of the reset_control instance: for all but the first caller
+ * it is a no-op.
+ * Consumers must not use reset_control_(de)assert on shared reset lines when
+ * reset_control_reset has been used.
*/
int reset_control_reset(struct reset_control *rstc)
{
- if (WARN_ON(IS_ERR_OR_NULL(rstc)) ||
- WARN_ON(rstc->shared))
+ int ret;
+
+ if (WARN_ON(IS_ERR_OR_NULL(rstc)))
return -EINVAL;
- if (rstc->rcdev->ops->reset)
- return rstc->rcdev->ops->reset(rstc->rcdev, rstc->id);
+ if (!rstc->rcdev->ops->reset)
+ return -ENOTSUPP;
- return -ENOTSUPP;
+ if (rstc->shared) {
+ if (WARN_ON(atomic_read(&rstc->deassert_count) != 0))
+ return -EINVAL;
+
+ if (atomic_inc_return(&rstc->triggered_count) != 1)
+ return 0;
+ }
+
+ ret = rstc->rcdev->ops->reset(rstc->rcdev, rstc->id);
+	if (rstc->shared && ret)
+ atomic_dec(&rstc->triggered_count);
+
+ return ret;
}
EXPORT_SYMBOL_GPL(reset_control_reset);
@@ -159,6 +180,8 @@ EXPORT_SYMBOL_GPL(reset_control_reset);
*
* For shared reset controls a driver cannot expect the hw's registers and
* internal state to be reset, but must be prepared for this to happen.
+ * Consumers must not use reset_control_reset on shared reset lines when
+ * reset_control_(de)assert has been used.
*/
int reset_control_assert(struct reset_control *rstc)
{
@@ -169,6 +192,9 @@ int reset_control_assert(struct reset_control *rstc)
return -ENOTSUPP;
if (rstc->shared) {
+ if (WARN_ON(atomic_read(&rstc->triggered_count) != 0))
+ return -EINVAL;
+
if (WARN_ON(atomic_read(&rstc->deassert_count) == 0))
return -EINVAL;
@@ -185,6 +211,8 @@ EXPORT_SYMBOL_GPL(reset_control_assert);
* @rstc: reset controller
*
* After calling this function, the reset is guaranteed to be deasserted.
+ * Consumers must not use reset_control_reset on shared reset lines when
+ * reset_control_(de)assert has been used.
*/
int reset_control_deassert(struct reset_control *rstc)
{
@@ -195,6 +223,9 @@ int reset_control_deassert(struct reset_control *rstc)
return -ENOTSUPP;
if (rstc->shared) {
+ if (WARN_ON(atomic_read(&rstc->triggered_count) != 0))
+ return -EINVAL;
+
if (atomic_inc_return(&rstc->deassert_count) != 1)
return 0;
}
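
From a consumer's perspective, the rule introduced here is that a shared reset line may be either pulsed or level-controlled, never both. A minimal sketch of the pulse pattern, with a hypothetical device and line name:

static int example_probe(struct device *dev)
{
	struct reset_control *rstc;

	/* Shared line with pulse semantics: only the first consumer to
	 * call reset_control_reset() triggers the pulse; later calls
	 * return 0 without touching the hardware. */
	rstc = devm_reset_control_get_shared(dev, "bus");
	if (IS_ERR(rstc))
		return PTR_ERR(rstc);

	/* Mixing reset_control_assert()/deassert() with this call on
	 * the same shared line now trips the WARN_ONs added above and
	 * returns -EINVAL. */
	return reset_control_reset(rstc);
}
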
diff --git a/drivers/reset/reset-berlin.c b/drivers/reset/reset-berlin.c
index 369f3917fd8e..371197bbd055 100644
--- a/drivers/reset/reset-berlin.c
+++ b/drivers/reset/reset-berlin.c
@@ -1,6 +1,8 @@
/*
* Copyright (C) 2014 Marvell Technology Group Ltd.
*
+ * Marvell Berlin reset driver
+ *
* Antoine Tenart <antoine.tenart@free-electrons.com>
* Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com>
*
@@ -12,7 +14,7 @@
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/mfd/syscon.h>
-#include <linux/module.h>
+#include <linux/init.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
@@ -91,7 +93,6 @@ static const struct of_device_id berlin_reset_dt_match[] = {
{ .compatible = "marvell,berlin2-reset" },
{ },
};
-MODULE_DEVICE_TABLE(of, berlin_reset_dt_match);
static struct platform_driver berlin_reset_driver = {
.probe = berlin2_reset_probe,
@@ -100,9 +101,4 @@ static struct platform_driver berlin_reset_driver = {
.of_match_table = berlin_reset_dt_match,
},
};
-module_platform_driver(berlin_reset_driver);
-
-MODULE_AUTHOR("Antoine Tenart <antoine.tenart@free-electrons.com>");
-MODULE_AUTHOR("Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com>");
-MODULE_DESCRIPTION("Marvell Berlin reset driver");
-MODULE_LICENSE("GPL");
+builtin_platform_driver(berlin_reset_driver);
diff --git a/drivers/reset/reset-lpc18xx.c b/drivers/reset/reset-lpc18xx.c
index 54cca0055171..a62ad52e262b 100644
--- a/drivers/reset/reset-lpc18xx.c
+++ b/drivers/reset/reset-lpc18xx.c
@@ -13,7 +13,7 @@
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/io.h>
-#include <linux/module.h>
+#include <linux/init.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/reboot.h>
@@ -218,39 +218,17 @@ dis_clk_reg:
return ret;
}
-static int lpc18xx_rgu_remove(struct platform_device *pdev)
-{
- struct lpc18xx_rgu_data *rc = platform_get_drvdata(pdev);
- int ret;
-
- ret = unregister_restart_handler(&rc->restart_nb);
- if (ret)
- dev_warn(&pdev->dev, "failed to unregister restart handler\n");
-
- reset_controller_unregister(&rc->rcdev);
-
- clk_disable_unprepare(rc->clk_delay);
- clk_disable_unprepare(rc->clk_reg);
-
- return 0;
-}
-
static const struct of_device_id lpc18xx_rgu_match[] = {
{ .compatible = "nxp,lpc1850-rgu" },
{ }
};
-MODULE_DEVICE_TABLE(of, lpc18xx_rgu_match);
static struct platform_driver lpc18xx_rgu_driver = {
.probe = lpc18xx_rgu_probe,
- .remove = lpc18xx_rgu_remove,
.driver = {
- .name = "lpc18xx-reset",
- .of_match_table = lpc18xx_rgu_match,
+ .name = "lpc18xx-reset",
+ .of_match_table = lpc18xx_rgu_match,
+ .suppress_bind_attrs = true,
},
};
-module_platform_driver(lpc18xx_rgu_driver);
-
-MODULE_AUTHOR("Joachim Eastwood <manabian@gmail.com>");
-MODULE_DESCRIPTION("Reset driver for LPC18xx/43xx RGU");
-MODULE_LICENSE("GPL v2");
+builtin_platform_driver(lpc18xx_rgu_driver);
diff --git a/drivers/reset/reset-oxnas.c b/drivers/reset/reset-oxnas.c
index 944980572f79..0d9036dea010 100644
--- a/drivers/reset/reset-oxnas.c
+++ b/drivers/reset/reset-oxnas.c
@@ -80,6 +80,7 @@ static const struct reset_control_ops oxnas_reset_ops = {
static const struct of_device_id oxnas_reset_dt_ids[] = {
{ .compatible = "oxsemi,ox810se-reset", },
+ { .compatible = "oxsemi,ox820-reset", },
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, oxnas_reset_dt_ids);
diff --git a/drivers/reset/reset-socfpga.c b/drivers/reset/reset-socfpga.c
index 78ebf8424375..43e4a9f39b9b 100644
--- a/drivers/reset/reset-socfpga.c
+++ b/drivers/reset/reset-socfpga.c
@@ -1,4 +1,6 @@
/*
+ * Socfpga Reset Controller Driver
+ *
* Copyright 2014 Steffen Trumtrar <s.trumtrar@pengutronix.de>
*
* based on
@@ -16,7 +18,7 @@
#include <linux/err.h>
#include <linux/io.h>
-#include <linux/module.h>
+#include <linux/init.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/reset-controller.h>
@@ -148,8 +150,4 @@ static struct platform_driver socfpga_reset_driver = {
.of_match_table = socfpga_reset_dt_ids,
},
};
-module_platform_driver(socfpga_reset_driver);
-
-MODULE_AUTHOR("Steffen Trumtrar <s.trumtrar@pengutronix.de");
-MODULE_DESCRIPTION("Socfpga Reset Controller Driver");
-MODULE_LICENSE("GPL");
+builtin_platform_driver(socfpga_reset_driver);
diff --git a/drivers/reset/reset-sunxi.c b/drivers/reset/reset-sunxi.c
index 3080190b3f90..b44f6b5f87b6 100644
--- a/drivers/reset/reset-sunxi.c
+++ b/drivers/reset/reset-sunxi.c
@@ -13,7 +13,7 @@
#include <linux/err.h>
#include <linux/io.h>
-#include <linux/module.h>
+#include <linux/init.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
@@ -142,7 +142,6 @@ static const struct of_device_id sunxi_reset_dt_ids[] = {
{ .compatible = "allwinner,sun6i-a31-clock-reset", },
{ /* sentinel */ },
};
-MODULE_DEVICE_TABLE(of, sunxi_reset_dt_ids);
static int sunxi_reset_probe(struct platform_device *pdev)
{
@@ -175,8 +174,4 @@ static struct platform_driver sunxi_reset_driver = {
.of_match_table = sunxi_reset_dt_ids,
},
};
-module_platform_driver(sunxi_reset_driver);
-
-MODULE_AUTHOR("Maxime Ripard <maxime.ripard@free-electrons.com");
-MODULE_DESCRIPTION("Allwinner SoCs Reset Controller Driver");
-MODULE_LICENSE("GPL");
+builtin_platform_driver(sunxi_reset_driver);
diff --git a/drivers/reset/reset-zynq.c b/drivers/reset/reset-zynq.c
index 138f2f205662..87a4e355578f 100644
--- a/drivers/reset/reset-zynq.c
+++ b/drivers/reset/reset-zynq.c
@@ -3,6 +3,8 @@
*
* Xilinx Zynq Reset controller driver
*
+ * Author: Moritz Fischer <moritz.fischer@ettus.com>
+ *
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
@@ -15,7 +17,7 @@
#include <linux/err.h>
#include <linux/io.h>
-#include <linux/module.h>
+#include <linux/init.h>
#include <linux/mfd/syscon.h>
#include <linux/of.h>
#include <linux/platform_device.h>
@@ -137,8 +139,4 @@ static struct platform_driver zynq_reset_driver = {
.of_match_table = zynq_reset_dt_ids,
},
};
-module_platform_driver(zynq_reset_driver);
-
-MODULE_LICENSE("GPL v2");
-MODULE_AUTHOR("Moritz Fischer <moritz.fischer@ettus.com>");
-MODULE_DESCRIPTION("Zynq Reset Controller Driver");
+builtin_platform_driver(zynq_reset_driver);
diff --git a/drivers/reset/sti/Kconfig b/drivers/reset/sti/Kconfig
index 613178553612..71592b5bfd14 100644
--- a/drivers/reset/sti/Kconfig
+++ b/drivers/reset/sti/Kconfig
@@ -3,14 +3,6 @@ if ARCH_STI
config STI_RESET_SYSCFG
bool
-config STIH415_RESET
- bool
- select STI_RESET_SYSCFG
-
-config STIH416_RESET
- bool
- select STI_RESET_SYSCFG
-
config STIH407_RESET
bool
select STI_RESET_SYSCFG
diff --git a/drivers/reset/sti/Makefile b/drivers/reset/sti/Makefile
index dc85dfbe56a9..f9d82411f29e 100644
--- a/drivers/reset/sti/Makefile
+++ b/drivers/reset/sti/Makefile
@@ -1,5 +1,3 @@
obj-$(CONFIG_STI_RESET_SYSCFG) += reset-syscfg.o
-obj-$(CONFIG_STIH415_RESET) += reset-stih415.o
-obj-$(CONFIG_STIH416_RESET) += reset-stih416.o
obj-$(CONFIG_STIH407_RESET) += reset-stih407.o
diff --git a/drivers/reset/sti/reset-stih415.c b/drivers/reset/sti/reset-stih415.c
deleted file mode 100644
index 6f220cdbef46..000000000000
--- a/drivers/reset/sti/reset-stih415.c
+++ /dev/null
@@ -1,112 +0,0 @@
-/*
- * Copyright (C) 2013 STMicroelectronics (R&D) Limited
- * Author: Stephen Gallimore <stephen.gallimore@st.com>
- * Author: Srinivas Kandagatla <srinivas.kandagatla@st.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-#include <linux/module.h>
-#include <linux/of.h>
-#include <linux/of_platform.h>
-#include <linux/platform_device.h>
-
-#include <dt-bindings/reset/stih415-resets.h>
-
-#include "reset-syscfg.h"
-
-/*
- * STiH415 Peripheral powerdown definitions.
- */
-static const char stih415_front[] = "st,stih415-front-syscfg";
-static const char stih415_rear[] = "st,stih415-rear-syscfg";
-static const char stih415_sbc[] = "st,stih415-sbc-syscfg";
-static const char stih415_lpm[] = "st,stih415-lpm-syscfg";
-
-#define STIH415_PDN_FRONT(_bit) \
- _SYSCFG_RST_CH(stih415_front, SYSCFG_114, _bit, SYSSTAT_187, _bit)
-
-#define STIH415_PDN_REAR(_cntl, _stat) \
- _SYSCFG_RST_CH(stih415_rear, SYSCFG_336, _cntl, SYSSTAT_384, _stat)
-
-#define STIH415_SRST_REAR(_reg, _bit) \
- _SYSCFG_RST_CH_NO_ACK(stih415_rear, _reg, _bit)
-
-#define STIH415_SRST_SBC(_reg, _bit) \
- _SYSCFG_RST_CH_NO_ACK(stih415_sbc, _reg, _bit)
-
-#define STIH415_SRST_FRONT(_reg, _bit) \
- _SYSCFG_RST_CH_NO_ACK(stih415_front, _reg, _bit)
-
-#define STIH415_SRST_LPM(_reg, _bit) \
- _SYSCFG_RST_CH_NO_ACK(stih415_lpm, _reg, _bit)
-
-#define SYSCFG_114 0x38 /* Powerdown request EMI/NAND/Keyscan */
-#define SYSSTAT_187 0x15c /* Powerdown status EMI/NAND/Keyscan */
-
-#define SYSCFG_336 0x90 /* Powerdown request USB/SATA/PCIe */
-#define SYSSTAT_384 0x150 /* Powerdown status USB/SATA/PCIe */
-
-#define SYSCFG_376 0x130 /* Reset generator 0 control 0 */
-#define SYSCFG_166 0x108 /* Softreset Ethernet 0 */
-#define SYSCFG_31 0x7c /* Softreset Ethernet 1 */
-#define LPM_SYSCFG_1 0x4 /* Softreset IRB */
-
-static const struct syscfg_reset_channel_data stih415_powerdowns[] = {
- [STIH415_EMISS_POWERDOWN] = STIH415_PDN_FRONT(0),
- [STIH415_NAND_POWERDOWN] = STIH415_PDN_FRONT(1),
- [STIH415_KEYSCAN_POWERDOWN] = STIH415_PDN_FRONT(2),
- [STIH415_USB0_POWERDOWN] = STIH415_PDN_REAR(0, 0),
- [STIH415_USB1_POWERDOWN] = STIH415_PDN_REAR(1, 1),
- [STIH415_USB2_POWERDOWN] = STIH415_PDN_REAR(2, 2),
- [STIH415_SATA0_POWERDOWN] = STIH415_PDN_REAR(3, 3),
- [STIH415_SATA1_POWERDOWN] = STIH415_PDN_REAR(4, 4),
- [STIH415_PCIE_POWERDOWN] = STIH415_PDN_REAR(5, 8),
-};
-
-static const struct syscfg_reset_channel_data stih415_softresets[] = {
- [STIH415_ETH0_SOFTRESET] = STIH415_SRST_FRONT(SYSCFG_166, 0),
- [STIH415_ETH1_SOFTRESET] = STIH415_SRST_SBC(SYSCFG_31, 0),
- [STIH415_IRB_SOFTRESET] = STIH415_SRST_LPM(LPM_SYSCFG_1, 6),
- [STIH415_USB0_SOFTRESET] = STIH415_SRST_REAR(SYSCFG_376, 9),
- [STIH415_USB1_SOFTRESET] = STIH415_SRST_REAR(SYSCFG_376, 10),
- [STIH415_USB2_SOFTRESET] = STIH415_SRST_REAR(SYSCFG_376, 11),
- [STIH415_KEYSCAN_SOFTRESET] = STIH415_SRST_LPM(LPM_SYSCFG_1, 8),
-};
-
-static struct syscfg_reset_controller_data stih415_powerdown_controller = {
- .wait_for_ack = true,
- .nr_channels = ARRAY_SIZE(stih415_powerdowns),
- .channels = stih415_powerdowns,
-};
-
-static struct syscfg_reset_controller_data stih415_softreset_controller = {
- .wait_for_ack = false,
- .active_low = true,
- .nr_channels = ARRAY_SIZE(stih415_softresets),
- .channels = stih415_softresets,
-};
-
-static const struct of_device_id stih415_reset_match[] = {
- { .compatible = "st,stih415-powerdown",
- .data = &stih415_powerdown_controller, },
- { .compatible = "st,stih415-softreset",
- .data = &stih415_softreset_controller, },
- {},
-};
-
-static struct platform_driver stih415_reset_driver = {
- .probe = syscfg_reset_probe,
- .driver = {
- .name = "reset-stih415",
- .of_match_table = stih415_reset_match,
- },
-};
-
-static int __init stih415_reset_init(void)
-{
- return platform_driver_register(&stih415_reset_driver);
-}
-arch_initcall(stih415_reset_init);
diff --git a/drivers/reset/sti/reset-stih416.c b/drivers/reset/sti/reset-stih416.c
deleted file mode 100644
index c581d606ef0f..000000000000
--- a/drivers/reset/sti/reset-stih416.c
+++ /dev/null
@@ -1,143 +0,0 @@
-/*
- * Copyright (C) 2013 STMicroelectronics (R&D) Limited
- * Author: Stephen Gallimore <stephen.gallimore@st.com>
- * Author: Srinivas Kandagatla <srinivas.kandagatla@st.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-#include <linux/module.h>
-#include <linux/of.h>
-#include <linux/of_platform.h>
-#include <linux/platform_device.h>
-
-#include <dt-bindings/reset/stih416-resets.h>
-
-#include "reset-syscfg.h"
-
-/*
- * STiH416 Peripheral powerdown definitions.
- */
-static const char stih416_front[] = "st,stih416-front-syscfg";
-static const char stih416_rear[] = "st,stih416-rear-syscfg";
-static const char stih416_sbc[] = "st,stih416-sbc-syscfg";
-static const char stih416_lpm[] = "st,stih416-lpm-syscfg";
-static const char stih416_cpu[] = "st,stih416-cpu-syscfg";
-
-#define STIH416_PDN_FRONT(_bit) \
- _SYSCFG_RST_CH(stih416_front, SYSCFG_1500, _bit, SYSSTAT_1578, _bit)
-
-#define STIH416_PDN_REAR(_cntl, _stat) \
- _SYSCFG_RST_CH(stih416_rear, SYSCFG_2525, _cntl, SYSSTAT_2583, _stat)
-
-#define SYSCFG_1500 0x7d0 /* Powerdown request EMI/NAND/Keyscan */
-#define SYSSTAT_1578 0x908 /* Powerdown status EMI/NAND/Keyscan */
-
-#define SYSCFG_2525 0x834 /* Powerdown request USB/SATA/PCIe */
-#define SYSSTAT_2583 0x91c /* Powerdown status USB/SATA/PCIe */
-
-#define SYSCFG_2552 0x8A0 /* Reset Generator control 0 */
-#define SYSCFG_1539 0x86c /* Softreset Ethernet 0 */
-#define SYSCFG_510 0x7f8 /* Softreset Ethernet 1 */
-#define LPM_SYSCFG_1 0x4 /* Softreset IRB */
-#define SYSCFG_2553 0x8a4 /* Softreset SATA0/1, PCIE0/1 */
-#define SYSCFG_7563 0x8cc /* MPE softresets 0 */
-#define SYSCFG_7564 0x8d0 /* MPE softresets 1 */
-
-#define STIH416_SRST_CPU(_reg, _bit) \
- _SYSCFG_RST_CH_NO_ACK(stih416_cpu, _reg, _bit)
-
-#define STIH416_SRST_FRONT(_reg, _bit) \
- _SYSCFG_RST_CH_NO_ACK(stih416_front, _reg, _bit)
-
-#define STIH416_SRST_REAR(_reg, _bit) \
- _SYSCFG_RST_CH_NO_ACK(stih416_rear, _reg, _bit)
-
-#define STIH416_SRST_LPM(_reg, _bit) \
- _SYSCFG_RST_CH_NO_ACK(stih416_lpm, _reg, _bit)
-
-#define STIH416_SRST_SBC(_reg, _bit) \
- _SYSCFG_RST_CH_NO_ACK(stih416_sbc, _reg, _bit)
-
-static const struct syscfg_reset_channel_data stih416_powerdowns[] = {
- [STIH416_EMISS_POWERDOWN] = STIH416_PDN_FRONT(0),
- [STIH416_NAND_POWERDOWN] = STIH416_PDN_FRONT(1),
- [STIH416_KEYSCAN_POWERDOWN] = STIH416_PDN_FRONT(2),
- [STIH416_USB0_POWERDOWN] = STIH416_PDN_REAR(0, 0),
- [STIH416_USB1_POWERDOWN] = STIH416_PDN_REAR(1, 1),
- [STIH416_USB2_POWERDOWN] = STIH416_PDN_REAR(2, 2),
- [STIH416_USB3_POWERDOWN] = STIH416_PDN_REAR(6, 5),
- [STIH416_SATA0_POWERDOWN] = STIH416_PDN_REAR(3, 3),
- [STIH416_SATA1_POWERDOWN] = STIH416_PDN_REAR(4, 4),
- [STIH416_PCIE0_POWERDOWN] = STIH416_PDN_REAR(7, 9),
- [STIH416_PCIE1_POWERDOWN] = STIH416_PDN_REAR(5, 8),
-};
-
-static const struct syscfg_reset_channel_data stih416_softresets[] = {
- [STIH416_ETH0_SOFTRESET] = STIH416_SRST_FRONT(SYSCFG_1539, 0),
- [STIH416_ETH1_SOFTRESET] = STIH416_SRST_SBC(SYSCFG_510, 0),
- [STIH416_IRB_SOFTRESET] = STIH416_SRST_LPM(LPM_SYSCFG_1, 6),
- [STIH416_USB0_SOFTRESET] = STIH416_SRST_REAR(SYSCFG_2552, 9),
- [STIH416_USB1_SOFTRESET] = STIH416_SRST_REAR(SYSCFG_2552, 10),
- [STIH416_USB2_SOFTRESET] = STIH416_SRST_REAR(SYSCFG_2552, 11),
- [STIH416_USB3_SOFTRESET] = STIH416_SRST_REAR(SYSCFG_2552, 28),
- [STIH416_SATA0_SOFTRESET] = STIH416_SRST_REAR(SYSCFG_2553, 7),
- [STIH416_SATA1_SOFTRESET] = STIH416_SRST_REAR(SYSCFG_2553, 3),
- [STIH416_PCIE0_SOFTRESET] = STIH416_SRST_REAR(SYSCFG_2553, 15),
- [STIH416_PCIE1_SOFTRESET] = STIH416_SRST_REAR(SYSCFG_2553, 2),
- [STIH416_AUD_DAC_SOFTRESET] = STIH416_SRST_REAR(SYSCFG_2553, 14),
- [STIH416_HDTVOUT_SOFTRESET] = STIH416_SRST_REAR(SYSCFG_2552, 5),
- [STIH416_VTAC_M_RX_SOFTRESET] = STIH416_SRST_REAR(SYSCFG_2552, 25),
- [STIH416_VTAC_A_RX_SOFTRESET] = STIH416_SRST_REAR(SYSCFG_2552, 26),
- [STIH416_SYNC_HD_SOFTRESET] = STIH416_SRST_REAR(SYSCFG_2553, 5),
- [STIH416_SYNC_SD_SOFTRESET] = STIH416_SRST_REAR(SYSCFG_2553, 6),
- [STIH416_BLITTER_SOFTRESET] = STIH416_SRST_CPU(SYSCFG_7563, 10),
- [STIH416_GPU_SOFTRESET] = STIH416_SRST_CPU(SYSCFG_7563, 11),
- [STIH416_VTAC_M_TX_SOFTRESET] = STIH416_SRST_CPU(SYSCFG_7563, 18),
- [STIH416_VTAC_A_TX_SOFTRESET] = STIH416_SRST_CPU(SYSCFG_7563, 19),
- [STIH416_VTG_AUX_SOFTRESET] = STIH416_SRST_CPU(SYSCFG_7563, 21),
- [STIH416_JPEG_DEC_SOFTRESET] = STIH416_SRST_CPU(SYSCFG_7563, 23),
- [STIH416_HVA_SOFTRESET] = STIH416_SRST_CPU(SYSCFG_7564, 2),
- [STIH416_COMPO_M_SOFTRESET] = STIH416_SRST_CPU(SYSCFG_7564, 3),
- [STIH416_COMPO_A_SOFTRESET] = STIH416_SRST_CPU(SYSCFG_7564, 4),
- [STIH416_VP8_DEC_SOFTRESET] = STIH416_SRST_CPU(SYSCFG_7564, 10),
- [STIH416_VTG_MAIN_SOFTRESET] = STIH416_SRST_CPU(SYSCFG_7564, 16),
- [STIH416_KEYSCAN_SOFTRESET] = STIH416_SRST_LPM(LPM_SYSCFG_1, 8),
-};
-
-static struct syscfg_reset_controller_data stih416_powerdown_controller = {
- .wait_for_ack = true,
- .nr_channels = ARRAY_SIZE(stih416_powerdowns),
- .channels = stih416_powerdowns,
-};
-
-static struct syscfg_reset_controller_data stih416_softreset_controller = {
- .wait_for_ack = false,
- .active_low = true,
- .nr_channels = ARRAY_SIZE(stih416_softresets),
- .channels = stih416_softresets,
-};
-
-static const struct of_device_id stih416_reset_match[] = {
- { .compatible = "st,stih416-powerdown",
- .data = &stih416_powerdown_controller, },
- { .compatible = "st,stih416-softreset",
- .data = &stih416_softreset_controller, },
- {},
-};
-
-static struct platform_driver stih416_reset_driver = {
- .probe = syscfg_reset_probe,
- .driver = {
- .name = "reset-stih416",
- .of_match_table = stih416_reset_match,
- },
-};
-
-static int __init stih416_reset_init(void)
-{
- return platform_driver_register(&stih416_reset_driver);
-}
-arch_initcall(stih416_reset_init);
diff --git a/drivers/reset/tegra/Kconfig b/drivers/reset/tegra/Kconfig
new file mode 100644
index 000000000000..d2afa293df7d
--- /dev/null
+++ b/drivers/reset/tegra/Kconfig
@@ -0,0 +1,2 @@
+config RESET_TEGRA_BPMP
+ def_bool TEGRA_BPMP
diff --git a/drivers/reset/tegra/Makefile b/drivers/reset/tegra/Makefile
new file mode 100644
index 000000000000..775243ab7383
--- /dev/null
+++ b/drivers/reset/tegra/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_RESET_TEGRA_BPMP) += reset-bpmp.o
diff --git a/drivers/reset/tegra/reset-bpmp.c b/drivers/reset/tegra/reset-bpmp.c
new file mode 100644
index 000000000000..5daf2ee1a396
--- /dev/null
+++ b/drivers/reset/tegra/reset-bpmp.c
@@ -0,0 +1,71 @@
+/*
+ * Copyright (C) 2016 NVIDIA Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/reset-controller.h>
+
+#include <soc/tegra/bpmp.h>
+#include <soc/tegra/bpmp-abi.h>
+
+static struct tegra_bpmp *to_tegra_bpmp(struct reset_controller_dev *rstc)
+{
+ return container_of(rstc, struct tegra_bpmp, rstc);
+}
+
+static int tegra_bpmp_reset_common(struct reset_controller_dev *rstc,
+ enum mrq_reset_commands command,
+ unsigned int id)
+{
+ struct tegra_bpmp *bpmp = to_tegra_bpmp(rstc);
+ struct mrq_reset_request request;
+ struct tegra_bpmp_message msg;
+
+ memset(&request, 0, sizeof(request));
+ request.cmd = command;
+ request.reset_id = id;
+
+ memset(&msg, 0, sizeof(msg));
+ msg.mrq = MRQ_RESET;
+ msg.tx.data = &request;
+ msg.tx.size = sizeof(request);
+
+ return tegra_bpmp_transfer(bpmp, &msg);
+}
+
+static int tegra_bpmp_reset_module(struct reset_controller_dev *rstc,
+ unsigned long id)
+{
+ return tegra_bpmp_reset_common(rstc, CMD_RESET_MODULE, id);
+}
+
+static int tegra_bpmp_reset_assert(struct reset_controller_dev *rstc,
+ unsigned long id)
+{
+ return tegra_bpmp_reset_common(rstc, CMD_RESET_ASSERT, id);
+}
+
+static int tegra_bpmp_reset_deassert(struct reset_controller_dev *rstc,
+ unsigned long id)
+{
+ return tegra_bpmp_reset_common(rstc, CMD_RESET_DEASSERT, id);
+}
+
+static const struct reset_control_ops tegra_bpmp_reset_ops = {
+ .reset = tegra_bpmp_reset_module,
+ .assert = tegra_bpmp_reset_assert,
+ .deassert = tegra_bpmp_reset_deassert,
+};
+
+int tegra_bpmp_init_resets(struct tegra_bpmp *bpmp)
+{
+ bpmp->rstc.ops = &tegra_bpmp_reset_ops;
+ bpmp->rstc.owner = THIS_MODULE;
+ bpmp->rstc.of_node = bpmp->dev->of_node;
+ bpmp->rstc.nr_resets = bpmp->soc->num_resets;
+
+ return devm_reset_controller_register(bpmp->dev, &bpmp->rstc);
+}
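
Device drivers behind the BPMP consume this controller through the ordinary reset API; a minimal sketch, assuming a consumer whose device tree resets property points at the BPMP node:

static int bpmp_reset_consumer_probe(struct device *dev)
{
	struct reset_control *rstc;

	/* e.g. resets = <&bpmp TEGRA186_RESET_...>; the specifier is
	 * the BPMP reset ID passed through to MRQ_RESET. */
	rstc = devm_reset_control_get(dev, NULL);
	if (IS_ERR(rstc))
		return PTR_ERR(rstc);

	/* Ends up in tegra_bpmp_reset_module() above, i.e. a
	 * CMD_RESET_MODULE IPC to the BPMP firmware. */
	return reset_control_reset(rstc);
}
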
diff --git a/drivers/soc/mediatek/Kconfig b/drivers/soc/mediatek/Kconfig
index 0a4ea809a61b..609bb3424c14 100644
--- a/drivers/soc/mediatek/Kconfig
+++ b/drivers/soc/mediatek/Kconfig
@@ -23,7 +23,7 @@ config MTK_PMIC_WRAP
config MTK_SCPSYS
bool "MediaTek SCPSYS Support"
depends on ARCH_MEDIATEK || COMPILE_TEST
- default ARM64 && ARCH_MEDIATEK
+ default ARCH_MEDIATEK
select REGMAP
select MTK_INFRACFG
select PM_GENERIC_DOMAINS if PM
diff --git a/drivers/soc/mediatek/mtk-scpsys.c b/drivers/soc/mediatek/mtk-scpsys.c
index 837effe19907..beb79162369a 100644
--- a/drivers/soc/mediatek/mtk-scpsys.c
+++ b/drivers/soc/mediatek/mtk-scpsys.c
@@ -11,17 +11,16 @@
* GNU General Public License for more details.
*/
#include <linux/clk.h>
-#include <linux/delay.h>
+#include <linux/init.h>
#include <linux/io.h>
-#include <linux/kernel.h>
#include <linux/mfd/syscon.h>
-#include <linux/init.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
-#include <linux/regmap.h>
-#include <linux/soc/mediatek/infracfg.h>
#include <linux/regulator/consumer.h>
+#include <linux/soc/mediatek/infracfg.h>
+
+#include <dt-bindings/power/mt2701-power.h>
#include <dt-bindings/power/mt8173-power.h>
#define SPM_VDE_PWR_CON 0x0210
@@ -29,11 +28,17 @@
#define SPM_VEN_PWR_CON 0x0230
#define SPM_ISP_PWR_CON 0x0238
#define SPM_DIS_PWR_CON 0x023c
+#define SPM_CONN_PWR_CON 0x0280
#define SPM_VEN2_PWR_CON 0x0298
-#define SPM_AUDIO_PWR_CON 0x029c
+#define SPM_AUDIO_PWR_CON 0x029c /* MT8173 */
+#define SPM_BDP_PWR_CON 0x029c /* MT2701 */
+#define SPM_ETH_PWR_CON 0x02a0
+#define SPM_HIF_PWR_CON 0x02a4
+#define SPM_IFR_MSC_PWR_CON 0x02a8
#define SPM_MFG_2D_PWR_CON 0x02c0
#define SPM_MFG_ASYNC_PWR_CON 0x02c4
#define SPM_USB_PWR_CON 0x02cc
+
#define SPM_PWR_STATUS 0x060c
#define SPM_PWR_STATUS_2ND 0x0610
@@ -43,10 +48,15 @@
#define PWR_ON_2ND_BIT BIT(3)
#define PWR_CLK_DIS_BIT BIT(4)
+#define PWR_STATUS_CONN BIT(1)
#define PWR_STATUS_DISP BIT(3)
#define PWR_STATUS_MFG BIT(4)
#define PWR_STATUS_ISP BIT(5)
#define PWR_STATUS_VDEC BIT(7)
+#define PWR_STATUS_BDP BIT(14)
+#define PWR_STATUS_ETH BIT(15)
+#define PWR_STATUS_HIF BIT(16)
+#define PWR_STATUS_IFR_MSC BIT(17)
#define PWR_STATUS_VENC_LT BIT(20)
#define PWR_STATUS_VENC BIT(21)
#define PWR_STATUS_MFG_2D BIT(22)
@@ -55,12 +65,23 @@
#define PWR_STATUS_USB BIT(25)
enum clk_id {
- MT8173_CLK_NONE,
- MT8173_CLK_MM,
- MT8173_CLK_MFG,
- MT8173_CLK_VENC,
- MT8173_CLK_VENC_LT,
- MT8173_CLK_MAX,
+ CLK_NONE,
+ CLK_MM,
+ CLK_MFG,
+ CLK_VENC,
+ CLK_VENC_LT,
+ CLK_ETHIF,
+ CLK_MAX,
+};
+
+static const char * const clk_names[] = {
+ NULL,
+ "mm",
+ "mfg",
+ "venc",
+ "venc_lt",
+ "ethif",
+ NULL,
};
#define MAX_CLKS 2
@@ -76,98 +97,6 @@ struct scp_domain_data {
bool active_wakeup;
};
-static const struct scp_domain_data scp_domain_data[] = {
- [MT8173_POWER_DOMAIN_VDEC] = {
- .name = "vdec",
- .sta_mask = PWR_STATUS_VDEC,
- .ctl_offs = SPM_VDE_PWR_CON,
- .sram_pdn_bits = GENMASK(11, 8),
- .sram_pdn_ack_bits = GENMASK(12, 12),
- .clk_id = {MT8173_CLK_MM},
- },
- [MT8173_POWER_DOMAIN_VENC] = {
- .name = "venc",
- .sta_mask = PWR_STATUS_VENC,
- .ctl_offs = SPM_VEN_PWR_CON,
- .sram_pdn_bits = GENMASK(11, 8),
- .sram_pdn_ack_bits = GENMASK(15, 12),
- .clk_id = {MT8173_CLK_MM, MT8173_CLK_VENC},
- },
- [MT8173_POWER_DOMAIN_ISP] = {
- .name = "isp",
- .sta_mask = PWR_STATUS_ISP,
- .ctl_offs = SPM_ISP_PWR_CON,
- .sram_pdn_bits = GENMASK(11, 8),
- .sram_pdn_ack_bits = GENMASK(13, 12),
- .clk_id = {MT8173_CLK_MM},
- },
- [MT8173_POWER_DOMAIN_MM] = {
- .name = "mm",
- .sta_mask = PWR_STATUS_DISP,
- .ctl_offs = SPM_DIS_PWR_CON,
- .sram_pdn_bits = GENMASK(11, 8),
- .sram_pdn_ack_bits = GENMASK(12, 12),
- .clk_id = {MT8173_CLK_MM},
- .bus_prot_mask = MT8173_TOP_AXI_PROT_EN_MM_M0 |
- MT8173_TOP_AXI_PROT_EN_MM_M1,
- },
- [MT8173_POWER_DOMAIN_VENC_LT] = {
- .name = "venc_lt",
- .sta_mask = PWR_STATUS_VENC_LT,
- .ctl_offs = SPM_VEN2_PWR_CON,
- .sram_pdn_bits = GENMASK(11, 8),
- .sram_pdn_ack_bits = GENMASK(15, 12),
- .clk_id = {MT8173_CLK_MM, MT8173_CLK_VENC_LT},
- },
- [MT8173_POWER_DOMAIN_AUDIO] = {
- .name = "audio",
- .sta_mask = PWR_STATUS_AUDIO,
- .ctl_offs = SPM_AUDIO_PWR_CON,
- .sram_pdn_bits = GENMASK(11, 8),
- .sram_pdn_ack_bits = GENMASK(15, 12),
- .clk_id = {MT8173_CLK_NONE},
- },
- [MT8173_POWER_DOMAIN_USB] = {
- .name = "usb",
- .sta_mask = PWR_STATUS_USB,
- .ctl_offs = SPM_USB_PWR_CON,
- .sram_pdn_bits = GENMASK(11, 8),
- .sram_pdn_ack_bits = GENMASK(15, 12),
- .clk_id = {MT8173_CLK_NONE},
- .active_wakeup = true,
- },
- [MT8173_POWER_DOMAIN_MFG_ASYNC] = {
- .name = "mfg_async",
- .sta_mask = PWR_STATUS_MFG_ASYNC,
- .ctl_offs = SPM_MFG_ASYNC_PWR_CON,
- .sram_pdn_bits = GENMASK(11, 8),
- .sram_pdn_ack_bits = 0,
- .clk_id = {MT8173_CLK_MFG},
- },
- [MT8173_POWER_DOMAIN_MFG_2D] = {
- .name = "mfg_2d",
- .sta_mask = PWR_STATUS_MFG_2D,
- .ctl_offs = SPM_MFG_2D_PWR_CON,
- .sram_pdn_bits = GENMASK(11, 8),
- .sram_pdn_ack_bits = GENMASK(13, 12),
- .clk_id = {MT8173_CLK_NONE},
- },
- [MT8173_POWER_DOMAIN_MFG] = {
- .name = "mfg",
- .sta_mask = PWR_STATUS_MFG,
- .ctl_offs = SPM_MFG_PWR_CON,
- .sram_pdn_bits = GENMASK(13, 8),
- .sram_pdn_ack_bits = GENMASK(21, 16),
- .clk_id = {MT8173_CLK_NONE},
- .bus_prot_mask = MT8173_TOP_AXI_PROT_EN_MFG_S |
- MT8173_TOP_AXI_PROT_EN_MFG_M0 |
- MT8173_TOP_AXI_PROT_EN_MFG_M1 |
- MT8173_TOP_AXI_PROT_EN_MFG_SNOOP_OUT,
- },
-};
-
-#define NUM_DOMAINS ARRAY_SIZE(scp_domain_data)
-
struct scp;
struct scp_domain {
@@ -179,7 +108,7 @@ struct scp_domain {
};
struct scp {
- struct scp_domain domains[NUM_DOMAINS];
+ struct scp_domain *domains;
struct genpd_onecell_data pd_data;
struct device *dev;
void __iomem *base;
@@ -408,57 +337,55 @@ static bool scpsys_active_wakeup(struct device *dev)
return scpd->data->active_wakeup;
}
-static int scpsys_probe(struct platform_device *pdev)
+static void init_clks(struct platform_device *pdev, struct clk **clk)
+{
+ int i;
+
+ for (i = CLK_NONE + 1; i < CLK_MAX; i++)
+ clk[i] = devm_clk_get(&pdev->dev, clk_names[i]);
+}
+
+static struct scp *init_scp(struct platform_device *pdev,
+ const struct scp_domain_data *scp_domain_data, int num)
{
struct genpd_onecell_data *pd_data;
struct resource *res;
- int i, j, ret;
+ int i, j;
struct scp *scp;
- struct clk *clk[MT8173_CLK_MAX];
+ struct clk *clk[CLK_MAX];
scp = devm_kzalloc(&pdev->dev, sizeof(*scp), GFP_KERNEL);
if (!scp)
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
scp->dev = &pdev->dev;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
scp->base = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(scp->base))
- return PTR_ERR(scp->base);
+ return ERR_CAST(scp->base);
+
+ scp->domains = devm_kzalloc(&pdev->dev,
+ sizeof(*scp->domains) * num, GFP_KERNEL);
+ if (!scp->domains)
+ return ERR_PTR(-ENOMEM);
pd_data = &scp->pd_data;
pd_data->domains = devm_kzalloc(&pdev->dev,
- sizeof(*pd_data->domains) * NUM_DOMAINS, GFP_KERNEL);
+ sizeof(*pd_data->domains) * num, GFP_KERNEL);
if (!pd_data->domains)
- return -ENOMEM;
-
- clk[MT8173_CLK_MM] = devm_clk_get(&pdev->dev, "mm");
- if (IS_ERR(clk[MT8173_CLK_MM]))
- return PTR_ERR(clk[MT8173_CLK_MM]);
-
- clk[MT8173_CLK_MFG] = devm_clk_get(&pdev->dev, "mfg");
- if (IS_ERR(clk[MT8173_CLK_MFG]))
- return PTR_ERR(clk[MT8173_CLK_MFG]);
-
- clk[MT8173_CLK_VENC] = devm_clk_get(&pdev->dev, "venc");
- if (IS_ERR(clk[MT8173_CLK_VENC]))
- return PTR_ERR(clk[MT8173_CLK_VENC]);
-
- clk[MT8173_CLK_VENC_LT] = devm_clk_get(&pdev->dev, "venc_lt");
- if (IS_ERR(clk[MT8173_CLK_VENC_LT]))
- return PTR_ERR(clk[MT8173_CLK_VENC_LT]);
+ return ERR_PTR(-ENOMEM);
scp->infracfg = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
"infracfg");
if (IS_ERR(scp->infracfg)) {
dev_err(&pdev->dev, "Cannot find infracfg controller: %ld\n",
PTR_ERR(scp->infracfg));
- return PTR_ERR(scp->infracfg);
+ return ERR_CAST(scp->infracfg);
}
- for (i = 0; i < NUM_DOMAINS; i++) {
+ for (i = 0; i < num; i++) {
struct scp_domain *scpd = &scp->domains[i];
const struct scp_domain_data *data = &scp_domain_data[i];
@@ -467,13 +394,15 @@ static int scpsys_probe(struct platform_device *pdev)
if (PTR_ERR(scpd->supply) == -ENODEV)
scpd->supply = NULL;
else
- return PTR_ERR(scpd->supply);
+ return ERR_CAST(scpd->supply);
}
}
- pd_data->num_domains = NUM_DOMAINS;
+ pd_data->num_domains = num;
+
+ init_clks(pdev, clk);
- for (i = 0; i < NUM_DOMAINS; i++) {
+ for (i = 0; i < num; i++) {
struct scp_domain *scpd = &scp->domains[i];
struct generic_pm_domain *genpd = &scpd->genpd;
const struct scp_domain_data *data = &scp_domain_data[i];
@@ -482,13 +411,37 @@ static int scpsys_probe(struct platform_device *pdev)
scpd->scp = scp;
scpd->data = data;
- for (j = 0; j < MAX_CLKS && data->clk_id[j]; j++)
- scpd->clk[j] = clk[data->clk_id[j]];
+
+ for (j = 0; j < MAX_CLKS && data->clk_id[j]; j++) {
+ struct clk *c = clk[data->clk_id[j]];
+
+ if (IS_ERR(c)) {
+ dev_err(&pdev->dev, "%s: clk unavailable\n",
+ data->name);
+ return ERR_CAST(c);
+ }
+
+ scpd->clk[j] = c;
+ }
genpd->name = data->name;
genpd->power_off = scpsys_power_off;
genpd->power_on = scpsys_power_on;
genpd->dev_ops.active_wakeup = scpsys_active_wakeup;
+ }
+
+ return scp;
+}
+
+static void mtk_register_power_domains(struct platform_device *pdev,
+ struct scp *scp, int num)
+{
+ struct genpd_onecell_data *pd_data;
+ int i, ret;
+
+ for (i = 0; i < num; i++) {
+ struct scp_domain *scpd = &scp->domains[i];
+ struct generic_pm_domain *genpd = &scpd->genpd;
/*
* Initially turn on all domains to make the domains usable
@@ -507,6 +460,222 @@ static int scpsys_probe(struct platform_device *pdev)
* valid.
*/
+ pd_data = &scp->pd_data;
+
+ ret = of_genpd_add_provider_onecell(pdev->dev.of_node, pd_data);
+ if (ret)
+ dev_err(&pdev->dev, "Failed to add OF provider: %d\n", ret);
+}
+
+/*
+ * MT2701 power domain support
+ */
+
+static const struct scp_domain_data scp_domain_data_mt2701[] = {
+ [MT2701_POWER_DOMAIN_CONN] = {
+ .name = "conn",
+ .sta_mask = PWR_STATUS_CONN,
+ .ctl_offs = SPM_CONN_PWR_CON,
+ .bus_prot_mask = 0x0104,
+ .clk_id = {CLK_NONE},
+ .active_wakeup = true,
+ },
+ [MT2701_POWER_DOMAIN_DISP] = {
+ .name = "disp",
+ .sta_mask = PWR_STATUS_DISP,
+ .ctl_offs = SPM_DIS_PWR_CON,
+ .sram_pdn_bits = GENMASK(11, 8),
+ .clk_id = {CLK_MM},
+ .bus_prot_mask = 0x0002,
+ .active_wakeup = true,
+ },
+ [MT2701_POWER_DOMAIN_MFG] = {
+ .name = "mfg",
+ .sta_mask = PWR_STATUS_MFG,
+ .ctl_offs = SPM_MFG_PWR_CON,
+ .sram_pdn_bits = GENMASK(11, 8),
+ .sram_pdn_ack_bits = GENMASK(12, 12),
+ .clk_id = {CLK_MFG},
+ .active_wakeup = true,
+ },
+ [MT2701_POWER_DOMAIN_VDEC] = {
+ .name = "vdec",
+ .sta_mask = PWR_STATUS_VDEC,
+ .ctl_offs = SPM_VDE_PWR_CON,
+ .sram_pdn_bits = GENMASK(11, 8),
+ .sram_pdn_ack_bits = GENMASK(12, 12),
+ .clk_id = {CLK_MM},
+ .active_wakeup = true,
+ },
+ [MT2701_POWER_DOMAIN_ISP] = {
+ .name = "isp",
+ .sta_mask = PWR_STATUS_ISP,
+ .ctl_offs = SPM_ISP_PWR_CON,
+ .sram_pdn_bits = GENMASK(11, 8),
+ .sram_pdn_ack_bits = GENMASK(13, 12),
+ .clk_id = {CLK_MM},
+ .active_wakeup = true,
+ },
+ [MT2701_POWER_DOMAIN_BDP] = {
+ .name = "bdp",
+ .sta_mask = PWR_STATUS_BDP,
+ .ctl_offs = SPM_BDP_PWR_CON,
+ .sram_pdn_bits = GENMASK(11, 8),
+ .clk_id = {CLK_NONE},
+ .active_wakeup = true,
+ },
+ [MT2701_POWER_DOMAIN_ETH] = {
+ .name = "eth",
+ .sta_mask = PWR_STATUS_ETH,
+ .ctl_offs = SPM_ETH_PWR_CON,
+ .sram_pdn_bits = GENMASK(11, 8),
+ .sram_pdn_ack_bits = GENMASK(15, 12),
+ .clk_id = {CLK_ETHIF},
+ .active_wakeup = true,
+ },
+ [MT2701_POWER_DOMAIN_HIF] = {
+ .name = "hif",
+ .sta_mask = PWR_STATUS_HIF,
+ .ctl_offs = SPM_HIF_PWR_CON,
+ .sram_pdn_bits = GENMASK(11, 8),
+ .sram_pdn_ack_bits = GENMASK(15, 12),
+ .clk_id = {CLK_ETHIF},
+ .active_wakeup = true,
+ },
+ [MT2701_POWER_DOMAIN_IFR_MSC] = {
+ .name = "ifr_msc",
+ .sta_mask = PWR_STATUS_IFR_MSC,
+ .ctl_offs = SPM_IFR_MSC_PWR_CON,
+ .clk_id = {CLK_NONE},
+ .active_wakeup = true,
+ },
+};
+
+#define NUM_DOMAINS_MT2701 ARRAY_SIZE(scp_domain_data_mt2701)
+
+static int __init scpsys_probe_mt2701(struct platform_device *pdev)
+{
+ struct scp *scp;
+
+ scp = init_scp(pdev, scp_domain_data_mt2701, NUM_DOMAINS_MT2701);
+ if (IS_ERR(scp))
+ return PTR_ERR(scp);
+
+ mtk_register_power_domains(pdev, scp, NUM_DOMAINS_MT2701);
+
+ return 0;
+}
+
+/*
+ * MT8173 power domain support
+ */
+
+static const struct scp_domain_data scp_domain_data_mt8173[] = {
+ [MT8173_POWER_DOMAIN_VDEC] = {
+ .name = "vdec",
+ .sta_mask = PWR_STATUS_VDEC,
+ .ctl_offs = SPM_VDE_PWR_CON,
+ .sram_pdn_bits = GENMASK(11, 8),
+ .sram_pdn_ack_bits = GENMASK(12, 12),
+ .clk_id = {CLK_MM},
+ },
+ [MT8173_POWER_DOMAIN_VENC] = {
+ .name = "venc",
+ .sta_mask = PWR_STATUS_VENC,
+ .ctl_offs = SPM_VEN_PWR_CON,
+ .sram_pdn_bits = GENMASK(11, 8),
+ .sram_pdn_ack_bits = GENMASK(15, 12),
+ .clk_id = {CLK_MM, CLK_VENC},
+ },
+ [MT8173_POWER_DOMAIN_ISP] = {
+ .name = "isp",
+ .sta_mask = PWR_STATUS_ISP,
+ .ctl_offs = SPM_ISP_PWR_CON,
+ .sram_pdn_bits = GENMASK(11, 8),
+ .sram_pdn_ack_bits = GENMASK(13, 12),
+ .clk_id = {CLK_MM},
+ },
+ [MT8173_POWER_DOMAIN_MM] = {
+ .name = "mm",
+ .sta_mask = PWR_STATUS_DISP,
+ .ctl_offs = SPM_DIS_PWR_CON,
+ .sram_pdn_bits = GENMASK(11, 8),
+ .sram_pdn_ack_bits = GENMASK(12, 12),
+ .clk_id = {CLK_MM},
+ .bus_prot_mask = MT8173_TOP_AXI_PROT_EN_MM_M0 |
+ MT8173_TOP_AXI_PROT_EN_MM_M1,
+ },
+ [MT8173_POWER_DOMAIN_VENC_LT] = {
+ .name = "venc_lt",
+ .sta_mask = PWR_STATUS_VENC_LT,
+ .ctl_offs = SPM_VEN2_PWR_CON,
+ .sram_pdn_bits = GENMASK(11, 8),
+ .sram_pdn_ack_bits = GENMASK(15, 12),
+ .clk_id = {CLK_MM, CLK_VENC_LT},
+ },
+ [MT8173_POWER_DOMAIN_AUDIO] = {
+ .name = "audio",
+ .sta_mask = PWR_STATUS_AUDIO,
+ .ctl_offs = SPM_AUDIO_PWR_CON,
+ .sram_pdn_bits = GENMASK(11, 8),
+ .sram_pdn_ack_bits = GENMASK(15, 12),
+ .clk_id = {CLK_NONE},
+ },
+ [MT8173_POWER_DOMAIN_USB] = {
+ .name = "usb",
+ .sta_mask = PWR_STATUS_USB,
+ .ctl_offs = SPM_USB_PWR_CON,
+ .sram_pdn_bits = GENMASK(11, 8),
+ .sram_pdn_ack_bits = GENMASK(15, 12),
+ .clk_id = {CLK_NONE},
+ .active_wakeup = true,
+ },
+ [MT8173_POWER_DOMAIN_MFG_ASYNC] = {
+ .name = "mfg_async",
+ .sta_mask = PWR_STATUS_MFG_ASYNC,
+ .ctl_offs = SPM_MFG_ASYNC_PWR_CON,
+ .sram_pdn_bits = GENMASK(11, 8),
+ .sram_pdn_ack_bits = 0,
+ .clk_id = {CLK_MFG},
+ },
+ [MT8173_POWER_DOMAIN_MFG_2D] = {
+ .name = "mfg_2d",
+ .sta_mask = PWR_STATUS_MFG_2D,
+ .ctl_offs = SPM_MFG_2D_PWR_CON,
+ .sram_pdn_bits = GENMASK(11, 8),
+ .sram_pdn_ack_bits = GENMASK(13, 12),
+ .clk_id = {CLK_NONE},
+ },
+ [MT8173_POWER_DOMAIN_MFG] = {
+ .name = "mfg",
+ .sta_mask = PWR_STATUS_MFG,
+ .ctl_offs = SPM_MFG_PWR_CON,
+ .sram_pdn_bits = GENMASK(13, 8),
+ .sram_pdn_ack_bits = GENMASK(21, 16),
+ .clk_id = {CLK_NONE},
+ .bus_prot_mask = MT8173_TOP_AXI_PROT_EN_MFG_S |
+ MT8173_TOP_AXI_PROT_EN_MFG_M0 |
+ MT8173_TOP_AXI_PROT_EN_MFG_M1 |
+ MT8173_TOP_AXI_PROT_EN_MFG_SNOOP_OUT,
+ },
+};
+
+#define NUM_DOMAINS_MT8173 ARRAY_SIZE(scp_domain_data_mt8173)
+
+static int __init scpsys_probe_mt8173(struct platform_device *pdev)
+{
+ struct scp *scp;
+ struct genpd_onecell_data *pd_data;
+ int ret;
+
+ scp = init_scp(pdev, scp_domain_data_mt8173, NUM_DOMAINS_MT8173);
+ if (IS_ERR(scp))
+ return PTR_ERR(scp);
+
+ mtk_register_power_domains(pdev, scp, NUM_DOMAINS_MT8173);
+
+ pd_data = &scp->pd_data;
+
ret = pm_genpd_add_subdomain(pd_data->domains[MT8173_POWER_DOMAIN_MFG_ASYNC],
pd_data->domains[MT8173_POWER_DOMAIN_MFG_2D]);
if (ret && IS_ENABLED(CONFIG_PM))
@@ -517,21 +686,39 @@ static int scpsys_probe(struct platform_device *pdev)
if (ret && IS_ENABLED(CONFIG_PM))
dev_err(&pdev->dev, "Failed to add subdomain: %d\n", ret);
- ret = of_genpd_add_provider_onecell(pdev->dev.of_node, pd_data);
- if (ret)
- dev_err(&pdev->dev, "Failed to add OF provider: %d\n", ret);
-
return 0;
}
+/*
+ * scpsys driver init
+ */
+
static const struct of_device_id of_scpsys_match_tbl[] = {
{
+ .compatible = "mediatek,mt2701-scpsys",
+ .data = scpsys_probe_mt2701,
+ }, {
.compatible = "mediatek,mt8173-scpsys",
+ .data = scpsys_probe_mt8173,
}, {
/* sentinel */
}
};
+static int scpsys_probe(struct platform_device *pdev)
+{
+ int (*probe)(struct platform_device *);
+ const struct of_device_id *of_id;
+
+ of_id = of_match_node(of_scpsys_match_tbl, pdev->dev.of_node);
+ if (!of_id || !of_id->data)
+ return -EINVAL;
+
+ probe = of_id->data;
+
+ return probe(pdev);
+}
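
The dispatch above stores a per-SoC probe function in of_device_id::data; the same lookup can also be written with of_device_get_match_data() from <linux/of_device.h>. A hypothetical sketch (foo_probe is illustrative only, not part of this patch):

/* Sketch: equivalent per-SoC dispatch via of_device_get_match_data(). */
static int foo_probe(struct platform_device *pdev)
{
	int (*probe)(struct platform_device *);

	probe = of_device_get_match_data(&pdev->dev);
	if (!probe)
		return -EINVAL;

	return probe(pdev);
}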
+
static struct platform_driver scpsys_drv = {
.probe = scpsys_probe,
.driver = {
diff --git a/drivers/soc/renesas/Makefile b/drivers/soc/renesas/Makefile
index 86cc78cd1962..d9115cb5ed9d 100644
--- a/drivers/soc/renesas/Makefile
+++ b/drivers/soc/renesas/Makefile
@@ -1,8 +1,12 @@
+obj-$(CONFIG_SOC_BUS) += renesas-soc.o
+
obj-$(CONFIG_ARCH_RCAR_GEN1) += rcar-rst.o
obj-$(CONFIG_ARCH_RCAR_GEN2) += rcar-rst.o
obj-$(CONFIG_ARCH_R8A7795) += rcar-rst.o
obj-$(CONFIG_ARCH_R8A7796) += rcar-rst.o
+obj-$(CONFIG_ARCH_R8A7743) += rcar-sysc.o r8a7743-sysc.o
+obj-$(CONFIG_ARCH_R8A7745) += rcar-sysc.o r8a7745-sysc.o
obj-$(CONFIG_ARCH_R8A7779) += rcar-sysc.o r8a7779-sysc.o
obj-$(CONFIG_ARCH_R8A7790) += rcar-sysc.o r8a7790-sysc.o
obj-$(CONFIG_ARCH_R8A7791) += rcar-sysc.o r8a7791-sysc.o
diff --git a/drivers/soc/renesas/r8a7743-sysc.c b/drivers/soc/renesas/r8a7743-sysc.c
new file mode 100644
index 000000000000..9583a327d90c
--- /dev/null
+++ b/drivers/soc/renesas/r8a7743-sysc.c
@@ -0,0 +1,32 @@
+/*
+ * Renesas RZ/G1M System Controller
+ *
+ * Copyright (C) 2016 Cogent Embedded Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ */
+
+#include <linux/bug.h>
+#include <linux/kernel.h>
+
+#include <dt-bindings/power/r8a7743-sysc.h>
+
+#include "rcar-sysc.h"
+
+static const struct rcar_sysc_area r8a7743_areas[] __initconst = {
+ { "always-on", 0, 0, R8A7743_PD_ALWAYS_ON, -1, PD_ALWAYS_ON },
+ { "ca15-scu", 0x180, 0, R8A7743_PD_CA15_SCU, R8A7743_PD_ALWAYS_ON,
+ PD_SCU },
+ { "ca15-cpu0", 0x40, 0, R8A7743_PD_CA15_CPU0, R8A7743_PD_CA15_SCU,
+ PD_CPU_NOCR },
+ { "ca15-cpu1", 0x40, 1, R8A7743_PD_CA15_CPU1, R8A7743_PD_CA15_SCU,
+ PD_CPU_NOCR },
+ { "sgx", 0xc0, 0, R8A7743_PD_SGX, R8A7743_PD_ALWAYS_ON },
+};
+
+const struct rcar_sysc_info r8a7743_sysc_info __initconst = {
+ .areas = r8a7743_areas,
+ .num_areas = ARRAY_SIZE(r8a7743_areas),
+};
diff --git a/drivers/soc/renesas/r8a7745-sysc.c b/drivers/soc/renesas/r8a7745-sysc.c
new file mode 100644
index 000000000000..d17887c08aa1
--- /dev/null
+++ b/drivers/soc/renesas/r8a7745-sysc.c
@@ -0,0 +1,32 @@
+/*
+ * Renesas RZ/G1E System Controller
+ *
+ * Copyright (C) 2016 Cogent Embedded Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ */
+
+#include <linux/bug.h>
+#include <linux/kernel.h>
+
+#include <dt-bindings/power/r8a7745-sysc.h>
+
+#include "rcar-sysc.h"
+
+static const struct rcar_sysc_area r8a7745_areas[] __initconst = {
+ { "always-on", 0, 0, R8A7745_PD_ALWAYS_ON, -1, PD_ALWAYS_ON },
+ { "ca7-scu", 0x100, 0, R8A7745_PD_CA7_SCU, R8A7745_PD_ALWAYS_ON,
+ PD_SCU },
+ { "ca7-cpu0", 0x1c0, 0, R8A7745_PD_CA7_CPU0, R8A7745_PD_CA7_SCU,
+ PD_CPU_NOCR },
+ { "ca7-cpu1", 0x1c0, 1, R8A7745_PD_CA7_CPU1, R8A7745_PD_CA7_SCU,
+ PD_CPU_NOCR },
+ { "sgx", 0xc0, 0, R8A7745_PD_SGX, R8A7745_PD_ALWAYS_ON },
+};
+
+const struct rcar_sysc_info r8a7745_sysc_info __initconst = {
+ .areas = r8a7745_areas,
+ .num_areas = ARRAY_SIZE(r8a7745_areas),
+};
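
The rcar_sysc_area entries above use positional initializers; for reference, the "ca7-scu" entry expanded with designated initializers (field names taken from struct rcar_sysc_area in rcar-sysc.h; sketch for readability only):

/* Sketch: the same "ca7-scu" entry with its fields spelled out. */
static const struct rcar_sysc_area example_area __initconst = {
	.name      = "ca7-scu",
	.chan_offs = 0x100,			/* PWRSR register offset */
	.chan_bit  = 0,				/* bit within the PWR* registers */
	.isr_bit   = R8A7745_PD_CA7_SCU,	/* bit in the SYSCI*R registers */
	.parent    = R8A7745_PD_ALWAYS_ON,	/* -1 if the area has no parent */
	.flags     = PD_SCU,
};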
diff --git a/drivers/soc/renesas/rcar-sysc.c b/drivers/soc/renesas/rcar-sysc.c
index 65c8e1eb90c0..225c35c79d9a 100644
--- a/drivers/soc/renesas/rcar-sysc.c
+++ b/drivers/soc/renesas/rcar-sysc.c
@@ -275,6 +275,12 @@ finalize:
}
static const struct of_device_id rcar_sysc_matches[] = {
+#ifdef CONFIG_ARCH_R8A7743
+ { .compatible = "renesas,r8a7743-sysc", .data = &r8a7743_sysc_info },
+#endif
+#ifdef CONFIG_ARCH_R8A7745
+ { .compatible = "renesas,r8a7745-sysc", .data = &r8a7745_sysc_info },
+#endif
#ifdef CONFIG_ARCH_R8A7779
{ .compatible = "renesas,r8a7779-sysc", .data = &r8a7779_sysc_info },
#endif
diff --git a/drivers/soc/renesas/rcar-sysc.h b/drivers/soc/renesas/rcar-sysc.h
index 77dbe861473f..f6e842e2976e 100644
--- a/drivers/soc/renesas/rcar-sysc.h
+++ b/drivers/soc/renesas/rcar-sysc.h
@@ -50,6 +50,8 @@ struct rcar_sysc_info {
unsigned int num_areas;
};
+extern const struct rcar_sysc_info r8a7743_sysc_info;
+extern const struct rcar_sysc_info r8a7745_sysc_info;
extern const struct rcar_sysc_info r8a7779_sysc_info;
extern const struct rcar_sysc_info r8a7790_sysc_info;
extern const struct rcar_sysc_info r8a7791_sysc_info;
diff --git a/drivers/soc/renesas/renesas-soc.c b/drivers/soc/renesas/renesas-soc.c
new file mode 100644
index 000000000000..330960312296
--- /dev/null
+++ b/drivers/soc/renesas/renesas-soc.c
@@ -0,0 +1,257 @@
+/*
+ * Renesas SoC Identification
+ *
+ * Copyright (C) 2014-2016 Glider bvba
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/sys_soc.h>
+
+
+struct renesas_family {
+ const char name[16];
+ u32 reg; /* CCCR or PRR, if not in DT */
+};
+
+static const struct renesas_family fam_rcar_gen1 __initconst __maybe_unused = {
+ .name = "R-Car Gen1",
+ .reg = 0xff000044, /* PRR (Product Register) */
+};
+
+static const struct renesas_family fam_rcar_gen2 __initconst __maybe_unused = {
+ .name = "R-Car Gen2",
+ .reg = 0xff000044, /* PRR (Product Register) */
+};
+
+static const struct renesas_family fam_rcar_gen3 __initconst __maybe_unused = {
+ .name = "R-Car Gen3",
+ .reg = 0xfff00044, /* PRR (Product Register) */
+};
+
+static const struct renesas_family fam_rmobile __initconst __maybe_unused = {
+ .name = "R-Mobile",
+ .reg = 0xe600101c, /* CCCR (Common Chip Code Register) */
+};
+
+static const struct renesas_family fam_rza __initconst __maybe_unused = {
+ .name = "RZ/A",
+};
+
+static const struct renesas_family fam_rzg __initconst __maybe_unused = {
+ .name = "RZ/G",
+ .reg = 0xff000044, /* PRR (Product Register) */
+};
+
+static const struct renesas_family fam_shmobile __initconst __maybe_unused = {
+ .name = "SH-Mobile",
+ .reg = 0xe600101c, /* CCCR (Common Chip Code Register) */
+};
+
+
+struct renesas_soc {
+ const struct renesas_family *family;
+ u8 id;
+};
+
+static const struct renesas_soc soc_rz_a1h __initconst __maybe_unused = {
+ .family = &fam_rza,
+};
+
+static const struct renesas_soc soc_rmobile_ape6 __initconst __maybe_unused = {
+ .family = &fam_rmobile,
+ .id = 0x3f,
+};
+
+static const struct renesas_soc soc_rmobile_a1 __initconst __maybe_unused = {
+ .family = &fam_rmobile,
+ .id = 0x40,
+};
+
+static const struct renesas_soc soc_rz_g1m __initconst __maybe_unused = {
+ .family = &fam_rzg,
+ .id = 0x47,
+};
+
+static const struct renesas_soc soc_rz_g1e __initconst __maybe_unused = {
+ .family = &fam_rzg,
+ .id = 0x4c,
+};
+
+static const struct renesas_soc soc_rcar_m1a __initconst __maybe_unused = {
+ .family = &fam_rcar_gen1,
+};
+
+static const struct renesas_soc soc_rcar_h1 __initconst __maybe_unused = {
+ .family = &fam_rcar_gen1,
+ .id = 0x3b,
+};
+
+static const struct renesas_soc soc_rcar_h2 __initconst __maybe_unused = {
+ .family = &fam_rcar_gen2,
+ .id = 0x45,
+};
+
+static const struct renesas_soc soc_rcar_m2_w __initconst __maybe_unused = {
+ .family = &fam_rcar_gen2,
+ .id = 0x47,
+};
+
+static const struct renesas_soc soc_rcar_v2h __initconst __maybe_unused = {
+ .family = &fam_rcar_gen2,
+ .id = 0x4a,
+};
+
+static const struct renesas_soc soc_rcar_m2_n __initconst __maybe_unused = {
+ .family = &fam_rcar_gen2,
+ .id = 0x4b,
+};
+
+static const struct renesas_soc soc_rcar_e2 __initconst __maybe_unused = {
+ .family = &fam_rcar_gen2,
+ .id = 0x4c,
+};
+
+static const struct renesas_soc soc_rcar_h3 __initconst __maybe_unused = {
+ .family = &fam_rcar_gen3,
+ .id = 0x4f,
+};
+
+static const struct renesas_soc soc_rcar_m3_w __initconst __maybe_unused = {
+ .family = &fam_rcar_gen3,
+ .id = 0x52,
+};
+
+static const struct renesas_soc soc_shmobile_ag5 __initconst __maybe_unused = {
+ .family = &fam_shmobile,
+ .id = 0x37,
+};
+
+
+static const struct of_device_id renesas_socs[] __initconst = {
+#ifdef CONFIG_ARCH_R7S72100
+ { .compatible = "renesas,r7s72100", .data = &soc_rz_a1h },
+#endif
+#ifdef CONFIG_ARCH_R8A73A4
+ { .compatible = "renesas,r8a73a4", .data = &soc_rmobile_ape6 },
+#endif
+#ifdef CONFIG_ARCH_R8A7740
+ { .compatible = "renesas,r8a7740", .data = &soc_rmobile_a1 },
+#endif
+#ifdef CONFIG_ARCH_R8A7743
+ { .compatible = "renesas,r8a7743", .data = &soc_rz_g1m },
+#endif
+#ifdef CONFIG_ARCH_R8A7745
+ { .compatible = "renesas,r8a7745", .data = &soc_rz_g1e },
+#endif
+#ifdef CONFIG_ARCH_R8A7778
+ { .compatible = "renesas,r8a7778", .data = &soc_rcar_m1a },
+#endif
+#ifdef CONFIG_ARCH_R8A7779
+ { .compatible = "renesas,r8a7779", .data = &soc_rcar_h1 },
+#endif
+#ifdef CONFIG_ARCH_R8A7790
+ { .compatible = "renesas,r8a7790", .data = &soc_rcar_h2 },
+#endif
+#ifdef CONFIG_ARCH_R8A7791
+ { .compatible = "renesas,r8a7791", .data = &soc_rcar_m2_w },
+#endif
+#ifdef CONFIG_ARCH_R8A7792
+ { .compatible = "renesas,r8a7792", .data = &soc_rcar_v2h },
+#endif
+#ifdef CONFIG_ARCH_R8A7793
+ { .compatible = "renesas,r8a7793", .data = &soc_rcar_m2_n },
+#endif
+#ifdef CONFIG_ARCH_R8A7794
+ { .compatible = "renesas,r8a7794", .data = &soc_rcar_e2 },
+#endif
+#ifdef CONFIG_ARCH_R8A7795
+ { .compatible = "renesas,r8a7795", .data = &soc_rcar_h3 },
+#endif
+#ifdef CONFIG_ARCH_R8A7796
+ { .compatible = "renesas,r8a7796", .data = &soc_rcar_m3_w },
+#endif
+#ifdef CONFIG_ARCH_SH73A0
+ { .compatible = "renesas,sh73a0", .data = &soc_shmobile_ag5 },
+#endif
+ { /* sentinel */ }
+};
+
+static int __init renesas_soc_init(void)
+{
+ struct soc_device_attribute *soc_dev_attr;
+ const struct renesas_family *family;
+ const struct of_device_id *match;
+ const struct renesas_soc *soc;
+ void __iomem *chipid = NULL;
+ struct soc_device *soc_dev;
+ struct device_node *np;
+ unsigned int product;
+
+ match = of_match_node(renesas_socs, of_root);
+ if (!match)
+ return -ENODEV;
+
+ soc = match->data;
+ family = soc->family;
+
+ /* Try PRR first, then hardcoded fallback */
+ np = of_find_compatible_node(NULL, NULL, "renesas,prr");
+ if (np) {
+ chipid = of_iomap(np, 0);
+ of_node_put(np);
+ } else if (soc->id) {
+ chipid = ioremap(family->reg, 4);
+ }
+ if (chipid) {
+ product = readl(chipid);
+ iounmap(chipid);
+ if (soc->id && ((product >> 8) & 0xff) != soc->id) {
+ pr_warn("SoC mismatch (product = 0x%x)\n", product);
+ return -ENODEV;
+ }
+ }
+
+ soc_dev_attr = kzalloc(sizeof(*soc_dev_attr), GFP_KERNEL);
+ if (!soc_dev_attr)
+ return -ENOMEM;
+
+ np = of_find_node_by_path("/");
+ of_property_read_string(np, "model", &soc_dev_attr->machine);
+ of_node_put(np);
+
+ soc_dev_attr->family = kstrdup_const(family->name, GFP_KERNEL);
+ soc_dev_attr->soc_id = kstrdup_const(strchr(match->compatible, ',') + 1,
+ GFP_KERNEL);
+ if (chipid)
+ soc_dev_attr->revision = kasprintf(GFP_KERNEL, "ES%u.%u",
+ ((product >> 4) & 0x0f) + 1,
+ product & 0xf);
+
+ pr_info("Detected Renesas %s %s %s\n", soc_dev_attr->family,
+ soc_dev_attr->soc_id, soc_dev_attr->revision ?: "");
+
+ soc_dev = soc_device_register(soc_dev_attr);
+ if (IS_ERR(soc_dev)) {
+ kfree(soc_dev_attr->revision);
+ kfree_const(soc_dev_attr->soc_id);
+ kfree_const(soc_dev_attr->family);
+ kfree(soc_dev_attr);
+ return PTR_ERR(soc_dev);
+ }
+
+ return 0;
+}
+core_initcall(renesas_soc_init);
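
For reference, renesas_soc_init() decodes the product register as: bits [15:8] product ID, bits [7:4] major revision minus one, bits [3:0] minor revision. A small sketch of that decode (hypothetical helper name):

/* Sketch: PRR/CCCR decode as used by renesas_soc_init() above. */
static void example_decode_product(u32 product)
{
	u8 id = (product >> 8) & 0xff;		/* e.g. 0x47 = RZ/G1M */
	unsigned int major = ((product >> 4) & 0x0f) + 1;
	unsigned int minor = product & 0x0f;

	pr_info("product 0x%02x, revision ES%u.%u\n", id, major, minor);
}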
diff --git a/drivers/soc/rockchip/pm_domains.c b/drivers/soc/rockchip/pm_domains.c
index 7acd1517dd37..1c78c42416c6 100644
--- a/drivers/soc/rockchip/pm_domains.c
+++ b/drivers/soc/rockchip/pm_domains.c
@@ -9,6 +9,7 @@
*/
#include <linux/io.h>
+#include <linux/iopoll.h>
#include <linux/err.h>
#include <linux/pm_clock.h>
#include <linux/pm_domain.h>
@@ -105,12 +106,24 @@ static bool rockchip_pmu_domain_is_idle(struct rockchip_pm_domain *pd)
return (val & pd_info->idle_mask) == pd_info->idle_mask;
}
+static unsigned int rockchip_pmu_read_ack(struct rockchip_pmu *pmu)
+{
+ unsigned int val;
+
+ regmap_read(pmu->regmap, pmu->info->ack_offset, &val);
+ return val;
+}
+
static int rockchip_pmu_set_idle_request(struct rockchip_pm_domain *pd,
bool idle)
{
const struct rockchip_domain_info *pd_info = pd->info;
+ struct generic_pm_domain *genpd = &pd->genpd;
struct rockchip_pmu *pmu = pd->pmu;
+ unsigned int target_ack;
unsigned int val;
+ bool is_idle;
+ int ret;
if (pd_info->req_mask == 0)
return 0;
@@ -120,12 +133,26 @@ static int rockchip_pmu_set_idle_request(struct rockchip_pm_domain *pd,
dsb(sy);
- do {
- regmap_read(pmu->regmap, pmu->info->ack_offset, &val);
- } while ((val & pd_info->ack_mask) != (idle ? pd_info->ack_mask : 0));
+ /* Wait until the ack status matches the request */
+ target_ack = idle ? pd_info->ack_mask : 0;
+ ret = readx_poll_timeout_atomic(rockchip_pmu_read_ack, pmu, val,
+ (val & pd_info->ack_mask) == target_ack,
+ 0, 10000);
+ if (ret) {
+ dev_err(pmu->dev,
+ "failed to get ack on domain '%s', val=0x%x\n",
+ genpd->name, val);
+ return ret;
+ }
- while (rockchip_pmu_domain_is_idle(pd) != idle)
- cpu_relax();
+ ret = readx_poll_timeout_atomic(rockchip_pmu_domain_is_idle, pd,
+ is_idle, is_idle == idle, 0, 10000);
+ if (ret) {
+ dev_err(pmu->dev,
+ "failed to set idle on domain '%s', val=%d\n",
+ genpd->name, is_idle);
+ return ret;
+ }
return 0;
}
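
For reference, readx_poll_timeout_atomic(op, addr, val, cond, delay_us, timeout_us) from <linux/iopoll.h> busy-polls op(addr) until cond holds or timeout_us microseconds elapse. A simplified model of the ack poll above (sketch, not the real macro body):

/* Sketch: what the ack poll above does, open-coded. */
static int example_poll_ack(struct rockchip_pmu *pmu,
			    unsigned int ack_mask, unsigned int target_ack)
{
	ktime_t timeout = ktime_add_us(ktime_get(), 10000);
	unsigned int val;

	for (;;) {
		val = rockchip_pmu_read_ack(pmu);
		if ((val & ack_mask) == target_ack)
			return 0;
		if (ktime_compare(ktime_get(), timeout) > 0)
			return -ETIMEDOUT;
		cpu_relax();	/* delay_us == 0, so just spin */
	}
}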
@@ -198,6 +225,8 @@ static void rockchip_do_pmu_set_power_domain(struct rockchip_pm_domain *pd,
bool on)
{
struct rockchip_pmu *pmu = pd->pmu;
+ struct generic_pm_domain *genpd = &pd->genpd;
+ bool is_on;
if (pd->info->pwr_mask == 0)
return;
@@ -207,8 +236,13 @@ static void rockchip_do_pmu_set_power_domain(struct rockchip_pm_domain *pd,
dsb(sy);
- while (rockchip_pmu_domain_is_on(pd) != on)
- cpu_relax();
+ if (readx_poll_timeout_atomic(rockchip_pmu_domain_is_on, pd, is_on,
+ is_on == on, 0, 10000)) {
+ dev_err(pmu->dev,
+ "failed to set domain '%s', val=%d\n",
+ genpd->name, is_on);
+ return;
+ }
}
static int rockchip_pd_power(struct rockchip_pm_domain *pd, bool power_on)
@@ -445,7 +479,16 @@ err_out:
static void rockchip_pm_remove_one_domain(struct rockchip_pm_domain *pd)
{
- int i;
+ int i, ret;
+
+ /*
+ * We're in the error cleanup already, so we only complain,
+ * but won't emit another error on top of the original one.
+ */
+ ret = pm_genpd_remove(&pd->genpd);
+ if (ret < 0)
+ dev_err(pd->pmu->dev, "failed to remove domain '%s': %d - state may be inconsistent\n",
+ pd->genpd.name, ret);
for (i = 0; i < pd->num_clks; i++) {
clk_unprepare(pd->clks[i]);
@@ -597,10 +640,12 @@ static int rockchip_pm_domain_probe(struct platform_device *pdev)
* Configure power up and down transition delays for CORE
* and GPU domains.
*/
- rockchip_configure_pd_cnt(pmu, pmu_info->core_pwrcnt_offset,
- pmu_info->core_power_transition_time);
- rockchip_configure_pd_cnt(pmu, pmu_info->gpu_pwrcnt_offset,
- pmu_info->gpu_power_transition_time);
+ if (pmu_info->core_power_transition_time)
+ rockchip_configure_pd_cnt(pmu, pmu_info->core_pwrcnt_offset,
+ pmu_info->core_power_transition_time);
+ if (pmu_info->gpu_pwrcnt_offset)
+ rockchip_configure_pd_cnt(pmu, pmu_info->gpu_pwrcnt_offset,
+ pmu_info->gpu_power_transition_time);
error = -ENODEV;
@@ -627,7 +672,11 @@ static int rockchip_pm_domain_probe(struct platform_device *pdev)
goto err_out;
}
- of_genpd_add_provider_onecell(np, &pmu->genpd_data);
+ error = of_genpd_add_provider_onecell(np, &pmu->genpd_data);
+ if (error) {
+ dev_err(dev, "failed to add provider: %d\n", error);
+ goto err_out;
+ }
return 0;
@@ -722,11 +771,7 @@ static const struct rockchip_pmu_info rk3399_pmu = {
.idle_offset = 0x64,
.ack_offset = 0x68,
- .core_pwrcnt_offset = 0x9c,
- .gpu_pwrcnt_offset = 0xa4,
-
- .core_power_transition_time = 24,
- .gpu_power_transition_time = 24,
+ /* ARM Trusted Firmware manages power transition times */
.num_domains = ARRAY_SIZE(rk3399_pm_domains),
.domain_info = rk3399_pm_domains,
diff --git a/drivers/soc/tegra/Kconfig b/drivers/soc/tegra/Kconfig
index 03089ad2fc65..e5e124c07066 100644
--- a/drivers/soc/tegra/Kconfig
+++ b/drivers/soc/tegra/Kconfig
@@ -77,5 +77,19 @@ config ARCH_TEGRA_210_SOC
controllers, such as GPIO, I2C, SPI, SDHCI, PCIe, SATA and XHCI, to
name only a few.
+config ARCH_TEGRA_186_SOC
+ bool "NVIDIA Tegra186 SoC"
+ select MAILBOX
+ select TEGRA_BPMP
+ select TEGRA_HSP_MBOX
+ select TEGRA_IVC
+ help
+ Enable support for the NVIDIA Tegra186 SoC. The Tegra186 features a
+ combination of Denver and Cortex-A57 CPU cores and a GPU based on
+ the Pascal architecture. It contains an ADSP with a Cortex-A9 CPU
+ used for audio processing, hardware video encoders/decoders with
+ multi-format support, ISP for image capture processing and BPMP for
+ power management.
+
endif
endif
diff --git a/drivers/soc/tegra/pmc.c b/drivers/soc/tegra/pmc.c
index 7792ed88d80b..e233dd5dcab3 100644
--- a/drivers/soc/tegra/pmc.c
+++ b/drivers/soc/tegra/pmc.c
@@ -45,29 +45,31 @@
#include <soc/tegra/pmc.h>
#define PMC_CNTRL 0x0
-#define PMC_CNTRL_SYSCLK_POLARITY (1 << 10) /* sys clk polarity */
-#define PMC_CNTRL_SYSCLK_OE (1 << 11) /* system clock enable */
-#define PMC_CNTRL_SIDE_EFFECT_LP0 (1 << 14) /* LP0 when CPU pwr gated */
-#define PMC_CNTRL_CPU_PWRREQ_POLARITY (1 << 15) /* CPU pwr req polarity */
-#define PMC_CNTRL_CPU_PWRREQ_OE (1 << 16) /* CPU pwr req enable */
-#define PMC_CNTRL_INTR_POLARITY (1 << 17) /* inverts INTR polarity */
-#define PMC_CNTRL_MAIN_RST (1 << 4)
+#define PMC_CNTRL_INTR_POLARITY BIT(17) /* inverts INTR polarity */
+#define PMC_CNTRL_CPU_PWRREQ_OE BIT(16) /* CPU pwr req enable */
+#define PMC_CNTRL_CPU_PWRREQ_POLARITY BIT(15) /* CPU pwr req polarity */
+#define PMC_CNTRL_SIDE_EFFECT_LP0 BIT(14) /* LP0 when CPU pwr gated */
+#define PMC_CNTRL_SYSCLK_OE BIT(11) /* system clock enable */
+#define PMC_CNTRL_SYSCLK_POLARITY BIT(10) /* sys clk polarity */
+#define PMC_CNTRL_MAIN_RST BIT(4)
#define DPD_SAMPLE 0x020
-#define DPD_SAMPLE_ENABLE (1 << 0)
+#define DPD_SAMPLE_ENABLE BIT(0)
#define DPD_SAMPLE_DISABLE (0 << 0)
#define PWRGATE_TOGGLE 0x30
-#define PWRGATE_TOGGLE_START (1 << 8)
+#define PWRGATE_TOGGLE_START BIT(8)
#define REMOVE_CLAMPING 0x34
#define PWRGATE_STATUS 0x38
+#define PMC_PWR_DET 0x48
+
#define PMC_SCRATCH0 0x50
-#define PMC_SCRATCH0_MODE_RECOVERY (1 << 31)
-#define PMC_SCRATCH0_MODE_BOOTLOADER (1 << 30)
-#define PMC_SCRATCH0_MODE_RCM (1 << 1)
+#define PMC_SCRATCH0_MODE_RECOVERY BIT(31)
+#define PMC_SCRATCH0_MODE_BOOTLOADER BIT(30)
+#define PMC_SCRATCH0_MODE_RCM BIT(1)
#define PMC_SCRATCH0_MODE_MASK (PMC_SCRATCH0_MODE_RECOVERY | \
PMC_SCRATCH0_MODE_BOOTLOADER | \
PMC_SCRATCH0_MODE_RCM)
@@ -75,11 +77,13 @@
#define PMC_CPUPWRGOOD_TIMER 0xc8
#define PMC_CPUPWROFF_TIMER 0xcc
+#define PMC_PWR_DET_VALUE 0xe4
+
#define PMC_SCRATCH41 0x140
#define PMC_SENSOR_CTRL 0x1b0
-#define PMC_SENSOR_CTRL_SCRATCH_WRITE (1 << 2)
-#define PMC_SENSOR_CTRL_ENABLE_RST (1 << 1)
+#define PMC_SENSOR_CTRL_SCRATCH_WRITE BIT(2)
+#define PMC_SENSOR_CTRL_ENABLE_RST BIT(1)
#define PMC_RST_STATUS 0x1b4
#define PMC_RST_STATUS_POR 0
@@ -90,10 +94,10 @@
#define PMC_RST_STATUS_AOTAG 5
#define IO_DPD_REQ 0x1b8
-#define IO_DPD_REQ_CODE_IDLE (0 << 30)
-#define IO_DPD_REQ_CODE_OFF (1 << 30)
-#define IO_DPD_REQ_CODE_ON (2 << 30)
-#define IO_DPD_REQ_CODE_MASK (3 << 30)
+#define IO_DPD_REQ_CODE_IDLE (0U << 30)
+#define IO_DPD_REQ_CODE_OFF (1U << 30)
+#define IO_DPD_REQ_CODE_ON (2U << 30)
+#define IO_DPD_REQ_CODE_MASK (3U << 30)
#define IO_DPD_STATUS 0x1bc
#define IO_DPD2_REQ 0x1c0
@@ -101,16 +105,16 @@
#define SEL_DPD_TIM 0x1c8
#define PMC_SCRATCH54 0x258
-#define PMC_SCRATCH54_DATA_SHIFT 8
-#define PMC_SCRATCH54_ADDR_SHIFT 0
+#define PMC_SCRATCH54_DATA_SHIFT 8
+#define PMC_SCRATCH54_ADDR_SHIFT 0
#define PMC_SCRATCH55 0x25c
-#define PMC_SCRATCH55_RESET_TEGRA (1 << 31)
-#define PMC_SCRATCH55_CNTRL_ID_SHIFT 27
-#define PMC_SCRATCH55_PINMUX_SHIFT 24
-#define PMC_SCRATCH55_16BITOP (1 << 15)
-#define PMC_SCRATCH55_CHECKSUM_SHIFT 16
-#define PMC_SCRATCH55_I2CSLV1_SHIFT 0
+#define PMC_SCRATCH55_RESET_TEGRA BIT(31)
+#define PMC_SCRATCH55_CNTRL_ID_SHIFT 27
+#define PMC_SCRATCH55_PINMUX_SHIFT 24
+#define PMC_SCRATCH55_16BITOP BIT(15)
+#define PMC_SCRATCH55_CHECKSUM_SHIFT 16
+#define PMC_SCRATCH55_I2CSLV1_SHIFT 0
#define GPU_RG_CNTRL 0x2d4
@@ -124,6 +128,12 @@ struct tegra_powergate {
unsigned int num_resets;
};
+struct tegra_io_pad_soc {
+ enum tegra_io_pad id;
+ unsigned int dpd;
+ unsigned int voltage;
+};
+
struct tegra_pmc_soc {
unsigned int num_powergates;
const char *const *powergates;
@@ -132,6 +142,9 @@ struct tegra_pmc_soc {
bool has_tsense_reset;
bool has_gpu_clamps;
+
+ const struct tegra_io_pad_soc *io_pads;
+ unsigned int num_io_pads;
};
/**
@@ -238,8 +251,6 @@ static int tegra_powergate_lookup(struct tegra_pmc *pmc, const char *name)
return i;
}
- dev_err(pmc->dev, "powergate %s not found\n", name);
-
return -ENODEV;
}
@@ -456,13 +467,12 @@ disable_clks:
static int tegra_genpd_power_on(struct generic_pm_domain *domain)
{
struct tegra_powergate *pg = to_powergate(domain);
- struct tegra_pmc *pmc = pg->pmc;
int err;
err = tegra_powergate_power_up(pg, true);
if (err)
- dev_err(pmc->dev, "failed to turn on PM domain %s: %d\n",
- pg->genpd.name, err);
+ pr_err("failed to turn on PM domain %s: %d\n", pg->genpd.name,
+ err);
return err;
}
@@ -470,13 +480,12 @@ static int tegra_genpd_power_on(struct generic_pm_domain *domain)
static int tegra_genpd_power_off(struct generic_pm_domain *domain)
{
struct tegra_powergate *pg = to_powergate(domain);
- struct tegra_pmc *pmc = pg->pmc;
int err;
err = tegra_powergate_power_down(pg);
if (err)
- dev_err(pmc->dev, "failed to turn off PM domain %s: %d\n",
- pg->genpd.name, err);
+ pr_err("failed to turn off PM domain %s: %d\n",
+ pg->genpd.name, err);
return err;
}
@@ -801,8 +810,7 @@ static void tegra_powergate_add(struct tegra_pmc *pmc, struct device_node *np)
id = tegra_powergate_lookup(pmc, np->name);
if (id < 0) {
- dev_err(pmc->dev, "powergate lookup failed for %s: %d\n",
- np->name, id);
+ pr_err("powergate lookup failed for %s: %d\n", np->name, id);
goto free_mem;
}
@@ -822,20 +830,22 @@ static void tegra_powergate_add(struct tegra_pmc *pmc, struct device_node *np)
err = tegra_powergate_of_get_clks(pg, np);
if (err < 0) {
- dev_err(pmc->dev, "failed to get clocks for %s: %d\n",
- np->name, err);
+ pr_err("failed to get clocks for %s: %d\n", np->name, err);
goto set_available;
}
err = tegra_powergate_of_get_resets(pg, np, off);
if (err < 0) {
- dev_err(pmc->dev, "failed to get resets for %s: %d\n",
- np->name, err);
+ pr_err("failed to get resets for %s: %d\n", np->name, err);
goto remove_clks;
}
- if (!IS_ENABLED(CONFIG_PM_GENERIC_DOMAINS))
- goto power_on_cleanup;
+ if (!IS_ENABLED(CONFIG_PM_GENERIC_DOMAINS)) {
+ if (off)
+ WARN_ON(tegra_powergate_power_up(pg, true));
+
+ goto remove_resets;
+ }
/*
* FIXME: If XHCI is enabled for Tegra, then power-up the XUSB
@@ -846,25 +856,33 @@ static void tegra_powergate_add(struct tegra_pmc *pmc, struct device_node *np)
* to be unused.
*/
if (IS_ENABLED(CONFIG_USB_XHCI_TEGRA) &&
- (id == TEGRA_POWERGATE_XUSBA || id == TEGRA_POWERGATE_XUSBC))
- goto power_on_cleanup;
+ (id == TEGRA_POWERGATE_XUSBA || id == TEGRA_POWERGATE_XUSBC)) {
+ if (off)
+ WARN_ON(tegra_powergate_power_up(pg, true));
+
+ goto remove_resets;
+ }
- pm_genpd_init(&pg->genpd, NULL, off);
+ err = pm_genpd_init(&pg->genpd, NULL, off);
+ if (err < 0) {
+ pr_err("failed to initialise PM domain %s: %d\n", np->name,
+ err);
+ goto remove_resets;
+ }
err = of_genpd_add_provider_simple(np, &pg->genpd);
if (err < 0) {
- dev_err(pmc->dev, "failed to add genpd provider for %s: %d\n",
- np->name, err);
- goto remove_resets;
+ pr_err("failed to add PM domain provider for %s: %d\n",
+ np->name, err);
+ goto remove_genpd;
}
- dev_dbg(pmc->dev, "added power domain %s\n", pg->genpd.name);
+ pr_debug("added PM domain %s\n", pg->genpd.name);
return;
-power_on_cleanup:
- if (off)
- WARN_ON(tegra_powergate_power_up(pg, true));
+remove_genpd:
+ pm_genpd_remove(&pg->genpd);
remove_resets:
while (pg->num_resets--)
@@ -908,21 +926,36 @@ static void tegra_powergate_init(struct tegra_pmc *pmc,
of_node_put(np);
}
-static int tegra_io_rail_prepare(unsigned int id, unsigned long *request,
- unsigned long *status, unsigned int *bit)
+static const struct tegra_io_pad_soc *
+tegra_io_pad_find(struct tegra_pmc *pmc, enum tegra_io_pad id)
{
+ unsigned int i;
+
+ for (i = 0; i < pmc->soc->num_io_pads; i++)
+ if (pmc->soc->io_pads[i].id == id)
+ return &pmc->soc->io_pads[i];
+
+ return NULL;
+}
+
+static int tegra_io_pad_prepare(enum tegra_io_pad id, unsigned long *request,
+ unsigned long *status, u32 *mask)
+{
+ const struct tegra_io_pad_soc *pad;
unsigned long rate, value;
- *bit = id % 32;
+ pad = tegra_io_pad_find(pmc, id);
+ if (!pad) {
+ pr_err("invalid I/O pad ID %u\n", id);
+ return -ENOENT;
+ }
- /*
- * There are two sets of 30 bits to select IO rails, but bits 30 and
- * 31 are control bits rather than IO rail selection bits.
- */
- if (id > 63 || *bit == 30 || *bit == 31)
- return -EINVAL;
+ if (pad->dpd == UINT_MAX)
+ return -ENOTSUPP;
- if (id < 32) {
+ *mask = BIT(pad->dpd % 32);
+
+ if (pad->dpd < 32) {
*status = IO_DPD_STATUS;
*request = IO_DPD_REQ;
} else {
@@ -931,6 +964,10 @@ static int tegra_io_rail_prepare(unsigned int id, unsigned long *request,
}
rate = clk_get_rate(pmc->clk);
+ if (!rate) {
+ pr_err("failed to get clock rate\n");
+ return -ENODEV;
+ }
tegra_pmc_writel(DPD_SAMPLE_ENABLE, DPD_SAMPLE);
@@ -942,10 +979,10 @@ static int tegra_io_rail_prepare(unsigned int id, unsigned long *request,
return 0;
}
-static int tegra_io_rail_poll(unsigned long offset, unsigned long mask,
- unsigned long val, unsigned long timeout)
+static int tegra_io_pad_poll(unsigned long offset, u32 mask,
+ u32 val, unsigned long timeout)
{
- unsigned long value;
+ u32 value;
timeout = jiffies + msecs_to_jiffies(timeout);
@@ -960,67 +997,164 @@ static int tegra_io_rail_poll(unsigned long offset, unsigned long mask,
return -ETIMEDOUT;
}
-static void tegra_io_rail_unprepare(void)
+static void tegra_io_pad_unprepare(void)
{
tegra_pmc_writel(DPD_SAMPLE_DISABLE, DPD_SAMPLE);
}
-int tegra_io_rail_power_on(unsigned int id)
+/**
+ * tegra_io_pad_power_enable() - enable power to I/O pad
+ * @id: Tegra I/O pad ID for which to enable power
+ *
+ * Returns: 0 on success or a negative error code on failure.
+ */
+int tegra_io_pad_power_enable(enum tegra_io_pad id)
{
unsigned long request, status;
- unsigned int bit;
+ u32 mask;
int err;
mutex_lock(&pmc->powergates_lock);
- err = tegra_io_rail_prepare(id, &request, &status, &bit);
- if (err)
- goto error;
+ err = tegra_io_pad_prepare(id, &request, &status, &mask);
+ if (err < 0) {
+ pr_err("failed to prepare I/O pad: %d\n", err);
+ goto unlock;
+ }
- tegra_pmc_writel(IO_DPD_REQ_CODE_OFF | BIT(bit), request);
+ tegra_pmc_writel(IO_DPD_REQ_CODE_OFF | mask, request);
- err = tegra_io_rail_poll(status, BIT(bit), 0, 250);
- if (err) {
- pr_info("tegra_io_rail_poll() failed: %d\n", err);
- goto error;
+ err = tegra_io_pad_poll(status, mask, 0, 250);
+ if (err < 0) {
+ pr_err("failed to enable I/O pad: %d\n", err);
+ goto unlock;
}
- tegra_io_rail_unprepare();
+ tegra_io_pad_unprepare();
-error:
+unlock:
mutex_unlock(&pmc->powergates_lock);
-
return err;
}
-EXPORT_SYMBOL(tegra_io_rail_power_on);
+EXPORT_SYMBOL(tegra_io_pad_power_enable);
-int tegra_io_rail_power_off(unsigned int id)
+/**
+ * tegra_io_pad_power_disable() - disable power to I/O pad
+ * @id: Tegra I/O pad ID for which to disable power
+ *
+ * Returns: 0 on success or a negative error code on failure.
+ */
+int tegra_io_pad_power_disable(enum tegra_io_pad id)
{
unsigned long request, status;
- unsigned int bit;
+ u32 mask;
int err;
mutex_lock(&pmc->powergates_lock);
- err = tegra_io_rail_prepare(id, &request, &status, &bit);
- if (err) {
- pr_info("tegra_io_rail_prepare() failed: %d\n", err);
- goto error;
+ err = tegra_io_pad_prepare(id, &request, &status, &mask);
+ if (err < 0) {
+ pr_err("failed to prepare I/O pad: %d\n", err);
+ goto unlock;
}
- tegra_pmc_writel(IO_DPD_REQ_CODE_ON | BIT(bit), request);
+ tegra_pmc_writel(IO_DPD_REQ_CODE_ON | mask, request);
- err = tegra_io_rail_poll(status, BIT(bit), BIT(bit), 250);
- if (err)
- goto error;
+ err = tegra_io_pad_poll(status, mask, mask, 250);
+ if (err < 0) {
+ pr_err("failed to disable I/O pad: %d\n", err);
+ goto unlock;
+ }
- tegra_io_rail_unprepare();
+ tegra_io_pad_unprepare();
-error:
+unlock:
mutex_unlock(&pmc->powergates_lock);
-
return err;
}
+EXPORT_SYMBOL(tegra_io_pad_power_disable);
+
+int tegra_io_pad_set_voltage(enum tegra_io_pad id,
+ enum tegra_io_pad_voltage voltage)
+{
+ const struct tegra_io_pad_soc *pad;
+ u32 value;
+
+ pad = tegra_io_pad_find(pmc, id);
+ if (!pad)
+ return -ENOENT;
+
+ if (pad->voltage == UINT_MAX)
+ return -ENOTSUPP;
+
+ mutex_lock(&pmc->powergates_lock);
+
+ /* write-enable PMC_PWR_DET_VALUE[pad->voltage] */
+ value = tegra_pmc_readl(PMC_PWR_DET);
+ value |= BIT(pad->voltage);
+ tegra_pmc_writel(value, PMC_PWR_DET);
+
+ /* update I/O voltage */
+ value = tegra_pmc_readl(PMC_PWR_DET_VALUE);
+
+ if (voltage == TEGRA_IO_PAD_1800000UV)
+ value &= ~BIT(pad->voltage);
+ else
+ value |= BIT(pad->voltage);
+
+ tegra_pmc_writel(value, PMC_PWR_DET_VALUE);
+
+ mutex_unlock(&pmc->powergates_lock);
+
+ usleep_range(100, 250);
+
+ return 0;
+}
+EXPORT_SYMBOL(tegra_io_pad_set_voltage);
+
+int tegra_io_pad_get_voltage(enum tegra_io_pad id)
+{
+ const struct tegra_io_pad_soc *pad;
+ u32 value;
+
+ pad = tegra_io_pad_find(pmc, id);
+ if (!pad)
+ return -ENOENT;
+
+ if (pad->voltage == UINT_MAX)
+ return -ENOTSUPP;
+
+ value = tegra_pmc_readl(PMC_PWR_DET_VALUE);
+
+ if ((value & BIT(pad->voltage)) == 0)
+ return TEGRA_IO_PAD_1800000UV;
+
+ return TEGRA_IO_PAD_3300000UV;
+}
+EXPORT_SYMBOL(tegra_io_pad_get_voltage);
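
A hypothetical call site for the new pad APIs, e.g. switching SDMMC1 signaling to 1.8 V and reading the setting back (sketch; the helper name is illustrative and error handling is abbreviated):

/* Sketch: consumer of tegra_io_pad_set_voltage()/get_voltage(). */
static int example_sdmmc1_use_1v8(void)
{
	int err;

	err = tegra_io_pad_set_voltage(TEGRA_IO_PAD_SDMMC1,
				       TEGRA_IO_PAD_1800000UV);
	if (err < 0)
		return err;

	/* Returns the voltage enum, or a negative error code. */
	return tegra_io_pad_get_voltage(TEGRA_IO_PAD_SDMMC1);
}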
+
+/**
+ * tegra_io_rail_power_on() - enable power to I/O rail
+ * @id: Tegra I/O pad ID for which to enable power
+ *
+ * See also: tegra_io_pad_power_enable()
+ */
+int tegra_io_rail_power_on(unsigned int id)
+{
+ return tegra_io_pad_power_enable(id);
+}
+EXPORT_SYMBOL(tegra_io_rail_power_on);
+
+/**
+ * tegra_io_rail_power_off() - disable power to I/O rail
+ * @id: Tegra I/O pad ID for which to disable power
+ *
+ * See also: tegra_io_pad_power_disable()
+ */
+int tegra_io_rail_power_off(unsigned int id)
+{
+ return tegra_io_pad_power_disable(id);
+}
EXPORT_SYMBOL(tegra_io_rail_power_off);
#ifdef CONFIG_PM_SLEEP
@@ -1454,6 +1588,39 @@ static const u8 tegra124_cpu_powergates[] = {
TEGRA_POWERGATE_CPU3,
};
+static const struct tegra_io_pad_soc tegra124_io_pads[] = {
+ { .id = TEGRA_IO_PAD_AUDIO, .dpd = 17, .voltage = UINT_MAX },
+ { .id = TEGRA_IO_PAD_BB, .dpd = 15, .voltage = UINT_MAX },
+ { .id = TEGRA_IO_PAD_CAM, .dpd = 36, .voltage = UINT_MAX },
+ { .id = TEGRA_IO_PAD_COMP, .dpd = 22, .voltage = UINT_MAX },
+ { .id = TEGRA_IO_PAD_CSIA, .dpd = 0, .voltage = UINT_MAX },
+ { .id = TEGRA_IO_PAD_CSIB, .dpd = 1, .voltage = UINT_MAX },
+ { .id = TEGRA_IO_PAD_CSIE, .dpd = 44, .voltage = UINT_MAX },
+ { .id = TEGRA_IO_PAD_DSI, .dpd = 2, .voltage = UINT_MAX },
+ { .id = TEGRA_IO_PAD_DSIB, .dpd = 39, .voltage = UINT_MAX },
+ { .id = TEGRA_IO_PAD_DSIC, .dpd = 40, .voltage = UINT_MAX },
+ { .id = TEGRA_IO_PAD_DSID, .dpd = 41, .voltage = UINT_MAX },
+ { .id = TEGRA_IO_PAD_HDMI, .dpd = 28, .voltage = UINT_MAX },
+ { .id = TEGRA_IO_PAD_HSIC, .dpd = 19, .voltage = UINT_MAX },
+ { .id = TEGRA_IO_PAD_HV, .dpd = 38, .voltage = UINT_MAX },
+ { .id = TEGRA_IO_PAD_LVDS, .dpd = 57, .voltage = UINT_MAX },
+ { .id = TEGRA_IO_PAD_MIPI_BIAS, .dpd = 3, .voltage = UINT_MAX },
+ { .id = TEGRA_IO_PAD_NAND, .dpd = 13, .voltage = UINT_MAX },
+ { .id = TEGRA_IO_PAD_PEX_BIAS, .dpd = 4, .voltage = UINT_MAX },
+ { .id = TEGRA_IO_PAD_PEX_CLK1, .dpd = 5, .voltage = UINT_MAX },
+ { .id = TEGRA_IO_PAD_PEX_CLK2, .dpd = 6, .voltage = UINT_MAX },
+ { .id = TEGRA_IO_PAD_PEX_CNTRL, .dpd = 32, .voltage = UINT_MAX },
+ { .id = TEGRA_IO_PAD_SDMMC1, .dpd = 33, .voltage = UINT_MAX },
+ { .id = TEGRA_IO_PAD_SDMMC3, .dpd = 34, .voltage = UINT_MAX },
+ { .id = TEGRA_IO_PAD_SDMMC4, .dpd = 35, .voltage = UINT_MAX },
+ { .id = TEGRA_IO_PAD_SYS_DDC, .dpd = 58, .voltage = UINT_MAX },
+ { .id = TEGRA_IO_PAD_UART, .dpd = 14, .voltage = UINT_MAX },
+ { .id = TEGRA_IO_PAD_USB0, .dpd = 9, .voltage = UINT_MAX },
+ { .id = TEGRA_IO_PAD_USB1, .dpd = 10, .voltage = UINT_MAX },
+ { .id = TEGRA_IO_PAD_USB2, .dpd = 11, .voltage = UINT_MAX },
+ { .id = TEGRA_IO_PAD_USB_BIAS, .dpd = 12, .voltage = UINT_MAX },
+};
+
static const struct tegra_pmc_soc tegra124_pmc_soc = {
.num_powergates = ARRAY_SIZE(tegra124_powergates),
.powergates = tegra124_powergates,
@@ -1461,6 +1628,8 @@ static const struct tegra_pmc_soc tegra124_pmc_soc = {
.cpu_powergates = tegra124_cpu_powergates,
.has_tsense_reset = true,
.has_gpu_clamps = true,
+ .num_io_pads = ARRAY_SIZE(tegra124_io_pads),
+ .io_pads = tegra124_io_pads,
};
static const char * const tegra210_powergates[] = {
@@ -1497,6 +1666,47 @@ static const u8 tegra210_cpu_powergates[] = {
TEGRA_POWERGATE_CPU3,
};
+static const struct tegra_io_pad_soc tegra210_io_pads[] = {
+ { .id = TEGRA_IO_PAD_AUDIO, .dpd = 17, .voltage = 5 },
+ { .id = TEGRA_IO_PAD_AUDIO_HV, .dpd = 61, .voltage = 18 },
+ { .id = TEGRA_IO_PAD_CAM, .dpd = 36, .voltage = 10 },
+ { .id = TEGRA_IO_PAD_CSIA, .dpd = 0, .voltage = UINT_MAX },
+ { .id = TEGRA_IO_PAD_CSIB, .dpd = 1, .voltage = UINT_MAX },
+ { .id = TEGRA_IO_PAD_CSIC, .dpd = 42, .voltage = UINT_MAX },
+ { .id = TEGRA_IO_PAD_CSID, .dpd = 43, .voltage = UINT_MAX },
+ { .id = TEGRA_IO_PAD_CSIE, .dpd = 44, .voltage = UINT_MAX },
+ { .id = TEGRA_IO_PAD_CSIF, .dpd = 45, .voltage = UINT_MAX },
+ { .id = TEGRA_IO_PAD_DBG, .dpd = 25, .voltage = 19 },
+ { .id = TEGRA_IO_PAD_DEBUG_NONAO, .dpd = 26, .voltage = UINT_MAX },
+ { .id = TEGRA_IO_PAD_DMIC, .dpd = 50, .voltage = 20 },
+ { .id = TEGRA_IO_PAD_DP, .dpd = 51, .voltage = UINT_MAX },
+ { .id = TEGRA_IO_PAD_DSI, .dpd = 2, .voltage = UINT_MAX },
+ { .id = TEGRA_IO_PAD_DSIB, .dpd = 39, .voltage = UINT_MAX },
+ { .id = TEGRA_IO_PAD_DSIC, .dpd = 40, .voltage = UINT_MAX },
+ { .id = TEGRA_IO_PAD_DSID, .dpd = 41, .voltage = UINT_MAX },
+ { .id = TEGRA_IO_PAD_EMMC, .dpd = 35, .voltage = UINT_MAX },
+ { .id = TEGRA_IO_PAD_EMMC2, .dpd = 37, .voltage = UINT_MAX },
+ { .id = TEGRA_IO_PAD_GPIO, .dpd = 27, .voltage = 21 },
+ { .id = TEGRA_IO_PAD_HDMI, .dpd = 28, .voltage = UINT_MAX },
+ { .id = TEGRA_IO_PAD_HSIC, .dpd = 19, .voltage = UINT_MAX },
+ { .id = TEGRA_IO_PAD_LVDS, .dpd = 57, .voltage = UINT_MAX },
+ { .id = TEGRA_IO_PAD_MIPI_BIAS, .dpd = 3, .voltage = UINT_MAX },
+ { .id = TEGRA_IO_PAD_PEX_BIAS, .dpd = 4, .voltage = UINT_MAX },
+ { .id = TEGRA_IO_PAD_PEX_CLK1, .dpd = 5, .voltage = UINT_MAX },
+ { .id = TEGRA_IO_PAD_PEX_CLK2, .dpd = 6, .voltage = UINT_MAX },
+ { .id = TEGRA_IO_PAD_PEX_CNTRL, .dpd = UINT_MAX, .voltage = 11 },
+ { .id = TEGRA_IO_PAD_SDMMC1, .dpd = 33, .voltage = 12 },
+ { .id = TEGRA_IO_PAD_SDMMC3, .dpd = 34, .voltage = 13 },
+ { .id = TEGRA_IO_PAD_SPI, .dpd = 46, .voltage = 22 },
+ { .id = TEGRA_IO_PAD_SPI_HV, .dpd = 47, .voltage = 23 },
+ { .id = TEGRA_IO_PAD_UART, .dpd = 14, .voltage = 2 },
+ { .id = TEGRA_IO_PAD_USB0, .dpd = 9, .voltage = UINT_MAX },
+ { .id = TEGRA_IO_PAD_USB1, .dpd = 10, .voltage = UINT_MAX },
+ { .id = TEGRA_IO_PAD_USB2, .dpd = 11, .voltage = UINT_MAX },
+ { .id = TEGRA_IO_PAD_USB3, .dpd = 18, .voltage = UINT_MAX },
+ { .id = TEGRA_IO_PAD_USB_BIAS, .dpd = 12, .voltage = UINT_MAX },
+};
+
static const struct tegra_pmc_soc tegra210_pmc_soc = {
.num_powergates = ARRAY_SIZE(tegra210_powergates),
.powergates = tegra210_powergates,
@@ -1504,6 +1714,8 @@ static const struct tegra_pmc_soc tegra210_pmc_soc = {
.cpu_powergates = tegra210_cpu_powergates,
.has_tsense_reset = true,
.has_gpu_clamps = true,
+ .num_io_pads = ARRAY_SIZE(tegra210_io_pads),
+ .io_pads = tegra210_io_pads,
};
static const struct of_device_id tegra_pmc_match[] = {