Diffstat (limited to 'target/linux/ipq806x/patches-5.15/098-1-cpufreq-add-Krait-dedicated-scaling-driver.patch')
-rw-r--r--  target/linux/ipq806x/patches-5.15/098-1-cpufreq-add-Krait-dedicated-scaling-driver.patch  |  681
1 file changed, 681 insertions(+), 0 deletions(-)
diff --git a/target/linux/ipq806x/patches-5.15/098-1-cpufreq-add-Krait-dedicated-scaling-driver.patch b/target/linux/ipq806x/patches-5.15/098-1-cpufreq-add-Krait-dedicated-scaling-driver.patch
new file mode 100644
index 0000000000..769686220b
--- /dev/null
+++ b/target/linux/ipq806x/patches-5.15/098-1-cpufreq-add-Krait-dedicated-scaling-driver.patch
@@ -0,0 +1,681 @@
+From cc41a266280cad0b55319e614167c88dff344248 Mon Sep 17 00:00:00 2001
+From: Ansuel Smith <ansuelsmth@gmail.com>
+Date: Sat, 22 Feb 2020 16:33:10 +0100
+Subject: [PATCH 1/8] cpufreq: add Krait dedicated scaling driver
+
+This new driver is based on the generic cpufreq-dt driver.
+Krait SoCs have 2-4 CPUs and one shared L2 cache that can
+operate at different frequencies based on the maximum CPU clk
+across all cores.
+The L2 frequency and voltage are scaled on every frequency change
+if needed. Krait SoCs have a bug that can cause problems when
+transitioning between frequency bins; to work around it, when
+switching between two non-idle bins the L2 frequency is first
+set to the base rate and then to the target rate.
+The L2 frequency uses the OPP framework and the opp-level
+bindings to link the L2 freqs to the different CPU freqs. This is
+needed as the Krait L2 clks are not mapped 1:1 to the core clks
+and some of the L2 clks are selected based on a range of CPU clks
+(for example, every CPU OPP above the idle rate may map to the
+same L2 level). If the driver finds a broken config (for example
+no opp-level set), the L2 scaling is skipped.
+
+Signed-off-by: Ansuel Smith <ansuelsmth@gmail.com>
+---
+ drivers/cpufreq/Kconfig.arm | 14 +-
+ drivers/cpufreq/Makefile | 2 +
+ drivers/cpufreq/qcom-cpufreq-krait.c | 589 +++++++++++++++++++++++++++
+ 3 files changed, 604 insertions(+), 1 deletion(-)
+ create mode 100644 drivers/cpufreq/qcom-cpufreq-krait.c
+
+--- a/drivers/cpufreq/Kconfig.arm
++++ b/drivers/cpufreq/Kconfig.arm
+@@ -150,6 +150,18 @@ config ARM_QCOM_CPUFREQ_HW
+ The driver implements the cpufreq interface for this HW engine.
+ Say Y if you want to support CPUFreq HW.
+
++config ARM_QCOM_CPUFREQ_KRAIT
++ tristate "CPU Frequency scaling support for Krait SoCs"
++ depends on ARCH_QCOM || COMPILE_TEST
++ select PM_OPP
++ select ARM_QCOM_CPUFREQ_NVMEM
++ help
++ This adds the CPUFreq driver for Qualcomm Krait SoC based boards.
++ It scales the shared L2 cache clk and regulator based on the CPU
++ clks whenever the core clks are scaled.
++
++ If in doubt, say N.
++
+ config ARM_RASPBERRYPI_CPUFREQ
+ tristate "Raspberry Pi cpufreq support"
+ depends on CLK_RASPBERRYPI || COMPILE_TEST
+@@ -339,4 +351,4 @@ config ARM_PXA2xx_CPUFREQ
+ help
+ This add the CPUFreq driver support for Intel PXA2xx SOCs.
+
+- If in doubt, say N.
++ If in doubt, say N.
+\ No newline at end of file
+--- a/drivers/cpufreq/Makefile
++++ b/drivers/cpufreq/Makefile
+@@ -63,6 +63,7 @@ obj-$(CONFIG_ARM_PXA2xx_CPUFREQ) += pxa2
+ obj-$(CONFIG_PXA3xx) += pxa3xx-cpufreq.o
+ obj-$(CONFIG_ARM_QCOM_CPUFREQ_HW) += qcom-cpufreq-hw.o
+ obj-$(CONFIG_ARM_QCOM_CPUFREQ_NVMEM) += qcom-cpufreq-nvmem.o
++obj-$(CONFIG_ARM_QCOM_CPUFREQ_KRAIT) += qcom-cpufreq-krait.o
+ obj-$(CONFIG_ARM_RASPBERRYPI_CPUFREQ) += raspberrypi-cpufreq.o
+ obj-$(CONFIG_ARM_S3C2410_CPUFREQ) += s3c2410-cpufreq.o
+ obj-$(CONFIG_ARM_S3C2412_CPUFREQ) += s3c2412-cpufreq.o
+@@ -86,6 +87,7 @@ obj-$(CONFIG_ARM_TEGRA186_CPUFREQ) += te
+ obj-$(CONFIG_ARM_TEGRA194_CPUFREQ) += tegra194-cpufreq.o
+ obj-$(CONFIG_ARM_TI_CPUFREQ) += ti-cpufreq.o
+ obj-$(CONFIG_ARM_VEXPRESS_SPC_CPUFREQ) += vexpress-spc-cpufreq.o
++obj-$(CONFIG_ARM_KRAIT_CPUFREQ) += krait-cpufreq.o
+
+
+ ##################################################################################
+--- /dev/null
++++ b/drivers/cpufreq/qcom-cpufreq-krait.c
+@@ -0,0 +1,603 @@
++// SPDX-License-Identifier: GPL-2.0
++
++#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
++
++#include <linux/clk.h>
++#include <linux/cpu.h>
++#include <linux/cpufreq.h>
++#include <linux/cpumask.h>
++#include <linux/err.h>
++#include <linux/module.h>
++#include <linux/of.h>
++#include <linux/of_device.h>
++#include <linux/pm_opp.h>
++#include <linux/platform_device.h>
++#include <linux/regulator/consumer.h>
++#include <linux/slab.h>
++#include <linux/thermal.h>
++
++#include "cpufreq-dt.h"
++
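++/*
++ * Set by the krait-cache platform driver once the L2 OPP table, regulator
++ * and opp-level configuration have been validated. Left NULL otherwise, in
++ * which case set_target() skips the L2 scaling entirely.
++ */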
++static struct platform_device *l2_pdev;
++
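++/* Per-policy data, modelled on the private_data used by cpufreq-dt */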
++struct private_data {
++ struct opp_table *opp_table;
++ struct device *cpu_dev;
++ const char *reg_name;
++ bool have_static_opps;
++};
++
++static int set_target(struct cpufreq_policy *policy, unsigned int index)
++{
++ struct private_data *priv = policy->driver_data;
++ unsigned long freq = policy->freq_table[index].frequency;
++ unsigned long target_freq = freq * 1000;
++ struct dev_pm_opp *opp;
++ unsigned int level;
++ int cpu, ret;
++
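++ /*
++ * If the Krait L2 cache device has probed, first scale the L2 clock
++ * and voltage to the opp-level matching the highest frequency
++ * requested across all cores.
++ */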
++ if (l2_pdev) {
++ int policy_cpu = policy->cpu;
++
++ /* find the max freq across all cores */
++ for_each_present_cpu(cpu)
++ if (cpu != policy_cpu)
++ target_freq = max(
++ target_freq,
++ (unsigned long)cpufreq_quick_get(cpu));
++
++ opp = dev_pm_opp_find_freq_exact(priv->cpu_dev, target_freq,
++ true);
++ if (IS_ERR(opp)) {
++ dev_err(&l2_pdev->dev, "failed to find OPP for %lu\n",
++ target_freq);
++ return PTR_ERR(opp);
++ }
++ level = dev_pm_opp_get_level(opp);
++ dev_pm_opp_put(opp);
++
++ /*
++ * Hardware constraint:
++ * the Krait CPU cannot operate at 384MHz with the L2 at 1GHz.
++ * Assume index 0 is the idle freq and any level > 0 means an
++ * L2 freq above 384MHz.
++ * Skip the CPU freq change in this corner case.
++ */
++ if (unlikely(index == 0 && level != 0)) {
++ dev_err(priv->cpu_dev, "Krait CPU can't operate at idle freq with L2 at 1GHz\n");
++ return -EINVAL;
++ }
++
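++ /* Translate the CPU opp-level into the matching L2 OPP rate */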
++ opp = dev_pm_opp_find_level_exact(&l2_pdev->dev, level);
++ if (IS_ERR(opp)) {
++ dev_err(&l2_pdev->dev,
++ "failed to find level OPP for %d\n", level);
++ return PTR_ERR(opp);
++ }
++ target_freq = dev_pm_opp_get_freq(opp);
++ dev_pm_opp_put(opp);
++
++ ret = dev_pm_opp_set_rate(&l2_pdev->dev, target_freq);
++ if (ret)
++ return ret;
++ }
++
++ ret = dev_pm_opp_set_rate(priv->cpu_dev, freq * 1000);
++ if (ret)
++ return ret;
++
++ arch_set_freq_scale(policy->related_cpus, freq,
++ policy->cpuinfo.max_freq);
++
++ return 0;
++}
++
++/*
++ * An earlier version of opp-v1 bindings used to name the regulator
++ * "cpu0-supply", we still need to handle that for backwards compatibility.
++ */
++static const char *find_supply_name(struct device *dev)
++{
++ struct device_node *np;
++ struct property *pp;
++ int cpu = dev->id;
++ const char *name = NULL;
++
++ np = of_node_get(dev->of_node);
++
++ /* This must be valid for sure */
++ if (WARN_ON(!np))
++ return NULL;
++
++ /* Try "cpu0" for older DTs */
++ if (!cpu) {
++ pp = of_find_property(np, "cpu0-supply", NULL);
++ if (pp) {
++ name = "cpu0";
++ goto node_put;
++ }
++ }
++
++ pp = of_find_property(np, "cpu-supply", NULL);
++ if (pp) {
++ name = "cpu";
++ goto node_put;
++ }
++
++ dev_dbg(dev, "no regulator for cpu%d\n", cpu);
++node_put:
++ of_node_put(np);
++ return name;
++}
++
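++/*
++ * Probe-time check that cpu0's clock and (optional) regulator providers are
++ * already registered; returns -EPROBE_DEFER when they are not ready yet.
++ */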
++static int resources_available(void)
++{
++ struct device *cpu_dev;
++ struct regulator *cpu_reg;
++ struct clk *cpu_clk;
++ int ret = 0;
++ const char *name;
++
++ cpu_dev = get_cpu_device(0);
++ if (!cpu_dev) {
++ pr_err("failed to get cpu0 device\n");
++ return -ENODEV;
++ }
++
++ cpu_clk = clk_get(cpu_dev, NULL);
++ ret = PTR_ERR_OR_ZERO(cpu_clk);
++ if (ret) {
++ /*
++ * If cpu's clk node is present, but clock is not yet
++ * registered, we should try deferring probe.
++ */
++ if (ret == -EPROBE_DEFER)
++ dev_dbg(cpu_dev, "clock not ready, retry\n");
++ else
++ dev_err(cpu_dev, "failed to get clock: %d\n", ret);
++
++ return ret;
++ }
++
++ clk_put(cpu_clk);
++
++ name = find_supply_name(cpu_dev);
++ /* Platform doesn't require regulator */
++ if (!name)
++ return 0;
++
++ cpu_reg = regulator_get_optional(cpu_dev, name);
++ ret = PTR_ERR_OR_ZERO(cpu_reg);
++ if (ret) {
++ /*
++ * If cpu's regulator supply node is present, but regulator is
++ * not yet registered, we should try deferring probe.
++ */
++ if (ret == -EPROBE_DEFER)
++ dev_dbg(cpu_dev, "cpu0 regulator not ready, retry\n");
++ else
++ dev_dbg(cpu_dev, "no regulator for cpu0: %d\n", ret);
++
++ return ret;
++ }
++
++ regulator_put(cpu_reg);
++ return 0;
++}
++
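++/*
++ * Policy init, largely following cpufreq-dt: set up shared OPPs, the
++ * optional CPU supply, the frequency table and the energy model.
++ */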
++static int cpufreq_init(struct cpufreq_policy *policy)
++{
++ struct cpufreq_frequency_table *freq_table;
++ struct opp_table *opp_table = NULL;
++ unsigned int transition_latency;
++ struct private_data *priv;
++ struct device *cpu_dev;
++ bool fallback = false;
++ struct clk *cpu_clk;
++ const char *name;
++ int ret;
++
++ cpu_dev = get_cpu_device(policy->cpu);
++ if (!cpu_dev) {
++ pr_err("failed to get cpu%d device\n", policy->cpu);
++ return -ENODEV;
++ }
++
++ cpu_clk = clk_get(cpu_dev, NULL);
++ if (IS_ERR(cpu_clk)) {
++ ret = PTR_ERR(cpu_clk);
++ dev_err(cpu_dev, "%s: failed to get clk: %d\n", __func__, ret);
++ return ret;
++ }
++
++ /* Get OPP-sharing information from "operating-points-v2" bindings */
++ ret = dev_pm_opp_of_get_sharing_cpus(cpu_dev, policy->cpus);
++ if (ret) {
++ if (ret != -ENOENT)
++ goto out_put_clk;
++
++ /*
++ * operating-points-v2 not supported, fallback to old method of
++ * finding shared-OPPs for backward compatibility if the
++ * platform hasn't set sharing CPUs.
++ */
++ if (dev_pm_opp_get_sharing_cpus(cpu_dev, policy->cpus))
++ fallback = true;
++ }
++
++ /*
++ * OPP layer will be taking care of regulators now, but it needs to know
++ * the name of the regulator first.
++ */
++ name = find_supply_name(cpu_dev);
++ if (name) {
++ opp_table = dev_pm_opp_set_regulators(cpu_dev, &name, 1);
++ if (IS_ERR(opp_table)) {
++ ret = PTR_ERR(opp_table);
++ dev_err(cpu_dev,
++ "Failed to set regulator for cpu%d: %d\n",
++ policy->cpu, ret);
++ goto out_put_clk;
++ }
++ }
++
++ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
++ if (!priv) {
++ ret = -ENOMEM;
++ goto out_put_regulator;
++ }
++
++ priv->reg_name = name;
++ priv->opp_table = opp_table;
++
++ /*
++ * Initialize OPP tables for all policy->cpus. They will be shared by
++ * all CPUs which have marked their CPUs shared with OPP bindings.
++ *
++ * For platforms not using operating-points-v2 bindings, we do this
++ * before updating policy->cpus. Otherwise, we will end up creating
++ * duplicate OPPs for policy->cpus.
++ *
++ * OPPs might be populated at runtime, don't check for error here
++ */
++ if (!dev_pm_opp_of_cpumask_add_table(policy->cpus))
++ priv->have_static_opps = true;
++
++ /*
++ * But we need the OPP table to function, so if it is not there let's
++ * give the platform code a chance to provide it for us.
++ */
++ ret = dev_pm_opp_get_opp_count(cpu_dev);
++ if (ret <= 0) {
++ dev_dbg(cpu_dev, "OPP table is not ready, deferring probe\n");
++ ret = -EPROBE_DEFER;
++ goto out_free_opp;
++ }
++
++ if (fallback) {
++ cpumask_setall(policy->cpus);
++
++ /*
++ * OPP tables are initialized only for policy->cpu, do it for
++ * others as well.
++ */
++ ret = dev_pm_opp_set_sharing_cpus(cpu_dev, policy->cpus);
++ if (ret)
++ dev_err(cpu_dev,
++ "%s: failed to mark OPPs as shared: %d\n",
++ __func__, ret);
++ }
++
++ ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table);
++ if (ret) {
++ dev_err(cpu_dev, "failed to init cpufreq table: %d\n", ret);
++ goto out_free_opp;
++ }
++
++ priv->cpu_dev = cpu_dev;
++
++ policy->driver_data = priv;
++ policy->clk = cpu_clk;
++ policy->freq_table = freq_table;
++
++ policy->suspend_freq = dev_pm_opp_get_suspend_opp_freq(cpu_dev) / 1000;
++
++ transition_latency = dev_pm_opp_get_max_transition_latency(cpu_dev);
++ if (!transition_latency)
++ transition_latency = CPUFREQ_ETERNAL;
++
++ policy->cpuinfo.transition_latency = transition_latency;
++ policy->dvfs_possible_from_any_cpu = true;
++
++ dev_pm_opp_of_register_em(cpu_dev, policy->cpus);
++
++ return 0;
++
++out_free_opp:
++ if (priv->have_static_opps)
++ dev_pm_opp_of_cpumask_remove_table(policy->cpus);
++ kfree(priv);
++out_put_regulator:
++ if (name)
++ dev_pm_opp_put_regulators(opp_table);
++out_put_clk:
++ clk_put(cpu_clk);
++
++ return ret;
++}
++
++static int cpufreq_online(struct cpufreq_policy *policy)
++{
++ /* We did light-weight tear down earlier, nothing to do here */
++ return 0;
++}
++
++static int cpufreq_offline(struct cpufreq_policy *policy)
++{
++ /*
++ * Preserve policy->driver_data and don't free resources on light-weight
++ * tear down.
++ */
++ return 0;
++}
++
++static int cpufreq_exit(struct cpufreq_policy *policy)
++{
++ struct private_data *priv = policy->driver_data;
++
++ dev_pm_opp_free_cpufreq_table(priv->cpu_dev, &policy->freq_table);
++ if (priv->have_static_opps)
++ dev_pm_opp_of_cpumask_remove_table(policy->related_cpus);
++ if (priv->reg_name)
++ dev_pm_opp_put_regulators(priv->opp_table);
++
++ clk_put(policy->clk);
++ kfree(priv);
++
++ return 0;
++}
++
++static struct cpufreq_driver krait_cpufreq_driver = {
++ .flags = CPUFREQ_STICKY | CPUFREQ_NEED_INITIAL_FREQ_CHECK |
++ CPUFREQ_IS_COOLING_DEV,
++ .verify = cpufreq_generic_frequency_table_verify,
++ .target_index = set_target,
++ .get = cpufreq_generic_get,
++ .init = cpufreq_init,
++ .exit = cpufreq_exit,
++ .online = cpufreq_online,
++ .offline = cpufreq_offline,
++ .name = "krait-cpufreq",
++ .suspend = cpufreq_generic_suspend,
++};
++
++struct krait_data {
++ unsigned long idle_freq;
++ bool regulator_enabled;
++};
++
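++/*
++ * Custom set_opp helper for the L2 cache: scale the voltage before the
++ * clock when raising the frequency and after it when lowering, bouncing
++ * through the idle rate when moving between two non-idle bins to work
++ * around the Krait L2 transition bug.
++ */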
++static int krait_cache_set_opp(struct dev_pm_set_opp_data *data)
++{
++ unsigned long old_freq = data->old_opp.rate, freq = data->new_opp.rate;
++ struct dev_pm_opp_supply *supply = &data->new_opp.supplies[0];
++ struct regulator *reg = data->regulators[0];
++ struct clk *clk = data->clk;
++ struct krait_data *kdata;
++ unsigned long idle_freq;
++ int ret;
++
++ kdata = (struct krait_data *)dev_get_drvdata(data->dev);
++ idle_freq = kdata->idle_freq;
++
++ /* Scaling up? Scale voltage before frequency */
++ if (freq >= old_freq) {
++ ret = regulator_set_voltage_triplet(reg, supply->u_volt_min,
++ supply->u_volt,
++ supply->u_volt_max);
++ if (ret)
++ goto exit;
++ }
++
++ /*
++ * Pass through the idle bin when switching directly between two
++ * non-idle bins. It has been noticed that a bug is triggered
++ * in cache scaling when more than one bin is crossed, so we
++ * first need to transition to the base rate and then to the
++ * target rate.
++ */
++ if (likely(freq != idle_freq && old_freq != idle_freq)) {
++ ret = clk_set_rate(clk, idle_freq);
++ if (ret)
++ goto exit;
++ }
++
++ ret = clk_set_rate(clk, freq);
++ if (ret)
++ goto exit;
++
++ /* Scaling down? Scale voltage after frequency */
++ if (freq < old_freq) {
++ ret = regulator_set_voltage_triplet(reg, supply->u_volt_min,
++ supply->u_volt,
++ supply->u_volt_max);
++ }
++
++ if (unlikely(!kdata->regulator_enabled)) {
++ ret = regulator_enable(reg);
++ if (ret < 0)
++ dev_warn(data->dev, "Failed to enable regulator: %d\n", ret);
++ else
++ kdata->regulator_enabled = true;
++ }
++
++exit:
++ return ret;
++};
++
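++/*
++ * Probe the L2 cache device: claim the "l2" supply, register the custom
++ * set_opp helper, parse the OPP table and verify that both the L2 and the
++ * CPU tables expose opp-level 1 before publishing l2_pdev to the cpufreq
++ * driver.
++ */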
++static int krait_cache_probe(struct platform_device *pdev)
++{
++ struct device *dev = &pdev->dev;
++ struct krait_data *data;
++ struct opp_table *table;
++ struct dev_pm_opp *opp;
++ struct device *cpu_dev;
++ int ret;
++
++ data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
++ if (!data)
++ return -ENOMEM;
++
++ table = dev_pm_opp_set_regulators(dev, (const char *[]){ "l2" }, 1);
++ if (IS_ERR(table)) {
++ ret = PTR_ERR(table);
++ if (ret != -EPROBE_DEFER)
++ dev_err(dev, "failed to set regulators %d\n", ret);
++
++ return ret;
++ }
++
++ ret = PTR_ERR_OR_ZERO(
++ dev_pm_opp_register_set_opp_helper(dev, krait_cache_set_opp));
++ if (ret)
++ return ret;
++
++ ret = dev_pm_opp_of_add_table(dev);
++ if (ret) {
++ dev_err(dev, "failed to parse L2 freq thresholds\n");
++ return ret;
++ }
++
++ opp = dev_pm_opp_find_freq_ceil(dev, &data->idle_freq);
++ dev_pm_opp_put(opp);
++
++ /*
++ * Check the opp-level configuration.
++ * At least 2 levels must be set or the cache will always be scaled
++ * to the idle freq, causing performance problems.
++ *
++ * In case of an invalid configuration, the L2 scaling is skipped.
++ */
++ cpu_dev = get_cpu_device(0);
++ if (!cpu_dev) {
++ pr_err("failed to get cpu0 device\n");
++ return -ENODEV;
++ }
++
++ /*
++ * Check that the L2 table has at least opp-level 1; level 0 should
++ * always be set to the idle freq.
++ */
++ opp = dev_pm_opp_find_level_exact(dev, 1);
++ if (IS_ERR(opp)) {
++ dev_err(dev,
++ "Invalid L2 OPP configuration: can't find opp-level 1\n");
++ goto invalid_conf;
++ }
++ dev_pm_opp_put(opp);
++
++ /*
++ * Check that the CPU OPP table also has at least opp-level 1; level
++ * 0 should always be set to the idle freq.
++ */
++ opp = dev_pm_opp_find_level_exact(cpu_dev, 1);
++ if (IS_ERR(opp)) {
++ dev_err(dev,
++ "Invalid CPU OPP configuration: can't find opp-level 1\n");
++ goto invalid_conf;
++ }
++ dev_pm_opp_put(opp);
++
++ platform_set_drvdata(pdev, data);
++
++ /* Publishing l2_pdev enables the L2 scaling in the cpufreq driver */
++ l2_pdev = pdev;
++
++ return 0;
++
++invalid_conf:
++ dev_pm_opp_remove_table(dev);
++ dev_pm_opp_put_regulators(table);
++ dev_pm_opp_unregister_set_opp_helper(table);
++
++ return -EINVAL;
++};
++
++static int krait_cache_remove(struct platform_device *pdev)
++{
++ struct device *dev = &pdev->dev;
++ struct opp_table *table = dev_pm_opp_get_opp_table(dev);
++
++ dev_pm_opp_remove_table(dev);
++ dev_pm_opp_put_regulators(table);
++ dev_pm_opp_unregister_set_opp_helper(table);
++
++ return 0;
++};
++
++static const struct of_device_id krait_cache_match_table[] = {
++ { .compatible = "qcom,krait-cache" },
++ {}
++};
++
++static struct platform_driver krait_cache_driver = {
++ .driver = {
++ .name = "krait-cache",
++ .of_match_table = krait_cache_match_table,
++ },
++ .probe = krait_cache_probe,
++ .remove = krait_cache_remove,
++};
++module_platform_driver(krait_cache_driver);
++
++static int krait_cpufreq_probe(struct platform_device *pdev)
++{
++ struct cpufreq_dt_platform_data *data = dev_get_platdata(&pdev->dev);
++ int ret;
++
++ /*
++ * All per-cluster (CPUs sharing clock/voltages) initialization is done
++ * from ->init(). In probe(), we just need to make sure that clk and
++ * regulators are available. Else defer probe and retry.
++ *
++ * FIXME: Is checking this only for CPU0 sufficient ?
++ */
++ ret = resources_available();
++ if (ret)
++ return ret;
++
++ if (data) {
++ if (data->have_governor_per_policy)
++ krait_cpufreq_driver.flags |=
++ CPUFREQ_HAVE_GOVERNOR_PER_POLICY;
++
++ krait_cpufreq_driver.resume = data->resume;
++ if (data->suspend)
++ krait_cpufreq_driver.suspend = data->suspend;
++ }
++
++ ret = cpufreq_register_driver(&krait_cpufreq_driver);
++ if (ret)
++ dev_err(&pdev->dev, "failed to register driver: %d\n", ret);
++
++ return ret;
++}
++
++static int krait_cpufreq_remove(struct platform_device *pdev)
++{
++ cpufreq_unregister_driver(&krait_cpufreq_driver);
++ return 0;
++}
++
++static struct platform_driver krait_cpufreq_platdrv = {
++ .driver = {
++ .name = "krait-cpufreq",
++ },
++ .probe = krait_cpufreq_probe,
++ .remove = krait_cpufreq_remove,
++};
++module_platform_driver(krait_cpufreq_platdrv);
++
++MODULE_ALIAS("platform:krait-cpufreq");
++MODULE_AUTHOR("Ansuel Smith <ansuelsmth@gmail.com>");
++MODULE_DESCRIPTION("Dedicated Krait SoC cpufreq driver");
++MODULE_LICENSE("GPL");