OMAP: clock: bail out early if arch_clock functions not implemented
Bail out before we take the clockfw_lock spinlock if the corresponding
OMAP1 or OMAP2+ clock function is not defined.  The intention is to
reduce and simplify the work that is done inside the spinlock.

Signed-off-by: Paul Walmsley <paul@pwsan.com>
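Every function touched below follows the same shape: validate the arch_clock hook first, and only then take clockfw_lock for the call itself. A minimal userspace sketch of that pattern, using a pthread mutex in place of clockfw_lock and made-up ops/do_enable/fake_enable names rather than the kernel API:

/*
 * Userspace illustration only: a hypothetical "ops" table stands in for
 * struct clk_functions and a pthread mutex stands in for clockfw_lock.
 */
#include <errno.h>
#include <pthread.h>
#include <stdio.h>

struct ops {
        int (*enable)(int id);          /* may be NULL on some platforms */
};

static struct ops *arch_ops;            /* stand-in for arch_clock */
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static int do_enable(int id)
{
        int ret;

        /* Bail out early: no hook, no reason to take the lock at all. */
        if (!arch_ops || !arch_ops->enable)
                return -EINVAL;

        pthread_mutex_lock(&lock);
        ret = arch_ops->enable(id);     /* only the real work is serialized */
        pthread_mutex_unlock(&lock);

        return ret;
}

static int fake_enable(int id)
{
        printf("enable %d\n", id);
        return 0;
}

int main(void)
{
        struct ops ops = { .enable = fake_enable };

        printf("without ops: %d\n", do_enable(1));      /* -EINVAL path */
        arch_ops = &ops;
        printf("with ops: %d\n", do_enable(1));         /* locked call path */
        return 0;
}

Built with cc -pthread, the first call reports -EINVAL without ever touching the lock; the second serializes only the hook call.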
@@ -37,13 +37,15 @@ static struct clk_functions *arch_clock;
 int clk_enable(struct clk *clk)
 {
         unsigned long flags;
-        int ret = 0;
+        int ret;
 
         if (clk == NULL || IS_ERR(clk))
                 return -EINVAL;
 
+        if (!arch_clock || !arch_clock->clk_enable)
+                return -EINVAL;
+
         spin_lock_irqsave(&clockfw_lock, flags);
-        if (arch_clock->clk_enable)
-                ret = arch_clock->clk_enable(clk);
+        ret = arch_clock->clk_enable(clk);
         spin_unlock_irqrestore(&clockfw_lock, flags);
 
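For readability, clk_enable() after this hunk applies should look roughly like this; the trailing return ret; and closing brace sit outside the hunk's context and are assumed:

int clk_enable(struct clk *clk)
{
        unsigned long flags;
        int ret;

        if (clk == NULL || IS_ERR(clk))
                return -EINVAL;

        /* New early bail-out: nothing to do if the arch hook is missing. */
        if (!arch_clock || !arch_clock->clk_enable)
                return -EINVAL;

        spin_lock_irqsave(&clockfw_lock, flags);
        ret = arch_clock->clk_enable(clk);
        spin_unlock_irqrestore(&clockfw_lock, flags);

        return ret;     /* assumed: outside the hunk shown above */
}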
@@ -58,6 +60,9 @@ void clk_disable(struct clk *clk)
         if (clk == NULL || IS_ERR(clk))
                 return;
 
+        if (!arch_clock || !arch_clock->clk_disable)
+                return;
+
         spin_lock_irqsave(&clockfw_lock, flags);
         if (clk->usecount == 0) {
                 pr_err("Trying disable clock %s with 0 usecount\n",
@@ -66,7 +71,6 @@ void clk_disable(struct clk *clk)
                 goto out;
         }
 
-        if (arch_clock->clk_disable)
-                arch_clock->clk_disable(clk);
+        arch_clock->clk_disable(clk);
 
 out:
@@ -77,7 +81,7 @@ EXPORT_SYMBOL(clk_disable);
 unsigned long clk_get_rate(struct clk *clk)
 {
         unsigned long flags;
-        unsigned long ret = 0;
+        unsigned long ret;
 
         if (clk == NULL || IS_ERR(clk))
                 return 0;
@@ -97,13 +101,15 @@ EXPORT_SYMBOL(clk_get_rate);
 long clk_round_rate(struct clk *clk, unsigned long rate)
 {
         unsigned long flags;
-        long ret = 0;
+        long ret;
 
         if (clk == NULL || IS_ERR(clk))
-                return ret;
+                return 0;
 
+        if (!arch_clock || !arch_clock->clk_round_rate)
+                return 0;
+
         spin_lock_irqsave(&clockfw_lock, flags);
-        if (arch_clock->clk_round_rate)
-                ret = arch_clock->clk_round_rate(clk, rate);
+        ret = arch_clock->clk_round_rate(clk, rate);
         spin_unlock_irqrestore(&clockfw_lock, flags);
 
@@ -119,14 +125,13 @@ int clk_set_rate(struct clk *clk, unsigned long rate)
         if (clk == NULL || IS_ERR(clk))
                 return ret;
 
+        if (!arch_clock || !arch_clock->clk_set_rate)
+                return ret;
+
         spin_lock_irqsave(&clockfw_lock, flags);
-        if (arch_clock->clk_set_rate)
-                ret = arch_clock->clk_set_rate(clk, rate);
-        if (ret == 0) {
-                if (clk->recalc)
-                        clk->rate = clk->recalc(clk);
+        ret = arch_clock->clk_set_rate(clk, rate);
+        if (ret == 0)
                 propagate_rate(clk);
-        }
         spin_unlock_irqrestore(&clockfw_lock, flags);
 
         return ret;
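Pieced together, clk_set_rate() after this hunk keeps only the arch call and propagate_rate() inside the lock; the local declarations at the top are outside the hunk and assumed here (including the -EINVAL initializer):

int clk_set_rate(struct clk *clk, unsigned long rate)
{
        unsigned long flags;            /* assumed: not shown in the hunk */
        int ret = -EINVAL;              /* assumed: not shown in the hunk */

        if (clk == NULL || IS_ERR(clk))
                return ret;

        if (!arch_clock || !arch_clock->clk_set_rate)
                return ret;

        spin_lock_irqsave(&clockfw_lock, flags);
        ret = arch_clock->clk_set_rate(clk, rate);
        if (ret == 0)
                propagate_rate(clk);
        spin_unlock_irqrestore(&clockfw_lock, flags);

        return ret;
}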
@@ -141,15 +146,14 @@ int clk_set_parent(struct clk *clk, struct clk *parent)
         if (clk == NULL || IS_ERR(clk) || parent == NULL || IS_ERR(parent))
                 return ret;
 
+        if (!arch_clock || !arch_clock->clk_set_parent)
+                return ret;
+
         spin_lock_irqsave(&clockfw_lock, flags);
         if (clk->usecount == 0) {
-                if (arch_clock->clk_set_parent)
-                        ret = arch_clock->clk_set_parent(clk, parent);
-                if (ret == 0) {
-                        if (clk->recalc)
-                                clk->rate = clk->recalc(clk);
+                ret = arch_clock->clk_set_parent(clk, parent);
+                if (ret == 0)
                         propagate_rate(clk);
-                }
         } else
                 ret = -EBUSY;
         spin_unlock_irqrestore(&clockfw_lock, flags);
@@ -399,8 +403,10 @@ void clk_init_cpufreq_table(struct cpufreq_frequency_table **table)
 {
         unsigned long flags;
 
+        if (!arch_clock || !arch_clock->clk_init_cpufreq_table)
+                return;
+
         spin_lock_irqsave(&clockfw_lock, flags);
-        if (arch_clock->clk_init_cpufreq_table)
-                arch_clock->clk_init_cpufreq_table(table);
+        arch_clock->clk_init_cpufreq_table(table);
         spin_unlock_irqrestore(&clockfw_lock, flags);
 }
@@ -409,8 +415,10 @@ void clk_exit_cpufreq_table(struct cpufreq_frequency_table **table)
 {
         unsigned long flags;
 
+        if (!arch_clock || !arch_clock->clk_exit_cpufreq_table)
+                return;
+
         spin_lock_irqsave(&clockfw_lock, flags);
-        if (arch_clock->clk_exit_cpufreq_table)
-                arch_clock->clk_exit_cpufreq_table(table);
+        arch_clock->clk_exit_cpufreq_table(table);
         spin_unlock_irqrestore(&clockfw_lock, flags);
 }
@@ -429,6 +437,9 @@ static int __init clk_disable_unused(void)
         struct clk *ck;
         unsigned long flags;
 
+        if (!arch_clock || !arch_clock->clk_disable_unused)
+                return 0;
+
         pr_info("clock: disabling unused clocks to save power\n");
         list_for_each_entry(ck, &clocks, node) {
                 if (ck->ops == &clkops_null)
@@ -438,7 +449,6 @@ static int __init clk_disable_unused(void)
                         continue;
 
                 spin_lock_irqsave(&clockfw_lock, flags);
-                if (arch_clock->clk_disable_unused)
-                        arch_clock->clk_disable_unused(ck);
+                arch_clock->clk_disable_unused(ck);
                 spin_unlock_irqrestore(&clockfw_lock, flags);
         }