clk: get matching entry under lock in of_clk_init()
index 54a191c5bbf0e3f4c1b23807c12a59cdb0d74031..2db08c01ef51d9de9b618282ba55134d97dfabe4 100644
@@ -559,6 +559,19 @@ struct clk *__clk_get_parent(struct clk *clk)
        return !clk ? NULL : clk->parent;
 }
 
+struct clk *clk_get_parent_by_index(struct clk *clk, u8 index)
+{
+       if (!clk || index >= clk->num_parents)
+               return NULL;
+       else if (!clk->parents)
+               return __clk_lookup(clk->parent_names[index]);
+       else if (!clk->parents[index])
+               return clk->parents[index] =
+                       __clk_lookup(clk->parent_names[index]);
+       else
+               return clk->parents[index];
+}
+
 unsigned int __clk_get_enable_count(struct clk *clk)
 {
        return !clk ? 0 : clk->enable_count;
@@ -679,6 +692,55 @@ struct clk *__clk_lookup(const char *name)
        return NULL;
 }
 
+/*
+ * Helper for finding best parent to provide a given frequency. This can be used
+ * directly as a determine_rate callback (e.g. for a mux), or from a more
+ * complex clock that may combine a mux with other operations.
+ */
+long __clk_mux_determine_rate(struct clk_hw *hw, unsigned long rate,
+                             unsigned long *best_parent_rate,
+                             struct clk **best_parent_p)
+{
+       struct clk *clk = hw->clk, *parent, *best_parent = NULL;
+       int i, num_parents;
+       unsigned long parent_rate, best = 0;
+
+       /* if NO_REPARENT flag set, pass through to current parent */
+       if (clk->flags & CLK_SET_RATE_NO_REPARENT) {
+               parent = clk->parent;
+               if (clk->flags & CLK_SET_RATE_PARENT)
+                       best = __clk_round_rate(parent, rate);
+               else if (parent)
+                       best = __clk_get_rate(parent);
+               else
+                       best = __clk_get_rate(clk);
+               goto out;
+       }
+
+       /* find the parent that can provide the fastest rate <= rate */
+       num_parents = clk->num_parents;
+       for (i = 0; i < num_parents; i++) {
+               parent = clk_get_parent_by_index(clk, i);
+               if (!parent)
+                       continue;
+               if (clk->flags & CLK_SET_RATE_PARENT)
+                       parent_rate = __clk_round_rate(parent, rate);
+               else
+                       parent_rate = __clk_get_rate(parent);
+               if (parent_rate <= rate && parent_rate > best) {
+                       best_parent = parent;
+                       best = parent_rate;
+               }
+       }
+
+out:
+       if (best_parent)
+               *best_parent_p = best_parent;
+       *best_parent_rate = best;
+
+       return best;
+}
+
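
For context, a mux-style provider would typically wire this new helper in as its .determine_rate callback. A minimal sketch (illustrative only, not part of this diff; example_mux_get_parent/example_mux_set_parent are hypothetical driver callbacks):

#include <linux/clk-provider.h>

static u8 example_mux_get_parent(struct clk_hw *hw);		/* hypothetical */
static int example_mux_set_parent(struct clk_hw *hw, u8 index);	/* hypothetical */

static const struct clk_ops example_mux_ops = {
	.get_parent	= example_mux_get_parent,
	.set_parent	= example_mux_set_parent,
	.determine_rate	= __clk_mux_determine_rate,	/* generic helper added above */
};
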
 /***        clk api        ***/
 
 void __clk_unprepare(struct clk *clk)
@@ -702,7 +764,7 @@ void __clk_unprepare(struct clk *clk)
 
 /**
  * clk_unprepare - undo preparation of a clock source
- * @clk: the clk being unprepare
+ * @clk: the clk being unprepared
  *
  * clk_unprepare may sleep, which differentiates it from clk_disable.  In a
  * simple case, clk_unprepare can be used instead of clk_disable to gate a clk
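
A typical consumer pairing that follows this rule is sketched below (illustrative only; my_clk is a placeholder). clk_prepare()/clk_unprepare() may sleep, while clk_enable()/clk_disable() are safe in atomic context:

#include <linux/clk.h>

static int example_gate_on_off(struct clk *my_clk)
{
	int ret;

	ret = clk_prepare(my_clk);	/* may sleep */
	if (ret)
		return ret;

	ret = clk_enable(my_clk);	/* safe in atomic context */
	if (ret) {
		clk_unprepare(my_clk);
		return ret;
	}

	/* ... clocked work ... */

	clk_disable(my_clk);		/* safe in atomic context */
	clk_unprepare(my_clk);		/* may sleep */
	return 0;
}
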
@@ -869,27 +931,31 @@ EXPORT_SYMBOL_GPL(clk_enable);
 /**
  * __clk_round_rate - round the given rate for a clk
  * @clk: round the rate of this clock
+ * @rate: the rate which is to be rounded
  *
  * Caller must hold prepare_lock.  Useful for clk_ops such as .set_rate
  */
 unsigned long __clk_round_rate(struct clk *clk, unsigned long rate)
 {
        unsigned long parent_rate = 0;
+       struct clk *parent;
 
        if (!clk)
                return 0;
 
-       if (!clk->ops->round_rate) {
-               if (clk->flags & CLK_SET_RATE_PARENT)
-                       return __clk_round_rate(clk->parent, rate);
-               else
-                       return clk->rate;
-       }
-
-       if (clk->parent)
-               parent_rate = clk->parent->rate;
-
-       return clk->ops->round_rate(clk->hw, rate, &parent_rate);
+       parent = clk->parent;
+       if (parent)
+               parent_rate = parent->rate;
+
+       if (clk->ops->determine_rate)
+               return clk->ops->determine_rate(clk->hw, rate, &parent_rate,
+                                               &parent);
+       else if (clk->ops->round_rate)
+               return clk->ops->round_rate(clk->hw, rate, &parent_rate);
+       else if (clk->flags & CLK_SET_RATE_PARENT)
+               return __clk_round_rate(clk->parent, rate);
+       else
+               return clk->rate;
 }
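
The precedence introduced here (.determine_rate, then .round_rate, then parent pass-through) maps onto the callbacks a provider implements. A minimal .round_rate sketch that requests upstream propagation (illustrative only, not part of this diff):

#include <linux/clk-provider.h>

/* Fixed divide-by-two: ask for twice the requested rate upstream and
 * report half of whatever parent rate results.  The rewritten
 * *parent_rate is only acted on when CLK_SET_RATE_PARENT is set. */
static long example_div2_round_rate(struct clk_hw *hw, unsigned long rate,
				    unsigned long *parent_rate)
{
	*parent_rate = rate * 2;
	return *parent_rate / 2;
}
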
 
 /**
@@ -956,7 +1022,7 @@ static int __clk_notify(struct clk *clk, unsigned long msg,
  *
  * Walks the subtree of clks starting with clk and recalculates rates as it
  * goes.  Note that if a clk does not implement the .recalc_rate callback then
- * it is assumed that the clock will take on the rate of it's parent.
+ * it is assumed that the clock will take on the rate of its parent.
  *
  * clk_recalc_rates also propagates the POST_RATE_CHANGE notification,
  * if necessary.
@@ -1014,6 +1080,114 @@ unsigned long clk_get_rate(struct clk *clk)
 }
 EXPORT_SYMBOL_GPL(clk_get_rate);
 
+static u8 clk_fetch_parent_index(struct clk *clk, struct clk *parent)
+{
+       u8 i;
+
+       if (!clk->parents)
+               clk->parents = kzalloc((sizeof(struct clk*) * clk->num_parents),
+                                                               GFP_KERNEL);
+
+       /*
+        * find index of new parent clock using cached parent ptrs,
+        * or if not yet cached, use string name comparison and cache
+        * them now to avoid future calls to __clk_lookup.
+        */
+       for (i = 0; i < clk->num_parents; i++) {
+               if (clk->parents && clk->parents[i] == parent)
+                       break;
+               else if (!strcmp(clk->parent_names[i], parent->name)) {
+                       if (clk->parents)
+                               clk->parents[i] = __clk_lookup(parent->name);
+                       break;
+               }
+       }
+
+       return i;
+}
+
+static void clk_reparent(struct clk *clk, struct clk *new_parent)
+{
+       /* avoid duplicate POST_RATE_CHANGE notifications */
+       if (new_parent->new_child == clk)
+               new_parent->new_child = NULL;
+
+       hlist_del(&clk->child_node);
+
+       if (new_parent)
+               hlist_add_head(&clk->child_node, &new_parent->children);
+       else
+               hlist_add_head(&clk->child_node, &clk_orphan_list);
+
+       clk->parent = new_parent;
+}
+
+static int __clk_set_parent(struct clk *clk, struct clk *parent, u8 p_index)
+{
+       unsigned long flags;
+       int ret = 0;
+       struct clk *old_parent = clk->parent;
+
+       /*
+        * Migrate prepare state between parents and prevent race with
+        * clk_enable().
+        *
+        * If the clock is not prepared, then a race with
+        * clk_enable/disable() is impossible since we already have the
+        * prepare lock (future calls to clk_enable() need to be preceded by
+        * a clk_prepare()).
+        *
+        * If the clock is prepared, migrate the prepared state to the new
+        * parent and also protect against a race with clk_enable() by
+        * forcing the clock and the new parent on.  This ensures that all
+        * future calls to clk_enable() are practically NOPs with respect to
+        * hardware and software states.
+        *
+        * See also: Comment for clk_set_parent() below.
+        */
+       if (clk->prepare_count) {
+               __clk_prepare(parent);
+               clk_enable(parent);
+               clk_enable(clk);
+       }
+
+       /* update the clk tree topology */
+       flags = clk_enable_lock();
+       clk_reparent(clk, parent);
+       clk_enable_unlock(flags);
+
+       /* change clock input source */
+       if (parent && clk->ops->set_parent)
+               ret = clk->ops->set_parent(clk->hw, p_index);
+
+       if (ret) {
+               flags = clk_enable_lock();
+               clk_reparent(clk, old_parent);
+               clk_enable_unlock(flags);
+
+               if (clk->prepare_count) {
+                       clk_disable(clk);
+                       clk_disable(parent);
+                       __clk_unprepare(parent);
+               }
+               return ret;
+       }
+
+       /*
+        * Finish the migration of prepare state and undo the changes done
+        * for preventing a race with clk_enable().
+        */
+       if (clk->prepare_count) {
+               clk_disable(clk);
+               clk_disable(old_parent);
+               __clk_unprepare(old_parent);
+       }
+
+       /* update debugfs with new clk tree topology */
+       clk_debug_reparent(clk, parent);
+       return 0;
+}
+
 /**
  * __clk_speculate_rates
  * @clk: first clk in the subtree
@@ -1026,7 +1200,7 @@ EXPORT_SYMBOL_GPL(clk_get_rate);
  * pre-rate change notifications and returns early if no clks in the
  * subtree have subscribed to the notifications.  Note that if a clk does not
  * implement the .recalc_rate callback then it is assumed that the clock will
- * take on the rate of it's parent.
+ * take on the rate of its parent.
  *
  * Caller must hold prepare_lock.
  */
@@ -1058,18 +1232,25 @@ out:
        return ret;
 }
 
-static void clk_calc_subtree(struct clk *clk, unsigned long new_rate)
+static void clk_calc_subtree(struct clk *clk, unsigned long new_rate,
+                            struct clk *new_parent, u8 p_index)
 {
        struct clk *child;
 
        clk->new_rate = new_rate;
+       clk->new_parent = new_parent;
+       clk->new_parent_index = p_index;
+       /* include clk in new parent's PRE_RATE_CHANGE notifications */
+       clk->new_child = NULL;
+       if (new_parent && new_parent != clk->parent)
+               new_parent->new_child = clk;
 
        hlist_for_each_entry(child, &clk->children, child_node) {
                if (child->ops->recalc_rate)
                        child->new_rate = child->ops->recalc_rate(child->hw, new_rate);
                else
                        child->new_rate = new_rate;
-               clk_calc_subtree(child, child->new_rate);
+               clk_calc_subtree(child, child->new_rate, NULL, 0);
        }
 }
 
@@ -1080,50 +1261,63 @@ static void clk_calc_subtree(struct clk *clk, unsigned long new_rate)
 static struct clk *clk_calc_new_rates(struct clk *clk, unsigned long rate)
 {
        struct clk *top = clk;
+       struct clk *old_parent, *parent;
        unsigned long best_parent_rate = 0;
        unsigned long new_rate;
+       u8 p_index = 0;
 
        /* sanity */
        if (IS_ERR_OR_NULL(clk))
                return NULL;
 
        /* save parent rate, if it exists */
-       if (clk->parent)
-               best_parent_rate = clk->parent->rate;
-
-       /* never propagate up to the parent */
-       if (!(clk->flags & CLK_SET_RATE_PARENT)) {
-               if (!clk->ops->round_rate) {
-                       clk->new_rate = clk->rate;
-                       return NULL;
-               }
-               new_rate = clk->ops->round_rate(clk->hw, rate, &best_parent_rate);
+       parent = old_parent = clk->parent;
+       if (parent)
+               best_parent_rate = parent->rate;
+
+       /* find the closest rate and parent clk/rate */
+       if (clk->ops->determine_rate) {
+               new_rate = clk->ops->determine_rate(clk->hw, rate,
+                                                   &best_parent_rate,
+                                                   &parent);
+       } else if (clk->ops->round_rate) {
+               new_rate = clk->ops->round_rate(clk->hw, rate,
+                                               &best_parent_rate);
+       } else if (!parent || !(clk->flags & CLK_SET_RATE_PARENT)) {
+               /* pass-through clock without adjustable parent */
+               clk->new_rate = clk->rate;
+               return NULL;
+       } else {
+               /* pass-through clock with adjustable parent */
+               top = clk_calc_new_rates(parent, rate);
+               new_rate = parent->new_rate;
                goto out;
        }
 
-       /* need clk->parent from here on out */
-       if (!clk->parent) {
-               pr_debug("%s: %s has NULL parent\n", __func__, clk->name);
+       /* some clocks must be gated to change parent */
+       if (parent != old_parent &&
+           (clk->flags & CLK_SET_PARENT_GATE) && clk->prepare_count) {
+               pr_debug("%s: %s not gated but wants to reparent\n",
+                        __func__, clk->name);
                return NULL;
        }
 
-       if (!clk->ops->round_rate) {
-               top = clk_calc_new_rates(clk->parent, rate);
-               new_rate = clk->parent->new_rate;
-
-               goto out;
+       /* try finding the new parent index */
+       if (parent) {
+               p_index = clk_fetch_parent_index(clk, parent);
+               if (p_index == clk->num_parents) {
+                       pr_debug("%s: clk %s can not be parent of clk %s\n",
+                                __func__, parent->name, clk->name);
+                       return NULL;
+               }
        }
 
-       new_rate = clk->ops->round_rate(clk->hw, rate, &best_parent_rate);
-
-       if (best_parent_rate != clk->parent->rate) {
-               top = clk_calc_new_rates(clk->parent, best_parent_rate);
-
-               goto out;
-       }
+       if ((clk->flags & CLK_SET_RATE_PARENT) && parent &&
+           best_parent_rate != parent->rate)
+               top = clk_calc_new_rates(parent, best_parent_rate);
 
 out:
-       clk_calc_subtree(clk, new_rate);
+       clk_calc_subtree(clk, new_rate, parent, p_index);
 
        return top;
 }
@@ -1135,7 +1329,7 @@ out:
  */
 static struct clk *clk_propagate_rate_change(struct clk *clk, unsigned long event)
 {
-       struct clk *child, *fail_clk = NULL;
+       struct clk *child, *tmp_clk, *fail_clk = NULL;
        int ret = NOTIFY_DONE;
 
        if (clk->rate == clk->new_rate)
@@ -1148,9 +1342,19 @@ static struct clk *clk_propagate_rate_change(struct clk *clk, unsigned long even
        }
 
        hlist_for_each_entry(child, &clk->children, child_node) {
-               clk = clk_propagate_rate_change(child, event);
-               if (clk)
-                       fail_clk = clk;
+               /* Skip children who will be reparented to another clock */
+               if (child->new_parent && child->new_parent != clk)
+                       continue;
+               tmp_clk = clk_propagate_rate_change(child, event);
+               if (tmp_clk)
+                       fail_clk = tmp_clk;
+       }
+
+       /* handle the new child who might not be in clk->children yet */
+       if (clk->new_child) {
+               tmp_clk = clk_propagate_rate_change(clk->new_child, event);
+               if (tmp_clk)
+                       fail_clk = tmp_clk;
        }
 
        return fail_clk;
@@ -1168,6 +1372,10 @@ static void clk_change_rate(struct clk *clk)
 
        old_rate = clk->rate;
 
+       /* set parent */
+       if (clk->new_parent && clk->new_parent != clk->parent)
+               __clk_set_parent(clk, clk->new_parent, clk->new_parent_index);
+
        if (clk->parent)
                best_parent_rate = clk->parent->rate;
 
@@ -1182,8 +1390,16 @@ static void clk_change_rate(struct clk *clk)
        if (clk->notifier_count && old_rate != clk->rate)
                __clk_notify(clk, POST_RATE_CHANGE, old_rate, clk->rate);
 
-       hlist_for_each_entry(child, &clk->children, child_node)
+       hlist_for_each_entry(child, &clk->children, child_node) {
+               /* Skip children who will be reparented to another clock */
+               if (child->new_parent && child->new_parent != clk)
+                       continue;
                clk_change_rate(child);
+       }
+
+       /* handle the new child who might not be in clk->children yet */
+       if (clk->new_child)
+               clk_change_rate(clk->new_child);
 }
 
 /**
@@ -1198,7 +1414,7 @@ static void clk_change_rate(struct clk *clk)
  * outcome of clk's .round_rate implementation.  If *parent_rate is unchanged
  * after calling .round_rate then upstream parent propagation is ignored.  If
  * *parent_rate comes back with a new rate for clk's parent then we propagate
- * up to clk's parent and set it's rate.  Upward propagation will continue
+ * up to clk's parent and set its rate.  Upward propagation will continue
  * until either a clk does not support the CLK_SET_RATE_PARENT flag or
  * .round_rate stops requesting changes to clk's parent_rate.
  *
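
From the consumer side this propagation is transparent: a caller simply requests a rate and checks the result. A hedged sketch, with placeholder names:

#include <linux/clk.h>
#include <linux/device.h>

static int example_set_uart_rate(struct device *dev, struct clk *uart_clk)
{
	int ret;

	/* The framework rounds the request and, where CLK_SET_RATE_PARENT
	 * is set on the affected clocks, may walk up the tree as described
	 * above. */
	ret = clk_set_rate(uart_clk, 48000000);
	if (ret)
		dev_warn(dev, "failed to set uart clock: %d\n", ret);

	return ret;
}
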
@@ -1212,6 +1428,9 @@ int clk_set_rate(struct clk *clk, unsigned long rate)
        struct clk *top, *fail_clk;
        int ret = 0;
 
+       if (!clk)
+               return 0;
+
        /* prevent racing with updates to the clock topology */
        clk_prepare_lock();
 
@@ -1315,30 +1534,12 @@ static struct clk *__clk_init_parent(struct clk *clk)
                        kzalloc((sizeof(struct clk*) * clk->num_parents),
                                        GFP_KERNEL);
 
-       if (!clk->parents)
-               ret = __clk_lookup(clk->parent_names[index]);
-       else if (!clk->parents[index])
-               ret = clk->parents[index] =
-                       __clk_lookup(clk->parent_names[index]);
-       else
-               ret = clk->parents[index];
+       ret = clk_get_parent_by_index(clk, index);
 
 out:
        return ret;
 }
 
-static void clk_reparent(struct clk *clk, struct clk *new_parent)
-{
-       hlist_del(&clk->child_node);
-
-       if (new_parent)
-               hlist_add_head(&clk->child_node, &new_parent->children);
-       else
-               hlist_add_head(&clk->child_node, &clk_orphan_list);
-
-       clk->parent = new_parent;
-}
-
 void __clk_reparent(struct clk *clk, struct clk *new_parent)
 {
        clk_reparent(clk, new_parent);
@@ -1346,98 +1547,6 @@ void __clk_reparent(struct clk *clk, struct clk *new_parent)
        __clk_recalc_rates(clk, POST_RATE_CHANGE);
 }
 
-static u8 clk_fetch_parent_index(struct clk *clk, struct clk *parent)
-{
-       u8 i;
-
-       if (!clk->parents)
-               clk->parents = kzalloc((sizeof(struct clk*) * clk->num_parents),
-                                                               GFP_KERNEL);
-
-       /*
-        * find index of new parent clock using cached parent ptrs,
-        * or if not yet cached, use string name comparison and cache
-        * them now to avoid future calls to __clk_lookup.
-        */
-       for (i = 0; i < clk->num_parents; i++) {
-               if (clk->parents && clk->parents[i] == parent)
-                       break;
-               else if (!strcmp(clk->parent_names[i], parent->name)) {
-                       if (clk->parents)
-                               clk->parents[i] = __clk_lookup(parent->name);
-                       break;
-               }
-       }
-
-       return i;
-}
-
-static int __clk_set_parent(struct clk *clk, struct clk *parent, u8 p_index)
-{
-       unsigned long flags;
-       int ret = 0;
-       struct clk *old_parent = clk->parent;
-
-       /*
-        * Migrate prepare state between parents and prevent race with
-        * clk_enable().
-        *
-        * If the clock is not prepared, then a race with
-        * clk_enable/disable() is impossible since we already have the
-        * prepare lock (future calls to clk_enable() need to be preceded by
-        * a clk_prepare()).
-        *
-        * If the clock is prepared, migrate the prepared state to the new
-        * parent and also protect against a race with clk_enable() by
-        * forcing the clock and the new parent on.  This ensures that all
-        * future calls to clk_enable() are practically NOPs with respect to
-        * hardware and software states.
-        *
-        * See also: Comment for clk_set_parent() below.
-        */
-       if (clk->prepare_count) {
-               __clk_prepare(parent);
-               clk_enable(parent);
-               clk_enable(clk);
-       }
-
-       /* update the clk tree topology */
-       flags = clk_enable_lock();
-       clk_reparent(clk, parent);
-       clk_enable_unlock(flags);
-
-       /* change clock input source */
-       if (parent && clk->ops->set_parent)
-               ret = clk->ops->set_parent(clk->hw, p_index);
-
-       if (ret) {
-               flags = clk_enable_lock();
-               clk_reparent(clk, old_parent);
-               clk_enable_unlock(flags);
-
-               if (clk->prepare_count) {
-                       clk_disable(clk);
-                       clk_disable(parent);
-                       __clk_unprepare(parent);
-               }
-               return ret;
-       }
-
-       /*
-        * Finish the migration of prepare state and undo the changes done
-        * for preventing a race with clk_enable().
-        */
-       if (clk->prepare_count) {
-               clk_disable(clk);
-               clk_disable(old_parent);
-               __clk_unprepare(old_parent);
-       }
-
-       /* update debugfs with new clk tree topology */
-       clk_debug_reparent(clk, parent);
-       return 0;
-}
-
 /**
  * clk_set_parent - switch the parent of a mux clk
  * @clk: the mux clk whose input we are switching
@@ -1461,7 +1570,10 @@ int clk_set_parent(struct clk *clk, struct clk *parent)
        u8 p_index = 0;
        unsigned long p_rate = 0;
 
-       if (!clk || !clk->ops)
+       if (!clk)
+               return 0;
+
+       if (!clk->ops)
                return -EINVAL;
 
        /* verify ops for multi-parent clks */
@@ -1544,8 +1656,9 @@ int __clk_init(struct device *dev, struct clk *clk)
 
        /* check that clk_ops are sane.  See Documentation/clk.txt */
        if (clk->ops->set_rate &&
-                       !(clk->ops->round_rate && clk->ops->recalc_rate)) {
-               pr_warning("%s: %s must implement .round_rate & .recalc_rate\n",
+           !((clk->ops->round_rate || clk->ops->determine_rate) &&
+             clk->ops->recalc_rate)) {
+               pr_warning("%s: %s must implement .round_rate or .determine_rate in addition to .recalc_rate\n",
                                __func__, clk->name);
                ret = -EINVAL;
                goto out;
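
A set of ops that passes this check pairs .set_rate with .recalc_rate and at least one of .round_rate / .determine_rate, for example (illustrative names, not part of this diff):

static unsigned long example_recalc_rate(struct clk_hw *hw,
					 unsigned long parent_rate);	/* hypothetical */
static long example_round_rate(struct clk_hw *hw, unsigned long rate,
			       unsigned long *parent_rate);		/* hypothetical */
static int example_set_rate(struct clk_hw *hw, unsigned long rate,
			    unsigned long parent_rate);			/* hypothetical */

static const struct clk_ops example_rate_ops = {
	.recalc_rate	= example_recalc_rate,
	.round_rate	= example_round_rate,	/* or .determine_rate */
	.set_rate	= example_set_rate,
};
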
@@ -1648,7 +1761,7 @@ int __clk_init(struct device *dev, struct clk *clk)
         * The .init callback is not used by any of the basic clock types, but
         * exists for weird hardware that must perform initialization magic.
         * Please consider other ways of solving initialization problems before
-        * using this callback, as it's use is discouraged.
+        * using this callback, as its use is discouraged.
         */
        if (clk->ops->init)
                clk->ops->init(clk->hw);
@@ -1675,7 +1788,7 @@ out:
  * very large numbers of clocks that need to be statically initialized.  It is
  * a layering violation to include clk-private.h from any code which implements
  * a clock's .ops; as such any statically initialized clock data MUST be in a
- * separate C file from the logic that implements it's operations.  Returns 0
+ * separate C file from the logic that implements its operations.  Returns 0
  * on success, otherwise an error code.
  */
 struct clk *__clk_register(struct device *dev, struct clk_hw *hw)
@@ -2115,13 +2228,13 @@ EXPORT_SYMBOL_GPL(of_clk_get_parent_name);
  */
 void __init of_clk_init(const struct of_device_id *matches)
 {
+       const struct of_device_id *match;
        struct device_node *np;
 
        if (!matches)
                matches = __clk_of_table;
 
-       for_each_matching_node(np, matches) {
-               const struct of_device_id *match = of_match_node(matches, np);
+       for_each_matching_node_and_match(np, matches, &match) {
                of_clk_init_cb_t clk_init_cb = match->data;
                clk_init_cb(np);
        }
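
Providers land in __clk_of_table (the default matches table used here) via CLK_OF_DECLARE. A hedged sketch of such a registration, with placeholder names and compatible string:

#include <linux/clk-provider.h>
#include <linux/of.h>

static void __init example_osc_setup(struct device_node *np)
{
	/* Parse properties and register the clock; the details are
	 * provider specific and omitted in this sketch. */
}
CLK_OF_DECLARE(example_osc, "vendor,example-osc", example_osc_setup);
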