4 * Copyright (C) 2012-2013, Guennadi Liakhovetski <g.liakhovetski@gmx.de>
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
11 #include <linux/atomic.h>
12 #include <linux/clk.h>
13 #include <linux/device.h>
14 #include <linux/errno.h>
15 #include <linux/list.h>
16 #include <linux/module.h>
17 #include <linux/mutex.h>
19 #include <linux/slab.h>
20 #include <linux/string.h>
22 #include <media/v4l2-clk.h>
23 #include <media/v4l2-subdev.h>
/*
 * Global registry of registered v4l2 clocks.
 * clk_lock serializes every access to clk_list: registration, lookup,
 * unregistration and use-count updates all take it (see the functions
 * below).
 */
25 static DEFINE_MUTEX(clk_lock);
26 static LIST_HEAD(clk_list);
/*
 * Look up a registered clock on clk_list by its device-id string.
 *
 * Visible behavior: walks clk_list comparing each entry's ->dev_id with
 * strcmp(); falls through to ERR_PTR(-ENODEV) when nothing matches.
 *
 * NOTE(review): this chunk is missing lines (the opening brace, the
 * local declaration of "clk" and the match-return path). Presumably the
 * caller must hold clk_lock while the list is walked — confirm against
 * the full source.
 */
28 static struct v4l2_clk *v4l2_clk_find(const char *dev_id)
32 list_for_each_entry(clk, &clk_list, list)
33 if (!strcmp(dev_id, clk->dev_id))
36 return ERR_PTR(-ENODEV);
/*
 * Acquire a clock for @dev, identified by @id.
 *
 * Order of attempts visible in this fragment:
 *  1. Try the common clock framework via clk_get(). -EPROBE_DEFER is
 *     propagated unchanged so the caller can retry probing later.
 *  2. On CCF success, wrap the CCF clock in a freshly kzalloc'd
 *     struct v4l2_clk (ERR_PTR(-ENOMEM) on allocation failure).
 *  3. Otherwise fall back to the private clk_list under clk_lock:
 *     first keyed by dev_name(dev); if that yields -ENODEV and the
 *     device has an OF node, retry with the name built by
 *     v4l2_clk_name_of() from the full OF node name.
 * A found list entry has its use_count bumped before clk_lock is
 * dropped.
 *
 * NOTE(review): lines are missing from this chunk (local declarations,
 * kfree/error unwinding, the final return) — treat the above as a
 * sketch of the visible statements only.
 */
39 struct v4l2_clk *v4l2_clk_get(struct device *dev, const char *id)
42 struct clk *ccf_clk = clk_get(dev, id);
43 char clk_name[V4L2_CLK_NAME_SIZE];
45 if (PTR_ERR(ccf_clk) == -EPROBE_DEFER)
46 return ERR_PTR(-EPROBE_DEFER);
48 if (!IS_ERR_OR_NULL(ccf_clk)) {
49 clk = kzalloc(sizeof(*clk), GFP_KERNEL);
52 return ERR_PTR(-ENOMEM);
59 mutex_lock(&clk_lock);
60 clk = v4l2_clk_find(dev_name(dev));
62 /* if dev_name is not found, try use the OF name to find again */
63 if (PTR_ERR(clk) == -ENODEV && dev->of_node) {
64 v4l2_clk_name_of(clk_name, sizeof(clk_name),
65 of_node_full_name(dev->of_node));
66 clk = v4l2_clk_find(clk_name);
70 atomic_inc(&clk->use_count);
71 mutex_unlock(&clk_lock);
75 EXPORT_SYMBOL(v4l2_clk_get);
/*
 * Release a reference obtained with v4l2_clk_get().
 *
 * Visible here: walk clk_list under clk_lock and decrement the clock's
 * use_count.
 *
 * NOTE(review): this chunk is missing lines — presumably a NULL/IS_ERR
 * guard, the CCF-wrapped case (clk_put + kfree) and the check that the
 * list walk actually matched @clk before the atomic_dec. Confirm
 * against the full source.
 */
77 void v4l2_clk_put(struct v4l2_clk *clk)
90 mutex_lock(&clk_lock);
92 list_for_each_entry(tmp, &clk_list, list)
94 atomic_dec(&clk->use_count);
96 mutex_unlock(&clk_lock);
98 EXPORT_SYMBOL(v4l2_clk_put);
/*
 * Pin the module providing this clock's ops so the driver cannot be
 * unloaded while the clock is in use.
 *
 * Visible here: scan clk_list under clk_lock and take a module
 * reference with try_module_get(clk->ops->owner); "ret" is non-zero
 * exactly when the module reference could NOT be taken.
 *
 * NOTE(review): the match test inside the loop, the initialization of
 * "ret" and the return statement are missing from this chunk.
 */
100 static int v4l2_clk_lock_driver(struct v4l2_clk *clk)
102 struct v4l2_clk *tmp;
105 mutex_lock(&clk_lock);
107 list_for_each_entry(tmp, &clk_list, list)
109 ret = !try_module_get(clk->ops->owner);
115 mutex_unlock(&clk_lock);
/* Drop the module reference taken by v4l2_clk_lock_driver(). */
120 static void v4l2_clk_unlock_driver(struct v4l2_clk *clk)
122 module_put(clk->ops->owner);
/*
 * Enable the clock.
 *
 * Visible here: a CCF-backed clock (clk->clk) delegates directly to
 * clk_prepare_enable(). Otherwise the providing driver's module is
 * pinned via v4l2_clk_lock_driver(), and under clk->lock the first
 * enable transition (enable count 0 -> 1) invokes ops->enable() when
 * that op is provided.
 *
 * NOTE(review): the guard selecting the CCF path, the unwinding after a
 * failed ops->enable() and the final return are missing from this
 * chunk.
 */
125 int v4l2_clk_enable(struct v4l2_clk *clk)
130 return clk_prepare_enable(clk->clk);
132 ret = v4l2_clk_lock_driver(clk);
136 mutex_lock(&clk->lock);
138 if (++clk->enable == 1 && clk->ops->enable) {
139 ret = clk->ops->enable(clk);
144 mutex_unlock(&clk->lock);
148 EXPORT_SYMBOL(v4l2_clk_enable);
/*
 * Disable the clock — counterpart of v4l2_clk_enable().
 *
 * Visible here: CCF-backed clocks delegate to clk_disable_unprepare().
 * Otherwise the enable count is decremented under clk->lock; WARN fires
 * on an unbalanced disable (count going negative), ops->disable() runs
 * when the count reaches zero, and the driver-module reference taken at
 * enable time is dropped.
 */
151 * You might Oops if you try to disabled a disabled clock, because then the
152 * driver isn't locked and could have been unloaded by now, so, don't do that
154 void v4l2_clk_disable(struct v4l2_clk *clk)
159 return clk_disable_unprepare(clk->clk);
161 mutex_lock(&clk->lock);
163 enable = --clk->enable;
164 if (WARN(enable < 0, "Unbalanced %s() on %s!\n", __func__,
167 else if (!enable && clk->ops->disable)
168 clk->ops->disable(clk);
170 mutex_unlock(&clk->lock);
172 v4l2_clk_unlock_driver(clk);
174 EXPORT_SYMBOL(v4l2_clk_disable);
/*
 * Query the clock's rate in Hz.
 *
 * Visible here: CCF-backed clocks delegate to clk_get_rate(). Otherwise
 * the driver module is pinned, and under clk->lock ops->get_rate() is
 * called; a missing get_rate op is handled specially (the error value
 * assigned in that branch is not visible in this chunk — presumably
 * -ENOSYS; confirm against the full source).
 */
176 unsigned long v4l2_clk_get_rate(struct v4l2_clk *clk)
181 return clk_get_rate(clk->clk);
183 ret = v4l2_clk_lock_driver(clk);
187 mutex_lock(&clk->lock);
188 if (!clk->ops->get_rate)
191 ret = clk->ops->get_rate(clk);
192 mutex_unlock(&clk->lock);
194 v4l2_clk_unlock_driver(clk);
198 EXPORT_SYMBOL(v4l2_clk_get_rate);
/*
 * Set the clock's rate to @rate Hz.
 *
 * Visible here: CCF-backed clocks first clk_round_rate() the request,
 * then delegate to clk_set_rate() with the rounded value. Otherwise the
 * driver module is pinned, and under clk->lock ops->set_rate() is
 * called; a missing set_rate op takes an error branch whose value is
 * not visible in this chunk (presumably -ENOSYS — confirm against the
 * full source).
 */
200 int v4l2_clk_set_rate(struct v4l2_clk *clk, unsigned long rate)
205 long r = clk_round_rate(clk->clk, rate);
208 return clk_set_rate(clk->clk, r);
211 ret = v4l2_clk_lock_driver(clk);
216 mutex_lock(&clk->lock);
217 if (!clk->ops->set_rate)
220 ret = clk->ops->set_rate(clk, rate);
221 mutex_unlock(&clk->lock);
223 v4l2_clk_unlock_driver(clk);
227 EXPORT_SYMBOL(v4l2_clk_set_rate);
/*
 * Register a new v4l2 clock, keyed by @dev_id, backed by @ops.
 *
 * Visible here: validate arguments (ERR_PTR(-EINVAL) on the failing
 * check, whose condition is not visible in this chunk), kzalloc the
 * clock, kstrdup the device-id string, initialize use_count and the
 * per-clock mutex, then — under clk_lock — reject a duplicate dev_id
 * (v4l2_clk_find() succeeding means one is already registered; the
 * error value returned there is not visible) and otherwise append to
 * clk_list.
 *
 * NOTE(review): allocation-failure unwinding for the kstrdup and the
 * final successful return are missing from this chunk.
 */
229 struct v4l2_clk *v4l2_clk_register(const struct v4l2_clk_ops *ops,
233 struct v4l2_clk *clk;
237 return ERR_PTR(-EINVAL);
239 clk = kzalloc(sizeof(struct v4l2_clk), GFP_KERNEL);
241 return ERR_PTR(-ENOMEM);
243 clk->dev_id = kstrdup(dev_id, GFP_KERNEL);
250 atomic_set(&clk->use_count, 0);
251 mutex_init(&clk->lock);
253 mutex_lock(&clk_lock);
254 if (!IS_ERR(v4l2_clk_find(dev_id))) {
255 mutex_unlock(&clk_lock);
259 list_add_tail(&clk->list, &clk_list);
260 mutex_unlock(&clk_lock);
270 EXPORT_SYMBOL(v4l2_clk_register);
/*
 * Unregister a clock previously added with v4l2_clk_register().
 *
 * Visible here: WARN and refuse when the clock still has outstanding
 * references (use_count non-zero), otherwise unlink it from clk_list
 * under clk_lock.
 *
 * NOTE(review): the freeing of clk->dev_id and clk itself is not
 * visible in this chunk — confirm against the full source.
 */
272 void v4l2_clk_unregister(struct v4l2_clk *clk)
274 if (WARN(atomic_read(&clk->use_count),
275 "%s(): Refusing to unregister ref-counted %s clock!\n",
276 __func__, clk->dev_id))
279 mutex_lock(&clk_lock);
280 list_del(&clk->list);
281 mutex_unlock(&clk_lock);
286 EXPORT_SYMBOL(v4l2_clk_unregister);
/*
 * Private payload for a fixed-rate clock: embeds the ops table used to
 * register it. NOTE(review): the rate field itself is not visible in
 * this chunk.
 */
288 struct v4l2_clk_fixed {
290 struct v4l2_clk_ops ops;
/*
 * get_rate op for fixed clocks: recovers the v4l2_clk_fixed payload
 * from clk->priv. The return statement is missing from this chunk —
 * presumably it returns the stored fixed rate; confirm against the
 * full source.
 */
293 static unsigned long fixed_get_rate(struct v4l2_clk *clk)
295 struct v4l2_clk_fixed *priv = clk->priv;
/*
 * Register a fixed-rate clock owned by @owner under @dev_id.
 *
 * Visible here: kzalloc a v4l2_clk_fixed payload (ERR_PTR(-ENOMEM) on
 * failure), point its ops at fixed_get_rate plus the owner module, and
 * hand it to v4l2_clk_register() with the payload as priv.
 *
 * NOTE(review): storing @rate into the payload and the
 * cleanup-on-registration-failure path are missing from this chunk.
 */
299 struct v4l2_clk *__v4l2_clk_register_fixed(const char *dev_id,
300 unsigned long rate, struct module *owner)
302 struct v4l2_clk *clk;
303 struct v4l2_clk_fixed *priv = kzalloc(sizeof(*priv), GFP_KERNEL);
306 return ERR_PTR(-ENOMEM);
309 priv->ops.get_rate = fixed_get_rate;
310 priv->ops.owner = owner;
312 clk = v4l2_clk_register(&priv->ops, dev_id, priv);
318 EXPORT_SYMBOL(__v4l2_clk_register_fixed);
/*
 * Unregister a fixed-rate clock created by __v4l2_clk_register_fixed().
 * Visible here: delegates to v4l2_clk_unregister(). NOTE(review): the
 * kfree of the v4l2_clk_fixed payload is not visible in this chunk —
 * confirm against the full source.
 */
320 void v4l2_clk_unregister_fixed(struct v4l2_clk *clk)
323 v4l2_clk_unregister(clk);
325 EXPORT_SYMBOL(v4l2_clk_unregister_fixed);