#include <linux/of.h>
 #include <linux/device.h>
 #include <linux/init.h>
+#include <linux/pm_runtime.h>
 #include <linux/sched.h>
 #include <linux/clkdev.h>
 
        const struct clk_ops    *ops;
        struct clk_hw           *hw;
        struct module           *owner;
+       struct device           *dev;
        struct clk_core         *parent;
        const char              **parent_names;
        struct clk_core         **parents;
        struct hlist_node clks_node;
 };
 
+/***           runtime pm          ***/
+
+/*
+ * clk_pm_runtime_get - runtime-resume the clock controller device
+ *
+ * Returns 0 on success (or when the clock has no associated device),
+ * a negative errno otherwise.
+ */
+static int clk_pm_runtime_get(struct clk_core *core)
+{
+       int ret;
+
+       if (!core->dev)
+               return 0;
+
+       ret = pm_runtime_get_sync(core->dev);
+       if (ret < 0) {
+               /*
+                * pm_runtime_get_sync() raises the usage counter even on
+                * failure; drop it again so the counter stays balanced and
+                * no runtime PM reference is leaked on the error path.
+                */
+               pm_runtime_put_noidle(core->dev);
+               return ret;
+       }
+
+       return 0;
+}
+
+/* Drop the runtime PM reference taken by clk_pm_runtime_get(). */
+static void clk_pm_runtime_put(struct clk_core *core)
+{
+       if (!core->dev)
+               return;
+
+       pm_runtime_put_sync(core->dev);
+}
+
 /***           locking             ***/
 static void clk_prepare_lock(void)
 {
 
 static bool clk_core_is_prepared(struct clk_core *core)
 {
+       bool ret = false;
+
        /*
         * .is_prepared is optional for clocks that can prepare
         * fall back to software usage counter if it is missing
        if (!core->ops->is_prepared)
                return core->prepare_count;
 
-       return core->ops->is_prepared(core->hw);
+       if (!clk_pm_runtime_get(core)) {
+               ret = core->ops->is_prepared(core->hw);
+               clk_pm_runtime_put(core);
+       }
+
+       return ret;
 }
 
 static bool clk_core_is_enabled(struct clk_core *core)
 {
+       bool ret = false;
+
        /*
         * .is_enabled is only mandatory for clocks that gate
         * fall back to software usage counter if .is_enabled is missing
        if (!core->ops->is_enabled)
                return core->enable_count;
 
-       return core->ops->is_enabled(core->hw);
+       /*
+        * Check if clock controller's device is runtime active before
+        * calling .is_enabled callback. If not, assume that clock is
+        * disabled, because we might be called from atomic context, from
+        * which pm_runtime_get() is not allowed.
+        * This function is called mainly from clk_disable_unused_subtree,
+        * which ensures proper runtime pm activation of controller before
+        * taking enable spinlock, but the below check is needed if one tries
+        * to call it from other places.
+        */
+       if (core->dev) {
+               pm_runtime_get_noresume(core->dev);
+               if (!pm_runtime_active(core->dev)) {
+                       ret = false;
+                       goto done;
+               }
+       }
+
+       ret = core->ops->is_enabled(core->hw);
+done:
+       clk_pm_runtime_put(core);
+
+       return ret;
 }
 
 /***    helper functions   ***/
        if (core->ops->unprepare)
                core->ops->unprepare(core->hw);
 
+       clk_pm_runtime_put(core);
+
        trace_clk_unprepare_complete(core);
        clk_core_unprepare(core->parent);
 }
                return 0;
 
        if (core->prepare_count == 0) {
-               ret = clk_core_prepare(core->parent);
+               ret = clk_pm_runtime_get(core);
                if (ret)
                        return ret;
 
+               ret = clk_core_prepare(core->parent);
+               if (ret)
+                       goto runtime_put;
+
                trace_clk_prepare(core);
 
                if (core->ops->prepare)
 
                trace_clk_prepare_complete(core);
 
-               if (ret) {
-                       clk_core_unprepare(core->parent);
-                       return ret;
-               }
+               if (ret)
+                       goto unprepare;
        }
 
        core->prepare_count++;
 
        return 0;
+unprepare:
+       clk_core_unprepare(core->parent);
+runtime_put:
+       clk_pm_runtime_put(core);
+       return ret;
 }
 
 static int clk_core_prepare_lock(struct clk_core *core)
        if (core->flags & CLK_IGNORE_UNUSED)
                return;
 
+       if (clk_pm_runtime_get(core))
+               return;
+
        if (clk_core_is_prepared(core)) {
                trace_clk_unprepare(core);
                if (core->ops->unprepare_unused)
                        core->ops->unprepare(core->hw);
                trace_clk_unprepare_complete(core);
        }
+
+       clk_pm_runtime_put(core);
 }
 
 static void clk_disable_unused_subtree(struct clk_core *core)
        if (core->flags & CLK_OPS_PARENT_ENABLE)
                clk_core_prepare_enable(core->parent);
 
+       if (clk_pm_runtime_get(core))
+               goto unprepare_out;
+
        flags = clk_enable_lock();
 
        if (core->enable_count)
 
 unlock_out:
        clk_enable_unlock(flags);
+       clk_pm_runtime_put(core);
+unprepare_out:
        if (core->flags & CLK_OPS_PARENT_ENABLE)
                clk_core_disable_unprepare(core->parent);
 }
 static unsigned long clk_recalc(struct clk_core *core,
                                 unsigned long parent_rate)
 {
-       if (core->ops->recalc_rate)
-               return core->ops->recalc_rate(core->hw, parent_rate);
-       return parent_rate;
+       unsigned long rate = parent_rate;
+
+       /*
+        * Runtime-resume the clock controller before invoking the
+        * .recalc_rate callback.  If the resume fails, or .recalc_rate
+        * is not provided, fall back to the parent rate unchanged.
+        */
+       if (core->ops->recalc_rate && !clk_pm_runtime_get(core)) {
+               rate = core->ops->recalc_rate(core->hw, parent_rate);
+               clk_pm_runtime_put(core);
+       }
+       return rate;
 }
 
 /**
 {
        struct clk_core *top, *fail_clk;
        unsigned long rate = req_rate;
+       int ret = 0;
 
        if (!core)
                return 0;
        if (!top)
                return -EINVAL;
 
+       ret = clk_pm_runtime_get(core);
+       if (ret)
+               return ret;
+
        /* notify that we are about to change rates */
        fail_clk = clk_propagate_rate_change(top, PRE_RATE_CHANGE);
        if (fail_clk) {
                pr_debug("%s: failed to set %s rate\n", __func__,
                                fail_clk->name);
                clk_propagate_rate_change(top, ABORT_RATE_CHANGE);
-               return -EBUSY;
+               ret = -EBUSY;
+               goto err;
        }
 
        /* change the rates */
        clk_change_rate(top);
 
        core->req_rate = req_rate;
+err:
+       clk_pm_runtime_put(core);
 
-       return 0;
+       return ret;
 }
 
 /**
                p_rate = parent->rate;
        }
 
+       ret = clk_pm_runtime_get(core);
+       if (ret)
+               goto out;
+
        /* propagate PRE_RATE_CHANGE notifications */
        ret = __clk_speculate_rates(core, p_rate);
 
        /* abort if a driver objects */
        if (ret & NOTIFY_STOP_MASK)
-               goto out;
+               goto runtime_put;
 
        /* do the re-parent */
        ret = __clk_set_parent(core, parent, p_index);
                __clk_recalc_accuracies(core);
        }
 
+runtime_put:
+       clk_pm_runtime_put(core);
 out:
        clk_prepare_unlock();
 
  */
 static int __clk_core_init(struct clk_core *core)
 {
-       int i, ret = 0;
+       int i, ret;
        struct clk_core *orphan;
        struct hlist_node *tmp2;
        unsigned long rate;
 
        clk_prepare_lock();
 
+       ret = clk_pm_runtime_get(core);
+       if (ret)
+               goto unlock;
+
        /* check to see if a clock with this name is already registered */
        if (clk_core_lookup(core->name)) {
                pr_debug("%s: clk %s already initialized\n",
 
        kref_init(&core->ref);
 out:
+       clk_pm_runtime_put(core);
+unlock:
        clk_prepare_unlock();
 
        if (!ret)
                goto fail_name;
        }
        core->ops = hw->init->ops;
+       if (dev && pm_runtime_enabled(dev))
+               core->dev = dev;
        if (dev && dev->driver)
                core->owner = dev->driver->owner;
        core->hw = hw;