kernel_optimize_test/include/linux/pm_runtime.h
commit 6aa7de0591 by Mark Rutland
locking/atomics: COCCINELLE/treewide: Convert trivial ACCESS_ONCE() patterns to READ_ONCE()/WRITE_ONCE()
Please do not apply this to mainline directly; instead, please re-run the
coccinelle script shown below and apply its output.

For several reasons, it is desirable to use {READ,WRITE}_ONCE() in
preference to ACCESS_ONCE(), and new code is expected to use one of the
former. So far, there's been no reason to change most existing uses of
ACCESS_ONCE(), as these aren't harmful, and changing them results in
churn.

However, for some features, the read/write distinction is critical to
correct operation. To distinguish these cases, separate read/write
accessors must be used. This patch migrates (most) remaining
ACCESS_ONCE() instances to {READ,WRITE}_ONCE(), using the following
coccinelle script:

----
// Convert trivial ACCESS_ONCE() uses to equivalent READ_ONCE() and
// WRITE_ONCE()

// $ make coccicheck COCCI=/home/mark/once.cocci SPFLAGS="--include-headers" MODE=patch

virtual patch

@ depends on patch @
expression E1, E2;
@@

- ACCESS_ONCE(E1) = E2
+ WRITE_ONCE(E1, E2)

@ depends on patch @
expression E;
@@

- ACCESS_ONCE(E)
+ READ_ONCE(E)
----
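
Applied to this header, the write-side rule above amounts to rewriting the
store in pm_runtime_mark_last_busy(); an illustrative (reconstructed) diff of
that hunk:

----
 static inline void pm_runtime_mark_last_busy(struct device *dev)
 {
-	ACCESS_ONCE(dev->power.last_busy) = jiffies;
+	WRITE_ONCE(dev->power.last_busy, jiffies);
 }
----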

Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: davem@davemloft.net
Cc: linux-arch@vger.kernel.org
Cc: mpe@ellerman.id.au
Cc: shuah@kernel.org
Cc: snitzer@redhat.com
Cc: thor.thayer@linux.intel.com
Cc: tj@kernel.org
Cc: viro@zeniv.linux.org.uk
Cc: will.deacon@arm.com
Link: http://lkml.kernel.org/r/1508792849-3115-19-git-send-email-paulmck@linux.vnet.ibm.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Committed: 2017-10-25 11:01:08 +02:00

/*
 * pm_runtime.h - Device run-time power management helper functions.
 *
 * Copyright (C) 2009 Rafael J. Wysocki <rjw@sisk.pl>
 *
 * This file is released under the GPLv2.
 */

#ifndef _LINUX_PM_RUNTIME_H
#define _LINUX_PM_RUNTIME_H

#include <linux/device.h>
#include <linux/notifier.h>
#include <linux/pm.h>

#include <linux/jiffies.h>

/* Runtime PM flag argument bits */
#define RPM_ASYNC		0x01	/* Request is asynchronous */
#define RPM_NOWAIT		0x02	/* Don't wait for concurrent
					   state change */
#define RPM_GET_PUT		0x04	/* Increment/decrement the
					   usage_count */
#define RPM_AUTO		0x08	/* Use autosuspend_delay */
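
/*
 * The helper wrappers at the bottom of this file OR these bits together
 * before calling __pm_runtime_idle/suspend/resume(); e.g. pm_runtime_get()
 * passes RPM_GET_PUT | RPM_ASYNC, and pm_runtime_put_autosuspend() passes
 * RPM_GET_PUT | RPM_ASYNC | RPM_AUTO.
 */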
#ifdef CONFIG_PM

extern struct workqueue_struct *pm_wq;

static inline bool queue_pm_work(struct work_struct *work)
{
	return queue_work(pm_wq, work);
}

extern int pm_generic_runtime_suspend(struct device *dev);
extern int pm_generic_runtime_resume(struct device *dev);
extern int pm_runtime_force_suspend(struct device *dev);
extern int pm_runtime_force_resume(struct device *dev);
extern int __pm_runtime_idle(struct device *dev, int rpmflags);
extern int __pm_runtime_suspend(struct device *dev, int rpmflags);
extern int __pm_runtime_resume(struct device *dev, int rpmflags);
extern int pm_runtime_get_if_in_use(struct device *dev);
extern int pm_schedule_suspend(struct device *dev, unsigned int delay);
extern int __pm_runtime_set_status(struct device *dev, unsigned int status);
extern int pm_runtime_barrier(struct device *dev);
extern void pm_runtime_enable(struct device *dev);
extern void __pm_runtime_disable(struct device *dev, bool check_resume);
extern void pm_runtime_allow(struct device *dev);
extern void pm_runtime_forbid(struct device *dev);
extern void pm_runtime_no_callbacks(struct device *dev);
extern void pm_runtime_irq_safe(struct device *dev);
extern void __pm_runtime_use_autosuspend(struct device *dev, bool use);
extern void pm_runtime_set_autosuspend_delay(struct device *dev, int delay);
extern unsigned long pm_runtime_autosuspend_expiration(struct device *dev);
extern void pm_runtime_update_max_time_suspended(struct device *dev,
						 s64 delta_ns);
extern void pm_runtime_set_memalloc_noio(struct device *dev, bool enable);
extern void pm_runtime_clean_up_links(struct device *dev);
extern void pm_runtime_get_suppliers(struct device *dev);
extern void pm_runtime_put_suppliers(struct device *dev);
extern void pm_runtime_new_link(struct device *dev);
extern void pm_runtime_drop_link(struct device *dev);

static inline void pm_suspend_ignore_children(struct device *dev, bool enable)
{
	dev->power.ignore_children = enable;
}

static inline void pm_runtime_get_noresume(struct device *dev)
{
	atomic_inc(&dev->power.usage_count);
}

static inline void pm_runtime_put_noidle(struct device *dev)
{
	atomic_add_unless(&dev->power.usage_count, -1, 0);
}

static inline bool pm_runtime_suspended(struct device *dev)
{
	return dev->power.runtime_status == RPM_SUSPENDED
		&& !dev->power.disable_depth;
}

static inline bool pm_runtime_active(struct device *dev)
{
	return dev->power.runtime_status == RPM_ACTIVE
		|| dev->power.disable_depth;
}

static inline bool pm_runtime_status_suspended(struct device *dev)
{
	return dev->power.runtime_status == RPM_SUSPENDED;
}

static inline bool pm_runtime_enabled(struct device *dev)
{
	return !dev->power.disable_depth;
}

static inline bool pm_runtime_callbacks_present(struct device *dev)
{
	return !dev->power.no_callbacks;
}

static inline void pm_runtime_mark_last_busy(struct device *dev)
{
	WRITE_ONCE(dev->power.last_busy, jiffies);
}

static inline bool pm_runtime_is_irq_safe(struct device *dev)
{
	return dev->power.irq_safe;
}

#else /* !CONFIG_PM */
static inline bool queue_pm_work(struct work_struct *work) { return false; }
static inline int pm_generic_runtime_suspend(struct device *dev) { return 0; }
static inline int pm_generic_runtime_resume(struct device *dev) { return 0; }
static inline int pm_runtime_force_suspend(struct device *dev) { return 0; }
static inline int pm_runtime_force_resume(struct device *dev) { return 0; }
static inline int __pm_runtime_idle(struct device *dev, int rpmflags)
{
	return -ENOSYS;
}
static inline int __pm_runtime_suspend(struct device *dev, int rpmflags)
{
	return -ENOSYS;
}
static inline int __pm_runtime_resume(struct device *dev, int rpmflags)
{
	return 1;
}
static inline int pm_schedule_suspend(struct device *dev, unsigned int delay)
{
	return -ENOSYS;
}
static inline int pm_runtime_get_if_in_use(struct device *dev)
{
	return -EINVAL;
}
static inline int __pm_runtime_set_status(struct device *dev,
					  unsigned int status) { return 0; }
static inline int pm_runtime_barrier(struct device *dev) { return 0; }
static inline void pm_runtime_enable(struct device *dev) {}
static inline void __pm_runtime_disable(struct device *dev, bool c) {}
static inline void pm_runtime_allow(struct device *dev) {}
static inline void pm_runtime_forbid(struct device *dev) {}
static inline void pm_suspend_ignore_children(struct device *dev, bool enable) {}
static inline void pm_runtime_get_noresume(struct device *dev) {}
static inline void pm_runtime_put_noidle(struct device *dev) {}
static inline bool pm_runtime_suspended(struct device *dev) { return false; }
static inline bool pm_runtime_active(struct device *dev) { return true; }
static inline bool pm_runtime_status_suspended(struct device *dev) { return false; }
static inline bool pm_runtime_enabled(struct device *dev) { return false; }
static inline void pm_runtime_no_callbacks(struct device *dev) {}
static inline void pm_runtime_irq_safe(struct device *dev) {}
static inline bool pm_runtime_is_irq_safe(struct device *dev) { return false; }
static inline bool pm_runtime_callbacks_present(struct device *dev) { return false; }
static inline void pm_runtime_mark_last_busy(struct device *dev) {}
static inline void __pm_runtime_use_autosuspend(struct device *dev,
						bool use) {}
static inline void pm_runtime_set_autosuspend_delay(struct device *dev,
						    int delay) {}
static inline unsigned long pm_runtime_autosuspend_expiration(
				struct device *dev) { return 0; }
static inline void pm_runtime_set_memalloc_noio(struct device *dev,
						bool enable){}
static inline void pm_runtime_clean_up_links(struct device *dev) {}
static inline void pm_runtime_get_suppliers(struct device *dev) {}
static inline void pm_runtime_put_suppliers(struct device *dev) {}
static inline void pm_runtime_new_link(struct device *dev) {}
static inline void pm_runtime_drop_link(struct device *dev) {}
#endif /* !CONFIG_PM */

static inline int pm_runtime_idle(struct device *dev)
{
	return __pm_runtime_idle(dev, 0);
}

static inline int pm_runtime_suspend(struct device *dev)
{
	return __pm_runtime_suspend(dev, 0);
}

static inline int pm_runtime_autosuspend(struct device *dev)
{
	return __pm_runtime_suspend(dev, RPM_AUTO);
}

static inline int pm_runtime_resume(struct device *dev)
{
	return __pm_runtime_resume(dev, 0);
}

static inline int pm_request_idle(struct device *dev)
{
	return __pm_runtime_idle(dev, RPM_ASYNC);
}

static inline int pm_request_resume(struct device *dev)
{
	return __pm_runtime_resume(dev, RPM_ASYNC);
}

static inline int pm_request_autosuspend(struct device *dev)
{
	return __pm_runtime_suspend(dev, RPM_ASYNC | RPM_AUTO);
}

static inline int pm_runtime_get(struct device *dev)
{
	return __pm_runtime_resume(dev, RPM_GET_PUT | RPM_ASYNC);
}

static inline int pm_runtime_get_sync(struct device *dev)
{
	return __pm_runtime_resume(dev, RPM_GET_PUT);
}

static inline int pm_runtime_put(struct device *dev)
{
	return __pm_runtime_idle(dev, RPM_GET_PUT | RPM_ASYNC);
}

static inline int pm_runtime_put_autosuspend(struct device *dev)
{
	return __pm_runtime_suspend(dev,
				    RPM_GET_PUT | RPM_ASYNC | RPM_AUTO);
}

static inline int pm_runtime_put_sync(struct device *dev)
{
	return __pm_runtime_idle(dev, RPM_GET_PUT);
}

static inline int pm_runtime_put_sync_suspend(struct device *dev)
{
	return __pm_runtime_suspend(dev, RPM_GET_PUT);
}

static inline int pm_runtime_put_sync_autosuspend(struct device *dev)
{
	return __pm_runtime_suspend(dev, RPM_GET_PUT | RPM_AUTO);
}

static inline int pm_runtime_set_active(struct device *dev)
{
	return __pm_runtime_set_status(dev, RPM_ACTIVE);
}

static inline int pm_runtime_set_suspended(struct device *dev)
{
	return __pm_runtime_set_status(dev, RPM_SUSPENDED);
}

static inline void pm_runtime_disable(struct device *dev)
{
	__pm_runtime_disable(dev, true);
}

static inline void pm_runtime_use_autosuspend(struct device *dev)
{
	__pm_runtime_use_autosuspend(dev, true);
}

static inline void pm_runtime_dont_use_autosuspend(struct device *dev)
{
	__pm_runtime_use_autosuspend(dev, false);
}

#endif
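
The wrappers above simply OR RPM_* flag bits into the three
__pm_runtime_{idle,suspend,resume}() entry points. As a rough, illustrative
sketch only (not part of this header), a driver might combine them as follows;
the foo_* function names and the placeholder hardware access are hypothetical:

----
/* Hypothetical driver snippet -- illustrative only, not from this header. */
#include <linux/pm_runtime.h>

static void foo_setup_runtime_pm(struct device *dev)
{
	/* Enable autosuspend with a 1000 ms inactivity delay. */
	pm_runtime_set_autosuspend_delay(dev, 1000);
	pm_runtime_use_autosuspend(dev);
	pm_runtime_set_active(dev);
	pm_runtime_enable(dev);
}

static int foo_do_io(struct device *dev)
{
	int ret;

	/* RPM_GET_PUT: bump usage_count and resume synchronously. */
	ret = pm_runtime_get_sync(dev);
	if (ret < 0) {
		pm_runtime_put_noidle(dev);
		return ret;
	}

	/* ... hypothetical hardware access would go here ... */

	/* RPM_GET_PUT | RPM_ASYNC | RPM_AUTO: drop the count, schedule autosuspend. */
	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);
	return 0;
}
----

The pm_runtime_put_noidle() in the error path balances the usage count that
pm_runtime_get_sync() takes (via RPM_GET_PUT) even when the resume fails.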