Merge branches 'pm-core', 'pm-sleep', 'pm-pci' and 'pm-domains'
* pm-core:
  PM: runtime: Fix timer_expires data type on 32-bit arches
  PM: runtime: Remove link state checks in rpm_get/put_supplier()

* pm-sleep:
  ACPI: EC: PM: Drop ec_no_wakeup check from acpi_ec_dispatch_gpe()
  ACPI: EC: PM: Flush EC work unconditionally after wakeup
  PM: hibernate: remove the bogus call to get_gendisk() in software_resume()
  PM: hibernate: Batch hibernate and resume IO requests

* pm-pci:
  PCI/ACPI: Whitelist hotplug ports for D3 if power managed by ACPI

* pm-domains:
  PM: domains: Allow to abort power off when no ->power_off() callback
  PM: domains: Rename power state enums for genpd
commit 2cf9ba2905
@@ -2011,20 +2011,16 @@ bool acpi_ec_dispatch_gpe(void)
         if (acpi_any_gpe_status_set(first_ec->gpe))
                 return true;
 
-        if (ec_no_wakeup)
-                return false;
-
         /*
          * Dispatch the EC GPE in-band, but do not report wakeup in any case
          * to allow the caller to process events properly after that.
          */
         ret = acpi_dispatch_gpe(NULL, first_ec->gpe);
-        if (ret == ACPI_INTERRUPT_HANDLED) {
+        if (ret == ACPI_INTERRUPT_HANDLED)
                 pm_pr_dbg("ACPI EC GPE dispatched\n");
 
-                /* Flush the event and query workqueues. */
-                acpi_ec_flush_work();
-        }
+        /* Flush the event and query workqueues. */
+        acpi_ec_flush_work();
 
         return false;
 }
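Note (not part of the patch): the point of flushing unconditionally is that the caller of acpi_ec_dispatch_gpe() decides whether to abort a suspend-to-idle cycle only after any EC-triggered work has run. A rough, hypothetical sketch of such a caller; the helper name and surrounding logic are assumptions, only the acpi_ec_dispatch_gpe() call itself is taken from the code changed above:

/* Hypothetical wakeup check in platform sleep code; illustration only. */
static bool example_s2idle_wakeup_is_genuine(void)
{
        /*
         * acpi_ec_dispatch_gpe() handles the EC GPE in-band and, with the
         * change above, always flushes the EC event/query workqueues, so
         * any wakeup events they raise are visible before we decide.
         */
        if (acpi_ec_dispatch_gpe())
                return true;            /* some other GPE is pending */

        return pm_wakeup_pending();     /* rely on registered wakeup sources */
}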
@@ -123,7 +123,7 @@ static const struct genpd_lock_ops genpd_spin_ops = {
 #define genpd_lock_interruptible(p)     p->lock_ops->lock_interruptible(p)
 #define genpd_unlock(p)                 p->lock_ops->unlock(p)
 
-#define genpd_status_on(genpd)          (genpd->status == GPD_STATE_ACTIVE)
+#define genpd_status_on(genpd)          (genpd->status == GENPD_STATE_ON)
 #define genpd_is_irq_safe(genpd)        (genpd->flags & GENPD_FLAG_IRQ_SAFE)
 #define genpd_is_always_on(genpd)       (genpd->flags & GENPD_FLAG_ALWAYS_ON)
 #define genpd_is_active_wakeup(genpd)   (genpd->flags & GENPD_FLAG_ACTIVE_WAKEUP)
@@ -222,7 +222,7 @@ static void genpd_update_accounting(struct generic_pm_domain *genpd)
          * out of off and so update the idle time and vice
          * versa.
          */
-        if (genpd->status == GPD_STATE_ACTIVE) {
+        if (genpd->status == GENPD_STATE_ON) {
                 int state_idx = genpd->state_idx;
 
                 genpd->states[state_idx].idle_time =
@@ -497,6 +497,7 @@ static int genpd_power_off(struct generic_pm_domain *genpd, bool one_dev_on,
         struct pm_domain_data *pdd;
         struct gpd_link *link;
         unsigned int not_suspended = 0;
+        int ret;
 
         /*
          * Do not try to power off the domain in the following situations:
@@ -544,26 +545,15 @@ static int genpd_power_off(struct generic_pm_domain *genpd, bool one_dev_on,
         if (!genpd->gov)
                 genpd->state_idx = 0;
 
-        if (genpd->power_off) {
-                int ret;
+        /* Don't power off, if a child domain is waiting to power on. */
+        if (atomic_read(&genpd->sd_count) > 0)
+                return -EBUSY;
 
-                if (atomic_read(&genpd->sd_count) > 0)
-                        return -EBUSY;
+        ret = _genpd_power_off(genpd, true);
+        if (ret)
+                return ret;
 
-                /*
-                 * If sd_count > 0 at this point, one of the subdomains hasn't
-                 * managed to call genpd_power_on() for the parent yet after
-                 * incrementing it. In that case genpd_power_on() will wait
-                 * for us to drop the lock, so we can call .power_off() and let
-                 * the genpd_power_on() restore power for us (this shouldn't
-                 * happen very often).
-                 */
-                ret = _genpd_power_off(genpd, true);
-                if (ret)
-                        return ret;
-        }
-
-        genpd->status = GPD_STATE_POWER_OFF;
+        genpd->status = GENPD_STATE_OFF;
         genpd_update_accounting(genpd);
 
         list_for_each_entry(link, &genpd->child_links, child_node) {
@@ -616,7 +606,7 @@ static int genpd_power_on(struct generic_pm_domain *genpd, unsigned int depth)
         if (ret)
                 goto err;
 
-        genpd->status = GPD_STATE_ACTIVE;
+        genpd->status = GENPD_STATE_ON;
         genpd_update_accounting(genpd);
 
         return 0;
@@ -961,7 +951,7 @@ static void genpd_sync_power_off(struct generic_pm_domain *genpd, bool use_lock,
         if (_genpd_power_off(genpd, false))
                 return;
 
-        genpd->status = GPD_STATE_POWER_OFF;
+        genpd->status = GENPD_STATE_OFF;
 
         list_for_each_entry(link, &genpd->child_links, child_node) {
                 genpd_sd_counter_dec(link->parent);
@@ -1007,8 +997,7 @@ static void genpd_sync_power_on(struct generic_pm_domain *genpd, bool use_lock,
         }
 
         _genpd_power_on(genpd, false);
-
-        genpd->status = GPD_STATE_ACTIVE;
+        genpd->status = GENPD_STATE_ON;
 }
 
 /**
@@ -1287,7 +1276,7 @@ static int genpd_restore_noirq(struct device *dev)
          * so make it appear as powered off to genpd_sync_power_on(),
          * so that it tries to power it on in case it was really off.
          */
-        genpd->status = GPD_STATE_POWER_OFF;
+        genpd->status = GENPD_STATE_OFF;
 
         genpd_sync_power_on(genpd, true, 0);
         genpd_unlock(genpd);
@@ -1777,7 +1766,7 @@ int pm_genpd_init(struct generic_pm_domain *genpd,
         genpd->gov = gov;
         INIT_WORK(&genpd->power_off_work, genpd_power_off_work_fn);
         atomic_set(&genpd->sd_count, 0);
-        genpd->status = is_off ? GPD_STATE_POWER_OFF : GPD_STATE_ACTIVE;
+        genpd->status = is_off ? GENPD_STATE_OFF : GENPD_STATE_ON;
         genpd->device_count = 0;
         genpd->max_off_time_ns = -1;
         genpd->max_off_time_changed = true;
@@ -2804,8 +2793,8 @@ static int genpd_summary_one(struct seq_file *s,
                                struct generic_pm_domain *genpd)
 {
         static const char * const status_lookup[] = {
-                [GPD_STATE_ACTIVE] = "on",
-                [GPD_STATE_POWER_OFF] = "off"
+                [GENPD_STATE_ON] = "on",
+                [GENPD_STATE_OFF] = "off"
         };
         struct pm_domain_data *pm_data;
         const char *kobj_path;
@@ -2883,8 +2872,8 @@ static int summary_show(struct seq_file *s, void *data)
 static int status_show(struct seq_file *s, void *data)
 {
         static const char * const status_lookup[] = {
-                [GPD_STATE_ACTIVE] = "on",
-                [GPD_STATE_POWER_OFF] = "off"
+                [GENPD_STATE_ON] = "on",
+                [GENPD_STATE_OFF] = "off"
         };
 
         struct generic_pm_domain *genpd = s->private;
@@ -2897,7 +2886,7 @@ static int status_show(struct seq_file *s, void *data)
         if (WARN_ON_ONCE(genpd->status >= ARRAY_SIZE(status_lookup)))
                 goto exit;
 
-        if (genpd->status == GPD_STATE_POWER_OFF)
+        if (genpd->status == GENPD_STATE_OFF)
                 seq_printf(s, "%s-%u\n", status_lookup[genpd->status],
                         genpd->state_idx);
         else
@@ -2940,7 +2929,7 @@ static int idle_states_show(struct seq_file *s, void *data)
                 ktime_t delta = 0;
                 s64 msecs;
 
-                if ((genpd->status == GPD_STATE_POWER_OFF) &&
+                if ((genpd->status == GENPD_STATE_OFF) &&
                                 (genpd->state_idx == i))
                         delta = ktime_sub(ktime_get(), genpd->accounting_time);
 
@@ -2963,7 +2952,7 @@ static int active_time_show(struct seq_file *s, void *data)
         if (ret)
                 return -ERESTARTSYS;
 
-        if (genpd->status == GPD_STATE_ACTIVE)
+        if (genpd->status == GENPD_STATE_ON)
                 delta = ktime_sub(ktime_get(), genpd->accounting_time);
 
         seq_printf(s, "%lld ms\n", ktime_to_ms(
@@ -2986,7 +2975,7 @@ static int total_idle_time_show(struct seq_file *s, void *data)
 
         for (i = 0; i < genpd->state_count; i++) {
 
-                if ((genpd->status == GPD_STATE_POWER_OFF) &&
+                if ((genpd->status == GENPD_STATE_OFF) &&
                                 (genpd->state_idx == i))
                         delta = ktime_sub(ktime_get(), genpd->accounting_time);
 
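Note (not part of the patch): with genpd_power_off() restructured as above, a failure reported by _genpd_power_off(), including -EBUSY from a provider's ->power_off() callback, aborts the transition and the domain stays in GENPD_STATE_ON. A hedged sketch of such a provider; all foo_* names are made up:

/* Hypothetical genpd provider; the foo_* helpers are assumptions. */
static int foo_pd_power_off(struct generic_pm_domain *pd)
{
        if (foo_hw_domain_busy())       /* assumed platform-specific check */
                return -EBUSY;          /* abort: genpd keeps the domain on */

        foo_hw_domain_off();            /* assumed register write */
        return 0;
}

static struct generic_pm_domain foo_pd = {
        .name           = "foo",
        .power_off      = foo_pd_power_off,
};

Such a domain would typically be registered with pm_genpd_init(&foo_pd, NULL, false); the renamed enums only change how the resulting on/off state is spelled in the core.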
@@ -291,8 +291,7 @@ static int rpm_get_suppliers(struct device *dev)
                                 device_links_read_lock_held()) {
                 int retval;
 
-                if (!(link->flags & DL_FLAG_PM_RUNTIME) ||
-                    READ_ONCE(link->status) == DL_STATE_SUPPLIER_UNBIND)
+                if (!(link->flags & DL_FLAG_PM_RUNTIME))
                         continue;
 
                 retval = pm_runtime_get_sync(link->supplier);
@@ -312,8 +311,6 @@ static void rpm_put_suppliers(struct device *dev)
 
         list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
                                 device_links_read_lock_held()) {
-                if (READ_ONCE(link->status) == DL_STATE_SUPPLIER_UNBIND)
-                        continue;
-
                 while (refcount_dec_not_one(&link->rpm_active))
                         pm_runtime_put(link->supplier);
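Note (not part of the patch): the links walked by rpm_get_suppliers()/rpm_put_suppliers() are the ones created with DL_FLAG_PM_RUNTIME, and the idea behind dropping the link-state checks is to keep the get/put counts on a supplier balanced even while it is unbinding. A minimal consumer-side sketch, with con and sup as placeholder struct device pointers:

/* Illustration only; "con" and "sup" are assumed struct device pointers. */
struct device_link *link;

link = device_link_add(con, sup, DL_FLAG_PM_RUNTIME | DL_FLAG_RPM_ACTIVE);
if (!link)
        dev_warn(con, "failed to create PM runtime link to %s\n",
                 dev_name(sup));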
@@ -944,6 +944,16 @@ static bool acpi_pci_bridge_d3(struct pci_dev *dev)
         if (!dev->is_hotplug_bridge)
                 return false;
 
+        /* Assume D3 support if the bridge is power-manageable by ACPI. */
+        adev = ACPI_COMPANION(&dev->dev);
+        if (!adev && !pci_dev_is_added(dev)) {
+                adev = acpi_pci_find_companion(&dev->dev);
+                ACPI_COMPANION_SET(&dev->dev, adev);
+        }
+
+        if (adev && acpi_device_power_manageable(adev))
+                return true;
+
         /*
          * Look for a special _DSD property for the root port and if it
          * is set we know the hierarchy behind it supports D3 just fine.
@@ -590,7 +590,7 @@ struct dev_pm_info {
 #endif
 #ifdef CONFIG_PM
         struct hrtimer          suspend_timer;
-        unsigned long           timer_expires;
+        u64                     timer_expires;
         struct work_struct      work;
         wait_queue_head_t       wait_queue;
         struct wake_irq         *wakeirq;
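Note (not part of the patch): timer_expires holds a nanosecond-resolution expiry time, and on 32-bit architectures an unsigned long is only 32 bits, so the stored value wraps after roughly four seconds' worth of nanoseconds. A standalone user-space sketch of the truncation the u64 type avoids:

/* Minimal demo of 32-bit truncation of a nanosecond time stamp. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint64_t expires_ns = 10ULL * 1000 * 1000 * 1000;  /* 10 s, in ns */
        uint32_t truncated = (uint32_t)expires_ns;          /* 32-bit long */

        printf("full: %llu ns, truncated: %u ns\n",
               (unsigned long long)expires_ns, truncated);
        return 0;
}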
@@ -64,8 +64,8 @@
 #define GENPD_FLAG_RPM_ALWAYS_ON        (1U << 5)
 
 enum gpd_status {
-        GPD_STATE_ACTIVE = 0,   /* PM domain is active */
-        GPD_STATE_POWER_OFF,    /* PM domain is off */
+        GENPD_STATE_ON = 0,     /* PM domain is on */
+        GENPD_STATE_OFF,        /* PM domain is off */
 };
 
 struct dev_power_governor {
@@ -948,17 +948,6 @@ static int software_resume(void)
 
         /* Check if the device is there */
         swsusp_resume_device = name_to_dev_t(resume_file);
-
-        /*
-         * name_to_dev_t is ineffective to verify parition if resume_file is in
-         * integer format. (e.g. major:minor)
-         */
-        if (isdigit(resume_file[0]) && resume_wait) {
-                int partno;
-                while (!get_gendisk(swsusp_resume_device, &partno))
-                        msleep(10);
-        }
-
         if (!swsusp_resume_device) {
                 /*
                  * Some device discovery might still be in progress; we need
@@ -226,6 +226,7 @@ struct hib_bio_batch {
         atomic_t                count;
         wait_queue_head_t       wait;
         blk_status_t            error;
+        struct blk_plug         plug;
 };
 
 static void hib_init_batch(struct hib_bio_batch *hb)
@@ -233,6 +234,12 @@ static void hib_init_batch(struct hib_bio_batch *hb)
         atomic_set(&hb->count, 0);
         init_waitqueue_head(&hb->wait);
         hb->error = BLK_STS_OK;
+        blk_start_plug(&hb->plug);
+}
+
+static void hib_finish_batch(struct hib_bio_batch *hb)
+{
+        blk_finish_plug(&hb->plug);
 }
 
 static void hib_end_io(struct bio *bio)
@@ -294,6 +301,10 @@ static int hib_submit_io(int op, int op_flags, pgoff_t page_off, void *addr,
 
 static blk_status_t hib_wait_io(struct hib_bio_batch *hb)
 {
+        /*
+         * We are relying on the behavior of blk_plug that a thread with
+         * a plug will flush the plug list before sleeping.
+         */
         wait_event(hb->wait, atomic_read(&hb->count) == 0);
         return blk_status_to_errno(hb->error);
 }
@@ -561,6 +572,7 @@ static int save_image(struct swap_map_handle *handle,
                 nr_pages++;
         }
         err2 = hib_wait_io(&hb);
+        hib_finish_batch(&hb);
         stop = ktime_get();
         if (!ret)
                 ret = err2;
@@ -854,6 +866,7 @@ static int save_image_lzo(struct swap_map_handle *handle,
                 pr_info("Image saving done\n");
         swsusp_show_speed(start, stop, nr_to_write, "Wrote");
 out_clean:
+        hib_finish_batch(&hb);
         if (crc) {
                 if (crc->thr)
                         kthread_stop(crc->thr);
@@ -1084,6 +1097,7 @@ static int load_image(struct swap_map_handle *handle,
                 nr_pages++;
         }
         err2 = hib_wait_io(&hb);
+        hib_finish_batch(&hb);
         stop = ktime_get();
         if (!ret)
                 ret = err2;
@@ -1447,6 +1461,7 @@ static int load_image_lzo(struct swap_map_handle *handle,
         }
         swsusp_show_speed(start, stop, nr_to_read, "Read");
 out_clean:
+        hib_finish_batch(&hb);
         for (i = 0; i < ring_size; i++)
                 free_page((unsigned long)page[i]);
         if (crc) {
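Note (not part of the patch): the hib_* changes above batch hibernation bios behind a block plug, so requests queued by successive hib_submit_io() calls reach the device in groups, or when the task sleeps, which is what the comment added to hib_wait_io() relies on. The generic pattern, sketched with an assumed submit_one_page() helper standing in for hib_submit_io():

/* Generic block-plug batching pattern; submit_one_page() is assumed. */
struct blk_plug plug;
int i;

blk_start_plug(&plug);
for (i = 0; i < nr_pages; i++)
        submit_one_page(handle, page[i]);       /* queues one bio per page */
blk_finish_plug(&plug);                         /* flush the batched requests */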