sched: idle: Respect the CPU system wakeup QoS limit for s2idle

A CPU system wakeup QoS limit may have been requested by user space. To
avoid breaking this constraint when entering a low-power state during
s2idle, let's start to take the QoS limit into account.

Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Dhruva Gole <d-gole@ti.com>
Reviewed-by: Kevin Hilman (TI) <khilman@baylibre.com>
Tested-by: Kevin Hilman (TI) <khilman@baylibre.com>
Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
Link: https://patch.msgid.link/20251125112650.329269-5-ulf.hansson@linaro.org
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
parent e2e4695f01
commit 99b42445f4
diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c
--- a/drivers/cpuidle/cpuidle.c
+++ b/drivers/cpuidle/cpuidle.c
@@ -184,20 +184,22 @@ static noinstr void enter_s2idle_proper(struct cpuidle_driver *drv,
  * cpuidle_enter_s2idle - Enter an idle state suitable for suspend-to-idle.
  * @drv: cpuidle driver for the given CPU.
  * @dev: cpuidle device for the given CPU.
+ * @latency_limit_ns: Idle state exit latency limit
  *
  * If there are states with the ->enter_s2idle callback, find the deepest of
  * them and enter it with frozen tick.
  */
-int cpuidle_enter_s2idle(struct cpuidle_driver *drv, struct cpuidle_device *dev)
+int cpuidle_enter_s2idle(struct cpuidle_driver *drv, struct cpuidle_device *dev,
+                         u64 latency_limit_ns)
 {
         int index;
 
         /*
-         * Find the deepest state with ->enter_s2idle present, which guarantees
-         * that interrupts won't be enabled when it exits and allows the tick to
-         * be frozen safely.
+         * Find the deepest state with ->enter_s2idle present that meets the
+         * specified latency limit, which guarantees that interrupts won't be
+         * enabled when it exits and allows the tick to be frozen safely.
          */
-        index = find_deepest_state(drv, dev, U64_MAX, 0, true);
+        index = find_deepest_state(drv, dev, latency_limit_ns, 0, true);
         if (index > 0) {
                 enter_s2idle_proper(drv, dev, index);
                 local_irq_enable();
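For illustration only, here is a minimal, self-contained sketch of the kind of latency-based filtering that find_deepest_state() is asked to do above. The structure, helper and numbers below (toy_idle_state, toy_find_deepest_state, the example latencies) are hypothetical stand-ins, not the kernel's actual cpuidle data structures or code:

#include <stdint.h>
#include <stdio.h>

/* Toy model of an idle state table, ordered from shallowest to deepest. */
struct toy_idle_state {
        const char *name;
        uint64_t exit_latency_ns;       /* worst-case wakeup latency */
        int has_enter_s2idle;           /* usable for suspend-to-idle? */
};

/* Return the deepest s2idle-capable state whose exit latency fits the limit. */
static int toy_find_deepest_state(const struct toy_idle_state *states,
                                  int nr_states, uint64_t latency_limit_ns)
{
        int i, deepest = -1;

        for (i = 0; i < nr_states; i++) {
                if (!states[i].has_enter_s2idle)
                        continue;
                if (states[i].exit_latency_ns > latency_limit_ns)
                        continue;
                deepest = i;    /* keep the last (deepest) state that qualifies */
        }
        return deepest;
}

int main(void)
{
        const struct toy_idle_state states[] = {
                { "WFI",         1000,   1 },
                { "cpu-off",     50000,  1 },
                { "cluster-off", 500000, 1 },
        };
        /* A hypothetical 100 us (100000 ns) wakeup QoS limit rules out "cluster-off". */
        int idx = toy_find_deepest_state(states, 3, 100000);

        printf("selected state: %s\n", idx >= 0 ? states[idx].name : "none");
        return 0;
}

With no constraint the caller would pass U64_MAX, as the old code above did, and the deepest s2idle-capable state would always qualify.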
diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h
--- a/include/linux/cpuidle.h
+++ b/include/linux/cpuidle.h
@@ -248,7 +248,8 @@ extern int cpuidle_find_deepest_state(struct cpuidle_driver *drv,
                                       struct cpuidle_device *dev,
                                       u64 latency_limit_ns);
 extern int cpuidle_enter_s2idle(struct cpuidle_driver *drv,
-                                struct cpuidle_device *dev);
+                                struct cpuidle_device *dev,
+                                u64 latency_limit_ns);
 extern void cpuidle_use_deepest_state(u64 latency_limit_ns);
 #else
 static inline int cpuidle_find_deepest_state(struct cpuidle_driver *drv,
@@ -256,7 +257,8 @@ static inline int cpuidle_find_deepest_state(struct cpuidle_driver *drv,
                                              u64 latency_limit_ns)
 {return -ENODEV; }
 static inline int cpuidle_enter_s2idle(struct cpuidle_driver *drv,
-                                       struct cpuidle_device *dev)
+                                       struct cpuidle_device *dev,
+                                       u64 latency_limit_ns)
 {return -ENODEV; }
 static inline void cpuidle_use_deepest_state(u64 latency_limit_ns)
 {
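The two header hunks above keep the CONFIG_CPU_IDLE=y declaration and the =n inline stub in lockstep, so callers compile against the same three-argument signature either way. A hedged sketch of that pattern, using made-up names (toy_enter_s2idle, CONFIG_TOY_IDLE) rather than the real kernel declarations:

#include <errno.h>
#include <stdint.h>

struct toy_drv;
struct toy_dev;

#ifdef CONFIG_TOY_IDLE
/* Real implementation lives elsewhere when the feature is built in. */
int toy_enter_s2idle(struct toy_drv *drv, struct toy_dev *dev,
                     uint64_t latency_limit_ns);
#else
/* Same signature, so callers build unchanged when the feature is compiled out. */
static inline int toy_enter_s2idle(struct toy_drv *drv, struct toy_dev *dev,
                                   uint64_t latency_limit_ns)
{
        return -ENODEV;
}
#endif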
diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c
--- a/kernel/sched/idle.c
+++ b/kernel/sched/idle.c
@@ -131,12 +131,13 @@ void __cpuidle default_idle_call(void)
 }
 
 static int call_cpuidle_s2idle(struct cpuidle_driver *drv,
-                               struct cpuidle_device *dev)
+                               struct cpuidle_device *dev,
+                               u64 max_latency_ns)
 {
         if (current_clr_polling_and_test())
                 return -EBUSY;
 
-        return cpuidle_enter_s2idle(drv, dev);
+        return cpuidle_enter_s2idle(drv, dev, max_latency_ns);
 }
 
 static int call_cpuidle(struct cpuidle_driver *drv, struct cpuidle_device *dev,
@@ -205,12 +206,13 @@ static void cpuidle_idle_call(void)
                 u64 max_latency_ns;
 
                 if (idle_should_enter_s2idle()) {
+                        max_latency_ns = cpu_wakeup_latency_qos_limit() *
+                                         NSEC_PER_USEC;
 
-                        entered_state = call_cpuidle_s2idle(drv, dev);
+                        entered_state = call_cpuidle_s2idle(drv, dev,
+                                                            max_latency_ns);
                         if (entered_state > 0)
                                 goto exit_idle;
-
-                        max_latency_ns = U64_MAX;
                 } else {
                         max_latency_ns = dev->forced_idle_latency_limit_ns;
                 }
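Note the unit conversion in the hunk above: cpu_wakeup_latency_qos_limit() reports the limit in microseconds (as implied by the NSEC_PER_USEC scaling), while cpuidle compares against exit latencies in nanoseconds. A trivial standalone sketch of that arithmetic, with a hypothetical 100 us limit:

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_USEC   1000ULL

int main(void)
{
        uint64_t limit_us = 100;        /* hypothetical user-space QoS request, in us */
        uint64_t max_latency_ns = limit_us * NSEC_PER_USEC;

        /* Prints 100000: the bound handed down through call_cpuidle_s2idle(). */
        printf("max_latency_ns = %llu\n", (unsigned long long)max_latency_ns);
        return 0;
}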