cgroup/cpuset: Introduce cpuset_cpus_allowed_locked()
cpuset_cpus_allowed() uses a reader lock that is sleepable under RT, which
means it cannot be called from within a raw_spinlock_t context. Introduce a
new cpuset_cpus_allowed_locked() helper that performs the same function as
cpuset_cpus_allowed(), except that the caller must have acquired cpuset_mutex
so that no further locking is needed.

Suggested-by: Waiman Long <longman@redhat.com>
Signed-off-by: Pingfan Liu <piliu@redhat.com>
Cc: Waiman Long <longman@redhat.com>
Cc: Tejun Heo <tj@kernel.org>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Michal Koutný <mkoutny@suse.com>
Cc: linux-kernel@vger.kernel.org
To: cgroups@vger.kernel.org
Reviewed-by: Chen Ridong <chenridong@huawei.com>
Reviewed-by: Waiman Long <longman@redhat.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
parent a0131c3927
commit 1f38221511
--- a/include/linux/cpuset.h
+++ b/include/linux/cpuset.h
@@ -74,6 +74,7 @@ extern void inc_dl_tasks_cs(struct task_struct *task);
 extern void dec_dl_tasks_cs(struct task_struct *task);
 extern void cpuset_lock(void);
 extern void cpuset_unlock(void);
+extern void cpuset_cpus_allowed_locked(struct task_struct *p, struct cpumask *mask);
 extern void cpuset_cpus_allowed(struct task_struct *p, struct cpumask *mask);
 extern bool cpuset_cpus_allowed_fallback(struct task_struct *p);
 extern bool cpuset_cpu_is_isolated(int cpu);
@@ -195,10 +196,16 @@ static inline void dec_dl_tasks_cs(struct task_struct *task) { }
 static inline void cpuset_lock(void) { }
 static inline void cpuset_unlock(void) { }
 
+static inline void cpuset_cpus_allowed_locked(struct task_struct *p,
+                                              struct cpumask *mask)
+{
+        cpumask_copy(mask, task_cpu_possible_mask(p));
+}
+
 static inline void cpuset_cpus_allowed(struct task_struct *p,
                                        struct cpumask *mask)
 {
-        cpumask_copy(mask, task_cpu_possible_mask(p));
+        cpuset_cpus_allowed_locked(p, mask);
 }
 
 static inline bool cpuset_cpus_allowed_fallback(struct task_struct *p)
--- a/kernel/cgroup/cpuset.c
+++ b/kernel/cgroup/cpuset.c
@@ -4160,24 +4160,13 @@ void __init cpuset_init_smp(void)
         BUG_ON(!cpuset_migrate_mm_wq);
 }
 
-/**
- * cpuset_cpus_allowed - return cpus_allowed mask from a tasks cpuset.
- * @tsk: pointer to task_struct from which to obtain cpuset->cpus_allowed.
- * @pmask: pointer to struct cpumask variable to receive cpus_allowed set.
- *
- * Description: Returns the cpumask_var_t cpus_allowed of the cpuset
- * attached to the specified @tsk. Guaranteed to return some non-empty
- * subset of cpu_active_mask, even if this means going outside the
- * tasks cpuset, except when the task is in the top cpuset.
- **/
-
-void cpuset_cpus_allowed(struct task_struct *tsk, struct cpumask *pmask)
+/*
+ * Return cpus_allowed mask from a task's cpuset.
+ */
+static void __cpuset_cpus_allowed_locked(struct task_struct *tsk, struct cpumask *pmask)
 {
-        unsigned long flags;
         struct cpuset *cs;
 
-        spin_lock_irqsave(&callback_lock, flags);
-
         cs = task_cs(tsk);
         if (cs != &top_cpuset)
                 guarantee_active_cpus(tsk, pmask);
@@ -4197,7 +4186,39 @@ void cpuset_cpus_allowed(struct task_struct *tsk, struct cpumask *pmask)
                 if (!cpumask_intersects(pmask, cpu_active_mask))
                         cpumask_copy(pmask, possible_mask);
         }
-        spin_unlock_irqrestore(&callback_lock, flags);
 }
 
+/**
+ * cpuset_cpus_allowed_locked - return cpus_allowed mask from a task's cpuset.
+ * @tsk: pointer to task_struct from which to obtain cpuset->cpus_allowed.
+ * @pmask: pointer to struct cpumask variable to receive cpus_allowed set.
+ *
+ * Similar to cpuset_cpus_allowed() except that the caller must have acquired
+ * cpuset_mutex.
+ */
+void cpuset_cpus_allowed_locked(struct task_struct *tsk, struct cpumask *pmask)
+{
+        lockdep_assert_held(&cpuset_mutex);
+        __cpuset_cpus_allowed_locked(tsk, pmask);
+}
+
+/**
+ * cpuset_cpus_allowed - return cpus_allowed mask from a task's cpuset.
+ * @tsk: pointer to task_struct from which to obtain cpuset->cpus_allowed.
+ * @pmask: pointer to struct cpumask variable to receive cpus_allowed set.
+ *
+ * Description: Returns the cpumask_var_t cpus_allowed of the cpuset
+ * attached to the specified @tsk. Guaranteed to return some non-empty
+ * subset of cpu_active_mask, even if this means going outside the
+ * tasks cpuset, except when the task is in the top cpuset.
+ **/
+
+void cpuset_cpus_allowed(struct task_struct *tsk, struct cpumask *pmask)
+{
+        unsigned long flags;
+
+        spin_lock_irqsave(&callback_lock, flags);
+        __cpuset_cpus_allowed_locked(tsk, pmask);
+        spin_unlock_irqrestore(&callback_lock, flags);
+}
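Not part of the patch: below is a minimal usage sketch of the new helper's calling convention, assuming a sleepable context where cpuset_mutex can be taken via cpuset_lock(). The function example_pick_allowed_cpu() and its pick-the-first-CPU policy are hypothetical and only illustrate the locking contract; cpuset_cpus_allowed() keeps its own callback_lock handling, while the _locked variant relies entirely on the caller holding cpuset_mutex.

```c
#include <linux/cpumask.h>
#include <linux/cpuset.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/sched.h>

/*
 * Hypothetical caller: query a task's cpuset-allowed CPUs while already
 * holding cpuset_mutex, so cpuset_cpus_allowed_locked() needs no further
 * locking (it only asserts that cpuset_mutex is held).
 */
static int example_pick_allowed_cpu(struct task_struct *p)
{
        cpumask_var_t mask;
        unsigned int cpu;

        if (!alloc_cpumask_var(&mask, GFP_KERNEL))
                return -ENOMEM;

        cpuset_lock();                          /* takes cpuset_mutex */
        cpuset_cpus_allowed_locked(p, mask);    /* no extra locking needed */
        cpuset_unlock();

        cpu = cpumask_first(mask);              /* arbitrary policy: first allowed CPU */
        free_cpumask_var(mask);

        return cpu < nr_cpu_ids ? (int)cpu : -ENODEV;
}
```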