mm/mmu_notifiers: Use 'interval_sub' as the variable for mmu_interval_notifier
The 'interval_sub' is placed on the 'notifier_subscriptions' interval tree.
This eliminates the poor name 'mni' for this variable.

Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>

parent 1991722a70
commit 5292e24a6a
@@ -149,14 +149,14 @@ CPU page table into a device page table; HMM helps keep both synchronized. A
 device driver that wants to mirror a process address space must start with the
 registration of a mmu_interval_notifier::

-   mni->ops = &driver_ops;
-   int mmu_interval_notifier_insert(struct mmu_interval_notifier *mni,
-				    unsigned long start, unsigned long length,
-				    struct mm_struct *mm);
+   int mmu_interval_notifier_insert(struct mmu_interval_notifier *interval_sub,
+				    struct mm_struct *mm, unsigned long start,
+				    unsigned long length,
+				    const struct mmu_interval_notifier_ops *ops);

-During the driver_ops->invalidate() callback the device driver must perform
-the update action to the range (mark range read only, or fully unmap,
-etc.). The device must complete the update before the driver callback returns.
+During the ops->invalidate() callback the device driver must perform the
+update action to the range (mark range read only, or fully unmap, etc.). The
+device must complete the update before the driver callback returns.

 When the device driver wants to populate a range of virtual addresses, it can
 use::
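The documentation above requires the device to finish its update inside the
ops->invalidate() callback before returning. As orientation only, here is a
minimal sketch of such a callback under the new naming; struct my_device, its
update_lock and my_device_unmap_range() are hypothetical driver pieces, while
the mmu_interval_notifier_ops signature, mmu_notifier_range_blockable() and
mmu_interval_set_seq() are the APIs this patch touches:

    #include <linux/mmu_notifier.h>
    #include <linux/mutex.h>

    /* Hypothetical driver state holding the interval subscription. */
    struct my_device {
            struct mmu_interval_notifier notifier;
            struct mutex update_lock;       /* serializes device PTE updates */
    };

    static void my_device_unmap_range(struct my_device *dev,
                                      unsigned long start, unsigned long end);

    static bool my_invalidate(struct mmu_interval_notifier *interval_sub,
                              const struct mmu_notifier_range *range,
                              unsigned long cur_seq)
    {
            struct my_device *dev =
                    container_of(interval_sub, struct my_device, notifier);

            /* Returning false is only allowed for non-blockable ranges. */
            if (!mmu_notifier_range_blockable(range))
                    return false;

            mutex_lock(&dev->update_lock);
            /* Publish the collision before touching the device tables. */
            mmu_interval_set_seq(interval_sub, cur_seq);
            my_device_unmap_range(dev, range->start, range->end);
            mutex_unlock(&dev->update_lock);
            return true;
    }

    static const struct mmu_interval_notifier_ops my_driver_ops = {
            .invalidate = my_invalidate,
    };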
@@ -183,7 +183,7 @@ The usage pattern is::
      struct hmm_range range;
      ...

-     range.notifier = &mni;
+     range.notifier = &interval_sub;
      range.start = ...;
      range.end = ...;
      range.pfns = ...;
@@ -191,11 +191,11 @@ The usage pattern is::
      range.values = ...;
      range.pfn_shift = ...;

-     if (!mmget_not_zero(mni->notifier.mm))
+     if (!mmget_not_zero(interval_sub->notifier.mm))
          return -EFAULT;

 again:
-     range.notifier_seq = mmu_interval_read_begin(&mni);
+     range.notifier_seq = mmu_interval_read_begin(&interval_sub);
      down_read(&mm->mmap_sem);
      ret = hmm_range_fault(&range, HMM_RANGE_SNAPSHOT);
      if (ret) {
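The hunk cuts the documented usage pattern off at the error check. Purely as
orientation (a sketch, not the literal continuation of hmm.rst): once
hmm_range_fault() has succeeded and mmap_sem is dropped, the snapshot is
validated under the driver's own lock before programming the device, with
take_lock()/release_lock() standing in for whatever lock the driver's
invalidate() callback also takes:

      up_read(&mm->mmap_sem);

      take_lock(driver->update);
      if (mmu_interval_read_retry(&interval_sub, range.notifier_seq)) {
          release_lock(driver->update);
          goto again;   /* an invalidation collided with the snapshot */
      }

      /* Use the pfns array content to update the device page table. */

      release_lock(driver->update);
      mmput(mm);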
@@ -237,7 +237,7 @@ struct mmu_notifier {
  * was required but mmu_notifier_range_blockable(range) is false.
  */
 struct mmu_interval_notifier_ops {
-	bool (*invalidate)(struct mmu_interval_notifier *mni,
+	bool (*invalidate)(struct mmu_interval_notifier *interval_sub,
 			   const struct mmu_notifier_range *range,
 			   unsigned long cur_seq);
 };
@@ -292,20 +292,21 @@ extern int __mmu_notifier_register(struct mmu_notifier *subscription,
 extern void mmu_notifier_unregister(struct mmu_notifier *subscription,
 				    struct mm_struct *mm);

-unsigned long mmu_interval_read_begin(struct mmu_interval_notifier *mni);
-int mmu_interval_notifier_insert(struct mmu_interval_notifier *mni,
+unsigned long
+mmu_interval_read_begin(struct mmu_interval_notifier *interval_sub);
+int mmu_interval_notifier_insert(struct mmu_interval_notifier *interval_sub,
 				 struct mm_struct *mm, unsigned long start,
 				 unsigned long length,
 				 const struct mmu_interval_notifier_ops *ops);
 int mmu_interval_notifier_insert_locked(
-	struct mmu_interval_notifier *mni, struct mm_struct *mm,
+	struct mmu_interval_notifier *interval_sub, struct mm_struct *mm,
 	unsigned long start, unsigned long length,
 	const struct mmu_interval_notifier_ops *ops);
-void mmu_interval_notifier_remove(struct mmu_interval_notifier *mni);
+void mmu_interval_notifier_remove(struct mmu_interval_notifier *interval_sub);

 /**
  * mmu_interval_set_seq - Save the invalidation sequence
- * @mni - The mni passed to invalidate
+ * @interval_sub - The subscription passed to invalidate
  * @cur_seq - The cur_seq passed to the invalidate() callback
  *
  * This must be called unconditionally from the invalidate callback of a
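With the declarations above, registering and tearing down a subscription looks
roughly like the sketch below; struct my_device and my_driver_ops are the
hypothetical pieces from the earlier sketch, and the insert call assumes the
caller holds a reference on the mm (the implementation notes that a mmget()
must be held):

    /* Subscribe the hypothetical device to invalidations of one VA range. */
    static int my_device_mirror(struct my_device *dev, struct mm_struct *mm,
                                unsigned long start, unsigned long length)
    {
            return mmu_interval_notifier_insert(&dev->notifier, mm,
                                                start, length, &my_driver_ops);
    }

    /* Tear down; must not be called from inside any ops callback. */
    static void my_device_unmirror(struct my_device *dev)
    {
            mmu_interval_notifier_remove(&dev->notifier);
    }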
@@ -316,15 +317,16 @@ void mmu_interval_notifier_remove(struct mmu_interval_notifier *mni);
  * If the caller does not call mmu_interval_read_begin() or
  * mmu_interval_read_retry() then this call is not required.
  */
-static inline void mmu_interval_set_seq(struct mmu_interval_notifier *mni,
-					unsigned long cur_seq)
+static inline void
+mmu_interval_set_seq(struct mmu_interval_notifier *interval_sub,
+		     unsigned long cur_seq)
 {
-	WRITE_ONCE(mni->invalidate_seq, cur_seq);
+	WRITE_ONCE(interval_sub->invalidate_seq, cur_seq);
 }

 /**
  * mmu_interval_read_retry - End a read side critical section against a VA range
- * mni: The range
+ * interval_sub: The subscription
  * seq: The return of the paired mmu_interval_read_begin()
  *
  * This MUST be called under a user provided lock that is also held
@@ -336,15 +338,16 @@ static inline void mmu_interval_set_seq(struct mmu_interval_notifier *mni,
  * Returns true if an invalidation collided with this critical section, and
  * the caller should retry.
  */
-static inline bool mmu_interval_read_retry(struct mmu_interval_notifier *mni,
-					   unsigned long seq)
+static inline bool
+mmu_interval_read_retry(struct mmu_interval_notifier *interval_sub,
+			unsigned long seq)
 {
-	return mni->invalidate_seq != seq;
+	return interval_sub->invalidate_seq != seq;
 }

 /**
  * mmu_interval_check_retry - Test if a collision has occurred
- * mni: The range
+ * interval_sub: The subscription
  * seq: The return of the matching mmu_interval_read_begin()
  *
  * This can be used in the critical section between mmu_interval_read_begin()
@@ -359,11 +362,12 @@ static inline bool mmu_interval_read_retry(struct mmu_interval_notifier *mni,
  * This call can be used as part of loops and other expensive operations to
  * expedite a retry.
  */
-static inline bool mmu_interval_check_retry(struct mmu_interval_notifier *mni,
-					    unsigned long seq)
+static inline bool
+mmu_interval_check_retry(struct mmu_interval_notifier *interval_sub,
+			 unsigned long seq)
 {
 	/* Pairs with the WRITE_ONCE in mmu_interval_set_seq() */
-	return READ_ONCE(mni->invalidate_seq) != seq;
+	return READ_ONCE(interval_sub->invalidate_seq) != seq;
 }

 extern void __mmu_notifier_subscriptions_destroy(struct mm_struct *mm);
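Per the comment above, mmu_interval_check_retry() only peeks at the sequence,
so it can cheaply abort an expensive operation early inside the critical
section; the final decision still comes from mmu_interval_read_retry() under
the user-provided lock. A small hypothetical sketch (my_copy_one_page() is
illustrative only):

    static void my_copy_one_page(unsigned long index);     /* illustrative */

    static int my_copy_pages(struct mmu_interval_notifier *interval_sub,
                             unsigned long seq, unsigned long npages)
    {
            unsigned long i;

            for (i = 0; i < npages; i++) {
                    /* Early-exit hint only, not the final retry check. */
                    if (mmu_interval_check_retry(interval_sub, seq))
                            return -EAGAIN;
                    my_copy_one_page(i);
            }
            return 0;
    }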
@@ -81,7 +81,7 @@ struct mmu_notifier_subscriptions {
  *    seq & 1  # True if a writer exists
  *
  * The later state avoids some expensive work on inv_end in the common case of
- * no mni monitoring the VA.
+ * no mmu_interval_notifier monitoring the VA.
  */
 static bool
 mn_itree_is_invalidating(struct mmu_notifier_subscriptions *subscriptions)
@@ -114,13 +114,13 @@ mn_itree_inv_start_range(struct mmu_notifier_subscriptions *subscriptions,
 }

 static struct mmu_interval_notifier *
-mn_itree_inv_next(struct mmu_interval_notifier *mni,
+mn_itree_inv_next(struct mmu_interval_notifier *interval_sub,
 		  const struct mmu_notifier_range *range)
 {
 	struct interval_tree_node *node;

-	node = interval_tree_iter_next(&mni->interval_tree, range->start,
-				       range->end - 1);
+	node = interval_tree_iter_next(&interval_sub->interval_tree,
+				       range->start, range->end - 1);
 	if (!node)
 		return NULL;
 	return container_of(node, struct mmu_interval_notifier, interval_tree);
@@ -128,7 +128,7 @@ mn_itree_inv_next(struct mmu_interval_notifier *mni,

 static void mn_itree_inv_end(struct mmu_notifier_subscriptions *subscriptions)
 {
-	struct mmu_interval_notifier *mni;
+	struct mmu_interval_notifier *interval_sub;
 	struct hlist_node *next;

 	spin_lock(&subscriptions->lock);
@@ -147,15 +147,16 @@ static void mn_itree_inv_end(struct mmu_notifier_subscriptions *subscriptions)
 	 * they are progressed. This arrangement for tree updates is used to
 	 * avoid using a blocking lock during invalidate_range_start.
 	 */
-	hlist_for_each_entry_safe(mni, next, &subscriptions->deferred_list,
+	hlist_for_each_entry_safe(interval_sub, next,
+				  &subscriptions->deferred_list,
 				  deferred_item) {
-		if (RB_EMPTY_NODE(&mni->interval_tree.rb))
-			interval_tree_insert(&mni->interval_tree,
+		if (RB_EMPTY_NODE(&interval_sub->interval_tree.rb))
+			interval_tree_insert(&interval_sub->interval_tree,
 					     &subscriptions->itree);
 		else
-			interval_tree_remove(&mni->interval_tree,
+			interval_tree_remove(&interval_sub->interval_tree,
 					     &subscriptions->itree);
-		hlist_del(&mni->deferred_item);
+		hlist_del(&interval_sub->deferred_item);
 	}
 	spin_unlock(&subscriptions->lock);

@@ -165,12 +166,12 @@ static void mn_itree_inv_end(struct mmu_notifier_subscriptions *subscriptions)
 /**
  * mmu_interval_read_begin - Begin a read side critical section against a VA
  *                           range
- * mni: The range to use
+ * interval_sub: The interval subscription
  *
  * mmu_iterval_read_begin()/mmu_iterval_read_retry() implement a
- * collision-retry scheme similar to seqcount for the VA range under mni. If
- * the mm invokes invalidation during the critical section then
- * mmu_interval_read_retry() will return true.
+ * collision-retry scheme similar to seqcount for the VA range under
+ * subscription. If the mm invokes invalidation during the critical section
+ * then mmu_interval_read_retry() will return true.
  *
  * This is useful to obtain shadow PTEs where teardown or setup of the SPTEs
  * require a blocking context. The critical region formed by this can sleep,
@@ -181,25 +182,26 @@ static void mn_itree_inv_end(struct mmu_notifier_subscriptions *subscriptions)
  *
  * The return value should be passed to mmu_interval_read_retry().
  */
-unsigned long mmu_interval_read_begin(struct mmu_interval_notifier *mni)
+unsigned long
+mmu_interval_read_begin(struct mmu_interval_notifier *interval_sub)
 {
 	struct mmu_notifier_subscriptions *subscriptions =
-		mni->mm->notifier_subscriptions;
+		interval_sub->mm->notifier_subscriptions;
 	unsigned long seq;
 	bool is_invalidating;

 	/*
-	 * If the mni has a different seq value under the user_lock than we
-	 * started with then it has collided.
+	 * If the subscription has a different seq value under the user_lock
+	 * than we started with then it has collided.
 	 *
-	 * If the mni currently has the same seq value as the subscriptions
-	 * seq, then it is currently between invalidate_start/end and is
-	 * colliding.
+	 * If the subscription currently has the same seq value as the
+	 * subscriptions seq, then it is currently between
+	 * invalidate_start/end and is colliding.
 	 *
 	 * The locking looks broadly like this:
 	 *    mn_tree_invalidate_start():          mmu_interval_read_begin():
 	 *                                         spin_lock
-	 *                                          seq = READ_ONCE(mni->invalidate_seq);
+	 *                                          seq = READ_ONCE(interval_sub->invalidate_seq);
 	 *                                          seq == subs->invalidate_seq
 	 *                                         spin_unlock
 	 *    spin_lock
@@ -208,7 +210,7 @@ unsigned long mmu_interval_read_begin(struct mmu_interval_notifier *mni)
 	 *     op->invalidate_range():
 	 *       user_lock
 	 *        mmu_interval_set_seq()
-	 *         mni->invalidate_seq = seq
+	 *         interval_sub->invalidate_seq = seq
 	 *       user_unlock
 	 *
 	 *          [Required: mmu_interval_read_retry() == true]
@@ -220,7 +222,7 @@ unsigned long mmu_interval_read_begin(struct mmu_interval_notifier *mni)
 	 *
 	 *   user_lock
 	 *    mmu_interval_read_retry():
-	 *     mni->invalidate_seq != seq
+	 *     interval_sub->invalidate_seq != seq
 	 *   user_unlock
 	 *
 	 * Barriers are not needed here as any races here are closed by an
@@ -229,12 +231,12 @@ unsigned long mmu_interval_read_begin(struct mmu_interval_notifier *mni)
 	 */
 	spin_lock(&subscriptions->lock);
 	/* Pairs with the WRITE_ONCE in mmu_interval_set_seq() */
-	seq = READ_ONCE(mni->invalidate_seq);
+	seq = READ_ONCE(interval_sub->invalidate_seq);
 	is_invalidating = seq == subscriptions->invalidate_seq;
 	spin_unlock(&subscriptions->lock);

 	/*
-	 * mni->invalidate_seq must always be set to an odd value via
+	 * interval_sub->invalidate_seq must always be set to an odd value via
 	 * mmu_interval_set_seq() using the provided cur_seq from
 	 * mn_itree_inv_start_range(). This ensures that if seq does wrap we
 	 * will always clear the below sleep in some reasonable time as
@@ -266,13 +268,16 @@ static void mn_itree_release(struct mmu_notifier_subscriptions *subscriptions,
 		.start = 0,
 		.end = ULONG_MAX,
 	};
-	struct mmu_interval_notifier *mni;
+	struct mmu_interval_notifier *interval_sub;
 	unsigned long cur_seq;
 	bool ret;

-	for (mni = mn_itree_inv_start_range(subscriptions, &range, &cur_seq);
-	     mni; mni = mn_itree_inv_next(mni, &range)) {
-		ret = mni->ops->invalidate(mni, &range, cur_seq);
+	for (interval_sub =
+		     mn_itree_inv_start_range(subscriptions, &range, &cur_seq);
+	     interval_sub;
+	     interval_sub = mn_itree_inv_next(interval_sub, &range)) {
+		ret = interval_sub->ops->invalidate(interval_sub, &range,
+						    cur_seq);
 		WARN_ON(!ret);
 	}

@@ -434,14 +439,17 @@ void __mmu_notifier_change_pte(struct mm_struct *mm, unsigned long address,
 static int mn_itree_invalidate(struct mmu_notifier_subscriptions *subscriptions,
 			       const struct mmu_notifier_range *range)
 {
-	struct mmu_interval_notifier *mni;
+	struct mmu_interval_notifier *interval_sub;
 	unsigned long cur_seq;

-	for (mni = mn_itree_inv_start_range(subscriptions, range, &cur_seq);
-	     mni; mni = mn_itree_inv_next(mni, range)) {
+	for (interval_sub =
+		     mn_itree_inv_start_range(subscriptions, range, &cur_seq);
+	     interval_sub;
+	     interval_sub = mn_itree_inv_next(interval_sub, range)) {
 		bool ret;

-		ret = mni->ops->invalidate(mni, range, cur_seq);
+		ret = interval_sub->ops->invalidate(interval_sub, range,
+						    cur_seq);
 		if (!ret) {
 			if (WARN_ON(mmu_notifier_range_blockable(range)))
 				continue;
@@ -878,20 +886,21 @@ out_unlock:
 EXPORT_SYMBOL_GPL(mmu_notifier_put);

 static int __mmu_interval_notifier_insert(
-	struct mmu_interval_notifier *mni, struct mm_struct *mm,
+	struct mmu_interval_notifier *interval_sub, struct mm_struct *mm,
 	struct mmu_notifier_subscriptions *subscriptions, unsigned long start,
 	unsigned long length, const struct mmu_interval_notifier_ops *ops)
 {
-	mni->mm = mm;
-	mni->ops = ops;
-	RB_CLEAR_NODE(&mni->interval_tree.rb);
-	mni->interval_tree.start = start;
+	interval_sub->mm = mm;
+	interval_sub->ops = ops;
+	RB_CLEAR_NODE(&interval_sub->interval_tree.rb);
+	interval_sub->interval_tree.start = start;
 	/*
 	 * Note that the representation of the intervals in the interval tree
 	 * considers the ending point as contained in the interval.
 	 */
 	if (length == 0 ||
-	    check_add_overflow(start, length - 1, &mni->interval_tree.last))
+	    check_add_overflow(start, length - 1,
+			       &interval_sub->interval_tree.last))
 		return -EOVERFLOW;

 	/* Must call with a mmget() held */
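The interval tree treats the end point as contained in the interval, so the
code above stores last = start + length - 1 and rejects a zero length or an
address-space wraparound. A tiny illustration of that check in isolation (the
helper name is made up; check_add_overflow() is the kernel macro used above):

    #include <linux/overflow.h>

    /* Illustrative only: compute the inclusive last byte of a range. */
    static int my_interval_last(unsigned long start, unsigned long length,
                                unsigned long *last)
    {
            /* e.g. start = ULONG_MAX - 1, length = 3 would wrap, so fail. */
            if (length == 0 || check_add_overflow(start, length - 1, last))
                    return -EOVERFLOW;
            return 0;
    }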
@@ -911,30 +920,31 @@ static int __mmu_interval_notifier_insert(
 	 * possibility for live lock, instead defer the add to
 	 * mn_itree_inv_end() so this algorithm is deterministic.
 	 *
-	 * In all cases the value for the mni->invalidate_seq should be
+	 * In all cases the value for the interval_sub->invalidate_seq should be
 	 * odd, see mmu_interval_read_begin()
 	 */
 	spin_lock(&subscriptions->lock);
 	if (subscriptions->active_invalidate_ranges) {
 		if (mn_itree_is_invalidating(subscriptions))
-			hlist_add_head(&mni->deferred_item,
+			hlist_add_head(&interval_sub->deferred_item,
 				       &subscriptions->deferred_list);
 		else {
 			subscriptions->invalidate_seq |= 1;
-			interval_tree_insert(&mni->interval_tree,
+			interval_tree_insert(&interval_sub->interval_tree,
 					     &subscriptions->itree);
 		}
-		mni->invalidate_seq = subscriptions->invalidate_seq;
+		interval_sub->invalidate_seq = subscriptions->invalidate_seq;
 	} else {
 		WARN_ON(mn_itree_is_invalidating(subscriptions));
 		/*
-		 * The starting seq for a mni not under invalidation should be
-		 * odd, not equal to the current invalidate_seq and
+		 * The starting seq for a subscription not under invalidation
+		 * should be odd, not equal to the current invalidate_seq and
 		 * invalidate_seq should not 'wrap' to the new seq any time
 		 * soon.
 		 */
-		mni->invalidate_seq = subscriptions->invalidate_seq - 1;
-		interval_tree_insert(&mni->interval_tree,
+		interval_sub->invalidate_seq =
+			subscriptions->invalidate_seq - 1;
+		interval_tree_insert(&interval_sub->interval_tree,
 				     &subscriptions->itree);
 	}
 	spin_unlock(&subscriptions->lock);
@@ -943,7 +953,7 @@ static int __mmu_interval_notifier_insert(

 /**
  * mmu_interval_notifier_insert - Insert an interval notifier
- * @mni: Interval notifier to register
+ * @interval_sub: Interval subscription to register
  * @start: Starting virtual address to monitor
  * @length: Length of the range to monitor
  * @mm : mm_struct to attach to
@@ -956,7 +966,7 @@ static int __mmu_interval_notifier_insert(
  * The caller must use the normal interval notifier read flow via
  * mmu_interval_read_begin() to establish SPTEs for this range.
  */
-int mmu_interval_notifier_insert(struct mmu_interval_notifier *mni,
+int mmu_interval_notifier_insert(struct mmu_interval_notifier *interval_sub,
 				 struct mm_struct *mm, unsigned long start,
 				 unsigned long length,
 				 const struct mmu_interval_notifier_ops *ops)
@@ -973,13 +983,13 @@ int mmu_interval_notifier_insert(struct mmu_interval_notifier *mni,
 			return ret;
 		subscriptions = mm->notifier_subscriptions;
 	}
-	return __mmu_interval_notifier_insert(mni, mm, subscriptions, start,
-					      length, ops);
+	return __mmu_interval_notifier_insert(interval_sub, mm, subscriptions,
+					      start, length, ops);
 }
 EXPORT_SYMBOL_GPL(mmu_interval_notifier_insert);

 int mmu_interval_notifier_insert_locked(
-	struct mmu_interval_notifier *mni, struct mm_struct *mm,
+	struct mmu_interval_notifier *interval_sub, struct mm_struct *mm,
 	unsigned long start, unsigned long length,
 	const struct mmu_interval_notifier_ops *ops)
 {
@@ -995,14 +1005,14 @@ int mmu_interval_notifier_insert_locked(
 			return ret;
 		subscriptions = mm->notifier_subscriptions;
 	}
-	return __mmu_interval_notifier_insert(mni, mm, subscriptions, start,
-					      length, ops);
+	return __mmu_interval_notifier_insert(interval_sub, mm, subscriptions,
+					      start, length, ops);
 }
 EXPORT_SYMBOL_GPL(mmu_interval_notifier_insert_locked);

 /**
  * mmu_interval_notifier_remove - Remove a interval notifier
- * @mni: Interval notifier to unregister
+ * @interval_sub: Interval subscription to unregister
  *
  * This function must be paired with mmu_interval_notifier_insert(). It cannot
  * be called from any ops callback.
@@ -1010,9 +1020,9 @@ EXPORT_SYMBOL_GPL(mmu_interval_notifier_insert_locked);
  * Once this returns ops callbacks are no longer running on other CPUs and
  * will not be called in future.
  */
-void mmu_interval_notifier_remove(struct mmu_interval_notifier *mni)
+void mmu_interval_notifier_remove(struct mmu_interval_notifier *interval_sub)
 {
-	struct mm_struct *mm = mni->mm;
+	struct mm_struct *mm = interval_sub->mm;
 	struct mmu_notifier_subscriptions *subscriptions =
 		mm->notifier_subscriptions;
 	unsigned long seq = 0;
@@ -1025,16 +1035,16 @@ void mmu_interval_notifier_remove(struct mmu_interval_notifier *mni)
 		 * remove is being called after insert put this on the
 		 * deferred list, but before the deferred list was processed.
 		 */
-		if (RB_EMPTY_NODE(&mni->interval_tree.rb)) {
-			hlist_del(&mni->deferred_item);
+		if (RB_EMPTY_NODE(&interval_sub->interval_tree.rb)) {
+			hlist_del(&interval_sub->deferred_item);
 		} else {
-			hlist_add_head(&mni->deferred_item,
+			hlist_add_head(&interval_sub->deferred_item,
 				       &subscriptions->deferred_list);
 			seq = subscriptions->invalidate_seq;
 		}
 	} else {
-		WARN_ON(RB_EMPTY_NODE(&mni->interval_tree.rb));
-		interval_tree_remove(&mni->interval_tree,
+		WARN_ON(RB_EMPTY_NODE(&interval_sub->interval_tree.rb));
+		interval_tree_remove(&interval_sub->interval_tree,
 				     &subscriptions->itree);
 	}
 	spin_unlock(&subscriptions->lock);