commit cba09e3ed0

Merge tag 'perf-urgent-2025-12-12' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull perf event fixes from Ingo Molnar:

 - Fix NULL pointer dereference crash in the Intel PMU driver

 - Fix missing read event generation on task exit

 - Fix AMD uncore driver init error handling

 - Fix whitespace noise

* tag 'perf-urgent-2025-12-12' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  perf/x86/intel: Fix NULL event dereference crash in handle_pmi_common()
  perf/core: Fix missing read event generation on task exit
  perf/x86/amd/uncore: Fix the return value of amd_uncore_df_event_init() on error
  perf/uprobes: Remove <space><Tab> whitespace noise
diff --git a/arch/x86/events/amd/uncore.c b/arch/x86/events/amd/uncore.c
@@ -656,14 +656,11 @@ static int amd_uncore_df_event_init(struct perf_event *event)
 	struct hw_perf_event *hwc = &event->hw;
 	int ret = amd_uncore_event_init(event);
 
-	if (ret || pmu_version < 2)
-		return ret;
-
 	hwc->config = event->attr.config &
 		      (pmu_version >= 2 ? AMD64_PERFMON_V2_RAW_EVENT_MASK_NB :
 					  AMD64_RAW_EVENT_MASK_NB);
 
-	return 0;
+	return ret;
 }
 
 static int amd_uncore_df_add(struct perf_event *event, int flags)
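The headline change in this hunk is the tail of amd_uncore_df_event_init(): it used to return a hard-coded 0, so the error code computed by amd_uncore_event_init() was not necessarily what the caller saw. As a minimal, self-contained illustration of the error-propagation pattern being fixed (all names are hypothetical stand-ins, not the kernel's code):

#include <errno.h>
#include <stdio.h>

/* Hypothetical stand-in for amd_uncore_event_init(): may fail. */
static int sub_init(int fail)
{
	return fail ? -EINVAL : 0;
}

/* Buggy shape: the tail returns hard-coded success. */
static int event_init_buggy(int fail)
{
	int ret = sub_init(fail);

	/* ... set up counter configuration here ... */
	return 0;		/* BUG: the error in 'ret' is dropped */
}

/* Fixed shape: the helper's error code reaches the caller. */
static int event_init_fixed(int fail)
{
	int ret = sub_init(fail);

	/* ... set up counter configuration here ... */
	return ret;		/* 0 on success, -EINVAL on failure */
}

int main(void)
{
	printf("buggy: %d, fixed: %d\n",
	       event_init_buggy(1), event_init_fixed(1));
	return 0;
}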
diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
@@ -3378,6 +3378,9 @@ static int handle_pmi_common(struct pt_regs *regs, u64 status)
 		if (!test_bit(bit, cpuc->active_mask))
 			continue;
+		/* Event may have already been cleared: */
+		if (!event)
+			continue;
 
 		/*
 		 * There may be unprocessed PEBS records in the PEBS buffer,
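The three added lines close a race in the PMI handler: an overflow bit can still be set in the hardware status word after the corresponding cpuc->events[bit] slot has already been cleared, so the slot must be checked for NULL before it is dereferenced. A stand-alone sketch of that guard, with simplified stand-ins for the per-CPU bookkeeping (all names here are illustrative, not the kernel's structures):

#include <stdint.h>
#include <stdio.h>

#define NUM_COUNTERS 8

struct sample_event {
	uint64_t count;
};

/* Simplified per-CPU state: one slot per hardware counter. */
struct cpu_state {
	struct sample_event *events[NUM_COUNTERS];	/* slots may be NULL */
	uint64_t active_mask;				/* counters scheduled in */
};

static int handle_overflow(struct cpu_state *cpuc, uint64_t status)
{
	int handled = 0;

	for (int bit = 0; bit < NUM_COUNTERS; bit++) {
		struct sample_event *event = cpuc->events[bit];

		if (!(status & (1ULL << bit)))
			continue;
		if (!(cpuc->active_mask & (1ULL << bit)))
			continue;
		/* Event may have already been cleared: */
		if (!event)
			continue;

		event->count++;		/* safe: event is non-NULL here */
		handled++;
	}

	return handled;
}

int main(void)
{
	struct sample_event e0 = { 0 };
	/* Slot 0 populated; slot 1 active but already cleared (NULL). */
	struct cpu_state cpuc = { .events = { &e0 }, .active_mask = 0x3 };

	/* Both counters report overflow; only the live slot is handled. */
	printf("handled %d overflow(s)\n", handle_overflow(&cpuc, 0x3));
	return 0;
}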
diff --git a/kernel/events/core.c b/kernel/events/core.c
@@ -2317,8 +2317,6 @@ out:
 		perf_event__header_size(leader);
 }
 
-static void sync_child_event(struct perf_event *child_event);
-
 static void perf_child_detach(struct perf_event *event)
 {
 	struct perf_event *parent_event = event->parent;
@@ -2337,7 +2335,6 @@ static void perf_child_detach(struct perf_event *event)
 	lockdep_assert_held(&parent_event->child_mutex);
 	 */
 
-	sync_child_event(event);
 	list_del_init(&event->child_list);
 }
@@ -4588,6 +4585,7 @@ out:
 static void perf_remove_from_owner(struct perf_event *event);
 static void perf_event_exit_event(struct perf_event *event,
 				  struct perf_event_context *ctx,
+				  struct task_struct *task,
 				  bool revoke);
 
 /*
@@ -4615,7 +4613,7 @@ static void perf_event_remove_on_exec(struct perf_event_context *ctx)
 
 		modified = true;
 
-		perf_event_exit_event(event, ctx, false);
+		perf_event_exit_event(event, ctx, ctx->task, false);
 	}
 
 	raw_spin_lock_irqsave(&ctx->lock, flags);
@@ -12518,7 +12516,7 @@ static void __pmu_detach_event(struct pmu *pmu, struct perf_event *event,
 	/*
 	 * De-schedule the event and mark it REVOKED.
 	 */
-	perf_event_exit_event(event, ctx, true);
+	perf_event_exit_event(event, ctx, ctx->task, true);
 
 	/*
 	 * All _free_event() bits that rely on event->pmu:
@@ -14075,14 +14073,13 @@ void perf_pmu_migrate_context(struct pmu *pmu, int src_cpu, int dst_cpu)
 }
 EXPORT_SYMBOL_GPL(perf_pmu_migrate_context);
 
-static void sync_child_event(struct perf_event *child_event)
+static void sync_child_event(struct perf_event *child_event,
+			     struct task_struct *task)
 {
 	struct perf_event *parent_event = child_event->parent;
 	u64 child_val;
 
 	if (child_event->attr.inherit_stat) {
-		struct task_struct *task = child_event->ctx->task;
-
 		if (task && task != TASK_TOMBSTONE)
 			perf_event_read_event(child_event, task);
 	}
@@ -14101,7 +14098,9 @@ static void sync_child_event(struct perf_event *child_event)
 
 static void
 perf_event_exit_event(struct perf_event *event,
-		      struct perf_event_context *ctx, bool revoke)
+		      struct perf_event_context *ctx,
+		      struct task_struct *task,
+		      bool revoke)
 {
 	struct perf_event *parent_event = event->parent;
 	unsigned long detach_flags = DETACH_EXIT;
@@ -14124,6 +14123,9 @@ perf_event_exit_event(struct perf_event *event,
 		mutex_lock(&parent_event->child_mutex);
 		/* PERF_ATTACH_ITRACE might be set concurrently */
 		attach_state = READ_ONCE(event->attach_state);
+
+		if (attach_state & PERF_ATTACH_CHILD)
+			sync_child_event(event, task);
 	}
 
 	if (revoke)
@@ -14215,7 +14217,7 @@ static void perf_event_exit_task_context(struct task_struct *task, bool exit)
 	perf_event_task(task, ctx, 0);
 
 	list_for_each_entry_safe(child_event, next, &ctx->event_list, event_entry)
-		perf_event_exit_event(child_event, ctx, false);
+		perf_event_exit_event(child_event, ctx, exit ? task : NULL, false);
 
 	mutex_unlock(&ctx->mutex);
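Taken together, the kernel/events/core.c hunks above thread the exiting task through perf_event_exit_event() into sync_child_event(). Before the fix, sync_child_event() re-derived the task from child_event->ctx->task, which is already TASK_TOMBSTONE while the task is exiting, so the final read event for inherit_stat counters was silently skipped. A minimal sketch of the before/after shape (the types and names below are illustrative stand-ins, and TASK_TOMBSTONE is redefined locally to mirror the kernel's sentinel):

#include <stdio.h>

struct task { const char *comm; };
struct context { struct task *task; };
struct child_event { struct context *ctx; };

#define TASK_TOMBSTONE ((struct task *)-1L)

/* Illustrative stand-in for perf_event_read_event(). */
static void emit_read_event(struct child_event *ev, struct task *t)
{
	(void)ev;
	printf("read event emitted for %s\n", t->comm);
}

/* Old shape: derives the task from the context. During exit the
 * context's task pointer is already tombstoned, so nothing is emitted. */
static void sync_child_event_old(struct child_event *ev)
{
	struct task *task = ev->ctx->task;

	if (task && task != TASK_TOMBSTONE)
		emit_read_event(ev, task);
}

/* Fixed shape: the caller passes the exiting task explicitly, so the
 * read event can still be generated. */
static void sync_child_event_new(struct child_event *ev, struct task *task)
{
	if (task && task != TASK_TOMBSTONE)
		emit_read_event(ev, task);
}

int main(void)
{
	struct task exiting = { .comm = "worker" };
	struct context ctx = { .task = TASK_TOMBSTONE };	/* exit in progress */
	struct child_event ev = { .ctx = &ctx };

	sync_child_event_old(&ev);		/* silently skipped */
	sync_child_event_new(&ev, &exiting);	/* read event emitted */
	return 0;
}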
(The following change is whitespace-only: each -/+ pair differs solely in a stray space before a tab, which is barely visible as rendered.)

diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
@@ -79,7 +79,7 @@ struct uprobe {
 	 * The generic code assumes that it has two members of unknown type
 	 * owned by the arch-specific code:
 	 *
-	 * insn - 	copy_insn() saves the original instruction here for
+	 * insn -	copy_insn() saves the original instruction here for
 	 *		arch_uprobe_analyze_insn().
 	 *
 	 * ixol -	potentially modified instruction to execute out of
@@ -107,8 +107,8 @@ static LIST_HEAD(delayed_uprobe_list);
  * allocated.
  */
 struct xol_area {
-	wait_queue_head_t 	wq;		/* if all slots are busy */
-	unsigned long 		*bitmap;	/* 0 = free slot */
+	wait_queue_head_t	wq;		/* if all slots are busy */
+	unsigned long		*bitmap;	/* 0 = free slot */
 
 	struct page		*page;
 	/*
@@ -116,7 +116,7 @@ struct xol_area {
 	 * itself. The probed process or a naughty kernel module could make
 	 * the vma go away, and we must handle that reasonably gracefully.
 	 */
-	unsigned long 		vaddr;		/* Page(s) of instruction slots */
+	unsigned long		vaddr;		/* Page(s) of instruction slots */
 };
 
 static void uprobe_warn(struct task_struct *t, const char *msg)