#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/idr.h>
#include <linux/file.h>
#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/hash.h>
#include <linux/tick.h>
#include <linux/sysfs.h>
#include <linux/dcache.h>
#include <linux/percpu.h>
#include <linux/ptrace.h>
#include <linux/reboot.h>
#include <linux/vmstat.h>
#include <linux/device.h>
#include <linux/export.h>
#include <linux/vmalloc.h>
#include <linux/hardirq.h>
#include <linux/hugetlb.h>
#include <linux/rculist.h>
#include <linux/uaccess.h>
#include <linux/syscalls.h>
#include <linux/anon_inodes.h>
#include <linux/kernel_stat.h>
#include <linux/cgroup.h>
#include <linux/perf_event.h>
#include <linux/trace_events.h>
#include <linux/hw_breakpoint.h>
#include <linux/mm_types.h>
#include <linux/module.h>
#include <linux/mman.h>
#include <linux/compat.h>
#include <linux/bpf.h>
#include <linux/filter.h>
#include <linux/namei.h>
#include <linux/parser.h>
#include <linux/sched/clock.h>
#include <linux/sched/mm.h>
#include <linux/proc_ns.h>
#include <linux/mount.h>
#include <linux/min_heap.h>
#include <linux/highmem.h>
#include <linux/pgtable.h>
#include <linux/buildid.h>
#include <linux/task_work.h>
#include "internal.h"
#include <asm/irq_regs.h>
typedef int (*remote_function_f)(void *);
struct remote_function_call {
struct task_struct *p;
remote_function_f func;
void *info;
int ret;
};
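/*
 * Runs on the CPU that @tfc->p is (supposed to be) running on.  If the task
 * has moved to another CPU, tfc->ret is left untouched (-EAGAIN from
 * task_function_call()); if it is no longer the current task, tfc->ret is
 * set to -ESRCH; otherwise tfc->ret carries the return value of tfc->func().
 */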
static void remote_function(void *data)
{
struct remote_function_call *tfc = data;
struct task_struct *p = tfc->p;
if (p) {
if (task_cpu(p) != smp_processor_id())
return;
tfc->ret = -ESRCH;
if (p != current)
return;
}
tfc->ret = tfc->func(tfc->info);
}
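/**
 * task_function_call - call a function on the cpu on which a task runs
 * @p:		the task to evaluate
 * @func:	the function to be called
 * @info:	the function call argument
 *
 * Calls @func on the CPU where @p is currently running, retrying (with
 * cond_resched() in between) for as long as the result is -EAGAIN, e.g.
 * because the task migrated between the CPU lookup and the IPI.
 *
 * Returns @func's return value, or -ESRCH when the task isn't running.
 */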
static int
task_function_call(struct task_struct *p, remote_function_f func, void *info)
{
struct remote_function_call data = {
.p = p,
.func = func,
.info = info,
.ret = -EAGAIN,
};
int ret;
for (;;) {
ret = smp_call_function_single(task_cpu(p), remote_function,
&data, 1);
if (!ret)
ret = data.ret;
if (ret != -EAGAIN)
break;
cond_resched();
}
return ret;
}
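/**
 * cpu_function_call - call a function on a given cpu
 * @cpu:	target cpu to run @func on
 * @func:	the function to be called
 * @info:	the function call argument
 *
 * Calls @func on @cpu with interrupts disabled.
 *
 * Returns @func's return value, or -ENXIO when the cpu is offline.
 */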
static int cpu_function_call(int cpu, remote_function_f func, void *info)
{
struct remote_function_call data = {
.p = NULL,
.func = func,
.info = info,
.ret = -ENXIO,
};
smp_call_function_single(cpu, remote_function, &data, 1);
return data.ret;
}
static void perf_ctx_lock(struct perf_cpu_context *cpuctx,
struct perf_event_context *ctx)
{
raw_spin_lock(&cpuctx->ctx.lock);
if (ctx)
raw_spin_lock(&ctx->lock);
}
static void perf_ctx_unlock(struct perf_cpu_context *cpuctx,
struct perf_event_context *ctx)
{
if (ctx)
raw_spin_unlock(&ctx->lock);
raw_spin_unlock(&cpuctx->ctx.lock);
}
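/*
 * ctx->task is set to TASK_TOMBSTONE once the owning task is gone and the
 * context is being torn down.  event->owner uses the same sentinel to mark
 * in-kernel (API created) events, which is what is_kernel_event() tests.
 */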
#define TASK_TOMBSTONE ((void *)-1L)
static bool is_kernel_event(struct perf_event *event)
{
return READ_ONCE(event->owner) == TASK_TOMBSTONE;
}
static DEFINE_PER_CPU(struct perf_cpu_context, perf_cpu_context);
struct perf_event_context *perf_cpu_task_ctx(void)
{
lockdep_assert_irqs_disabled();
return this_cpu_ptr(&perf_cpu_context)->task_ctx;
}
typedef void (*event_f)(struct perf_event *, struct perf_cpu_context *,
struct perf_event_context *, void *);
struct event_function_struct {
struct perf_event *event;
event_f func;
void *data;
};
static int event_function(void *info)
{
struct event_function_struct *efs = info;
struct perf_event *event = efs->event;
struct perf_event_context *ctx = event->ctx;
struct perf_cpu_context *cpuctx = this_cpu_ptr(&perf_cpu_context);
struct perf_event_context *task_ctx = cpuctx->task_ctx;
int ret = 0;
lockdep_assert_irqs_disabled();
perf_ctx_lock(cpuctx, task_ctx);
if (ctx->task) {
if (ctx->task != current) {
ret = -ESRCH;
goto unlock;
}
WARN_ON_ONCE(!ctx->is_active);
WARN_ON_ONCE(task_ctx != ctx);
} else {
WARN_ON_ONCE(&cpuctx->ctx != ctx);
}
efs->func(event, cpuctx, ctx, efs->data);
unlock:
perf_ctx_unlock(cpuctx, task_ctx);
return ret;
}
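/*
 * Call @func for @event on the CPU where its context is active, via the
 * event_function() IPI.  If the task context is not active, @func is invoked
 * directly under ctx->lock instead; the IPI is retried while the context
 * keeps racing into activity.
 */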
static void event_function_call(struct perf_event *event, event_f func, void *data)
{
struct perf_event_context *ctx = event->ctx;
struct task_struct *task = READ_ONCE(ctx->task);
struct event_function_struct efs = {
.event = event,
.func = func,
.data = data,
};
if (!event->parent) {
lockdep_assert_held(&ctx->mutex);
}
if (!task) {
cpu_function_call(event->cpu, event_function, &efs);
return;
}
if (task == TASK_TOMBSTONE)
return;
again:
if (!task_function_call(task, event_function, &efs))
return;
raw_spin_lock_irq(&ctx->lock);
task = ctx->task;
if (task == TASK_TOMBSTONE) {
raw_spin_unlock_irq(&ctx->lock);
return;
}
if (ctx->is_active) {
raw_spin_unlock_irq(&ctx->lock);
goto again;
}
func(event, NULL, ctx, data);
raw_spin_unlock_irq(&ctx->lock);
}
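/*
 * Similar to event_function_call() + event_function(), but hard assumes IRQs
 * are already disabled and we're on the right CPU.
 */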
static void event_function_local(struct perf_event *event, event_f func, void *data)
{
struct perf_event_context *ctx = event->ctx;
struct perf_cpu_context *cpuctx = this_cpu_ptr(&perf_cpu_context);
struct task_struct *task = READ_ONCE(ctx->task);
struct perf_event_context *task_ctx = NULL;
lockdep_assert_irqs_disabled();
if (task) {
if (task == TASK_TOMBSTONE)
return;
task_ctx = ctx;
}
perf_ctx_lock(cpuctx, task_ctx);
task = ctx->task;
if (task == TASK_TOMBSTONE)
goto unlock;
if (task) {
if (ctx->is_active) {
if (WARN_ON_ONCE(task != current))
goto unlock;
if (WARN_ON_ONCE(cpuctx->task_ctx != ctx))
goto unlock;
}
} else {
WARN_ON_ONCE(&cpuctx->ctx != ctx);
}
func(event, cpuctx, ctx, data);
unlock:
perf_ctx_unlock(cpuctx, task_ctx);
}
#define PERF_FLAG_ALL (PERF_FLAG_FD_NO_GROUP |\
PERF_FLAG_FD_OUTPUT |\
PERF_FLAG_PID_CGROUP |\
PERF_FLAG_FD_CLOEXEC)
#define PERF_SAMPLE_BRANCH_PERM_PLM \
(PERF_SAMPLE_BRANCH_KERNEL |\
PERF_SAMPLE_BRANCH_HV)
enum event_type_t {
EVENT_FLEXIBLE = 0x1,
EVENT_PINNED = 0x2,
EVENT_TIME = 0x4,
EVENT_CPU = 0x8,
EVENT_ALL = EVENT_FLEXIBLE | EVENT_PINNED,
};
static void perf_sched_delayed(struct work_struct *work);
DEFINE_STATIC_KEY_FALSE(perf_sched_events);
static DECLARE_DELAYED_WORK(perf_sched_work, perf_sched_delayed);
static DEFINE_MUTEX(perf_sched_mutex);
static atomic_t perf_sched_count;
static DEFINE_PER_CPU(struct pmu_event_list, pmu_sb_events);
static atomic_t nr_mmap_events __read_mostly;
static atomic_t nr_comm_events __read_mostly;
static atomic_t nr_namespaces_events __read_mostly;
static atomic_t nr_task_events __read_mostly;
static atomic_t nr_freq_events __read_mostly;
static atomic_t nr_switch_events __read_mostly;
static atomic_t nr_ksymbol_events __read_mostly;
static atomic_t nr_bpf_events __read_mostly;
static atomic_t nr_cgroup_events __read_mostly;
static atomic_t nr_text_poke_events __read_mostly;
static atomic_t nr_build_id_events __read_mostly;
static LIST_HEAD(pmus);
static DEFINE_MUTEX(pmus_lock);
static struct srcu_struct pmus_srcu;
static cpumask_var_t perf_online_mask;
static struct kmem_cache *perf_event_cache;
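/*
 * perf event paranoia level:
 *  -1 - not paranoid at all
 *   0 - disallow raw tracepoint access for unpriv
 *   1 - disallow cpu events for unpriv
 *   2 - disallow kernel profiling for unpriv
 */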
int sysctl_perf_event_paranoid __read_mostly = 2;
int sysctl_perf_event_mlock __read_mostly = 512 + (PAGE_SIZE / 1024);
#define DEFAULT_MAX_SAMPLE_RATE 100000
#define DEFAULT_SAMPLE_PERIOD_NS (NSEC_PER_SEC / DEFAULT_MAX_SAMPLE_RATE)
#define DEFAULT_CPU_TIME_MAX_PERCENT 25
int sysctl_perf_event_sample_rate __read_mostly = DEFAULT_MAX_SAMPLE_RATE;
static int max_samples_per_tick __read_mostly = DIV_ROUND_UP(DEFAULT_MAX_SAMPLE_RATE, HZ);
static int perf_sample_period_ns __read_mostly = DEFAULT_SAMPLE_PERIOD_NS;
static int perf_sample_allowed_ns __read_mostly =
DEFAULT_SAMPLE_PERIOD_NS * DEFAULT_CPU_TIME_MAX_PERCENT / 100;
static void update_perf_cpu_limits(void)
{
u64 tmp = perf_sample_period_ns;
tmp *= sysctl_perf_cpu_time_max_percent;
tmp = div_u64(tmp, 100);
if (!tmp)
tmp = 1;
WRITE_ONCE(perf_sample_allowed_ns, tmp);
}
static bool perf_rotate_context(struct perf_cpu_pmu_context *cpc);
int perf_proc_update_handler(struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
int ret;
int perf_cpu = sysctl_perf_cpu_time_max_percent;
if (write && (perf_cpu == 100 || perf_cpu == 0))
return -EINVAL;
ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
if (ret || !write)
return ret;
max_samples_per_tick = DIV_ROUND_UP(sysctl_perf_event_sample_rate, HZ);
perf_sample_period_ns = NSEC_PER_SEC / sysctl_perf_event_sample_rate;
update_perf_cpu_limits();
return 0;
}
int sysctl_perf_cpu_time_max_percent __read_mostly = DEFAULT_CPU_TIME_MAX_PERCENT;
int perf_cpu_time_max_percent_handler(struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
if (ret || !write)
return ret;
if (sysctl_perf_cpu_time_max_percent == 100 ||
sysctl_perf_cpu_time_max_percent == 0) {
printk(KERN_WARNING
"perf: Dynamic interrupt throttling disabled, can hang your system!\n");
WRITE_ONCE(perf_sample_allowed_ns, 0);
} else {
update_perf_cpu_limits();
}
return 0;
}
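/*
 * perf samples are done in some very critical code paths (NMIs).
 * If they take too much CPU time, the system can lock up and not
 * get any real work done.  This will drop the sample rate when
 * we detect that events are taking too long.
 */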
#define NR_ACCUMULATED_SAMPLES 128
static DEFINE_PER_CPU(u64, running_sample_length);
static u64 __report_avg;
static u64 __report_allowed;
static void perf_duration_warn(struct irq_work *w)
{
printk_ratelimited(KERN_INFO
"perf: interrupt took too long (%lld > %lld), lowering "
"kernel.perf_event_max_sample_rate to %d\n",
__report_avg, __report_allowed,
sysctl_perf_event_sample_rate);
}
static DEFINE_IRQ_WORK(perf_duration_work, perf_duration_warn);
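/*
 * Maintain a per-CPU decaying average of recent sample (interrupt handler)
 * lengths; once that average exceeds perf_sample_allowed_ns, raise the
 * allowed duration, lower max_samples_per_tick and the sample-rate sysctl,
 * and report it (via irq_work, or early_printk() when the irq_work is
 * already queued).
 */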
void perf_sample_event_took(u64 sample_len_ns)
{
u64 max_len = READ_ONCE(perf_sample_allowed_ns);
u64 running_len;
u64 avg_len;
u32 max;
if (max_len == 0)
return;
running_len = __this_cpu_read(running_sample_length);
running_len -= running_len/NR_ACCUMULATED_SAMPLES;
running_len += sample_len_ns;
__this_cpu_write(running_sample_length, running_len);
avg_len = running_len/NR_ACCUMULATED_SAMPLES;
if (avg_len <= max_len)
return;
__report_avg = avg_len;
__report_allowed = max_len;
avg_len += avg_len / 4;
max = (TICK_NSEC / 100) * sysctl_perf_cpu_time_max_percent;
if (avg_len < max)
max /= (u32)avg_len;
else
max = 1;
WRITE_ONCE(perf_sample_allowed_ns, avg_len);
WRITE_ONCE(max_samples_per_tick, max);
sysctl_perf_event_sample_rate = max * HZ;
perf_sample_period_ns = NSEC_PER_SEC / sysctl_perf_event_sample_rate;
if (!irq_work_queue(&perf_duration_work)) {
early_printk("perf: interrupt took too long (%lld > %lld), lowering "
"kernel.perf_event_max_sample_rate to %d\n",
__report_avg, __report_allowed,
sysctl_perf_event_sample_rate);
}
}
static atomic64_t perf_event_id;
static void update_context_time(struct perf_event_context *ctx);
static u64 perf_event_time(struct perf_event *event);
void __weak perf_event_print_debug(void) { }
static inline u64 perf_clock(void)
{
return local_clock();
}
static inline u64 perf_event_clock(struct perf_event *event)
{
return event->clock();
}
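/*
 * State based event timekeeping.
 *
 * An event's effective state is its own state, capped by its group leader's
 * state while the leader is OFF/ERROR.  Time accumulates into
 * total_time_enabled while the effective state is >= INACTIVE and into
 * total_time_running while it is ACTIVE; event->tstamp marks the last state
 * change.
 */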
static __always_inline enum perf_event_state
__perf_effective_state(struct perf_event *event)
{
struct perf_event *leader = event->group_leader;
if (leader->state <= PERF_EVENT_STATE_OFF)
return leader->state;
return event->state;
}
static __always_inline void
__perf_update_times(struct perf_event *event, u64 now, u64 *enabled, u64 *running)
{
enum perf_event_state state = __perf_effective_state(event);
u64 delta = now - event->tstamp;
*enabled = event->total_time_enabled;
if (state >= PERF_EVENT_STATE_INACTIVE)
*enabled += delta;
*running = event->total_time_running;
if (state >= PERF_EVENT_STATE_ACTIVE)
*running += delta;
}
static void perf_event_update_time(struct perf_event *event)
{
u64 now = perf_event_time(event);
__perf_update_times(event, now, &event->total_time_enabled,
&event->total_time_running);
event->tstamp = now;
}
static void perf_event_update_sibling_time(struct perf_event *leader)
{
struct perf_event *sibling;
for_each_sibling_event(sibling, leader)
perf_event_update_time(sibling);
}
static void
perf_event_set_state(struct perf_event *event, enum perf_event_state state)
{
if (event->state == state)
return;
perf_event_update_time(event);
if ((event->state < 0) ^ (state < 0))
perf_event_update_sibling_time(event);
WRITE_ONCE(event->state, state);
}
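/*
 * UP store-release / load-acquire: barrier() + {WRITE,READ}_ONCE().
 */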
#define __store_release(ptr, val) \
do { \
barrier(); \
WRITE_ONCE(*(ptr), (val)); \
} while (0)
#define __load_acquire(ptr) \
({ \
__unqual_scalar_typeof(*(ptr)) ___p = READ_ONCE(*(ptr)); \
barrier(); \
___p; \
})
static void perf_ctx_disable(struct perf_event_context *ctx)
{
struct perf_event_pmu_context *pmu_ctx;
list_for_each_entry(pmu_ctx, &ctx->pmu_ctx_list, pmu_ctx_entry)
perf_pmu_disable(pmu_ctx->pmu);
}
static void perf_ctx_enable(struct perf_event_context *ctx)
{
struct perf_event_pmu_context *pmu_ctx;
list_for_each_entry(pmu_ctx, &ctx->pmu_ctx_list, pmu_ctx_entry)
perf_pmu_enable(pmu_ctx->pmu);
}
static void ctx_sched_out(struct perf_event_context *ctx, enum event_type_t event_type);
static void ctx_sched_in(struct perf_event_context *ctx, enum event_type_t event_type);
#ifdef CONFIG_CGROUP_PERF
static inline bool
perf_cgroup_match(struct perf_event *event)
{
struct perf_cpu_context *cpuctx = this_cpu_ptr(&perf_cpu_context);
if (!event->cgrp)
return true;
if (!cpuctx->cgrp)
return false;
return cgroup_is_descendant(cpuctx->cgrp->css.cgroup,
event->cgrp->css.cgroup);
}
static inline void perf_detach_cgroup(struct perf_event *event)
{
css_put(&event->cgrp->css);
event->cgrp = NULL;
}
static inline int is_cgroup_event(struct perf_event *event)
{
return event->cgrp != NULL;
}
static inline u64 perf_cgroup_event_time(struct perf_event *event)
{
struct perf_cgroup_info *t;
t = per_cpu_ptr(event->cgrp->info, event->cpu);
return t->time;
}
static inline u64 perf_cgroup_event_time_now(struct perf_event *event, u64 now)
{
struct perf_cgroup_info *t;
t = per_cpu_ptr(event->cgrp->info, event->cpu);
if (!__load_acquire(&t->active))
return t->time;
now += READ_ONCE(t->timeoffset);
return now;
}
static inline void __update_cgrp_time(struct perf_cgroup_info *info, u64 now, bool adv)
{
if (adv)
info->time += now - info->timestamp;
info->timestamp = now;
WRITE_ONCE(info->timeoffset, info->time - info->timestamp);
}
static inline void update_cgrp_time_from_cpuctx(struct perf_cpu_context *cpuctx, bool final)
{
struct perf_cgroup *cgrp = cpuctx->cgrp;
struct cgroup_subsys_state *css;
struct perf_cgroup_info *info;
if (cgrp) {
u64 now = perf_clock();
for (css = &cgrp->css; css; css = css->parent) {
cgrp = container_of(css, struct perf_cgroup, css);
info = this_cpu_ptr(cgrp->info);
__update_cgrp_time(info, now, true);
if (final)
__store_release(&info->active, 0);
}
}
}
static inline void update_cgrp_time_from_event(struct perf_event *event)
{
struct perf_cgroup_info *info;
if (!is_cgroup_event(event))
return;
info = this_cpu_ptr(event->cgrp->info);
if (info->active)
__update_cgrp_time(info, perf_clock(), true);
}
static inline void
perf_cgroup_set_timestamp(struct perf_cpu_context *cpuctx)
{
struct perf_event_context *ctx = &cpuctx->ctx;
struct perf_cgroup *cgrp = cpuctx->cgrp;
struct perf_cgroup_info *info;
struct cgroup_subsys_state *css;
if (!cgrp)
return;
WARN_ON_ONCE(!ctx->nr_cgroups);
for (css = &cgrp->css; css; css = css->parent) {
cgrp = container_of(css, struct perf_cgroup, css);
info = this_cpu_ptr(cgrp->info);
__update_cgrp_time(info, ctx->timestamp, false);
__store_release(&info->active, 1);
}
}
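/*
 * Reschedule the CPU's cgroup events based on the cgroup constraint of
 * @task: if @task belongs to a different perf_cgroup than the one currently
 * in cpuctx->cgrp, schedule everything out, switch cpuctx->cgrp and schedule
 * back in.
 */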
static void perf_cgroup_switch(struct task_struct *task)
{
struct perf_cpu_context *cpuctx = this_cpu_ptr(&perf_cpu_context);
struct perf_cgroup *cgrp;
if (READ_ONCE(cpuctx->cgrp) == NULL)
return;
WARN_ON_ONCE(cpuctx->ctx.nr_cgroups == 0);
cgrp = perf_cgroup_from_task(task, NULL);
if (READ_ONCE(cpuctx->cgrp) == cgrp)
return;
perf_ctx_lock(cpuctx, cpuctx->task_ctx);
perf_ctx_disable(&cpuctx->ctx);
ctx_sched_out(&cpuctx->ctx, EVENT_ALL);
cpuctx->cgrp = cgrp;
ctx_sched_in(&cpuctx->ctx, EVENT_ALL);
perf_ctx_enable(&cpuctx->ctx);
perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
}
static int perf_cgroup_ensure_storage(struct perf_event *event,
struct cgroup_subsys_state *css)
{
struct perf_cpu_context *cpuctx;
struct perf_event **storage;
int cpu, heap_size, ret = 0;
for (heap_size = 1; css; css = css->parent)
heap_size++;
for_each_possible_cpu(cpu) {
cpuctx = per_cpu_ptr(&perf_cpu_context, cpu);
if (heap_size <= cpuctx->heap_size)
continue;
storage = kmalloc_node(heap_size * sizeof(struct perf_event *),
GFP_KERNEL, cpu_to_node(cpu));
if (!storage) {
ret = -ENOMEM;
break;
}
raw_spin_lock_irq(&cpuctx->ctx.lock);
if (cpuctx->heap_size < heap_size) {
swap(cpuctx->heap, storage);
if (storage == cpuctx->heap_default)
storage = NULL;
cpuctx->heap_size = heap_size;
}
raw_spin_unlock_irq(&cpuctx->ctx.lock);
kfree(storage);
}
return ret;
}
static inline int perf_cgroup_connect(int fd, struct perf_event *event,
struct perf_event_attr *attr,
struct perf_event *group_leader)
{
struct perf_cgroup *cgrp;
struct cgroup_subsys_state *css;
struct fd f = fdget(fd);
int ret = 0;
if (!f.file)
return -EBADF;
css = css_tryget_online_from_dir(f.file->f_path.dentry,
&perf_event_cgrp_subsys);
if (IS_ERR(css)) {
ret = PTR_ERR(css);
goto out;
}
ret = perf_cgroup_ensure_storage(event, css);
if (ret)
goto out;
cgrp = container_of(css, struct perf_cgroup, css);
event->cgrp = cgrp;
if (group_leader && group_leader->cgrp != cgrp) {
perf_detach_cgroup(event);
ret = -EINVAL;
}
out:
fdput(f);
return ret;
}
static inline void
perf_cgroup_event_enable(struct perf_event *event, struct perf_event_context *ctx)
{
struct perf_cpu_context *cpuctx;
if (!is_cgroup_event(event))
return;
cpuctx = container_of(ctx, struct perf_cpu_context, ctx);
if (ctx->nr_cgroups++)
return;
cpuctx->cgrp = perf_cgroup_from_task(current, ctx);
}
static inline void
perf_cgroup_event_disable(struct perf_event *event, struct perf_event_context *ctx)
{
struct perf_cpu_context *cpuctx;
if (!is_cgroup_event(event))
return;
cpuctx = container_of(ctx, struct perf_cpu_context, ctx);
if (--ctx->nr_cgroups)
return;
cpuctx->cgrp = NULL;
}
#else /* !CONFIG_CGROUP_PERF */
static inline bool
perf_cgroup_match(struct perf_event *event)
{
return true;
}
static inline void perf_detach_cgroup(struct perf_event *event)
{}
static inline int is_cgroup_event(struct perf_event *event)
{
return 0;
}
static inline void update_cgrp_time_from_event(struct perf_event *event)
{
}
static inline void update_cgrp_time_from_cpuctx(struct perf_cpu_context *cpuctx,
bool final)
{
}
static inline int perf_cgroup_connect(pid_t pid, struct perf_event *event,
struct perf_event_attr *attr,
struct perf_event *group_leader)
{
return -EINVAL;
}
static inline void
perf_cgroup_set_timestamp(struct perf_cpu_context *cpuctx)
{
}
static inline u64 perf_cgroup_event_time(struct perf_event *event)
{
return 0;
}
static inline u64 perf_cgroup_event_time_now(struct perf_event *event, u64 now)
{
return 0;
}
static inline void
perf_cgroup_event_enable(struct perf_event *event, struct perf_event_context *ctx)
{
}
static inline void
perf_cgroup_event_disable(struct perf_event *event, struct perf_event_context *ctx)
{
}
static void perf_cgroup_switch(struct task_struct *task)
{
}
#endif
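/*
 * Event multiplexing is driven by a per-PMU pinned hrtimer; the default
 * rotation interval (PERF_CPU_HRTIMER, in milliseconds) matches the timer
 * tick.  perf_mux_hrtimer_handler() runs with interrupts disabled.
 */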
#define PERF_CPU_HRTIMER (1000 / HZ)
static enum hrtimer_restart perf_mux_hrtimer_handler(struct hrtimer *hr)
{
struct perf_cpu_pmu_context *cpc;
bool rotations;
lockdep_assert_irqs_disabled();
cpc = container_of(hr, struct perf_cpu_pmu_context, hrtimer);
rotations = perf_rotate_context(cpc);
raw_spin_lock(&cpc->hrtimer_lock);
if (rotations)
hrtimer_forward_now(hr, cpc->hrtimer_interval);
else
cpc->hrtimer_active = 0;
raw_spin_unlock(&cpc->hrtimer_lock);
return rotations ? HRTIMER_RESTART : HRTIMER_NORESTART;
}
static void __perf_mux_hrtimer_init(struct perf_cpu_pmu_context *cpc, int cpu)
{
struct hrtimer *timer = &cpc->hrtimer;
struct pmu *pmu = cpc->epc.pmu;
u64 interval;
interval = pmu->hrtimer_interval_ms;
if (interval < 1)
interval = pmu->hrtimer_interval_ms = PERF_CPU_HRTIMER;
cpc->hrtimer_interval = ns_to_ktime(NSEC_PER_MSEC * interval);
raw_spin_lock_init(&cpc->hrtimer_lock);
hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED_HARD);
timer->function = perf_mux_hrtimer_handler;
}
static int perf_mux_hrtimer_restart(struct perf_cpu_pmu_context *cpc)
{
struct hrtimer *timer = &cpc->hrtimer;
unsigned long flags;
raw_spin_lock_irqsave(&cpc->hrtimer_lock, flags);
if (!cpc->hrtimer_active) {
cpc->hrtimer_active = 1;
hrtimer_forward_now(timer, cpc->hrtimer_interval);
hrtimer_start_expires(timer, HRTIMER_MODE_ABS_PINNED_HARD);
}
raw_spin_unlock_irqrestore(&cpc->hrtimer_lock, flags);
return 0;
}
static int perf_mux_hrtimer_restart_ipi(void *arg)
{
return perf_mux_hrtimer_restart(arg);
}
void perf_pmu_disable(struct pmu *pmu)
{
int *count = this_cpu_ptr(pmu->pmu_disable_count);
if (!(*count)++)
pmu->pmu_disable(pmu);
}
void perf_pmu_enable(struct pmu *pmu)
{
int *count = this_cpu_ptr(pmu->pmu_disable_count);
if (!--(*count))
pmu->pmu_enable(pmu);
}
static void perf_assert_pmu_disabled(struct pmu *pmu)
{
WARN_ON_ONCE(*this_cpu_ptr(pmu->pmu_disable_count) == 0);
}
static void get_ctx(struct perf_event_context *ctx)
{
refcount_inc(&ctx->refcount);
}
static void *alloc_task_ctx_data(struct pmu *pmu)
{
if (pmu->task_ctx_cache)
return kmem_cache_zalloc(pmu->task_ctx_cache, GFP_KERNEL);
return NULL;
}
static void free_task_ctx_data(struct pmu *pmu, void *task_ctx_data)
{
if (pmu->task_ctx_cache && task_ctx_data)
kmem_cache_free(pmu->task_ctx_cache, task_ctx_data);
}
static void free_ctx(struct rcu_head *head)
{
struct perf_event_context *ctx;
ctx = container_of(head, struct perf_event_context, rcu_head);
kfree(ctx);
}
static void put_ctx(struct perf_event_context *ctx)
{
if (refcount_dec_and_test(&ctx->refcount)) {
if (ctx->parent_ctx)
put_ctx(ctx->parent_ctx);
if (ctx->task && ctx->task != TASK_TOMBSTONE)
put_task_struct(ctx->task);
call_rcu(&ctx->rcu_head, free_ctx);
}
}
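/*
 * Grab a stable reference on event->ctx and take ctx->mutex; because the
 * event can migrate contexts (move_group, perf_pmu_migrate_context()), we
 * re-check event->ctx after taking the mutex and retry if it changed.
 */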
static struct perf_event_context *
perf_event_ctx_lock_nested(struct perf_event *event, int nesting)
{
struct perf_event_context *ctx;
again:
rcu_read_lock();
ctx = READ_ONCE(event->ctx);
if (!refcount_inc_not_zero(&ctx->refcount)) {
rcu_read_unlock();
goto again;
}
rcu_read_unlock();
mutex_lock_nested(&ctx->mutex, nesting);
if (event->ctx != ctx) {
mutex_unlock(&ctx->mutex);
put_ctx(ctx);
goto again;
}
return ctx;
}
static inline struct perf_event_context *
perf_event_ctx_lock(struct perf_event *event)
{
return perf_event_ctx_lock_nested(event, 0);
}
static void perf_event_ctx_unlock(struct perf_event *event,
struct perf_event_context *ctx)
{
mutex_unlock(&ctx->mutex);
put_ctx(ctx);
}
static __must_check struct perf_event_context *
unclone_ctx(struct perf_event_context *ctx)
{
struct perf_event_context *parent_ctx = ctx->parent_ctx;
lockdep_assert_held(&ctx->lock);
if (parent_ctx)
ctx->parent_ctx = NULL;
ctx->generation++;
return parent_ctx;
}
static u32 perf_event_pid_type(struct perf_event *event, struct task_struct *p,
enum pid_type type)
{
u32 nr;
if (event->parent)
event = event->parent;
nr = __task_pid_nr_ns(p, type, event->ns);
if (!nr && !pid_alive(p))
nr = -1;
return nr;
}
static u32 perf_event_pid(struct perf_event *event, struct task_struct *p)
{
return perf_event_pid_type(event, p, PIDTYPE_TGID);
}
static u32 perf_event_tid(struct perf_event *event, struct task_struct *p)
{
return perf_event_pid_type(event, p, PIDTYPE_PID);
}
static u64 primary_event_id(struct perf_event *event)
{
u64 id = event->id;
if (event->parent)
id = event->parent->id;
return id;
}
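/*
 * Get the perf_event_context for a task and lock it.
 *
 * This has to cope with the fact that until it is locked,
 * the context could get moved to another task.
 */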
static struct perf_event_context *
perf_lock_task_context(struct task_struct *task, unsigned long *flags)
{
struct perf_event_context *ctx;
retry:
local_irq_save(*flags);
rcu_read_lock();
ctx = rcu_dereference(task->perf_event_ctxp);
if (ctx) {
raw_spin_lock(&ctx->lock);
if (ctx != rcu_dereference(task->perf_event_ctxp)) {
raw_spin_unlock(&ctx->lock);
rcu_read_unlock();
local_irq_restore(*flags);
goto retry;
}
if (ctx->task == TASK_TOMBSTONE ||
!refcount_inc_not_zero(&ctx->refcount)) {
raw_spin_unlock(&ctx->lock);
ctx = NULL;
} else {
WARN_ON_ONCE(ctx->task != task);
}
}
rcu_read_unlock();
if (!ctx)
local_irq_restore(*flags);
return ctx;
}
static struct perf_event_context *
perf_pin_task_context(struct task_struct *task)
{
struct perf_event_context *ctx;
unsigned long flags;
ctx = perf_lock_task_context(task, &flags);
if (ctx) {
++ctx->pin_count;
raw_spin_unlock_irqrestore(&ctx->lock, flags);
}
return ctx;
}
static void perf_unpin_context(struct perf_event_context *ctx)
{
unsigned long flags;
raw_spin_lock_irqsave(&ctx->lock, flags);
--ctx->pin_count;
raw_spin_unlock_irqrestore(&ctx->lock, flags);
}
static void __update_context_time(struct perf_event_context *ctx, bool adv)
{
u64 now = perf_clock();
lockdep_assert_held(&ctx->lock);
if (adv)
ctx->time += now - ctx->timestamp;
ctx->timestamp = now;
WRITE_ONCE(ctx->timeoffset, ctx->time - ctx->timestamp);
}
static void update_context_time(struct perf_event_context *ctx)
{
__update_context_time(ctx, true);
}
static u64 perf_event_time(struct perf_event *event)
{
struct perf_event_context *ctx = event->ctx;
if (unlikely(!ctx))
return 0;
if (is_cgroup_event(event))
return perf_cgroup_event_time(event);
return ctx->time;
}
static u64 perf_event_time_now(struct perf_event *event, u64 now)
{
struct perf_event_context *ctx = event->ctx;
if (unlikely(!ctx))
return 0;
if (is_cgroup_event(event))
return perf_cgroup_event_time_now(event, now);
if (!(__load_acquire(&ctx->is_active) & EVENT_TIME))
return ctx->time;
now += READ_ONCE(ctx->timeoffset);
return now;
}
static enum event_type_t get_event_type(struct perf_event *event)
{
struct perf_event_context *ctx = event->ctx;
enum event_type_t event_type;
lockdep_assert_held(&ctx->lock);
if (event->group_leader != event)
event = event->group_leader;
event_type = event->attr.pinned ? EVENT_PINNED : EVENT_FLEXIBLE;
if (!ctx->task)
event_type |= EVENT_CPU;
return event_type;
}
static void init_event_group(struct perf_event *event)
{
RB_CLEAR_NODE(&event->group_node);
event->group_index = 0;
}
static struct perf_event_groups *
get_event_groups(struct perf_event *event, struct perf_event_context *ctx)
{
if (event->attr.pinned)
return &ctx->pinned_groups;
else
return &ctx->flexible_groups;
}
static void perf_event_groups_init(struct perf_event_groups *groups)
{
groups->tree = RB_ROOT;
groups->index = 0;
}
static inline struct cgroup *event_cgroup(const struct perf_event *event)
{
struct cgroup *cgroup = NULL;
#ifdef CONFIG_CGROUP_PERF
if (event->cgrp)
cgroup = event->cgrp->css.cgroup;
#endif
return cgroup;
}
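/*
 * Event groups are kept in a per-context rbtree keyed on {cpu, pmu, cgroup,
 * group_index}.  The monotonically increasing group_index gives a stable
 * insertion order (used for round-robin rotation) within each {cpu, pmu,
 * cgroup} subtree.  A NULL @left_pmu compares equal to any pmu, while a NULL
 * cgroup sorts before (and only matches) events without a cgroup.
 */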
static __always_inline int
perf_event_groups_cmp(const int left_cpu, const struct pmu *left_pmu,
const struct cgroup *left_cgroup, const u64 left_group_index,
const struct perf_event *right)
{
if (left_cpu < right->cpu)
return -1;
if (left_cpu > right->cpu)
return 1;
if (left_pmu) {
if (left_pmu < right->pmu_ctx->pmu)
return -1;
if (left_pmu > right->pmu_ctx->pmu)
return 1;
}
#ifdef CONFIG_CGROUP_PERF
{
const struct cgroup *right_cgroup = event_cgroup(right);
if (left_cgroup != right_cgroup) {
if (!left_cgroup) {
return -1;
}
if (!right_cgroup) {
return 1;
}
if (cgroup_id(left_cgroup) < cgroup_id(right_cgroup))
return -1;
return 1;
}
}
#endif
if (left_group_index < right->group_index)
return -1;
if (left_group_index > right->group_index)
return 1;
return 0;
}
#define __node_2_pe(node) \
rb_entry((node), struct perf_event, group_node)
static inline bool __group_less(struct rb_node *a, const struct rb_node *b)
{
struct perf_event *e = __node_2_pe(a);
return perf_event_groups_cmp(e->cpu, e->pmu_ctx->pmu, event_cgroup(e),
e->group_index, __node_2_pe(b)) < 0;
}
struct __group_key {
int cpu;
struct pmu *pmu;
struct cgroup *cgroup;
};
static inline int __group_cmp(const void *key, const struct rb_node *node)
{
const struct __group_key *a = key;
const struct perf_event *b = __node_2_pe(node);
return perf_event_groups_cmp(a->cpu, a->pmu, a->cgroup, b->group_index, b);
}
static inline int
__group_cmp_ignore_cgroup(const void *key, const struct rb_node *node)
{
const struct __group_key *a = key;
const struct perf_event *b = __node_2_pe(node);
return perf_event_groups_cmp(a->cpu, a->pmu, event_cgroup(b),
b->group_index, b);
}
static void
perf_event_groups_insert(struct perf_event_groups *groups,
struct perf_event *event)
{
event->group_index = ++groups->index;
rb_add(&event->group_node, &groups->tree, __group_less);
}
static void
add_event_to_groups(struct perf_event *event, struct perf_event_context *ctx)
{
struct perf_event_groups *groups;
groups = get_event_groups(event, ctx);
perf_event_groups_insert(groups, event);
}
static void
perf_event_groups_delete(struct perf_event_groups *groups,
struct perf_event *event)
{
WARN_ON_ONCE(RB_EMPTY_NODE(&event->group_node) ||
RB_EMPTY_ROOT(&groups->tree));
rb_erase(&event->group_node, &groups->tree);
init_event_group(event);
}
static void
del_event_from_groups(struct perf_event *event, struct perf_event_context *ctx)
{
struct perf_event_groups *groups;
groups = get_event_groups(event, ctx);
perf_event_groups_delete(groups, event);
}
static struct perf_event *
perf_event_groups_first(struct perf_event_groups *groups, int cpu,
struct pmu *pmu, struct cgroup *cgrp)
{
struct __group_key key = {
.cpu = cpu,
.pmu = pmu,
.cgroup = cgrp,
};
struct rb_node *node;
node = rb_find_first(&key, &groups->tree, __group_cmp);
if (node)
return __node_2_pe(node);
return NULL;
}
static struct perf_event *
perf_event_groups_next(struct perf_event *event, struct pmu *pmu)
{
struct __group_key key = {
.cpu = event->cpu,
.pmu = pmu,
.cgroup = event_cgroup(event),
};
struct rb_node *next;
next = rb_next_match(&key, &event->group_node, __group_cmp);
if (next)
return __node_2_pe(next);
return NULL;
}
#define perf_event_groups_for_cpu_pmu(event, groups, cpu, pmu) \
for (event = perf_event_groups_first(groups, cpu, pmu, NULL); \
event; event = perf_event_groups_next(event, pmu))
#define perf_event_groups_for_each(event, groups) \
for (event = rb_entry_safe(rb_first(&((groups)->tree)), \
typeof(*event), group_node); event; \
event = rb_entry_safe(rb_next(&event->group_node), \
typeof(*event), group_node))
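/*
 * Add an event to the lists for its context.
 * Must be called with ctx->mutex and ctx->lock held.
 */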
static void
list_add_event(struct perf_event *event, struct perf_event_context *ctx)
{
lockdep_assert_held(&ctx->lock);
WARN_ON_ONCE(event->attach_state & PERF_ATTACH_CONTEXT);
event->attach_state |= PERF_ATTACH_CONTEXT;
event->tstamp = perf_event_time(event);
if (event->group_leader == event) {
event->group_caps = event->event_caps;
add_event_to_groups(event, ctx);
}
list_add_rcu(&event->event_entry, &ctx->event_list);
ctx->nr_events++;
if (event->hw.flags & PERF_EVENT_FLAG_USER_READ_CNT)
ctx->nr_user++;
if (event->attr.inherit_stat)
ctx->nr_stat++;
if (event->state > PERF_EVENT_STATE_OFF)
perf_cgroup_event_enable(event, ctx);
ctx->generation++;
event->pmu_ctx->nr_events++;
}
static inline void perf_event__state_init(struct perf_event *event)
{
event->state = event->attr.disabled ? PERF_EVENT_STATE_OFF :
PERF_EVENT_STATE_INACTIVE;
}
static void __perf_event_read_size(struct perf_event *event, int nr_siblings)
{
int entry = sizeof(u64);
int size = 0;
int nr = 1;
if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
size += sizeof(u64);
if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
size += sizeof(u64);
if (event->attr.read_format & PERF_FORMAT_ID)
entry += sizeof(u64);
if (event->attr.read_format & PERF_FORMAT_LOST)
entry += sizeof(u64);
if (event->attr.read_format & PERF_FORMAT_GROUP) {
nr += nr_siblings;
size += sizeof(u64);
}
size += entry * nr;
event->read_size = size;
}
static void __perf_event_header_size(struct perf_event *event, u64 sample_type)
{
struct perf_sample_data *data;
u16 size = 0;
if (sample_type & PERF_SAMPLE_IP)
size += sizeof(data->ip);
if (sample_type & PERF_SAMPLE_ADDR)
size += sizeof(data->addr);
if (sample_type & PERF_SAMPLE_PERIOD)
size += sizeof(data->period);
if (sample_type & PERF_SAMPLE_WEIGHT_TYPE)
size += sizeof(data->weight.full);
if (sample_type & PERF_SAMPLE_READ)
size += event->read_size;
if (sample_type & PERF_SAMPLE_DATA_SRC)
size += sizeof(data->data_src.val);
if (sample_type & PERF_SAMPLE_TRANSACTION)
size += sizeof(data->txn);
if (sample_type & PERF_SAMPLE_PHYS_ADDR)
size += sizeof(data->phys_addr);
if (sample_type & PERF_SAMPLE_CGROUP)
size += sizeof(data->cgroup);
if (sample_type & PERF_SAMPLE_DATA_PAGE_SIZE)
size += sizeof(data->data_page_size);
if (sample_type & PERF_SAMPLE_CODE_PAGE_SIZE)
size += sizeof(data->code_page_size);
event->header_size = size;
}
static void perf_event__header_size(struct perf_event *event)
{
__perf_event_read_size(event,
event->group_leader->nr_siblings);
__perf_event_header_size(event, event->attr.sample_type);
}
static void perf_event__id_header_size(struct perf_event *event)
{
struct perf_sample_data *data;
u64 sample_type = event->attr.sample_type;
u16 size = 0;
if (sample_type & PERF_SAMPLE_TID)
size += sizeof(data->tid_entry);
if (sample_type & PERF_SAMPLE_TIME)
size += sizeof(data->time);
if (sample_type & PERF_SAMPLE_IDENTIFIER)
size += sizeof(data->id);
if (sample_type & PERF_SAMPLE_ID)
size += sizeof(data->id);
if (sample_type & PERF_SAMPLE_STREAM_ID)
size += sizeof(data->stream_id);
if (sample_type & PERF_SAMPLE_CPU)
size += sizeof(data->cpu_entry);
event->id_header_size = size;
}
static bool perf_event_validate_size(struct perf_event *event)
{
__perf_event_read_size(event, event->group_leader->nr_siblings + 1);
__perf_event_header_size(event, event->attr.sample_type & ~PERF_SAMPLE_READ);
perf_event__id_header_size(event);
if (event->read_size + event->header_size +
event->id_header_size + sizeof(struct perf_event_header) >= 16*1024)
return false;
return true;
}
static void perf_group_attach(struct perf_event *event)
{
struct perf_event *group_leader = event->group_leader, *pos;
lockdep_assert_held(&event->ctx->lock);
if (event->attach_state & PERF_ATTACH_GROUP)
return;
event->attach_state |= PERF_ATTACH_GROUP;
if (group_leader == event)
return;
WARN_ON_ONCE(group_leader->ctx != event->ctx);
group_leader->group_caps &= event->event_caps;
list_add_tail(&event->sibling_list, &group_leader->sibling_list);
group_leader->nr_siblings++;
perf_event__header_size(group_leader);
for_each_sibling_event(pos, group_leader)
perf_event__header_size(pos);
}
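/*
 * Remove an event from the lists for its context.
 * Must be called with ctx->mutex and ctx->lock held.
 */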
static void
list_del_event(struct perf_event *event, struct perf_event_context *ctx)
{
WARN_ON_ONCE(event->ctx != ctx);
lockdep_assert_held(&ctx->lock);
if (!(event->attach_state & PERF_ATTACH_CONTEXT))
return;
event->attach_state &= ~PERF_ATTACH_CONTEXT;
ctx->nr_events--;
if (event->hw.flags & PERF_EVENT_FLAG_USER_READ_CNT)
ctx->nr_user--;
if (event->attr.inherit_stat)
ctx->nr_stat--;
list_del_rcu(&event->event_entry);
if (event->group_leader == event)
del_event_from_groups(event, ctx);
if (event->state > PERF_EVENT_STATE_OFF) {
perf_cgroup_event_disable(event, ctx);
perf_event_set_state(event, PERF_EVENT_STATE_OFF);
}
ctx->generation++;
event->pmu_ctx->nr_events--;
}
static int
perf_aux_output_match(struct perf_event *event, struct perf_event *aux_event)
{
if (!has_aux(aux_event))
return 0;
if (!event->pmu->aux_output_match)
return 0;
return event->pmu->aux_output_match(aux_event);
}
static void put_event(struct perf_event *event);
static void event_sched_out(struct perf_event *event,
struct perf_event_context *ctx);
static void perf_put_aux_event(struct perf_event *event)
{
struct perf_event_context *ctx = event->ctx;
struct perf_event *iter;
if (event->aux_event) {
iter = event->aux_event;
event->aux_event = NULL;
put_event(iter);
return;
}
for_each_sibling_event(iter, event->group_leader) {
if (iter->aux_event != event)
continue;
iter->aux_event = NULL;
put_event(event);
event_sched_out(iter, ctx);
perf_event_set_state(event, PERF_EVENT_STATE_ERROR);
}
}
static bool perf_need_aux_event(struct perf_event *event)
{
return !!event->attr.aux_output || !!event->attr.aux_sample_size;
}
static int perf_get_aux_event(struct perf_event *event,
struct perf_event *group_leader)
{
if (!group_leader)
return 0;
if (event->attr.aux_output && event->attr.aux_sample_size)
return 0;
if (event->attr.aux_output &&
!perf_aux_output_match(event, group_leader))
return 0;
if (event->attr.aux_sample_size && !group_leader->pmu->snapshot_aux)
return 0;
if (!atomic_long_inc_not_zero(&group_leader->refcount))
return 0;
event->aux_event = group_leader;
return 1;
}
static inline struct list_head *get_event_list(struct perf_event *event)
{
return event->attr.pinned ? &event->pmu_ctx->pinned_active :
&event->pmu_ctx->flexible_active;
}
static inline void perf_remove_sibling_event(struct perf_event *event)
{
event_sched_out(event, event->ctx);
perf_event_set_state(event, PERF_EVENT_STATE_ERROR);
}
static void perf_group_detach(struct perf_event *event)
{
struct perf_event *leader = event->group_leader;
struct perf_event *sibling, *tmp;
struct perf_event_context *ctx = event->ctx;
lockdep_assert_held(&ctx->lock);
if (!(event->attach_state & PERF_ATTACH_GROUP))
return;
event->attach_state &= ~PERF_ATTACH_GROUP;
perf_put_aux_event(event);
if (leader != event) {
list_del_init(&event->sibling_list);
event->group_leader->nr_siblings--;
goto out;
}
list_for_each_entry_safe(sibling, tmp, &event->sibling_list, sibling_list) {
if (sibling->event_caps & PERF_EV_CAP_SIBLING)
perf_remove_sibling_event(sibling);
sibling->group_leader = sibling;
list_del_init(&sibling->sibling_list);
sibling->group_caps = event->group_caps;
if (sibling->attach_state & PERF_ATTACH_CONTEXT) {
add_event_to_groups(sibling, event->ctx);
if (sibling->state == PERF_EVENT_STATE_ACTIVE)
list_add_tail(&sibling->active_list, get_event_list(sibling));
}
WARN_ON_ONCE(sibling->ctx != event->ctx);
}
out:
for_each_sibling_event(tmp, leader)
perf_event__header_size(tmp);
perf_event__header_size(leader);
}
static void sync_child_event(struct perf_event *child_event);
static void perf_child_detach(struct perf_event *event)
{
struct perf_event *parent_event = event->parent;
if (!(event->attach_state & PERF_ATTACH_CHILD))
return;
event->attach_state &= ~PERF_ATTACH_CHILD;
if (WARN_ON_ONCE(!parent_event))
return;
lockdep_assert_held(&parent_event->child_mutex);
sync_child_event(event);
list_del_init(&event->child_list);
}
static bool is_orphaned_event(struct perf_event *event)
{
return event->state == PERF_EVENT_STATE_DEAD;
}
static inline int
event_filter_match(struct perf_event *event)
{
return (event->cpu == -1 || event->cpu == smp_processor_id()) &&
perf_cgroup_match(event);
}
static void
event_sched_out(struct perf_event *event, struct perf_event_context *ctx)
{
struct perf_event_pmu_context *epc = event->pmu_ctx;
struct perf_cpu_pmu_context *cpc = this_cpu_ptr(epc->pmu->cpu_pmu_context);
enum perf_event_state state = PERF_EVENT_STATE_INACTIVE;
WARN_ON_ONCE(event->ctx != ctx);
lockdep_assert_held(&ctx->lock);
if (event->state != PERF_EVENT_STATE_ACTIVE)
return;
list_del_init(&event->active_list);
perf_pmu_disable(event->pmu);
event->pmu->del(event, 0);
event->oncpu = -1;
if (event->pending_disable) {
event->pending_disable = 0;
perf_cgroup_event_disable(event, ctx);
state = PERF_EVENT_STATE_OFF;
}
if (event->pending_sigtrap) {
bool dec = true;
event->pending_sigtrap = 0;
if (state != PERF_EVENT_STATE_OFF &&
!event->pending_work) {
event->pending_work = 1;
dec = false;
WARN_ON_ONCE(!atomic_long_inc_not_zero(&event->refcount));
task_work_add(current, &event->pending_task, TWA_RESUME);
}
if (dec)
local_dec(&event->ctx->nr_pending);
}
perf_event_set_state(event, state);
if (!is_software_event(event))
cpc->active_oncpu--;
if (event->attr.freq && event->attr.sample_freq)
ctx->nr_freq--;
if (event->attr.exclusive || !cpc->active_oncpu)
cpc->exclusive = 0;
perf_pmu_enable(event->pmu);
}
static void
group_sched_out(struct perf_event *group_event, struct perf_event_context *ctx)
{
struct perf_event *event;
if (group_event->state != PERF_EVENT_STATE_ACTIVE)
return;
perf_assert_pmu_disabled(group_event->pmu_ctx->pmu);
event_sched_out(group_event, ctx);
for_each_sibling_event(event, group_event)
event_sched_out(event, ctx);
}
#define DETACH_GROUP 0x01UL
#define DETACH_CHILD 0x02UL
#define DETACH_DEAD 0x04UL
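/*
 * Cross CPU call to remove a performance event.
 *
 * We disable the event on the hardware level first. After that we
 * remove it from the context list.  The DETACH_* flags are passed in @info.
 */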
static void
__perf_remove_from_context(struct perf_event *event,
struct perf_cpu_context *cpuctx,
struct perf_event_context *ctx,
void *info)
{
struct perf_event_pmu_context *pmu_ctx = event->pmu_ctx;
unsigned long flags = (unsigned long)info;
if (ctx->is_active & EVENT_TIME) {
update_context_time(ctx);
update_cgrp_time_from_cpuctx(cpuctx, false);
}
if (flags & DETACH_DEAD)
event->pending_disable = 1;
event_sched_out(event, ctx);
if (flags & DETACH_GROUP)
perf_group_detach(event);
if (flags & DETACH_CHILD)
perf_child_detach(event);
list_del_event(event, ctx);
if (flags & DETACH_DEAD)
event->state = PERF_EVENT_STATE_DEAD;
if (!pmu_ctx->nr_events) {
pmu_ctx->rotate_necessary = 0;
if (ctx->task && ctx->is_active) {
struct perf_cpu_pmu_context *cpc;
cpc = this_cpu_ptr(pmu_ctx->pmu->cpu_pmu_context);
WARN_ON_ONCE(cpc->task_epc && cpc->task_epc != pmu_ctx);
cpc->task_epc = NULL;
}
}
if (!ctx->nr_events && ctx->is_active) {
if (ctx == &cpuctx->ctx)
update_cgrp_time_from_cpuctx(cpuctx, true);
ctx->is_active = 0;
if (ctx->task) {
WARN_ON_ONCE(cpuctx->task_ctx != ctx);
cpuctx->task_ctx = NULL;
}
}
}
static void perf_remove_from_context(struct perf_event *event, unsigned long flags)
{
struct perf_event_context *ctx = event->ctx;
lockdep_assert_held(&ctx->mutex);
raw_spin_lock_irq(&ctx->lock);
if (!ctx->is_active) {
__perf_remove_from_context(event, this_cpu_ptr(&perf_cpu_context),
ctx, (void *)flags);
raw_spin_unlock_irq(&ctx->lock);
return;
}
raw_spin_unlock_irq(&ctx->lock);
event_function_call(event, __perf_remove_from_context, (void *)flags);
}
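/*
 * Cross CPU call to disable a performance event.
 */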
static void __perf_event_disable(struct perf_event *event,
struct perf_cpu_context *cpuctx,
struct perf_event_context *ctx,
void *info)
{
if (event->state < PERF_EVENT_STATE_INACTIVE)
return;
if (ctx->is_active & EVENT_TIME) {
update_context_time(ctx);
update_cgrp_time_from_event(event);
}
perf_pmu_disable(event->pmu_ctx->pmu);
if (event == event->group_leader)
group_sched_out(event, ctx);
else
event_sched_out(event, ctx);
perf_event_set_state(event, PERF_EVENT_STATE_OFF);
perf_cgroup_event_disable(event, ctx);
perf_pmu_enable(event->pmu_ctx->pmu);
}
static void _perf_event_disable(struct perf_event *event)
{
struct perf_event_context *ctx = event->ctx;
raw_spin_lock_irq(&ctx->lock);
if (event->state <= PERF_EVENT_STATE_OFF) {
raw_spin_unlock_irq(&ctx->lock);
return;
}
raw_spin_unlock_irq(&ctx->lock);
event_function_call(event, __perf_event_disable, NULL);
}
void perf_event_disable_local(struct perf_event *event)
{
event_function_local(event, __perf_event_disable, NULL);
}
void perf_event_disable(struct perf_event *event)
{
struct perf_event_context *ctx;
ctx = perf_event_ctx_lock(event);
_perf_event_disable(event);
perf_event_ctx_unlock(event, ctx);
}
EXPORT_SYMBOL_GPL(perf_event_disable);
void perf_event_disable_inatomic(struct perf_event *event)
{
event->pending_disable = 1;
irq_work_queue(&event->pending_irq);
}
#define MAX_INTERRUPTS (~0ULL)
static void perf_log_throttle(struct perf_event *event, int enable);
static void perf_log_itrace_start(struct perf_event *event);
static int
event_sched_in(struct perf_event *event, struct perf_event_context *ctx)
{
struct perf_event_pmu_context *epc = event->pmu_ctx;
struct perf_cpu_pmu_context *cpc = this_cpu_ptr(epc->pmu->cpu_pmu_context);
int ret = 0;
WARN_ON_ONCE(event->ctx != ctx);
lockdep_assert_held(&ctx->lock);
if (event->state <= PERF_EVENT_STATE_OFF)
return 0;
WRITE_ONCE(event->oncpu, smp_processor_id());
smp_wmb();
perf_event_set_state(event, PERF_EVENT_STATE_ACTIVE);
if (unlikely(event->hw.interrupts == MAX_INTERRUPTS)) {
perf_log_throttle(event, 1);
event->hw.interrupts = 0;
}
perf_pmu_disable(event->pmu);
perf_log_itrace_start(event);
if (event->pmu->add(event, PERF_EF_START)) {
perf_event_set_state(event, PERF_EVENT_STATE_INACTIVE);
event->oncpu = -1;
ret = -EAGAIN;
goto out;
}
if (!is_software_event(event))
cpc->active_oncpu++;
if (event->attr.freq && event->attr.sample_freq)
ctx->nr_freq++;
if (event->attr.exclusive)
cpc->exclusive = 1;
out:
perf_pmu_enable(event->pmu);
return ret;
}
static int
group_sched_in(struct perf_event *group_event, struct perf_event_context *ctx)
{
struct perf_event *event, *partial_group = NULL;
struct pmu *pmu = group_event->pmu_ctx->pmu;
if (group_event->state == PERF_EVENT_STATE_OFF)
return 0;
pmu->start_txn(pmu, PERF_PMU_TXN_ADD);
if (event_sched_in(group_event, ctx))
goto error;
for_each_sibling_event(event, group_event) {
if (event_sched_in(event, ctx)) {
partial_group = event;
goto group_error;
}
}
if (!pmu->commit_txn(pmu))
return 0;
group_error:
for_each_sibling_event(event, group_event) {
if (event == partial_group)
break;
event_sched_out(event, ctx);
}
event_sched_out(group_event, ctx);
error:
pmu->cancel_txn(pmu);
return -EAGAIN;
}
static int group_can_go_on(struct perf_event *event, int can_add_hw)
{
struct perf_event_pmu_context *epc = event->pmu_ctx;
struct perf_cpu_pmu_context *cpc = this_cpu_ptr(epc->pmu->cpu_pmu_context);
if (event->group_caps & PERF_EV_CAP_SOFTWARE)
return 1;
if (cpc->exclusive)
return 0;
if (event->attr.exclusive && !list_empty(get_event_list(event)))
return 0;
return can_add_hw;
}
static void add_event_to_ctx(struct perf_event *event,
struct perf_event_context *ctx)
{
list_add_event(event, ctx);
perf_group_attach(event);
}
static void task_ctx_sched_out(struct perf_event_context *ctx,
enum event_type_t event_type)
{
struct perf_cpu_context *cpuctx = this_cpu_ptr(&perf_cpu_context);
if (!cpuctx->task_ctx)
return;
if (WARN_ON_ONCE(ctx != cpuctx->task_ctx))
return;
ctx_sched_out(ctx, event_type);
}
static void perf_event_sched_in(struct perf_cpu_context *cpuctx,
struct perf_event_context *ctx)
{
ctx_sched_in(&cpuctx->ctx, EVENT_PINNED);
if (ctx)
ctx_sched_in(ctx, EVENT_PINNED);
ctx_sched_in(&cpuctx->ctx, EVENT_FLEXIBLE);
if (ctx)
ctx_sched_in(ctx, EVENT_FLEXIBLE);
}
static void ctx_resched(struct perf_cpu_context *cpuctx,
struct perf_event_context *task_ctx,
enum event_type_t event_type)
{
bool cpu_event = !!(event_type & EVENT_CPU);
if (event_type & EVENT_PINNED)
event_type |= EVENT_FLEXIBLE;
event_type &= EVENT_ALL;
perf_ctx_disable(&cpuctx->ctx);
if (task_ctx) {
perf_ctx_disable(task_ctx);
task_ctx_sched_out(task_ctx, event_type);
}
if (cpu_event)
ctx_sched_out(&cpuctx->ctx, event_type);
else if (event_type & EVENT_PINNED)
ctx_sched_out(&cpuctx->ctx, EVENT_FLEXIBLE);
perf_event_sched_in(cpuctx, task_ctx);
perf_ctx_enable(&cpuctx->ctx);
if (task_ctx)
perf_ctx_enable(task_ctx);
}
void perf_pmu_resched(struct pmu *pmu)
{
struct perf_cpu_context *cpuctx = this_cpu_ptr(&perf_cpu_context);
struct perf_event_context *task_ctx = cpuctx->task_ctx;
perf_ctx_lock(cpuctx, task_ctx);
ctx_resched(cpuctx, task_ctx, EVENT_ALL|EVENT_CPU);
perf_ctx_unlock(cpuctx, task_ctx);
}
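/*
 * Cross CPU call to install and enable a performance event.
 *
 * Very similar to remote_function() + event_function() but cannot assume that
 * things like ctx->is_active and cpuctx->task_ctx are set.
 */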
static int __perf_install_in_context(void *info)
{
struct perf_event *event = info;
struct perf_event_context *ctx = event->ctx;
struct perf_cpu_context *cpuctx = this_cpu_ptr(&perf_cpu_context);
struct perf_event_context *task_ctx = cpuctx->task_ctx;
bool reprogram = true;
int ret = 0;
raw_spin_lock(&cpuctx->ctx.lock);
if (ctx->task) {
raw_spin_lock(&ctx->lock);
task_ctx = ctx;
reprogram = (ctx->task == current);
if (task_curr(ctx->task) && !reprogram) {
ret = -ESRCH;
goto unlock;
}
WARN_ON_ONCE(reprogram && cpuctx->task_ctx && cpuctx->task_ctx != ctx);
} else if (task_ctx) {
raw_spin_lock(&task_ctx->lock);
}
#ifdef CONFIG_CGROUP_PERF
if (event->state > PERF_EVENT_STATE_OFF && is_cgroup_event(event)) {
struct perf_cgroup *cgrp = perf_cgroup_from_task(current, ctx);
reprogram = cgroup_is_descendant(cgrp->css.cgroup,
event->cgrp->css.cgroup);
}
#endif
if (reprogram) {
ctx_sched_out(ctx, EVENT_TIME);
add_event_to_ctx(event, ctx);
ctx_resched(cpuctx, task_ctx, get_event_type(event));
} else {
add_event_to_ctx(event, ctx);
}
unlock:
perf_ctx_unlock(cpuctx, task_ctx);
return ret;
}
static bool exclusive_event_installable(struct perf_event *event,
struct perf_event_context *ctx);
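/*
 * Attach a performance event to a context.
 *
 * Very similar to event_function_call(), see comment there.
 */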
static void
perf_install_in_context(struct perf_event_context *ctx,
struct perf_event *event,
int cpu)
{
struct task_struct *task = READ_ONCE(ctx->task);
lockdep_assert_held(&ctx->mutex);
WARN_ON_ONCE(!exclusive_event_installable(event, ctx));
if (event->cpu != -1)
WARN_ON_ONCE(event->cpu != cpu);
smp_store_release(&event->ctx, ctx);
if (__perf_effective_state(event) == PERF_EVENT_STATE_OFF &&
ctx->nr_events && !is_cgroup_event(event)) {
raw_spin_lock_irq(&ctx->lock);
if (ctx->task == TASK_TOMBSTONE) {
raw_spin_unlock_irq(&ctx->lock);
return;
}
add_event_to_ctx(event, ctx);
raw_spin_unlock_irq(&ctx->lock);
return;
}
if (!task) {
cpu_function_call(cpu, __perf_install_in_context, event);
return;
}
if (WARN_ON_ONCE(task == TASK_TOMBSTONE))
return;
smp_mb();
again:
if (!task_function_call(task, __perf_install_in_context, event))
return;
raw_spin_lock_irq(&ctx->lock);
task = ctx->task;
if (WARN_ON_ONCE(task == TASK_TOMBSTONE)) {
raw_spin_unlock_irq(&ctx->lock);
return;
}
if (task_curr(task)) {
raw_spin_unlock_irq(&ctx->lock);
goto again;
}
add_event_to_ctx(event, ctx);
raw_spin_unlock_irq(&ctx->lock);
}
static void __perf_event_enable(struct perf_event *event,
struct perf_cpu_context *cpuctx,
struct perf_event_context *ctx,
void *info)
{
struct perf_event *leader = event->group_leader;
struct perf_event_context *task_ctx;
if (event->state >= PERF_EVENT_STATE_INACTIVE ||
event->state <= PERF_EVENT_STATE_ERROR)
return;
if (ctx->is_active)
ctx_sched_out(ctx, EVENT_TIME);
perf_event_set_state(event, PERF_EVENT_STATE_INACTIVE);
perf_cgroup_event_enable(event, ctx);
if (!ctx->is_active)
return;
if (!event_filter_match(event)) {
ctx_sched_in(ctx, EVENT_TIME);
return;
}
if (leader != event && leader->state != PERF_EVENT_STATE_ACTIVE) {
ctx_sched_in(ctx, EVENT_TIME);
return;
}
task_ctx = cpuctx->task_ctx;
if (ctx->task)
WARN_ON_ONCE(task_ctx != ctx);
ctx_resched(cpuctx, task_ctx, get_event_type(event));
}
static void _perf_event_enable(struct perf_event *event)
{
struct perf_event_context *ctx = event->ctx;
raw_spin_lock_irq(&ctx->lock);
if (event->state >= PERF_EVENT_STATE_INACTIVE ||
event->state < PERF_EVENT_STATE_ERROR) {
out:
raw_spin_unlock_irq(&ctx->lock);
return;
}
if (event->state == PERF_EVENT_STATE_ERROR) {
if (event->event_caps & PERF_EV_CAP_SIBLING &&
event->group_leader == event)
goto out;
event->state = PERF_EVENT_STATE_OFF;
}
raw_spin_unlock_irq(&ctx->lock);
event_function_call(event, __perf_event_enable, NULL);
}
void perf_event_enable(struct perf_event *event)
{
struct perf_event_context *ctx;
ctx = perf_event_ctx_lock(event);
_perf_event_enable(event);
perf_event_ctx_unlock(event, ctx);
}
EXPORT_SYMBOL_GPL(perf_event_enable);
struct stop_event_data {
struct perf_event *event;
unsigned int restart;
};
static int __perf_event_stop(void *info)
{
struct stop_event_data *sd = info;
struct perf_event *event = sd->event;
if (READ_ONCE(event->state) != PERF_EVENT_STATE_ACTIVE)
return 0;
smp_rmb();
if (READ_ONCE(event->oncpu) != smp_processor_id())
return -EAGAIN;
event->pmu->stop(event, PERF_EF_UPDATE);
if (sd->restart)
event->pmu->start(event, 0);
return 0;
}
static int perf_event_stop(struct perf_event *event, int restart)
{
struct stop_event_data sd = {
.event = event,
.restart = restart,
};
int ret = 0;
do {
if (READ_ONCE(event->state) != PERF_EVENT_STATE_ACTIVE)
return 0;
smp_rmb();
ret = cpu_function_call(READ_ONCE(event->oncpu),
__perf_event_stop, &sd);
} while (ret == -EAGAIN);
return ret;
}
void perf_event_addr_filters_sync(struct perf_event *event)
{
struct perf_addr_filters_head *ifh = perf_event_addr_filters(event);
if (!has_addr_filter(event))
return;
raw_spin_lock(&ifh->lock);
if (event->addr_filters_gen != event->hw.addr_filters_gen) {
event->pmu->addr_filters_sync(event);
event->hw.addr_filters_gen = event->addr_filters_gen;
}
raw_spin_unlock(&ifh->lock);
}
EXPORT_SYMBOL_GPL(perf_event_addr_filters_sync);
static int _perf_event_refresh(struct perf_event *event, int refresh)
{
if (event->attr.inherit || !is_sampling_event(event))
return -EINVAL;
atomic_add(refresh, &event->event_limit);
_perf_event_enable(event);
return 0;
}
int perf_event_refresh(struct perf_event *event, int refresh)
{
struct perf_event_context *ctx;
int ret;
ctx = perf_event_ctx_lock(event);
ret = _perf_event_refresh(event, refresh);
perf_event_ctx_unlock(event, ctx);
return ret;
}
EXPORT_SYMBOL_GPL(perf_event_refresh);
static int perf_event_modify_breakpoint(struct perf_event *bp,
struct perf_event_attr *attr)
{
int err;
_perf_event_disable(bp);
err = modify_user_hw_breakpoint_check(bp, attr, true);
if (!bp->attr.disabled)
_perf_event_enable(bp);
return err;
}
static void perf_event_modify_copy_attr(struct perf_event_attr *to,
const struct perf_event_attr *from)
{
to->sig_data = from->sig_data;
}
static int perf_event_modify_attr(struct perf_event *event,
struct perf_event_attr *attr)
{
int (*func)(struct perf_event *, struct perf_event_attr *);
struct perf_event *child;
int err;
if (event->attr.type != attr->type)
return -EINVAL;
switch (event->attr.type) {
case PERF_TYPE_BREAKPOINT:
func = perf_event_modify_breakpoint;
break;
default:
return -EOPNOTSUPP;
}
WARN_ON_ONCE(event->ctx->parent_ctx);
mutex_lock(&event->child_mutex);
perf_event_modify_copy_attr(&event->attr, attr);
err = func(event, attr);
if (err)
goto out;
list_for_each_entry(child, &event->child_list, child_list) {
perf_event_modify_copy_attr(&child->attr, attr);
err = func(child, attr);
if (err)
goto out;
}
out:
mutex_unlock(&event->child_mutex);
return err;
}
static void __pmu_ctx_sched_out(struct perf_event_pmu_context *pmu_ctx,
enum event_type_t event_type)
{
struct perf_event_context *ctx = pmu_ctx->ctx;
struct perf_event *event, *tmp;
struct pmu *pmu = pmu_ctx->pmu;
if (ctx->task && !ctx->is_active) {
struct perf_cpu_pmu_context *cpc;
cpc = this_cpu_ptr(pmu->cpu_pmu_context);
WARN_ON_ONCE(cpc->task_epc && cpc->task_epc != pmu_ctx);
cpc->task_epc = NULL;
}
if (!event_type)
return;
perf_pmu_disable(pmu);
if (event_type & EVENT_PINNED) {
list_for_each_entry_safe(event, tmp,
&pmu_ctx->pinned_active,
active_list)
group_sched_out(event, ctx);
}
if (event_type & EVENT_FLEXIBLE) {
list_for_each_entry_safe(event, tmp,
&pmu_ctx->flexible_active,
active_list)
group_sched_out(event, ctx);
pmu_ctx->rotate_necessary = 0;
}
perf_pmu_enable(pmu);
}
static void
ctx_sched_out(struct perf_event_context *ctx, enum event_type_t event_type)
{
struct perf_cpu_context *cpuctx = this_cpu_ptr(&perf_cpu_context);
struct perf_event_pmu_context *pmu_ctx;
int is_active = ctx->is_active;
lockdep_assert_held(&ctx->lock);
if (likely(!ctx->nr_events)) {
WARN_ON_ONCE(ctx->is_active);
if (ctx->task)
WARN_ON_ONCE(cpuctx->task_ctx);
return;
}
if (is_active & EVENT_TIME) {
update_context_time(ctx);
update_cgrp_time_from_cpuctx(cpuctx, ctx == &cpuctx->ctx);
barrier();
}
ctx->is_active &= ~event_type;
if (!(ctx->is_active & EVENT_ALL))
ctx->is_active = 0;
if (ctx->task) {
WARN_ON_ONCE(cpuctx->task_ctx != ctx);
if (!ctx->is_active)
cpuctx->task_ctx = NULL;
}
is_active ^= ctx->is_active;
list_for_each_entry(pmu_ctx, &ctx->pmu_ctx_list, pmu_ctx_entry)
__pmu_ctx_sched_out(pmu_ctx, is_active);
}
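/*
 * Test whether two contexts are equivalent, i.e. whether they have both been
 * cloned from the same version of the same context.
 *
 * Equivalence is measured using a generation number in the context that is
 * incremented on each modification to it; see unclone_ctx(), list_add_event()
 * and list_del_event().
 */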
static int context_equiv(struct perf_event_context *ctx1,
struct perf_event_context *ctx2)
{
lockdep_assert_held(&ctx1->lock);
lockdep_assert_held(&ctx2->lock);
if (ctx1->pin_count || ctx2->pin_count)
return 0;
if (ctx1 == ctx2->parent_ctx && ctx1->generation == ctx2->parent_gen)
return 1;
if (ctx1->parent_ctx == ctx2 && ctx1->parent_gen == ctx2->generation)
return 1;
if (ctx1->parent_ctx && ctx1->parent_ctx == ctx2->parent_ctx &&
ctx1->parent_gen == ctx2->parent_gen)
return 1;
return 0;
}
static void __perf_event_sync_stat(struct perf_event *event,
struct perf_event *next_event)
{
u64 value;
if (!event->attr.inherit_stat)
return;
if (event->state == PERF_EVENT_STATE_ACTIVE)
event->pmu->read(event);
perf_event_update_time(event);
value = local64_read(&next_event->count);
value = local64_xchg(&event->count, value);
local64_set(&next_event->count, value);
swap(event->total_time_enabled, next_event->total_time_enabled);
swap(event->total_time_running, next_event->total_time_running);
perf_event_update_userpage(event);
perf_event_update_userpage(next_event);
}
static void perf_event_sync_stat(struct perf_event_context *ctx,
struct perf_event_context *next_ctx)
{
struct perf_event *event, *next_event;
if (!ctx->nr_stat)
return;
update_context_time(ctx);
event = list_first_entry(&ctx->event_list,
struct perf_event, event_entry);
next_event = list_first_entry(&next_ctx->event_list,
struct perf_event, event_entry);
while (&event->event_entry != &ctx->event_list &&
&next_event->event_entry != &next_ctx->event_list) {
__perf_event_sync_stat(event, next_event);
event = list_next_entry(event, event_entry);
next_event = list_next_entry(next_event, event_entry);
}
}
#define double_list_for_each_entry(pos1, pos2, head1, head2, member) \
for (pos1 = list_first_entry(head1, typeof(*pos1), member), \
pos2 = list_first_entry(head2, typeof(*pos2), member); \
!list_entry_is_head(pos1, head1, member) && \
!list_entry_is_head(pos2, head2, member); \
pos1 = list_next_entry(pos1, member), \
pos2 = list_next_entry(pos2, member))
static void perf_event_swap_task_ctx_data(struct perf_event_context *prev_ctx,
struct perf_event_context *next_ctx)
{
struct perf_event_pmu_context *prev_epc, *next_epc;
if (!prev_ctx->nr_task_data)
return;
double_list_for_each_entry(prev_epc, next_epc,
&prev_ctx->pmu_ctx_list, &next_ctx->pmu_ctx_list,
pmu_ctx_entry) {
if (WARN_ON_ONCE(prev_epc->pmu != next_epc->pmu))
continue;
if (prev_epc->pmu->swap_task_ctx)
prev_epc->pmu->swap_task_ctx(prev_epc, next_epc);
else
swap(prev_epc->task_ctx_data, next_epc->task_ctx_data);
}
}
static void perf_ctx_sched_task_cb(struct perf_event_context *ctx, bool sched_in)
{
struct perf_event_pmu_context *pmu_ctx;
struct perf_cpu_pmu_context *cpc;
list_for_each_entry(pmu_ctx, &ctx->pmu_ctx_list, pmu_ctx_entry) {
cpc = this_cpu_ptr(pmu_ctx->pmu->cpu_pmu_context);
if (cpc->sched_cb_usage && pmu_ctx->pmu->sched_task)
pmu_ctx->pmu->sched_task(pmu_ctx, sched_in);
}
}
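/*
 * Remove the outgoing task's events at context-switch time.  When the two
 * tasks' contexts are clones of each other (context_equiv()), simply swap
 * the contexts between the tasks instead of scheduling all events out and
 * back in; otherwise fall back to task_ctx_sched_out().
 */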
static void
perf_event_context_sched_out(struct task_struct *task, struct task_struct *next)
{
struct perf_event_context *ctx = task->perf_event_ctxp;
struct perf_event_context *next_ctx;
struct perf_event_context *parent, *next_parent;
int do_switch = 1;
if (likely(!ctx))
return;
rcu_read_lock();
next_ctx = rcu_dereference(next->perf_event_ctxp);
if (!next_ctx)
goto unlock;
parent = rcu_dereference(ctx->parent_ctx);
next_parent = rcu_dereference(next_ctx->parent_ctx);
if (!parent && !next_parent)
goto unlock;
if (next_parent == ctx || next_ctx == parent || next_parent == parent) {
raw_spin_lock(&ctx->lock);
raw_spin_lock_nested(&next_ctx->lock, SINGLE_DEPTH_NESTING);
if (context_equiv(ctx, next_ctx)) {
perf_ctx_disable(ctx);
if (local_read(&ctx->nr_pending) ||
local_read(&next_ctx->nr_pending)) {
raw_spin_unlock(&next_ctx->lock);
rcu_read_unlock();
goto inside_switch;
}
WRITE_ONCE(ctx->task, next);
WRITE_ONCE(next_ctx->task, task);
perf_ctx_sched_task_cb(ctx, false);
perf_event_swap_task_ctx_data(ctx, next_ctx);
perf_ctx_enable(ctx);
RCU_INIT_POINTER(task->perf_event_ctxp, next_ctx);
RCU_INIT_POINTER(next->perf_event_ctxp, ctx);
do_switch = 0;
perf_event_sync_stat(ctx, next_ctx);
}
raw_spin_unlock(&next_ctx->lock);
raw_spin_unlock(&ctx->lock);
}
unlock:
rcu_read_unlock();
if (do_switch) {
raw_spin_lock(&ctx->lock);
perf_ctx_disable(ctx);
inside_switch:
perf_ctx_sched_task_cb(ctx, false);
task_ctx_sched_out(ctx, EVENT_ALL);
perf_ctx_enable(ctx);
raw_spin_unlock(&ctx->lock);
}
}
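/*
 * PMUs that want a sched_task() callback at context-switch time add their
 * perf_cpu_pmu_context to this per-CPU list; perf_sched_cb_usages lets the
 * context-switch path skip the list walk entirely in the common case.
 */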
static DEFINE_PER_CPU(struct list_head, sched_cb_list);
static DEFINE_PER_CPU(int, perf_sched_cb_usages);
void perf_sched_cb_dec(struct pmu *pmu)
{
struct perf_cpu_pmu_context *cpc = this_cpu_ptr(pmu->cpu_pmu_context);
this_cpu_dec(perf_sched_cb_usages);
barrier();
if (!--cpc->sched_cb_usage)
list_del(&cpc->sched_cb_entry);
}
void perf_sched_cb_inc(struct pmu *pmu)
{
struct perf_cpu_pmu_context *cpc = this_cpu_ptr(pmu->cpu_pmu_context);
if (!cpc->sched_cb_usage++)
list_add(&cpc->sched_cb_entry, this_cpu_ptr(&sched_cb_list));
barrier();
this_cpu_inc(perf_sched_cb_usages);
}
static void __perf_pmu_sched_task(struct perf_cpu_pmu_context *cpc, bool sched_in)
{
struct perf_cpu_context *cpuctx = this_cpu_ptr(&perf_cpu_context);
struct pmu *pmu;
pmu = cpc->epc.pmu;
if (WARN_ON_ONCE(!pmu->sched_task))
return;
perf_ctx_lock(cpuctx, cpuctx->task_ctx);
perf_pmu_disable(pmu);
pmu->sched_task(cpc->task_epc, sched_in);
perf_pmu_enable(pmu);
perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
}
static void perf_pmu_sched_task(struct task_struct *prev,
struct task_struct *next,
bool sched_in)
{
struct perf_cpu_context *cpuctx = this_cpu_ptr(&perf_cpu_context);
struct perf_cpu_pmu_context *cpc;
if (prev == next || cpuctx->task_ctx)
return;
list_for_each_entry(cpc, this_cpu_ptr(&sched_cb_list), sched_cb_entry)
__perf_pmu_sched_task(cpc, sched_in);
}
static void perf_event_switch(struct task_struct *task,
struct task_struct *next_prev, bool sched_in);
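/*
 * Called from the scheduler, with interrupts disabled, when @task is being
 * switched out in favour of @next: run the registered PMU sched_task()
 * callbacks, emit context-switch records if requested, switch the task
 * context and finally switch the perf cgroup.
 */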
void __perf_event_task_sched_out(struct task_struct *task,
struct task_struct *next)
{
if (__this_cpu_read(perf_sched_cb_usages))
perf_pmu_sched_task(task, next, false);
if (atomic_read(&nr_switch_events))
perf_event_switch(task, next, false);
perf_event_context_sched_out(task, next);
perf_cgroup_switch(next);
}
static bool perf_less_group_idx(const void *l, const void *r)
{
const struct perf_event *le = *(const struct perf_event **)l;
const struct perf_event *re = *(const struct perf_event **)r;
return le->group_index < re->group_index;
}
static void swap_ptr(void *l, void *r)
{
void **lp = l, **rp = r;
swap(*lp, *rp);
}
static const struct min_heap_callbacks perf_min_heap = {
.elem_size = sizeof(struct perf_event *),
.less = perf_less_group_idx,
.swp = swap_ptr,
};
static void __heap_add(struct min_heap *heap, struct perf_event *event)
{
struct perf_event **itrs = heap->data;
if (event) {
itrs[heap->nr] = event;
heap->nr++;
}
}
static void __link_epc(struct perf_event_pmu_context *pmu_ctx)
{
struct perf_cpu_pmu_context *cpc;
if (!pmu_ctx->ctx->task)
return;
cpc = this_cpu_ptr(pmu_ctx->pmu->cpu_pmu_context);
WARN_ON_ONCE(cpc->task_epc && cpc->task_epc != pmu_ctx);
cpc->task_epc = pmu_ctx;
}
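/*
 * Visit all events in @groups that are eligible for @cpu and @pmu (and, for
 * the CPU context, the current cgroup hierarchy) in increasing group_index
 * order.  Each (cpu, pmu, cgroup) key has its own ordered run in the RB-tree;
 * a small min-heap merges those runs so @func sees one globally ordered
 * stream.  Iteration stops early if @func returns non-zero.
 */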
static noinline int visit_groups_merge(struct perf_event_context *ctx,
struct perf_event_groups *groups, int cpu,
struct pmu *pmu,
int (*func)(struct perf_event *, void *),
void *data)
{
#ifdef CONFIG_CGROUP_PERF
struct cgroup_subsys_state *css = NULL;
#endif
struct perf_cpu_context *cpuctx = NULL;
struct perf_event *itrs[2];
struct min_heap event_heap;
struct perf_event **evt;
int ret;
if (pmu->filter && pmu->filter(pmu, cpu))
return 0;
if (!ctx->task) {
cpuctx = this_cpu_ptr(&perf_cpu_context);
event_heap = (struct min_heap){
.data = cpuctx->heap,
.nr = 0,
.size = cpuctx->heap_size,
};
lockdep_assert_held(&cpuctx->ctx.lock);
#ifdef CONFIG_CGROUP_PERF
if (cpuctx->cgrp)
css = &cpuctx->cgrp->css;
#endif
} else {
event_heap = (struct min_heap){
.data = itrs,
.nr = 0,
.size = ARRAY_SIZE(itrs),
};
__heap_add(&event_heap, perf_event_groups_first(groups, -1, pmu, NULL));
}
evt = event_heap.data;
__heap_add(&event_heap, perf_event_groups_first(groups, cpu, pmu, NULL));
#ifdef CONFIG_CGROUP_PERF
for (; css; css = css->parent)
__heap_add(&event_heap, perf_event_groups_first(groups, cpu, pmu, css->cgroup));
#endif
if (event_heap.nr) {
__link_epc((*evt)->pmu_ctx);
perf_assert_pmu_disabled((*evt)->pmu_ctx->pmu);
}
min_heapify_all(&event_heap, &perf_min_heap);
while (event_heap.nr) {
ret = func(*evt, data);
if (ret)
return ret;
*evt = perf_event_groups_next(*evt, pmu);
if (*evt)
min_heapify(&event_heap, 0, &perf_min_heap);
else
min_heap_pop(&event_heap, &perf_min_heap);
}
return 0;
}
static inline bool event_update_userpage(struct perf_event *event)
{
if (likely(!atomic_read(&event->mmap_count)))
return false;
perf_event_update_time(event);
perf_event_update_userpage(event);
return true;
}
static inline void group_update_userpage(struct perf_event *group_event)
{
struct perf_event *event;
if (!event_update_userpage(group_event))
return;
for_each_sibling_event(event, group_event)
event_update_userpage(event);
}
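/*
 * Try to schedule one group in.  A pinned group that cannot go on is put in
 * ERROR state; a flexible group that cannot go on marks its PMU context for
 * rotation and restarts the mux hrtimer so it gets a turn later.
 */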
static int merge_sched_in(struct perf_event *event, void *data)
{
struct perf_event_context *ctx = event->ctx;
int *can_add_hw = data;
if (event->state <= PERF_EVENT_STATE_OFF)
return 0;
if (!event_filter_match(event))
return 0;
if (group_can_go_on(event, *can_add_hw)) {
if (!group_sched_in(event, ctx))
list_add_tail(&event->active_list, get_event_list(event));
}
if (event->state == PERF_EVENT_STATE_INACTIVE) {
*can_add_hw = 0;
if (event->attr.pinned) {
perf_cgroup_event_disable(event, ctx);
perf_event_set_state(event, PERF_EVENT_STATE_ERROR);
} else {
struct perf_cpu_pmu_context *cpc;
event->pmu_ctx->rotate_necessary = 1;
cpc = this_cpu_ptr(event->pmu_ctx->pmu->cpu_pmu_context);
perf_mux_hrtimer_restart(cpc);
group_update_userpage(event);
}
}
return 0;
}
static void ctx_pinned_sched_in(struct perf_event_context *ctx, struct pmu *pmu)
{
struct perf_event_pmu_context *pmu_ctx;
int can_add_hw = 1;
if (pmu) {
visit_groups_merge(ctx, &ctx->pinned_groups,
smp_processor_id(), pmu,
merge_sched_in, &can_add_hw);
} else {
list_for_each_entry(pmu_ctx, &ctx->pmu_ctx_list, pmu_ctx_entry) {
can_add_hw = 1;
visit_groups_merge(ctx, &ctx->pinned_groups,
smp_processor_id(), pmu_ctx->pmu,
merge_sched_in, &can_add_hw);
}
}
}
static void ctx_flexible_sched_in(struct perf_event_context *ctx, struct pmu *pmu)
{
struct perf_event_pmu_context *pmu_ctx;
int can_add_hw = 1;
if (pmu) {
visit_groups_merge(ctx, &ctx->flexible_groups,
smp_processor_id(), pmu,
merge_sched_in, &can_add_hw);
} else {
list_for_each_entry(pmu_ctx, &ctx->pmu_ctx_list, pmu_ctx_entry) {
can_add_hw = 1;
visit_groups_merge(ctx, &ctx->flexible_groups,
smp_processor_id(), pmu_ctx->pmu,
merge_sched_in, &can_add_hw);
}
}
}
static void __pmu_ctx_sched_in(struct perf_event_context *ctx, struct pmu *pmu)
{
ctx_flexible_sched_in(ctx, pmu);
}
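/*
 * Schedule @event_type events of @ctx in.  Pinned groups go first so they
 * have the best chance of claiming hardware resources; flexible groups fill
 * whatever capacity remains.
 */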
static void
ctx_sched_in(struct perf_event_context *ctx, enum event_type_t event_type)
{
struct perf_cpu_context *cpuctx = this_cpu_ptr(&perf_cpu_context);
int is_active = ctx->is_active;
lockdep_assert_held(&ctx->lock);
if (likely(!ctx->nr_events))
return;
if (!(is_active & EVENT_TIME)) {
__update_context_time(ctx, false);
perf_cgroup_set_timestamp(cpuctx);
barrier();
}
ctx->is_active |= (event_type | EVENT_TIME);
if (ctx->task) {
if (!is_active)
cpuctx->task_ctx = ctx;
else
WARN_ON_ONCE(cpuctx->task_ctx != ctx);
}
is_active ^= ctx->is_active;
if (is_active & EVENT_PINNED)
ctx_pinned_sched_in(ctx, NULL);
if (is_active & EVENT_FLEXIBLE)
ctx_flexible_sched_in(ctx, NULL);
}
static void perf_event_context_sched_in(struct task_struct *task)
{
struct perf_cpu_context *cpuctx = this_cpu_ptr(&perf_cpu_context);
struct perf_event_context *ctx;
rcu_read_lock();
ctx = rcu_dereference(task->perf_event_ctxp);
if (!ctx)
goto rcu_unlock;
if (cpuctx->task_ctx == ctx) {
perf_ctx_lock(cpuctx, ctx);
perf_ctx_disable(ctx);
perf_ctx_sched_task_cb(ctx, true);
perf_ctx_enable(ctx);
perf_ctx_unlock(cpuctx, ctx);
goto rcu_unlock;
}
perf_ctx_lock(cpuctx, ctx);
if (!ctx->nr_events)
goto unlock;
perf_ctx_disable(ctx);
if (!RB_EMPTY_ROOT(&ctx->pinned_groups.tree)) {
perf_ctx_disable(&cpuctx->ctx);
ctx_sched_out(&cpuctx->ctx, EVENT_FLEXIBLE);
}
perf_event_sched_in(cpuctx, ctx);
perf_ctx_sched_task_cb(cpuctx->task_ctx, true);
if (!RB_EMPTY_ROOT(&ctx->pinned_groups.tree))
perf_ctx_enable(&cpuctx->ctx);
perf_ctx_enable(ctx);
unlock:
perf_ctx_unlock(cpuctx, ctx);
rcu_unlock:
rcu_read_unlock();
}
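/*
 * Called from the scheduler, with interrupts disabled, when @task is being
 * switched in: restore the task's event context, emit context-switch
 * records if requested and run the registered PMU sched_task() callbacks.
 */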
void __perf_event_task_sched_in(struct task_struct *prev,
struct task_struct *task)
{
perf_event_context_sched_in(task);
if (atomic_read(&nr_switch_events))
perf_event_switch(task, prev, true);
if (__this_cpu_read(perf_sched_cb_usages))
perf_pmu_sched_task(prev, task, true);
}
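/*
 * We observed @count events in @nsec nanoseconds with a target rate of
 * attr.sample_freq samples per second, so the new period is
 *
 *              count * NSEC_PER_SEC
 *   period = ----------------------
 *              nsec * sample_freq
 *
 * REDUCE_FLS() below sheds low-order bits from the larger operand of each
 * product until both fit in 64 bits, trading precision for overflow safety.
 */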
static u64 perf_calculate_period(struct perf_event *event, u64 nsec, u64 count)
{
u64 frequency = event->attr.sample_freq;
u64 sec = NSEC_PER_SEC;
u64 divisor, dividend;
int count_fls, nsec_fls, frequency_fls, sec_fls;
count_fls = fls64(count);
nsec_fls = fls64(nsec);
frequency_fls = fls64(frequency);
sec_fls = 30;
#define REDUCE_FLS(a, b) \
do { \
if (a##_fls > b##_fls) { \
a >>= 1; \
a##_fls--; \
} else { \
b >>= 1; \
b##_fls--; \
} \
} while (0)
while (count_fls + sec_fls > 64 && nsec_fls + frequency_fls > 64) {
REDUCE_FLS(nsec, frequency);
REDUCE_FLS(sec, count);
}
if (count_fls + sec_fls > 64) {
divisor = nsec * frequency;
while (count_fls + sec_fls > 64) {
REDUCE_FLS(count, sec);
divisor >>= 1;
}
dividend = count * sec;
} else {
dividend = count * sec;
while (nsec_fls + frequency_fls > 64) {
REDUCE_FLS(nsec, frequency);
dividend >>= 1;
}
divisor = nsec * frequency;
}
if (!divisor)
return dividend;
return div64_u64(dividend, divisor);
}
static DEFINE_PER_CPU(int, perf_throttled_count);
static DEFINE_PER_CPU(u64, perf_throttled_seq);
static void perf_adjust_period(struct perf_event *event, u64 nsec, u64 count, bool disable)
{
struct hw_perf_event *hwc = &event->hw;
s64 period, sample_period;
s64 delta;
period = perf_calculate_period(event, nsec, count);
delta = (s64)(period - hwc->sample_period);
delta = (delta + 7) / 8;
sample_period = hwc->sample_period + delta;
if (!sample_period)
sample_period = 1;
hwc->sample_period = sample_period;
if (local64_read(&hwc->period_left) > 8*sample_period) {
if (disable)
event->pmu->stop(event, PERF_EF_UPDATE);
local64_set(&hwc->period_left, 0);
if (disable)
event->pmu->start(event, PERF_EF_RELOAD);
}
}
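/*
 * Tick-time pass over the context's events: unthrottle any event that hit
 * MAX_INTERRUPTS during the last period, and recompute the sample period of
 * freq-based events from the number of counts seen since the previous tick.
 */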
static void
perf_adjust_freq_unthr_context(struct perf_event_context *ctx, bool unthrottle)
{
struct perf_event *event;
struct hw_perf_event *hwc;
u64 now, period = TICK_NSEC;
s64 delta;
if (!(ctx->nr_freq || unthrottle))
return;
raw_spin_lock(&ctx->lock);
list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
if (event->state != PERF_EVENT_STATE_ACTIVE)
continue;
if (!event_filter_match(event))
continue;
perf_pmu_disable(event->pmu);
hwc = &event->hw;
if (hwc->interrupts == MAX_INTERRUPTS) {
hwc->interrupts = 0;
perf_log_throttle(event, 1);
event->pmu->start(event, 0);
}
if (!event->attr.freq || !event->attr.sample_freq)
goto next;
event->pmu->stop(event, PERF_EF_UPDATE);
now = local64_read(&event->count);
delta = now - hwc->freq_count_stamp;
hwc->freq_count_stamp = now;
if (delta > 0)
perf_adjust_period(event, period, delta, false);
event->pmu->start(event, delta > 0 ? PERF_EF_RELOAD : 0);
next:
perf_pmu_enable(event->pmu);
}
raw_spin_unlock(&ctx->lock);
}
static void rotate_ctx(struct perf_event_context *ctx, struct perf_event *event)
{
if (ctx->rotate_disable)
return;
perf_event_groups_delete(&ctx->flexible_groups, event);
perf_event_groups_insert(&ctx->flexible_groups, event);
}
static inline struct perf_event *
ctx_event_to_rotate(struct perf_event_pmu_context *pmu_ctx)
{
struct perf_event *event;
struct rb_node *node;
struct rb_root *tree;
struct __group_key key = {
.pmu = pmu_ctx->pmu,
};
event = list_first_entry_or_null(&pmu_ctx->flexible_active,
struct perf_event, active_list);
if (event)
goto out;
tree = &pmu_ctx->ctx->flexible_groups.tree;
if (!pmu_ctx->ctx->task) {
key.cpu = smp_processor_id();
node = rb_find_first(&key, tree, __group_cmp_ignore_cgroup);
if (node)
event = __node_2_pe(node);
goto out;
}
key.cpu = -1;
node = rb_find_first(&key, tree, __group_cmp_ignore_cgroup);
if (node) {
event = __node_2_pe(node);
goto out;
}
key.cpu = smp_processor_id();
node = rb_find_first(&key, tree, __group_cmp_ignore_cgroup);
if (node)
event = __node_2_pe(node);
out:
pmu_ctx->rotate_necessary = 0;
return event;
}
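/*
 * Multiplexing: rotate the flexible groups of the CPU and/or task context
 * for this PMU so that groups which could not fit on the hardware get a
 * turn on the next schedule-in.  Returns true if any rotation was done.
 */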
static bool perf_rotate_context(struct perf_cpu_pmu_context *cpc)
{
struct perf_cpu_context *cpuctx = this_cpu_ptr(&perf_cpu_context);
struct perf_event_pmu_context *cpu_epc, *task_epc = NULL;
struct perf_event *cpu_event = NULL, *task_event = NULL;
int cpu_rotate, task_rotate;
struct pmu *pmu;
cpu_epc = &cpc->epc;
pmu = cpu_epc->pmu;
task_epc = cpc->task_epc;
cpu_rotate = cpu_epc->rotate_necessary;
task_rotate = task_epc ? task_epc->rotate_necessary : 0;
if (!(cpu_rotate || task_rotate))
return false;
perf_ctx_lock(cpuctx, cpuctx->task_ctx);
perf_pmu_disable(pmu);
if (task_rotate)
task_event = ctx_event_to_rotate(task_epc);
if (cpu_rotate)
cpu_event = ctx_event_to_rotate(cpu_epc);
if (task_event || (task_epc && cpu_event)) {
update_context_time(task_epc->ctx);
__pmu_ctx_sched_out(task_epc, EVENT_FLEXIBLE);
}
if (cpu_event) {
update_context_time(&cpuctx->ctx);
__pmu_ctx_sched_out(cpu_epc, EVENT_FLEXIBLE);
rotate_ctx(&cpuctx->ctx, cpu_event);
__pmu_ctx_sched_in(&cpuctx->ctx, pmu);
}
if (task_event)
rotate_ctx(task_epc->ctx, task_event);
if (task_event || (task_epc && cpu_event))
__pmu_ctx_sched_in(task_epc->ctx, pmu);
perf_pmu_enable(pmu);
perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
return true;
}
void perf_event_task_tick(void)
{
struct perf_cpu_context *cpuctx = this_cpu_ptr(&perf_cpu_context);
struct perf_event_context *ctx;
int throttled;
lockdep_assert_irqs_disabled();
__this_cpu_inc(perf_throttled_seq);
throttled = __this_cpu_xchg(perf_throttled_count, 0);
tick_dep_clear_cpu(smp_processor_id(), TICK_DEP_BIT_PERF_EVENTS);
perf_adjust_freq_unthr_context(&cpuctx->ctx, !!throttled);
rcu_read_lock();
ctx = rcu_dereference(current->perf_event_ctxp);
if (ctx)
perf_adjust_freq_unthr_context(ctx, !!throttled);
rcu_read_unlock();
}
static int event_enable_on_exec(struct perf_event *event,
struct perf_event_context *ctx)
{
if (!event->attr.enable_on_exec)
return 0;
event->attr.enable_on_exec = 0;
if (event->state >= PERF_EVENT_STATE_INACTIVE)
return 0;
perf_event_set_state(event, PERF_EVENT_STATE_INACTIVE);
return 1;
}
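/*
 * On exec, enable every event in @ctx that has attr.enable_on_exec set.
 * If anything was enabled, unclone the context and reschedule the affected
 * event types so the newly enabled events start counting immediately.
 */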
static void perf_event_enable_on_exec(struct perf_event_context *ctx)
{
struct perf_event_context *clone_ctx = NULL;
enum event_type_t event_type = 0;
struct perf_cpu_context *cpuctx;
struct perf_event *event;
unsigned long flags;
int enabled = 0;
local_irq_save(flags);
if (WARN_ON_ONCE(current->perf_event_ctxp != ctx))
goto out;
if (!ctx->nr_events)
goto out;
cpuctx = this_cpu_ptr(&perf_cpu_context);
perf_ctx_lock(cpuctx, ctx);
ctx_sched_out(ctx, EVENT_TIME);
list_for_each_entry(event, &ctx->event_list, event_entry) {
enabled |= event_enable_on_exec(event, ctx);
event_type |= get_event_type(event);
}
if (enabled) {
clone_ctx = unclone_ctx(ctx);
ctx_resched(cpuctx, ctx, event_type);
} else {
ctx_sched_in(ctx, EVENT_TIME);
}
perf_ctx_unlock(cpuctx, ctx);
out:
local_irq_restore(flags);
if (clone_ctx)
put_ctx(clone_ctx);
}
static void perf_remove_from_owner(struct perf_event *event);
static void perf_event_exit_event(struct perf_event *event,
struct perf_event_context *ctx);
static void perf_event_remove_on_exec(struct perf_event_context *ctx)
{
struct perf_event_context *clone_ctx = NULL;
struct perf_event *event, *next;
unsigned long flags;
bool modified = false;
mutex_lock(&ctx->mutex);
if (WARN_ON_ONCE(ctx->task != current))
goto unlock;
list_for_each_entry_safe(event, next, &ctx->event_list, event_entry) {
if (!event->attr.remove_on_exec)
continue;
if (!is_kernel_event(event))
perf_remove_from_owner(event);
modified = true;
perf_event_exit_event(event, ctx);
}
raw_spin_lock_irqsave(&ctx->lock, flags);
if (modified)
clone_ctx = unclone_ctx(ctx);
raw_spin_unlock_irqrestore(&ctx->lock, flags);
unlock:
mutex_unlock(&ctx->mutex);
if (clone_ctx)
put_ctx(clone_ctx);
}
struct perf_read_data {
struct perf_event *event;
bool group;
int ret;
};
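/*
 * Events with PERF_EV_CAP_READ_ACTIVE_PKG can be read from any CPU in the
 * same package as @event_cpu, which lets perf_event_read() avoid a
 * cross-package IPI when the caller already runs on a suitable CPU.
 */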
static int __perf_event_read_cpu(struct perf_event *event, int event_cpu)
{
u16 local_pkg, event_pkg;
if (event->group_caps & PERF_EV_CAP_READ_ACTIVE_PKG) {
int local_cpu = smp_processor_id();
event_pkg = topology_physical_package_id(event_cpu);
local_pkg = topology_physical_package_id(local_cpu);
if (event_pkg == local_pkg)
return local_cpu;
}
return event_cpu;
}
static void __perf_event_read(void *info)
{
struct perf_read_data *data = info;
struct perf_event *sub, *event = data->event;
struct perf_event_context *ctx = event->ctx;
struct perf_cpu_context *cpuctx = this_cpu_ptr(&perf_cpu_context);
struct pmu *pmu = event->pmu;
if (ctx->task && cpuctx->task_ctx != ctx)
return;
raw_spin_lock(&ctx->lock);
if (ctx->is_active & EVENT_TIME) {
update_context_time(ctx);
update_cgrp_time_from_event(event);
}
perf_event_update_time(event);
if (data->group)
perf_event_update_sibling_time(event);
if (event->state != PERF_EVENT_STATE_ACTIVE)
goto unlock;
if (!data->group) {
pmu->read(event);
data->ret = 0;
goto unlock;
}
pmu->start_txn(pmu, PERF_PMU_TXN_READ);
pmu->read(event);
for_each_sibling_event(sub, event) {
if (sub->state == PERF_EVENT_STATE_ACTIVE) {
sub->pmu->read(sub);
}
}
data->ret = pmu->commit_txn(pmu);
unlock:
raw_spin_unlock(&ctx->lock);
}
static inline u64 perf_event_count(struct perf_event *event)
{
return local64_read(&event->count) + atomic64_read(&event->child_count);
}
static void calc_timer_values(struct perf_event *event,
u64 *now,
u64 *enabled,
u64 *running)
{
u64 ctx_time;
*now = perf_clock();
ctx_time = perf_event_time_now(event, *now);
__perf_update_times(event, ctx_time, enabled, running);
}
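/*
 * IRQ-safe read of a local event, i.e. an event bound either to the current
 * task or to this CPU.  Inherited events cannot be read this way
 * (-EOPNOTSUPP), events for another task/CPU return -EINVAL, and a pinned
 * event that is not currently on this CPU returns -EBUSY.
 */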
int perf_event_read_local(struct perf_event *event, u64 *value,
u64 *enabled, u64 *running)
{
unsigned long flags;
int ret = 0;
local_irq_save(flags);
if (event->attr.inherit) {
ret = -EOPNOTSUPP;
goto out;
}
if ((event->attach_state & PERF_ATTACH_TASK) &&
event->hw.target != current) {
ret = -EINVAL;
goto out;
}
if (!(event->attach_state & PERF_ATTACH_TASK) &&
event->cpu != smp_processor_id()) {
ret = -EINVAL;
goto out;
}
if (event->attr.pinned && event->oncpu != smp_processor_id()) {
ret = -EBUSY;
goto out;
}
if (event->oncpu == smp_processor_id())
event->pmu->read(event);
*value = local64_read(&event->count);
if (enabled || running) {
u64 __enabled, __running, __now;
calc_timer_values(event, &__now, &__enabled, &__running);
if (enabled)
*enabled = __enabled;
if (running)
*running = __running;
}
out:
local_irq_restore(flags);
return ret;
}
static int perf_event_read(struct perf_event *event, bool group)
{
enum perf_event_state state = READ_ONCE(event->state);
int event_cpu, ret = 0;
again:
if (state == PERF_EVENT_STATE_ACTIVE) {
struct perf_read_data data;
smp_rmb();
event_cpu = READ_ONCE(event->oncpu);
if ((unsigned)event_cpu >= nr_cpu_ids)
return 0;
data = (struct perf_read_data){
.event = event,
.group = group,
.ret = 0,
};
preempt_disable();
event_cpu = __perf_event_read_cpu(event, event_cpu);
(void)smp_call_function_single(event_cpu, __perf_event_read, &data, 1);
preempt_enable();
ret = data.ret;
} else if (state == PERF_EVENT_STATE_INACTIVE) {
struct perf_event_context *ctx = event->ctx;
unsigned long flags;
raw_spin_lock_irqsave(&ctx->lock, flags);
state = event->state;
if (state != PERF_EVENT_STATE_INACTIVE) {
raw_spin_unlock_irqrestore(&ctx->lock, flags);
goto again;
}
if (ctx->is_active & EVENT_TIME) {
update_context_time(ctx);
update_cgrp_time_from_event(event);
}
perf_event_update_time(event);
if (group)
perf_event_update_sibling_time(event);
raw_spin_unlock_irqrestore(&ctx->lock, flags);
}
return ret;
}
static void __perf_event_init_context(struct perf_event_context *ctx)
{
raw_spin_lock_init(&ctx->lock);
mutex_init(&ctx->mutex);
INIT_LIST_HEAD(&ctx->pmu_ctx_list);
perf_event_groups_init(&ctx->pinned_groups);
perf_event_groups_init(&ctx->flexible_groups);
INIT_LIST_HEAD(&ctx->event_list);
refcount_set(&ctx->refcount, 1);
}
static void
__perf_init_event_pmu_context(struct perf_event_pmu_context *epc, struct pmu *pmu)
{
epc->pmu = pmu;
INIT_LIST_HEAD(&epc->pmu_ctx_entry);
INIT_LIST_HEAD(&epc->pinned_active);
INIT_LIST_HEAD(&epc->flexible_active);
atomic_set(&epc->refcount, 1);
}
static struct perf_event_context *
alloc_perf_context(struct task_struct *task)
{
struct perf_event_context *ctx;
ctx = kzalloc(sizeof(struct perf_event_context), GFP_KERNEL);
if (!ctx)
return NULL;
__perf_event_init_context(ctx);
if (task)
ctx->task = get_task_struct(task);
return ctx;
}
static struct task_struct *
find_lively_task_by_vpid(pid_t vpid)
{
struct task_struct *task;
rcu_read_lock();
if (!vpid)
task = current;
else
task = find_task_by_vpid(vpid);
if (task)
get_task_struct(task);
rcu_read_unlock();
if (!task)
return ERR_PTR(-ESRCH);
return task;
}
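/*
 * Return a context for @event with its refcount and pin_count raised.  CPU
 * events use the per-CPU context; task events reuse (and unclone) the
 * task's existing context or install a freshly allocated one, retrying if
 * we race with a concurrent installer.
 */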
static struct perf_event_context *
find_get_context(struct task_struct *task, struct perf_event *event)
{
struct perf_event_context *ctx, *clone_ctx = NULL;
struct perf_cpu_context *cpuctx;
unsigned long flags;
int err;
if (!task) {
err = perf_allow_cpu(&event->attr);
if (err)
return ERR_PTR(err);
cpuctx = per_cpu_ptr(&perf_cpu_context, event->cpu);
ctx = &cpuctx->ctx;
get_ctx(ctx);
raw_spin_lock_irqsave(&ctx->lock, flags);
++ctx->pin_count;
raw_spin_unlock_irqrestore(&ctx->lock, flags);
return ctx;
}
err = -EINVAL;
retry:
ctx = perf_lock_task_context(task, &flags);
if (ctx) {
clone_ctx = unclone_ctx(ctx);
++ctx->pin_count;
raw_spin_unlock_irqrestore(&ctx->lock, flags);
if (clone_ctx)
put_ctx(clone_ctx);
} else {
ctx = alloc_perf_context(task);
err = -ENOMEM;
if (!ctx)
goto errout;
err = 0;
mutex_lock(&task->perf_event_mutex);
if (task->flags & PF_EXITING)
err = -ESRCH;
else if (task->perf_event_ctxp)
err = -EAGAIN;
else {
get_ctx(ctx);
++ctx->pin_count;
rcu_assign_pointer(task->perf_event_ctxp, ctx);
}
mutex_unlock(&task->perf_event_mutex);
if (unlikely(err)) {
put_ctx(ctx);
if (err == -EAGAIN)
goto retry;
goto errout;
}
}
return ctx;
errout:
return ERR_PTR(err);
}
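/*
 * Find or create the per-(PMU, context) bridge object.  CPU contexts use
 * the epc embedded in struct perf_cpu_pmu_context; task contexts get a
 * newly allocated one unless an earlier event of the same PMU already
 * attached one to this context.
 */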
static struct perf_event_pmu_context *
find_get_pmu_context(struct pmu *pmu, struct perf_event_context *ctx,
struct perf_event *event)
{
struct perf_event_pmu_context *new = NULL, *epc;
void *task_ctx_data = NULL;
if (!ctx->task) {
struct perf_cpu_pmu_context *cpc;
cpc = per_cpu_ptr(pmu->cpu_pmu_context, event->cpu);
epc = &cpc->epc;
raw_spin_lock_irq(&ctx->lock);
if (!epc->ctx) {
atomic_set(&epc->refcount, 1);
epc->embedded = 1;
list_add(&epc->pmu_ctx_entry, &ctx->pmu_ctx_list);
epc->ctx = ctx;
} else {
WARN_ON_ONCE(epc->ctx != ctx);
atomic_inc(&epc->refcount);
}
raw_spin_unlock_irq(&ctx->lock);
return epc;
}
new = kzalloc(sizeof(*epc), GFP_KERNEL);
if (!new)
return ERR_PTR(-ENOMEM);
if (event->attach_state & PERF_ATTACH_TASK_DATA) {
task_ctx_data = alloc_task_ctx_data(pmu);
if (!task_ctx_data) {
kfree(new);
return ERR_PTR(-ENOMEM);
}
}
__perf_init_event_pmu_context(new, pmu);
raw_spin_lock_irq(&ctx->lock);
list_for_each_entry(epc, &ctx->pmu_ctx_list, pmu_ctx_entry) {
if (epc->pmu == pmu) {
WARN_ON_ONCE(epc->ctx != ctx);
atomic_inc(&epc->refcount);
goto found_epc;
}
}
epc = new;
new = NULL;
list_add(&epc->pmu_ctx_entry, &ctx->pmu_ctx_list);
epc->ctx = ctx;
found_epc:
if (task_ctx_data && !epc->task_ctx_data) {
epc->task_ctx_data = task_ctx_data;
task_ctx_data = NULL;
ctx->nr_task_data++;
}
raw_spin_unlock_irq(&ctx->lock);
free_task_ctx_data(pmu, task_ctx_data);
kfree(new);
return epc;
}
static void get_pmu_ctx(struct perf_event_pmu_context *epc)
{
WARN_ON_ONCE(!atomic_inc_not_zero(&epc->refcount));
}
static void free_epc_rcu(struct rcu_head *head)
{
struct perf_event_pmu_context *epc = container_of(head, typeof(*epc), rcu_head);
kfree(epc->task_ctx_data);
kfree(epc);
}
static void put_pmu_ctx(struct perf_event_pmu_context *epc)
{
struct perf_event_context *ctx = epc->ctx;
unsigned long flags;
if (!atomic_dec_and_raw_lock_irqsave(&epc->refcount, &ctx->lock, flags))
return;
WARN_ON_ONCE(list_empty(&epc->pmu_ctx_entry));
list_del_init(&epc->pmu_ctx_entry);
epc->ctx = NULL;
WARN_ON_ONCE(!list_empty(&epc->pinned_active));
WARN_ON_ONCE(!list_empty(&epc->flexible_active));
raw_spin_unlock_irqrestore(&ctx->lock, flags);
if (epc->embedded)
return;
call_rcu(&epc->rcu_head, free_epc_rcu);
}
static void perf_event_free_filter(struct perf_event *event);
static void free_event_rcu(struct rcu_head *head)
{
struct perf_event *event = container_of(head, typeof(*event), rcu_head);
if (event->ns)
put_pid_ns(event->ns);
perf_event_free_filter(event);
kmem_cache_free(perf_event_cache, event);
}
static void ring_buffer_attach(struct perf_event *event,
struct perf_buffer *rb);
static void detach_sb_event(struct perf_event *event)
{
struct pmu_event_list *pel = per_cpu_ptr(&pmu_sb_events, event->cpu);
raw_spin_lock(&pel->lock);
list_del_rcu(&event->sb_list);
raw_spin_unlock(&pel->lock);
}
static bool is_sb_event(struct perf_event *event)
{
struct perf_event_attr *attr = &event->attr;
if (event->parent)
return false;
if (event->attach_state & PERF_ATTACH_TASK)
return false;
if (attr->mmap || attr->mmap_data || attr->mmap2 ||
attr->comm || attr->comm_exec ||
attr->task || attr->ksymbol ||
attr->context_switch || attr->text_poke ||
attr->bpf_event)
return true;
return false;
}
static void unaccount_pmu_sb_event(struct perf_event *event)
{
if (is_sb_event(event))
detach_sb_event(event);
}
#ifdef CONFIG_NO_HZ_FULL
static DEFINE_SPINLOCK(nr_freq_lock);
#endif
static void unaccount_freq_event_nohz(void)
{
#ifdef CONFIG_NO_HZ_FULL
spin_lock(&nr_freq_lock);
if (atomic_dec_and_test(&nr_freq_events))
tick_nohz_dep_clear(TICK_DEP_BIT_PERF_EVENTS);
spin_unlock(&nr_freq_lock);
#endif
}
static void unaccount_freq_event(void)
{
if (tick_nohz_full_enabled())
unaccount_freq_event_nohz();
else
atomic_dec(&nr_freq_events);
}
static void unaccount_event(struct perf_event *event)
{
bool dec = false;
if (event->parent)
return;
if (event->attach_state & (PERF_ATTACH_TASK | PERF_ATTACH_SCHED_CB))
dec = true;
if (event->attr.mmap || event->attr.mmap_data)
atomic_dec(&nr_mmap_events);
if (event->attr.build_id)
atomic_dec(&nr_build_id_events);
if (event->attr.comm)
atomic_dec(&nr_comm_events);
if (event->attr.namespaces)
atomic_dec(&nr_namespaces_events);
if (event->attr.cgroup)
atomic_dec(&nr_cgroup_events);
if (event->attr.task)
atomic_dec(&nr_task_events);
if (event->attr.freq)
unaccount_freq_event();
if (event->attr.context_switch) {
dec = true;
atomic_dec(&nr_switch_events);
}
if (is_cgroup_event(event))
dec = true;
if (has_branch_stack(event))
dec = true;
if (event->attr.ksymbol)
atomic_dec(&nr_ksymbol_events);
if (event->attr.bpf_event)
atomic_dec(&nr_bpf_events);
if (event->attr.text_poke)
atomic_dec(&nr_text_poke_events);
if (dec) {
if (!atomic_add_unless(&perf_sched_count, -1, 1))
schedule_delayed_work(&perf_sched_work, HZ);
}
unaccount_pmu_sb_event(event);
}
static void perf_sched_delayed(struct work_struct *work)
{
mutex_lock(&perf_sched_mutex);
if (atomic_dec_and_test(&perf_sched_count))
static_branch_disable(&perf_sched_events);
mutex_unlock(&perf_sched_mutex);
}
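/*
 * Exclusive PMUs allow either per-task events or per-CPU events at any one
 * time, never both.  The sign of pmu->exclusive_cnt tracks the current
 * mode: it counts up for task events and down for CPU events.
 */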
static int exclusive_event_init(struct perf_event *event)
{
struct pmu *pmu = event->pmu;
if (!is_exclusive_pmu(pmu))
return 0;
if (event->attach_state & PERF_ATTACH_TASK) {
if (!atomic_inc_unless_negative(&pmu->exclusive_cnt))
return -EBUSY;
} else {
if (!atomic_dec_unless_positive(&pmu->exclusive_cnt))
return -EBUSY;
}
return 0;
}
static void exclusive_event_destroy(struct perf_event *event)
{
struct pmu *pmu = event->pmu;
if (!is_exclusive_pmu(pmu))
return;
if (event->attach_state & PERF_ATTACH_TASK)
atomic_dec(&pmu->exclusive_cnt);
else
atomic_inc(&pmu->exclusive_cnt);
}
static bool exclusive_event_match(struct perf_event *e1, struct perf_event *e2)
{
if ((e1->pmu == e2->pmu) &&
(e1->cpu == e2->cpu ||
e1->cpu == -1 ||
e2->cpu == -1))
return true;
return false;
}
static bool exclusive_event_installable(struct perf_event *event,
struct perf_event_context *ctx)
{
struct perf_event *iter_event;
struct pmu *pmu = event->pmu;
lockdep_assert_held(&ctx->mutex);
if (!is_exclusive_pmu(pmu))
return true;
list_for_each_entry(iter_event, &ctx->event_list, event_entry) {
if (exclusive_event_match(iter_event, event))
return false;
}
return true;
}
static void perf_addr_filters_splice(struct perf_event *event,
struct list_head *head);
static void _free_event(struct perf_event *event)
{
irq_work_sync(&event->pending_irq);
unaccount_event(event);
security_perf_event_free(event);
if (event->rb) {
mutex_lock(&event->mmap_mutex);
ring_buffer_attach(event, NULL);
mutex_unlock(&event->mmap_mutex);
}
if (is_cgroup_event(event))
perf_detach_cgroup(event);
if (!event->parent) {
if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN)
put_callchain_buffers();
}
perf_event_free_bpf_prog(event);
perf_addr_filters_splice(event, NULL);
kfree(event->addr_filter_ranges);
if (event->destroy)
event->destroy(event);
if (event->hw.target)
put_task_struct(event->hw.target);
if (event->pmu_ctx)
put_pmu_ctx(event->pmu_ctx);
if (event->ctx)
put_ctx(event->ctx);
exclusive_event_destroy(event);
module_put(event->pmu->module);
call_rcu(&event->rcu_head, free_event_rcu);
}
static void free_event(struct perf_event *event)
{
if (WARN(atomic_long_cmpxchg(&event->refcount, 1, 0) != 1,
"unexpected event refcount: %ld; ptr=%p\n",
atomic_long_read(&event->refcount), event)) {
return;
}
_free_event(event);
}
static void perf_remove_from_owner(struct perf_event *event)
{
struct task_struct *owner;
rcu_read_lock();
owner = READ_ONCE(event->owner);
if (owner) {
get_task_struct(owner);
}
rcu_read_unlock();
if (owner) {
mutex_lock_nested(&owner->perf_event_mutex, SINGLE_DEPTH_NESTING);
if (event->owner) {
list_del_init(&event->owner_entry);
smp_store_release(&event->owner, NULL);
}
mutex_unlock(&owner->perf_event_mutex);
put_task_struct(owner);
}
}
static void put_event(struct perf_event *event)
{
if (!atomic_long_dec_and_test(&event->refcount))
return;
_free_event(event);
}
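/*
 * Kill an event dead; while event::refcount will preserve the event object,
 * it will not preserve its functionality.  Once the last reference is gone
 * the event is freed, after detaching and freeing all inherited children.
 */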
int perf_event_release_kernel(struct perf_event *event)
{
struct perf_event_context *ctx = event->ctx;
struct perf_event *child, *tmp;
LIST_HEAD(free_list);
if (!ctx) {
WARN_ON_ONCE(event->attach_state &
(PERF_ATTACH_CONTEXT|PERF_ATTACH_GROUP));
goto no_ctx;
}
if (!is_kernel_event(event))
perf_remove_from_owner(event);
ctx = perf_event_ctx_lock(event);
WARN_ON_ONCE(ctx->parent_ctx);
perf_remove_from_context(event, DETACH_GROUP|DETACH_DEAD);
perf_event_ctx_unlock(event, ctx);
again:
mutex_lock(&event->child_mutex);
list_for_each_entry(child, &event->child_list, child_list) {
ctx = READ_ONCE(child->ctx);
get_ctx(ctx);
mutex_unlock(&event->child_mutex);
mutex_lock(&ctx->mutex);
mutex_lock(&event->child_mutex);
tmp = list_first_entry_or_null(&event->child_list,
struct perf_event, child_list);
if (tmp == child) {
perf_remove_from_context(child, DETACH_GROUP);
list_move(&child->child_list, &free_list);
put_event(event);
}
mutex_unlock(&event->child_mutex);
mutex_unlock(&ctx->mutex);
put_ctx(ctx);
goto again;
}
mutex_unlock(&event->child_mutex);
list_for_each_entry_safe(child, tmp, &free_list, child_list) {
void *var = &child->ctx->refcount;
list_del(&child->child_list);
free_event(child);
smp_mb();
wake_up_var(var);
}
no_ctx:
put_event(event);
return 0;
}
EXPORT_SYMBOL_GPL(perf_event_release_kernel);
static int perf_release(struct inode *inode, struct file *file)
{
perf_event_release_kernel(file->private_data);
return 0;
}
static u64 __perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)
{
struct perf_event *child;
u64 total = 0;
*enabled = 0;
*running = 0;
mutex_lock(&event->child_mutex);
(void)perf_event_read(event, false);
total += perf_event_count(event);
*enabled += event->total_time_enabled +
atomic64_read(&event->child_total_time_enabled);
*running += event->total_time_running +
atomic64_read(&event->child_total_time_running);
list_for_each_entry(child, &event->child_list, child_list) {
(void)perf_event_read(child, false);
total += perf_event_count(child);
*enabled += child->total_time_enabled;
*running += child->total_time_running;
}
mutex_unlock(&event->child_mutex);
return total;
}
u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)
{
struct perf_event_context *ctx;
u64 count;
ctx = perf_event_ctx_lock(event);
count = __perf_event_read_value(event, enabled, running);
perf_event_ctx_unlock(event, ctx);
return count;
}
EXPORT_SYMBOL_GPL(perf_event_read_value);
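/*
 * Accumulate one (leader or inherited child) context's worth of values into
 * the PERF_FORMAT_GROUP read buffer under ctx->lock: enabled/running times,
 * then the leader's count/ID/lost followed by each sibling's.
 */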
static int __perf_read_group_add(struct perf_event *leader,
u64 read_format, u64 *values)
{
struct perf_event_context *ctx = leader->ctx;
struct perf_event *sub;
unsigned long flags;
int n = 1;
int ret;
ret = perf_event_read(leader, true);
if (ret)
return ret;
raw_spin_lock_irqsave(&ctx->lock, flags);
if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
values[n++] += leader->total_time_enabled +
atomic64_read(&leader->child_total_time_enabled);
}
if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
values[n++] += leader->total_time_running +
atomic64_read(&leader->child_total_time_running);
}
values[n++] += perf_event_count(leader);
if (read_format & PERF_FORMAT_ID)
values[n++] = primary_event_id(leader);
if (read_format & PERF_FORMAT_LOST)
values[n++] = atomic64_read(&leader->lost_samples);
for_each_sibling_event(sub, leader) {
values[n++] += perf_event_count(sub);
if (read_format & PERF_FORMAT_ID)
values[n++] = primary_event_id(sub);
if (read_format & PERF_FORMAT_LOST)
values[n++] = atomic64_read(&sub->lost_samples);
}
raw_spin_unlock_irqrestore(&ctx->lock, flags);
return 0;
}
static int perf_read_group(struct perf_event *event,
u64 read_format, char __user *buf)
{
struct perf_event *leader = event->group_leader, *child;
struct perf_event_context *ctx = leader->ctx;
int ret;
u64 *values;
lockdep_assert_held(&ctx->mutex);
values = kzalloc(event->read_size, GFP_KERNEL);
if (!values)
return -ENOMEM;
values[0] = 1 + leader->nr_siblings;
mutex_lock(&leader->child_mutex);
ret = __perf_read_group_add(leader, read_format, values);
if (ret)
goto unlock;
list_for_each_entry(child, &leader->child_list, child_list) {
ret = __perf_read_group_add(child, read_format, values);
if (ret)
goto unlock;
}
mutex_unlock(&leader->child_mutex);
ret = event->read_size;
if (copy_to_user(buf, values, event->read_size))
ret = -EFAULT;
goto out;
unlock:
mutex_unlock(&leader->child_mutex);
out:
kfree(values);
return ret;
}
static int perf_read_one(struct perf_event *event,
u64 read_format, char __user *buf)
{
u64 enabled, running;
u64 values[5];
int n = 0;
values[n++] = __perf_event_read_value(event, &enabled, &running);
if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
values[n++] = enabled;
if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
values[n++] = running;
if (read_format & PERF_FORMAT_ID)
values[n++] = primary_event_id(event);
if (read_format & PERF_FORMAT_LOST)
values[n++] = atomic64_read(&event->lost_samples);
if (copy_to_user(buf, values, n * sizeof(u64)))
return -EFAULT;
return n * sizeof(u64);
}
static bool is_event_hup(struct perf_event *event)
{
bool no_children;
if (event->state > PERF_EVENT_STATE_EXIT)
return false;
mutex_lock(&event->child_mutex);
no_children = list_empty(&event->child_list);
mutex_unlock(&event->child_mutex);
return no_children;
}
static ssize_t
__perf_read(struct perf_event *event, char __user *buf, size_t count)
{
u64 read_format = event->attr.read_format;
int ret;
if (event->state == PERF_EVENT_STATE_ERROR)
return 0;
if (count < event->read_size)
return -ENOSPC;
WARN_ON_ONCE(event->ctx->parent_ctx);
if (read_format & PERF_FORMAT_GROUP)
ret = perf_read_group(event, read_format, buf);
else
ret = perf_read_one(event, read_format, buf);
return ret;
}
static ssize_t
perf_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
{
struct perf_event *event = file->private_data;
struct perf_event_context *ctx;
int ret;
ret = security_perf_event_read(event);
if (ret)
return ret;
ctx = perf_event_ctx_lock(event);
ret = __perf_read(event, buf, count);
perf_event_ctx_unlock(event, ctx);
return ret;
}
static __poll_t perf_poll(struct file *file, poll_table *wait)
{
struct perf_event *event = file->private_data;
struct perf_buffer *rb;
__poll_t events = EPOLLHUP;
poll_wait(file, &event->waitq, wait);
if (is_event_hup(event))
return events;
mutex_lock(&event->mmap_mutex);
rb = event->rb;
if (rb)
events = atomic_xchg(&rb->poll, 0);
mutex_unlock(&event->mmap_mutex);
return events;
}
static void _perf_event_reset(struct perf_event *event)
{
(void)perf_event_read(event, false);
local64_set(&event->count, 0);
perf_event_update_userpage(event);
}
u64 perf_event_pause(struct perf_event *event, bool reset)
{
struct perf_event_context *ctx;
u64 count;
ctx = perf_event_ctx_lock(event);
WARN_ON_ONCE(event->attr.inherit);
_perf_event_disable(event);
count = local64_read(&event->count);
if (reset)
local64_set(&event->count, 0);
perf_event_ctx_unlock(event, ctx);
return count;
}
EXPORT_SYMBOL_GPL(perf_event_pause);
static void perf_event_for_each_child(struct perf_event *event,
void (*func)(struct perf_event *))
{
struct perf_event *child;
WARN_ON_ONCE(event->ctx->parent_ctx);
mutex_lock(&event->child_mutex);
func(event);
list_for_each_entry(child, &event->child_list, child_list)
func(child);
mutex_unlock(&event->child_mutex);
}
static void perf_event_for_each(struct perf_event *event,
void (*func)(struct perf_event *))
{
struct perf_event_context *ctx = event->ctx;
struct perf_event *sibling;
lockdep_assert_held(&ctx->mutex);
event = event->group_leader;
perf_event_for_each_child(event, func);
for_each_sibling_event(sibling, event)
perf_event_for_each_child(sibling, func);
}
static void __perf_event_period(struct perf_event *event,
struct perf_cpu_context *cpuctx,
struct perf_event_context *ctx,
void *info)
{
u64 value = *((u64 *)info);
bool active;
if (event->attr.freq) {
event->attr.sample_freq = value;
} else {
event->attr.sample_period = value;
event->hw.sample_period = value;
}
active = (event->state == PERF_EVENT_STATE_ACTIVE);
if (active) {
perf_pmu_disable(event->pmu);
if (event->hw.interrupts == MAX_INTERRUPTS) {
event->hw.interrupts = 0;
perf_log_throttle(event, 1);
}
event->pmu->stop(event, PERF_EF_UPDATE);
}
local64_set(&event->hw.period_left, 0);
if (active) {
event->pmu->start(event, PERF_EF_RELOAD);
perf_pmu_enable(event->pmu);
}
}
static int perf_event_check_period(struct perf_event *event, u64 value)
{
return event->pmu->check_period(event, value);
}
static int _perf_event_period(struct perf_event *event, u64 value)
{
if (!is_sampling_event(event))
return -EINVAL;
if (!value)
return -EINVAL;
if (event->attr.freq && value > sysctl_perf_event_sample_rate)
return -EINVAL;
if (perf_event_check_period(event, value))
return -EINVAL;
if (!event->attr.freq && (value & (1ULL << 63)))
return -EINVAL;
event_function_call(event, __perf_event_period, &value);
return 0;
}
int perf_event_period(struct perf_event *event, u64 value)
{
struct perf_event_context *ctx;
int ret;
ctx = perf_event_ctx_lock(event);
ret = _perf_event_period(event, value);
perf_event_ctx_unlock(event, ctx);
return ret;
}
EXPORT_SYMBOL_GPL(perf_event_period);
static const struct file_operations perf_fops;
static inline int perf_fget_light(int fd, struct fd *p)
{
struct fd f = fdget(fd);
if (!f.file)
return -EBADF;
if (f.file->f_op != &perf_fops) {
fdput(f);
return -EBADF;
}
*p = f;
return 0;
}
static int perf_event_set_output(struct perf_event *event,
struct perf_event *output_event);
static int perf_event_set_filter(struct perf_event *event, void __user *arg);
static int perf_copy_attr(struct perf_event_attr __user *uattr,
struct perf_event_attr *attr);
static long _perf_ioctl(struct perf_event *event, unsigned int cmd, unsigned long arg)
{
void (*func)(struct perf_event *);
u32 flags = arg;
switch (cmd) {
case PERF_EVENT_IOC_ENABLE:
func = _perf_event_enable;
break;
case PERF_EVENT_IOC_DISABLE:
func = _perf_event_disable;
break;
case PERF_EVENT_IOC_RESET:
func = _perf_event_reset;
break;
case PERF_EVENT_IOC_REFRESH:
return _perf_event_refresh(event, arg);
case PERF_EVENT_IOC_PERIOD:
{
u64 value;
if (copy_from_user(&value, (u64 __user *)arg, sizeof(value)))
return -EFAULT;
return _perf_event_period(event, value);
}
case PERF_EVENT_IOC_ID:
{
u64 id = primary_event_id(event);
if (copy_to_user((void __user *)arg, &id, sizeof(id)))
return -EFAULT;
return 0;
}
case PERF_EVENT_IOC_SET_OUTPUT:
{
int ret;
if (arg != -1) {
struct perf_event *output_event;
struct fd output;
ret = perf_fget_light(arg, &output);
if (ret)
return ret;
output_event = output.file->private_data;
ret = perf_event_set_output(event, output_event);
fdput(output);
} else {
ret = perf_event_set_output(event, NULL);
}
return ret;
}
case PERF_EVENT_IOC_SET_FILTER:
return perf_event_set_filter(event, (void __user *)arg);
case PERF_EVENT_IOC_SET_BPF:
{
struct bpf_prog *prog;
int err;
prog = bpf_prog_get(arg);
if (IS_ERR(prog))
return PTR_ERR(prog);
err = perf_event_set_bpf_prog(event, prog, 0);
if (err) {
bpf_prog_put(prog);
return err;
}
return 0;
}
case PERF_EVENT_IOC_PAUSE_OUTPUT: {
struct perf_buffer *rb;
rcu_read_lock();
rb = rcu_dereference(event->rb);
if (!rb || !rb->nr_pages) {
rcu_read_unlock();
return -EINVAL;
}
rb_toggle_paused(rb, !!arg);
rcu_read_unlock();
return 0;
}
case PERF_EVENT_IOC_QUERY_BPF:
return perf_event_query_prog_array(event, (void __user *)arg);
case PERF_EVENT_IOC_MODIFY_ATTRIBUTES: {
struct perf_event_attr new_attr;
int err = perf_copy_attr((struct perf_event_attr __user *)arg,
&new_attr);
if (err)
return err;
return perf_event_modify_attr(event, &new_attr);
}
default:
return -ENOTTY;
}
if (flags & PERF_IOC_FLAG_GROUP)
perf_event_for_each(event, func);
else
perf_event_for_each_child(event, func);
return 0;
}
static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
struct perf_event *event = file->private_data;
struct perf_event_context *ctx;
long ret;
ret = security_perf_event_write(event);
if (ret)
return ret;
ctx = perf_event_ctx_lock(event);
ret = _perf_ioctl(event, cmd, arg);
perf_event_ctx_unlock(event, ctx);
return ret;
}
#ifdef CONFIG_COMPAT
static long perf_compat_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
switch (_IOC_NR(cmd)) {
case _IOC_NR(PERF_EVENT_IOC_SET_FILTER):
case _IOC_NR(PERF_EVENT_IOC_ID):
case _IOC_NR(PERF_EVENT_IOC_QUERY_BPF):
case _IOC_NR(PERF_EVENT_IOC_MODIFY_ATTRIBUTES):
if (_IOC_SIZE(cmd) == sizeof(compat_uptr_t)) {
cmd &= ~IOCSIZE_MASK;
cmd |= sizeof(void *) << IOCSIZE_SHIFT;
}
break;
}
return perf_ioctl(file, cmd, arg);
}
#else
# define perf_compat_ioctl NULL
#endif
int perf_event_task_enable(void)
{
struct perf_event_context *ctx;
struct perf_event *event;
mutex_lock(&current->perf_event_mutex);
list_for_each_entry(event, &current->perf_event_list, owner_entry) {
ctx = perf_event_ctx_lock(event);
perf_event_for_each_child(event, _perf_event_enable);
perf_event_ctx_unlock(event, ctx);
}
mutex_unlock(&current->perf_event_mutex);
return 0;
}
int perf_event_task_disable(void)
{
struct perf_event_context *ctx;
struct perf_event *event;
mutex_lock(&current->perf_event_mutex);
list_for_each_entry(event, &current->perf_event_list, owner_entry) {
ctx = perf_event_ctx_lock(event);
perf_event_for_each_child(event, _perf_event_disable);
perf_event_ctx_unlock(event, ctx);
}
mutex_unlock(&current->perf_event_mutex);
return 0;
}
static int perf_event_index(struct perf_event *event)
{
if (event->hw.state & PERF_HES_STOPPED)
return 0;
if (event->state != PERF_EVENT_STATE_ACTIVE)
return 0;
return event->pmu->event_idx(event);
}
static void perf_event_init_userpage(struct perf_event *event)
{
struct perf_event_mmap_page *userpg;
struct perf_buffer *rb;
rcu_read_lock();
rb = rcu_dereference(event->rb);
if (!rb)
goto unlock;
userpg = rb->user_page;
userpg->cap_bit0_is_deprecated = 1;
userpg->size = offsetof(struct perf_event_mmap_page, __reserved);
userpg->data_offset = PAGE_SIZE;
userpg->data_size = perf_data_size(rb);
unlock:
rcu_read_unlock();
}
void __weak arch_perf_update_userpage(
struct perf_event *event, struct perf_event_mmap_page *userpg, u64 now)
{
}
void perf_event_update_userpage(struct perf_event *event)
{
struct perf_event_mmap_page *userpg;
struct perf_buffer *rb;
u64 enabled, running, now;
rcu_read_lock();
rb = rcu_dereference(event->rb);
if (!rb)
goto unlock;
calc_timer_values(event, &now, &enabled, &running);
userpg = rb->user_page;
preempt_disable();
++userpg->lock;
barrier();
userpg->index = perf_event_index(event);
userpg->offset = perf_event_count(event);
if (userpg->index)
userpg->offset -= local64_read(&event->hw.prev_count);
userpg->time_enabled = enabled +
atomic64_read(&event->child_total_time_enabled);
userpg->time_running = running +
atomic64_read(&event->child_total_time_running);
arch_perf_update_userpage(event, userpg, now);
barrier();
++userpg->lock;
preempt_enable();
unlock:
rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(perf_event_update_userpage);
static vm_fault_t perf_mmap_fault(struct vm_fault *vmf)
{
struct perf_event *event = vmf->vma->vm_file->private_data;
struct perf_buffer *rb;
vm_fault_t ret = VM_FAULT_SIGBUS;
if (vmf->flags & FAULT_FLAG_MKWRITE) {
if (vmf->pgoff == 0)
ret = 0;
return ret;
}
rcu_read_lock();
rb = rcu_dereference(event->rb);
if (!rb)
goto unlock;
if (vmf->pgoff && (vmf->flags & FAULT_FLAG_WRITE))
goto unlock;
vmf->page = perf_mmap_to_page(rb, vmf->pgoff);
if (!vmf->page)
goto unlock;
get_page(vmf->page);
vmf->page->mapping = vmf->vma->vm_file->f_mapping;
vmf->page->index = vmf->pgoff;
ret = 0;
unlock:
rcu_read_unlock();
return ret;
}
static void ring_buffer_attach(struct perf_event *event,
struct perf_buffer *rb)
{
struct perf_buffer *old_rb = NULL;
unsigned long flags;
WARN_ON_ONCE(event->parent);
if (event->rb) {
WARN_ON_ONCE(event->rcu_pending);
old_rb = event->rb;
spin_lock_irqsave(&old_rb->event_lock, flags);
list_del_rcu(&event->rb_entry);
spin_unlock_irqrestore(&old_rb->event_lock, flags);
event->rcu_batches = get_state_synchronize_rcu();
event->rcu_pending = 1;
}
if (rb) {
if (event->rcu_pending) {
cond_synchronize_rcu(event->rcu_batches);
event->rcu_pending = 0;
}
spin_lock_irqsave(&rb->event_lock, flags);
list_add_rcu(&event->rb_entry, &rb->event_list);
spin_unlock_irqrestore(&rb->event_lock, flags);
}
if (has_aux(event))
perf_event_stop(event, 0);
rcu_assign_pointer(event->rb, rb);
if (old_rb) {
ring_buffer_put(old_rb);
wake_up_all(&event->waitq);
}
}
static void ring_buffer_wakeup(struct perf_event *event)
{
struct perf_buffer *rb;
if (event->parent)
event = event->parent;
rcu_read_lock();
rb = rcu_dereference(event->rb);
if (rb) {
list_for_each_entry_rcu(event, &rb->event_list, rb_entry)
wake_up_all(&event->waitq);
}
rcu_read_unlock();
}
struct perf_buffer *ring_buffer_get(struct perf_event *event)
{
struct perf_buffer *rb;
if (event->parent)
event = event->parent;
rcu_read_lock();
rb = rcu_dereference(event->rb);
if (rb) {
if (!refcount_inc_not_zero(&rb->refcount))
rb = NULL;
}
rcu_read_unlock();
return rb;
}
void ring_buffer_put(struct perf_buffer *rb)
{
if (!refcount_dec_and_test(&rb->refcount))
return;
WARN_ON_ONCE(!list_empty(&rb->event_list));
call_rcu(&rb->rcu_head, rb_free_rcu);
}
static void perf_mmap_open(struct vm_area_struct *vma)
{
struct perf_event *event = vma->vm_file->private_data;
atomic_inc(&event->mmap_count);
atomic_inc(&event->rb->mmap_count);
if (vma->vm_pgoff)
atomic_inc(&event->rb->aux_mmap_count);
if (event->pmu->event_mapped)
event->pmu->event_mapped(event, vma->vm_mm);
}
static void perf_pmu_output_stop(struct perf_event *event);
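/*
 * A buffer can be mmap()ed multiple times, either directly through the same
 * event or through other events via PERF_EVENT_IOC_SET_OUTPUT.  To undo the
 * VM accounting done in perf_mmap() we must tear the buffer down here, while
 * we still have a VM context, which means detaching every event that
 * redirects into it.
 */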
static void perf_mmap_close(struct vm_area_struct *vma)
{
struct perf_event *event = vma->vm_file->private_data;
struct perf_buffer *rb = ring_buffer_get(event);
struct user_struct *mmap_user = rb->mmap_user;
int mmap_locked = rb->mmap_locked;
unsigned long size = perf_data_size(rb);
bool detach_rest = false;
if (event->pmu->event_unmapped)
event->pmu->event_unmapped(event, vma->vm_mm);
if (rb_has_aux(rb) && vma->vm_pgoff == rb->aux_pgoff &&
atomic_dec_and_mutex_lock(&rb->aux_mmap_count, &event->mmap_mutex)) {
perf_pmu_output_stop(event);
atomic_long_sub(rb->aux_nr_pages - rb->aux_mmap_locked, &mmap_user->locked_vm);
atomic64_sub(rb->aux_mmap_locked, &vma->vm_mm->pinned_vm);
rb_free_aux(rb);
WARN_ON_ONCE(refcount_read(&rb->aux_refcount));
mutex_unlock(&event->mmap_mutex);
}
if (atomic_dec_and_test(&rb->mmap_count))
detach_rest = true;
if (!atomic_dec_and_mutex_lock(&event->mmap_count, &event->mmap_mutex))
goto out_put;
ring_buffer_attach(event, NULL);
mutex_unlock(&event->mmap_mutex);
if (!detach_rest)
goto out_put;
again:
rcu_read_lock();
list_for_each_entry_rcu(event, &rb->event_list, rb_entry) {
if (!atomic_long_inc_not_zero(&event->refcount)) {
continue;
}
rcu_read_unlock();
mutex_lock(&event->mmap_mutex);
if (event->rb == rb)
ring_buffer_attach(event, NULL);
mutex_unlock(&event->mmap_mutex);
put_event(event);
goto again;
}
rcu_read_unlock();
atomic_long_sub((size >> PAGE_SHIFT) + 1 - mmap_locked,
&mmap_user->locked_vm);
atomic64_sub(mmap_locked, &vma->vm_mm->pinned_vm);
free_uid(mmap_user);
out_put:
ring_buffer_put(rb);
}
static const struct vm_operations_struct perf_mmap_vmops = {
.open = perf_mmap_open,
.close = perf_mmap_close,
.fault = perf_mmap_fault,
.page_mkwrite = perf_mmap_fault,
};
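/*
 * mmap() of a perf event: pgoff 0 maps the user page plus the data pages of
 * the ring buffer, a non-zero pgoff maps the AUX area of an already mapped
 * buffer.  Pages beyond the per-user perf_event_mlock allowance are charged
 * to the mm's pinned_vm and checked against RLIMIT_MEMLOCK.
 */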
static int perf_mmap(struct file *file, struct vm_area_struct *vma)
{
struct perf_event *event = file->private_data;
unsigned long user_locked, user_lock_limit;
struct user_struct *user = current_user();
struct perf_buffer *rb = NULL;
unsigned long locked, lock_limit;
unsigned long vma_size;
unsigned long nr_pages;
long user_extra = 0, extra = 0;
int ret = 0, flags = 0;
if (event->cpu == -1 && event->attr.inherit)
return -EINVAL;
if (!(vma->vm_flags & VM_SHARED))
return -EINVAL;
ret = security_perf_event_read(event);
if (ret)
return ret;
vma_size = vma->vm_end - vma->vm_start;
if (vma->vm_pgoff == 0) {
nr_pages = (vma_size / PAGE_SIZE) - 1;
} else {
u64 aux_offset, aux_size;
if (!event->rb)
return -EINVAL;
nr_pages = vma_size / PAGE_SIZE;
mutex_lock(&event->mmap_mutex);
ret = -EINVAL;
rb = event->rb;
if (!rb)
goto aux_unlock;
aux_offset = READ_ONCE(rb->user_page->aux_offset);
aux_size = READ_ONCE(rb->user_page->aux_size);
if (aux_offset < perf_data_size(rb) + PAGE_SIZE)
goto aux_unlock;
if (aux_offset != vma->vm_pgoff << PAGE_SHIFT)
goto aux_unlock;
if (rb_has_aux(rb) && rb->aux_pgoff != vma->vm_pgoff)
goto aux_unlock;
if (aux_size != vma_size || aux_size != nr_pages * PAGE_SIZE)
goto aux_unlock;
if (rb_has_aux(rb) && rb->aux_nr_pages != nr_pages)
goto aux_unlock;
if (!is_power_of_2(nr_pages))
goto aux_unlock;
if (!atomic_inc_not_zero(&rb->mmap_count))
goto aux_unlock;
if (rb_has_aux(rb)) {
atomic_inc(&rb->aux_mmap_count);
ret = 0;
goto unlock;
}
atomic_set(&rb->aux_mmap_count, 1);
user_extra = nr_pages;
goto accounting;
}
if (nr_pages != 0 && !is_power_of_2(nr_pages))
return -EINVAL;
if (vma_size != PAGE_SIZE * (1 + nr_pages))
return -EINVAL;
WARN_ON_ONCE(event->ctx->parent_ctx);
again:
mutex_lock(&event->mmap_mutex);
if (event->rb) {
if (data_page_nr(event->rb) != nr_pages) {
ret = -EINVAL;
goto unlock;
}
if (!atomic_inc_not_zero(&event->rb->mmap_count)) {
ring_buffer_attach(event, NULL);
mutex_unlock(&event->mmap_mutex);
goto again;
}
goto unlock;
}
user_extra = nr_pages + 1;
accounting:
user_lock_limit = sysctl_perf_event_mlock >> (PAGE_SHIFT - 10);
user_lock_limit *= num_online_cpus();
user_locked = atomic_long_read(&user->locked_vm);
if (user_locked > user_lock_limit)
user_locked = user_lock_limit;
user_locked += user_extra;
if (user_locked > user_lock_limit) {
extra = user_locked - user_lock_limit;
user_extra -= extra;
}
lock_limit = rlimit(RLIMIT_MEMLOCK);
lock_limit >>= PAGE_SHIFT;
locked = atomic64_read(&vma->vm_mm->pinned_vm) + extra;
if ((locked > lock_limit) && perf_is_paranoid() &&
!capable(CAP_IPC_LOCK)) {
ret = -EPERM;
goto unlock;
}
WARN_ON(!rb && event->rb);
if (vma->vm_flags & VM_WRITE)
flags |= RING_BUFFER_WRITABLE;
if (!rb) {
rb = rb_alloc(nr_pages,
event->attr.watermark ? event->attr.wakeup_watermark : 0,
event->cpu, flags);
if (!rb) {
ret = -ENOMEM;
goto unlock;
}
atomic_set(&rb->mmap_count, 1);
rb->mmap_user = get_current_user();
rb->mmap_locked = extra;
ring_buffer_attach(event, rb);
perf_event_update_time(event);
perf_event_init_userpage(event);
perf_event_update_userpage(event);
} else {
ret = rb_alloc_aux(rb, event, vma->vm_pgoff, nr_pages,
event->attr.aux_watermark, flags);
if (!ret)
rb->aux_mmap_locked = extra;
}
unlock:
if (!ret) {
atomic_long_add(user_extra, &user->locked_vm);
atomic64_add(extra, &vma->vm_mm->pinned_vm);
atomic_inc(&event->mmap_count);
} else if (rb) {
atomic_dec(&rb->mmap_count);
}
aux_unlock:
mutex_unlock(&event->mmap_mutex);
vm_flags_set(vma, VM_DONTCOPY | VM_DONTEXPAND | VM_DONTDUMP);
vma->vm_ops = &perf_mmap_vmops;
if (event->pmu->event_mapped)
event->pmu->event_mapped(event, vma->vm_mm);
return ret;
}
static int perf_fasync(int fd, struct file *filp, int on)
{
struct inode *inode = file_inode(filp);
struct perf_event *event = filp->private_data;
int retval;
inode_lock(inode);
retval = fasync_helper(fd, filp, on, &event->fasync);
inode_unlock(inode);
if (retval < 0)
return retval;
return 0;
}
static const struct file_operations perf_fops = {
.llseek = no_llseek,
.release = perf_release,
.read = perf_read,
.poll = perf_poll,
.unlocked_ioctl = perf_ioctl,
.compat_ioctl = perf_compat_ioctl,
.mmap = perf_mmap,
.fasync = perf_fasync,
};
static inline struct fasync_struct **perf_event_fasync(struct perf_event *event)
{
if (event->parent)
event = event->parent;
return &event->fasync;
}
void perf_event_wakeup(struct perf_event *event)
{
ring_buffer_wakeup(event);
if (event->pending_kill) {
kill_fasync(perf_event_fasync(event), SIGIO, event->pending_kill);
event->pending_kill = 0;
}
}
static void perf_sigtrap(struct perf_event *event)
{
if (WARN_ON_ONCE(event->ctx->task != current))
return;
if (current->flags & PF_EXITING)
return;
send_sig_perf((void __user *)event->pending_addr,
event->attr.type, event->attr.sig_data);
}
static void __perf_pending_irq(struct perf_event *event)
{
int cpu = READ_ONCE(event->oncpu);
if (cpu < 0)
return;
if (cpu == smp_processor_id()) {
if (event->pending_sigtrap) {
event->pending_sigtrap = 0;
perf_sigtrap(event);
local_dec(&event->ctx->nr_pending);
}
if (event->pending_disable) {
event->pending_disable = 0;
perf_event_disable_local(event);
}
return;
}
irq_work_queue_on(&event->pending_irq, cpu);
}
static void perf_pending_irq(struct irq_work *entry)
{
struct perf_event *event = container_of(entry, struct perf_event, pending_irq);
int rctx;
rctx = perf_swevent_get_recursion_context();
if (event->pending_wakeup) {
event->pending_wakeup = 0;
perf_event_wakeup(event);
}
__perf_pending_irq(event);
if (rctx >= 0)
perf_swevent_put_recursion_context(rctx);
}
static void perf_pending_task(struct callback_head *head)
{
struct perf_event *event = container_of(head, struct perf_event, pending_task);
int rctx;
preempt_disable_notrace();
rctx = perf_swevent_get_recursion_context();
if (event->pending_work) {
event->pending_work = 0;
perf_sigtrap(event);
local_dec(&event->ctx->nr_pending);
}
if (rctx >= 0)
perf_swevent_put_recursion_context(rctx);
preempt_enable_notrace();
put_event(event);
}
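/*
 * Guest state callbacks (registered by hypervisors such as KVM), routed
 * through static calls that default to returning 0 while no callbacks
 * are registered.
 */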
#ifdef CONFIG_GUEST_PERF_EVENTS
struct perf_guest_info_callbacks __rcu *perf_guest_cbs;
DEFINE_STATIC_CALL_RET0(__perf_guest_state, *perf_guest_cbs->state);
DEFINE_STATIC_CALL_RET0(__perf_guest_get_ip, *perf_guest_cbs->get_ip);
DEFINE_STATIC_CALL_RET0(__perf_guest_handle_intel_pt_intr, *perf_guest_cbs->handle_intel_pt_intr);
void perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *cbs)
{
if (WARN_ON_ONCE(rcu_access_pointer(perf_guest_cbs)))
return;
rcu_assign_pointer(perf_guest_cbs, cbs);
static_call_update(__perf_guest_state, cbs->state);
static_call_update(__perf_guest_get_ip, cbs->get_ip);
if (cbs->handle_intel_pt_intr)
static_call_update(__perf_guest_handle_intel_pt_intr,
cbs->handle_intel_pt_intr);
}
EXPORT_SYMBOL_GPL(perf_register_guest_info_callbacks);
void perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks *cbs)
{
if (WARN_ON_ONCE(rcu_access_pointer(perf_guest_cbs) != cbs))
return;
rcu_assign_pointer(perf_guest_cbs, NULL);
static_call_update(__perf_guest_state, (void *)&__static_call_return0);
static_call_update(__perf_guest_get_ip, (void *)&__static_call_return0);
static_call_update(__perf_guest_handle_intel_pt_intr,
(void *)&__static_call_return0);
synchronize_rcu();
}
EXPORT_SYMBOL_GPL(perf_unregister_guest_info_callbacks);
#endif
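/*
 * Emit the register values selected by @mask, one u64 per set bit,
 * lowest bit first.
 */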
static void
perf_output_sample_regs(struct perf_output_handle *handle,
struct pt_regs *regs, u64 mask)
{
int bit;
DECLARE_BITMAP(_mask, 64);
bitmap_from_u64(_mask, mask);
for_each_set_bit(bit, _mask, sizeof(mask) * BITS_PER_BYTE) {
u64 val;
val = perf_reg_value(regs, bit);
perf_output_put(handle, val);
}
}
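/*
 * If the sample was taken in user mode, the interrupted registers are
 * the user registers; otherwise try to recover them for a user task,
 * and report PERF_SAMPLE_REGS_ABI_NONE for kernel threads.
 */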
static void perf_sample_regs_user(struct perf_regs *regs_user,
struct pt_regs *regs)
{
if (user_mode(regs)) {
regs_user->abi = perf_reg_abi(current);
regs_user->regs = regs;
} else if (!(current->flags & PF_KTHREAD)) {
perf_get_regs_user(regs_user, regs);
} else {
regs_user->abi = PERF_SAMPLE_REGS_ABI_NONE;
regs_user->regs = NULL;
}
}
static void perf_sample_regs_intr(struct perf_regs *regs_intr,
struct pt_regs *regs)
{
regs_intr->regs = regs;
regs_intr->abi = perf_reg_abi(current);
}
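/* How much user stack lies between the stack pointer and TASK_SIZE. */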
static u64 perf_ustack_task_size(struct pt_regs *regs)
{
unsigned long addr = perf_user_stack_pointer(regs);
if (!addr || addr >= TASK_SIZE)
return 0;
return TASK_SIZE - addr;
}
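/*
 * Clamp the requested user-stack dump so it fits both the task's stack
 * (at most USHRT_MAX) and the u16 sample size, accounting for the two
 * u64 fields (dump size and dyn_size) that frame the dump itself.
 */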
static u16
perf_sample_ustack_size(u16 stack_size, u16 header_size,
struct pt_regs *regs)
{
u64 task_size;
if (!regs)
return 0;
task_size = min((u64) USHRT_MAX, perf_ustack_task_size(regs));
stack_size = min(stack_size, (u16) task_size);
header_size += 2 * sizeof(u64);
if ((u16) (header_size + stack_size) < header_size) {
stack_size = USHRT_MAX - header_size - sizeof(u64);
stack_size = round_up(stack_size, sizeof(u64));
}
return stack_size;
}
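/*
 * Output layout for PERF_SAMPLE_STACK_USER: the static dump size, the
 * stack data copied from the user stack pointer, and the dynamic size
 * that was actually copied (any shortfall is skipped over).
 */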
static void
perf_output_sample_ustack(struct perf_output_handle *handle, u64 dump_size,
struct pt_regs *regs)
{
if (!regs) {
u64 size = 0;
perf_output_put(handle, size);
} else {
unsigned long sp;
unsigned int rem;
u64 dyn_size;
perf_output_put(handle, dump_size);
sp = perf_user_stack_pointer(regs);
rem = __output_copy_user(handle, (void *) sp, dump_size);
dyn_size = dump_size - rem;
perf_output_skip(handle, rem);
perf_output_put(handle, dyn_size);
}
}
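/*
 * Work out how much AUX data to attach to this sample: zero unless the
 * sampler is active on this CPU, clamped to the AUX buffer size and
 * aligned to u64. An NMI that lands while AUX sampling is already in
 * progress (aux_in_sampling) gets no AUX payload.
 */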
static unsigned long perf_prepare_sample_aux(struct perf_event *event,
struct perf_sample_data *data,
size_t size)
{
struct perf_event *sampler = event->aux_event;
struct perf_buffer *rb;
data->aux_size = 0;
if (!sampler)
goto out;
if (WARN_ON_ONCE(READ_ONCE(sampler->state) != PERF_EVENT_STATE_ACTIVE))
goto out;
if (WARN_ON_ONCE(READ_ONCE(sampler->oncpu) != smp_processor_id()))
goto out;
rb = ring_buffer_get(sampler);
if (!rb)
goto out;
if (READ_ONCE(rb->aux_in_sampling)) {
data->aux_size = 0;
} else {
size = min_t(size_t, size, perf_aux_size(rb));
data->aux_size = ALIGN(size, sizeof(u64));
}
ring_buffer_put(rb);
out:
return data->aux_size;
}
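/*
 * Call the PMU's snapshot_aux() with interrupts disabled and
 * rb->aux_in_sampling set, so that an intervening NMI sample does not
 * recurse into AUX sampling (see perf_prepare_sample_aux()).
 */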
static long perf_pmu_snapshot_aux(struct perf_buffer *rb,
struct perf_event *event,
struct perf_output_handle *handle,
unsigned long size)
{
unsigned long flags;
long ret;
local_irq_save(flags);
WRITE_ONCE(rb->aux_in_sampling, 1);
barrier();
ret = event->pmu->snapshot_aux(event, handle, size);
barrier();
WRITE_ONCE(rb->aux_in_sampling, 0);
local_irq_restore(flags);
return ret;
}
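/*
 * Write data->aux_size bytes of AUX data into the sample, zero-padding
 * up to the u64-aligned size recorded by perf_prepare_sample_aux().
 */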
static void perf_aux_sample_output(struct perf_event *event,
struct perf_output_handle *handle,
struct perf_sample_data *data)
{
struct perf_event *sampler = event->aux_event;
struct perf_buffer *rb;
unsigned long pad;
long size;
if (WARN_ON_ONCE(!sampler || !data->aux_size))
return;
rb = ring_buffer_get(sampler);
if (!rb)
return;
size = perf_pmu_snapshot_aux(rb, sampler, handle, data->aux_size);
if (WARN_ON_ONCE(size < 0))
goto out_put;
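	/*
	 * Any pad comes from ALIGN()ing data->aux_size up to a u64 boundary
	 * in perf_prepare_sample_aux(), so it should never reach sizeof(u64).
	 */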
pad = data->aux_size - size;
if (WARN_ON_ONCE(pad >= sizeof(u64)))
pad = 8;
if (pad) {
u64 zero = 0;
perf_output_copy(handle, &zero, pad);
}
out_put:
ring_buffer_put(rb);
}
#define PERF_SAMPLE_ID_ALL (PERF_SAMPLE_TID | PERF_SAMPLE_TIME | \
PERF_SAMPLE_ID | PERF_SAMPLE_STREAM_ID | \
PERF_SAMPLE_CPU | PERF_SAMPLE_IDENTIFIER)
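/*
 * Fill in the sample-id fields (tid, time, id, stream_id, cpu) that
 * @sample_type asks for.
 */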
static void __perf_event_header__init_id(struct perf_sample_data *data,
struct perf_event *event,
u64 sample_type)
{
data->type = event->attr.sample_type;
data->sample_flags |= data->type & PERF_SAMPLE_ID_ALL;
if (sample_type & PERF_SAMPLE_TID) {
data->tid_entry.pid = perf_event_pid(event, current);
data->tid_entry.tid = perf_event_tid(event, current);
}
if (sample_type & PERF_SAMPLE_TIME)
data->time = perf_event_clock(event);
if (sample_type & (PERF_SAMPLE_ID | PERF_SAMPLE_IDENTIFIER))
data->id = primary_event_id(event);
if (sample_type & PERF_SAMPLE_STREAM_ID)
data->stream_id = event->id;
if (sample_type & PERF_SAMPLE_CPU) {
data->cpu_entry.cpu = raw_smp_processor_id();
data->cpu_entry.reserved = 0;
}
}
void perf_event_header__init_id(struct perf_event_header *header,
struct perf_sample_data *data,
struct perf_event *event)
{
if (event->attr.sample_id_all) {
header->size += event->id_header_size;
__perf_event_header__init_id(data, event, event->attr.sample_type);
}
}
static void __perf_event__output_id_sample(struct perf_output_handle *handle,
struct perf_sample_data *data)
{
u64 sample_type = data->type;
if (sample_type & PERF_SAMPLE_TID)
perf_output_put(handle, data->tid_entry);
if (sample_type & PERF_SAMPLE_TIME)
perf_output_put(handle, data->time);
if (sample_type & PERF_SAMPLE_ID)
perf_output_put(handle, data->id);
if (sample_type & PERF_SAMPLE_STREAM_ID)
perf_output_put(handle, data->stream_id);
if (sample_type & PERF_SAMPLE_CPU)
perf_output_put(handle, data->cpu_entry);
if (sample_type & PERF_SAMPLE_IDENTIFIER)
perf_output_put(handle, data->id);
}
void perf_event__output_id_sample(struct perf_event *event,
struct perf_output_handle *handle,
struct perf_sample_data *sample)
{
if (event->attr.sample_id_all)
__perf_event__output_id_sample(handle, sample);
}
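/*
 * Emit the read_format payload for a single event: count, optional
 * enabled/running times (including children), optional id and
 * lost-sample count.
 */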
static void perf_output_read_one(struct perf_output_handle *handle,
struct perf_event *event,
u64 enabled, u64 running)
{
u64 read_format = event->attr.read_format;
u64 values[5];
int n = 0;
values[n++] = perf_event_count(event);
if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
values[n++] = enabled +
atomic64_read(&event->child_total_time_enabled);
}
if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
values[n++] = running +
atomic64_read(&event->child_total_time_running);
}
if (read_format & PERF_FORMAT_ID)
values[n++] = primary_event_id(event);
if (read_format & PERF_FORMAT_LOST)
values[n++] = atomic64_read(&event->lost_samples);
__output_copy(handle, values, n * sizeof(u64));
}
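/*
 * Emit the read_format payload for the whole group. Interrupts are
 * disabled so the group cannot be rescheduled (context switch, rotation,
 * IPI) while the sibling counters are being read.
 */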
static void perf_output_read_group(struct perf_output_handle *handle,
struct perf_event *event,
u64 enabled, u64 running)
{
struct perf_event *leader = event->group_leader, *sub;
u64 read_format = event->attr.read_format;
unsigned long flags;
u64 values[6];
int n = 0;
local_irq_save(flags);
values[n++] = 1 + leader->nr_siblings;
if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
values[n++] = enabled;
if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
values[n++] = running;
if ((leader != event) &&
(leader->state == PERF_EVENT_STATE_ACTIVE))
leader->pmu->read(leader);
values[n++] = perf_event_count(leader);
if (read_format & PERF_FORMAT_ID)
values[n++] = primary_event_id(leader);
if (read_format & PERF_FORMAT_LOST)
values[n++] = atomic64_read(&leader->lost_samples);
__output_copy(handle, values, n * sizeof(u64));
for_each_sibling_event(sub, leader) {
n = 0;
if ((sub != event) &&
(sub->state == PERF_EVENT_STATE_ACTIVE))
sub->pmu->read(sub);
values[n++] = perf_event_count(sub);
if (read_format & PERF_FORMAT_ID)
values[n++] = primary_event_id(sub);
if (read_format & PERF_FORMAT_LOST)
values[n++] = atomic64_read(&sub->lost_samples);
__output_copy(handle, values, n * sizeof(u64));
}
local_irq_restore(flags);
}
#define PERF_FORMAT_TOTAL_TIMES (PERF_FORMAT_TOTAL_TIME_ENABLED|\
PERF_FORMAT_TOTAL_TIME_RUNNING)
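/*
 * Only compute enabled/running times when the read_format actually asks
 * for them, then dispatch to the group or single-event variant.
 */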
static void perf_output_read(struct perf_output_handle *handle,
struct perf_event *event)
{
u64 enabled = 0, running = 0, now;
u64 read_format = event->attr.read_format;
if (read_format & PERF_FORMAT_TOTAL_TIMES)
calc_timer_values(event, &now, &enabled, &running);
if (event->attr.read_format & PERF_FORMAT_GROUP)
perf_output_read_group(handle, event, enabled, running);
else
perf_output_read_one(handle, event, enabled, running);
}
void perf_output_sample(struct perf_output_handle *handle,
struct perf_event_header *header,
struct perf_sample_data *data,
struct perf_event *event)
{
u64 sample_type = data->type;
perf_output_put(handle, *header);
if (sample_type & PERF_SAMPLE_IDENTIFIER)
perf_output_put(handle, data->id);
if (sample_type & PERF_SAMPLE_IP)
perf_output_put(handle, data->ip);
if (sample_type & PERF_SAMPLE_TID)
perf_output_put(handle, data->tid_entry);
if (sample_type & PERF_SAMPLE_TIME)
perf_output_put(handle, data->time);
if (sample_type & PERF_SAMPLE_ADDR)
perf_output_put(handle, data->addr);
if (sample_type & PERF_SAMPLE_ID)
perf_output_put(handle, data->id);
if (sample_type & PERF_SAMPLE_STREAM_ID)
perf_output_put(handle, data->stream_id);
if (sample_type & PERF_SAMPLE_CPU)
perf_output_put(handle, data->cpu_entry);
if (sample_type & PERF_SAMPLE_PERIOD)
perf_output_put(handle, data->period);
if (sample_type & PERF_SAMPLE_READ)
perf_output_read(handle, event);
if (sample_type & PERF_SAMPLE_CALLCHAIN) {
int size = 1;
size += data->callchain->nr;
size *= sizeof(u64);
__output_copy(handle, data->callchain, size);
}
if (sample_type & PERF_SAMPLE_RAW) {
struct perf_raw_record *raw = data->raw;
if (raw) {
struct perf_raw_frag *frag = &raw->frag;
perf_output_put(handle, raw->size);
do {
if (frag->copy) {
__output_custom(handle, frag->copy,
frag->data, frag->size);
} else {
__output_copy(handle, frag->data,
frag->size);
}
if (perf_raw_frag_last(frag))
break;
frag = frag->next;
} while (1);
if (frag->pad)
__output_skip(handle, NULL, frag->pad);
} else {
struct {
u32 size;
u32 data;
} raw = {
.size = sizeof(u32),
.data = 0,
};
perf_output_put(handle, raw);
}
}
if (sample_type & PERF_SAMPLE_BRANCH_STACK) {
if (data->br_stack) {
size_t size;
size = data->br_stack->nr
* sizeof(struct perf_branch_entry);
perf_output_put(handle, data->br_stack->nr);
if (branch_sample_hw_index(event))
perf_output_put(handle, data->br_stack->hw_idx);
perf_output_copy(handle, data->br_stack->entries, size);
} else {
u64 nr = 0;
perf_output_put(handle, nr);
}
}
if (sample_type & PERF_SAMPLE_REGS_USER) {
u64 abi = data->regs_user.abi;
perf_output_put(handle, abi);
if (abi) {
u64 mask = event->attr.sample_regs_user;
perf_output_sample_regs(handle,
data->regs_user.regs,
mask);
}
}
if (sample_type & PERF_SAMPLE_STACK_USER) {
perf_output_sample_ustack(handle,
data->stack_user_size,
data->regs_user.regs);
}
if (sample_type & PERF_SAMPLE_WEIGHT_TYPE)
perf_output_put(handle, data->weight.full);
if (sample_type & PERF_SAMPLE_DATA_SRC)
perf_output_put(handle, data->data_src.val);
if (sample_type & PERF_SAMPLE_TRANSACTION)
perf_output_put(handle, data->txn);
if (sample_type & PERF_SAMPLE_REGS_INTR) {
u64 abi = data->regs_intr.abi;
perf_output_put(handle, abi);
if (abi) {
u64 mask = event->attr.sample_regs_intr;
perf_output_sample_regs(handle,
data->regs_intr.regs,
mask);
}
}
if (sample_type & PERF_SAMPLE_PHYS_ADDR)
perf_output_put(handle, data->phys_addr);
if (sample_type & PERF_SAMPLE_CGROUP)
perf_output_put(handle, data->cgroup);
if (sample_type & PERF_SAMPLE_DATA_PAGE_SIZE)
perf_output_put(handle, data->data_page_size);
if (sample_type & PERF_SAMPLE_CODE_PAGE_SIZE)
perf_output_put(handle, data->code_page_size);
if (sample_type & PERF_SAMPLE_AUX) {
perf_output_put(handle, data->aux_size);
if (data->aux_size)
perf_aux_sample_output(event, handle, data);
}
if (!event->attr.watermark) {
int wakeup_events = event->attr.wakeup_events;
if (wakeup_events) {
struct perf_buffer *rb = handle->rb;
int events = local_inc_return(&rb->events);
if (events >= wakeup_events) {
local_sub(wakeup_events, &rb->events);
local_inc(&rb->wakeup);
}
}
}
}
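/*
 * Translate a sampled virtual address to a physical address. Kernel
 * addresses go through virt_to_phys(), leaving 0 for vmalloc space;
 * user addresses are resolved with a fast GUP under pagefault_disable(),
 * again leaving 0 on failure.
 */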
static u64 perf_virt_to_phys(u64 virt)
{
u64 phys_addr = 0;
if (!virt)
return 0;
if (virt >= TASK_SIZE) {
if (virt_addr_valid((void *)(uintptr_t)virt) &&
!(virt >= VMALLOC_START && virt < VMALLOC_END))
phys_addr = (u64)virt_to_phys((void *)(uintptr_t)virt);
} else {
if (current->mm != NULL) {
struct page *p;
pagefault_disable();
if (get_user_page_fast_only(virt, 0, &p)) {
phys_addr = page_to_phys(p) + virt % PAGE_SIZE;
put_page(p);
}
pagefault_enable();
}
}
return phys_addr;
}
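/*
 * Lockless software walk of the page tables to find the leaf entry size
 * covering @addr. Only available with CONFIG_HAVE_FAST_GUP; the caller
 * must have IRQs disabled so the tables cannot be freed under us.
 */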
static u64 perf_get_pgtable_size(struct mm_struct *mm, unsigned long addr)
{
u64 size = 0;
#ifdef CONFIG_HAVE_FAST_GUP
pgd_t *pgdp, pgd;
p4d_t *p4dp, p4d;
pud_t *pudp, pud;
pmd_t *pmdp, pmd;
pte_t *ptep, pte;
pgdp = pgd_offset(mm, addr);
pgd = READ_ONCE(*pgdp);
if (pgd_none(pgd))
return 0;
if (pgd_leaf(pgd))
return pgd_leaf_size(pgd);
p4dp = p4d_offset_lockless(pgdp, pgd, addr);
p4d = READ_ONCE(*p4dp);
if (!p4d_present(p4d))
return 0;
if (p4d_leaf(p4d))
return p4d_leaf_size(p4d);
pudp = pud_offset_lockless(p4dp, p4d, addr);
pud = READ_ONCE(*pudp);
if (!pud_present(pud))
return 0;
if (pud_leaf(pud))
return pud_leaf_size(pud);
pmdp = pmd_offset_lockless(pudp, pud, addr);
pmd = pmdp_get_lockless(pmdp);
if (!pmd_present(pmd))
return 0;
if (pmd_leaf(pmd))
return pmd_leaf_size(pmd);
ptep = pte_offset_map(&pmd, addr);
pte = ptep_get_lockless(ptep);
if (pte_present(pte))
size = pte_leaf_size(pte);
pte_unmap(ptep);
#endif /* CONFIG_HAVE_FAST_GUP */
return size;
}
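/*
 * Report the page size backing @addr. IRQs are disabled across the walk
 * so the page tables cannot be torn down underneath it.
 */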
static u64 perf_get_page_size(unsigned long addr)
{
struct mm_struct *mm;
unsigned long flags;
u64 size;
if (!addr)
return 0;
local_irq_save(flags);
mm = current->mm;
	if (!mm) {
		/*
		 * For kernel threads and the like, fall back to init_mm so
		 * that kernel addresses can still be resolved.
		 */
		mm = &init_mm;
	}
size = perf_get_pgtable_size(mm, addr);
local_irq_restore(flags);
return size;
}
static struct perf_callchain_entry __empty_callchain = { .nr = 0, };
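/*
 * Collect the callchain for this sample, honouring the kernel/user
 * exclusion bits; when nothing can be collected, return an empty chain
 * rather than NULL.
 */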
struct perf_callchain_entry *
perf_callchain(struct perf_event *event, struct pt_regs *regs)
{
bool kernel = !event->attr.exclude_callchain_kernel;
bool user = !event->attr.exclude_callchain_user;
bool crosstask = event->ctx->task && event->ctx->task != current;
const u32 max_stack = event->attr.sample_max_stack;
struct perf_callchain_entry *callchain;
if (!kernel && !user)
return &__empty_callchain;
callchain = get_perf_callchain(regs, 0, kernel, user,
max_stack, crosstask, true);
return callchain ?: &__empty_callchain;
}
static __always_inline u64 __cond_set(u64 flags, u64 s, u64 d)
{
return d * !!(flags & s);
}
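/*
 * Compute which sample fields still need to be filled in here: start
 * from the requested sample_type, add fields implied by others
 * (CODE_PAGE_SIZE needs IP, DATA_PAGE_SIZE/PHYS_ADDR need ADDR,
 * STACK_USER needs REGS_USER), then drop anything the PMU has already
 * provided via data->sample_flags.
 */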
void perf_prepare_sample(struct perf_sample_data *data,
struct perf_event *event,
struct pt_regs *regs)
{
u64 sample_type = event->attr.sample_type;
u64 filtered_sample_type;
filtered_sample_type = sample_type;
filtered_sample_type |= __cond_set(sample_type, PERF_SAMPLE_CODE_PAGE_SIZE,
PERF_SAMPLE_IP);
filtered_sample_type |= __cond_set(sample_type, PERF_SAMPLE_DATA_PAGE_SIZE |
PERF_SAMPLE_PHYS_ADDR, PERF_SAMPLE_ADDR);
filtered_sample_type |= __cond_set(sample_type, PERF_SAMPLE_STACK_USER,
PERF_SAMPLE_REGS_USER);
filtered_sample_type &= ~data->sample_flags;
if (filtered_sample_type == 0) {
data->type = event->attr.sample_type;
return;
}
__perf_event_header__init_id(data, event, filtered_sample_type);
if (filtered_sample_type & PERF_SAMPLE_IP) {
data->ip = perf_instruction_pointer(regs);
data->sample_flags |= PERF_SAMPLE_IP;
}
if (filtered_sample_type & PERF_SAMPLE_CALLCHAIN)
perf_sample_save_callchain(data, event, regs);
if (filtered_sample_type & PERF_SAMPLE_RAW) {
data->raw = NULL;
data->dyn_size += sizeof(u64);
data->sample_flags |= PERF_SAMPLE_RAW;
}
if (filtered_sample_type & PERF_SAMPLE_BRANCH_STACK) {
data->br_stack = NULL;
data->dyn_size += sizeof(u64);
data->sample_flags |= PERF_SAMPLE_BRANCH_STACK;
}
if (filtered_sample_type & PERF_SAMPLE_REGS_USER)
perf_sample_regs_user(&data->regs_user, regs);
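	/*
	 * Not filtered_sample_type here: REGS_USER may have been implied by
	 * STACK_USER above, and dyn_size must only grow when the registers
	 * were actually requested in the sample.
	 */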
if ((sample_type & ~data->sample_flags) & PERF_SAMPLE_REGS_USER) {
int size = sizeof(u64);
if (data->regs_user.regs) {
u64 mask = event->attr.sample_regs_user;
size += hweight64(mask) * sizeof(u64);
}
data->dyn_size += size;
data->sample_flags |= PERF_SAMPLE_REGS_USER;
}
if (filtered_sample_type & PERF_SAMPLE_STACK_USER) {
u16 stack_size = event->attr.sample_stack_user;
u16 header_size = perf_sample_data_size(data, event);
u16 size = sizeof(u64);
stack_size = perf_sample_ustack_size(stack_size, header_size,
data->regs_user.regs);
if (stack_size)
size += sizeof(u64) + stack_size;
data->stack_user_size = stack_size;
data->dyn_size += size;
data->sample_flags |= PERF_SAMPLE_STACK_USER;
}
if (filtered_sample_type & PERF_SAMPLE_WEIGHT_TYPE) {
data->weight.full = 0;
data->sample_flags |= PERF_SAMPLE_WEIGHT_TYPE;
}
if (filtered_sample_type & PERF_SAMPLE_DATA_SRC) {
data->data_src.val = PERF_MEM_NA;
data->sample_flags |= PERF_SAMPLE_DATA_SRC;
}
if (filtered_sample_type & PERF_SAMPLE_TRANSACTION) {
data->txn = 0;
data->sample_flags |= PERF_SAMPLE_TRANSACTION;
}
if (filtered_sample_type & PERF_SAMPLE_ADDR) {
data->addr = 0;
data->sample_flags |= PERF_SAMPLE_ADDR;
}
if (filtered_sample_type & PERF_SAMPLE_REGS_INTR) {
int size = sizeof(u64);
perf_sample_regs_intr(&data->regs_intr, regs);
if (data->regs_intr.regs) {
u64 mask = event->attr.sample_regs_intr;
size += hweight64(mask) * sizeof(u64);
}
data->dyn_size += size;
data->sample_flags |= PERF_SAMPLE_REGS_INTR;
}
if (filtered_sample_type & PERF_SAMPLE_PHYS_ADDR) {
data->phys_addr = perf_virt_to_phys(data->addr);
data->sample_flags |= PERF_SAMPLE_PHYS_ADDR;
}
#ifdef CONFIG_CGROUP_PERF
if (filtered_sample_type & PERF_SAMPLE_CGROUP) {
struct cgroup *cgrp;
cgrp = task_css_check(current, perf_event_cgrp_id, 1)->cgroup;
data->cgroup = cgroup_id(cgrp);
data->sample_flags |= PERF_SAMPLE_CGROUP;
}
#endif
if (filtered_sample_type & PERF_SAMPLE_DATA_PAGE_SIZE) {
data->data_page_size = perf_get_page_size(data->addr);
data->sample_flags |= PERF_SAMPLE_DATA_PAGE_SIZE;
}
if (filtered_sample_type & PERF_SAMPLE_CODE_PAGE_SIZE) {
data->code_page_size = perf_get_page_size(data->ip);
data->sample_flags |= PERF_SAMPLE_CODE_PAGE_SIZE;
}
if (filtered_sample_type & PERF_SAMPLE_AUX) {
u64 size;
u16 header_size = perf_sample_data_size(data, event);
header_size += sizeof(u64);
size = min_t(size_t, U16_MAX - header_size,
event->attr.aux_sample_size);
size = rounddown(size, 8);
size = perf_prepare_sample_aux(event, data, size);
WARN_ON_ONCE(size + header_size > U16_MAX);
data->dyn_size += size + sizeof(u64);
data->sample_flags |= PERF_SAMPLE_AUX;
}
}
void perf_prepare_header(struct perf_event_header *header,