/* runtime/linux/perf.c - systemtap
 * Data types, functions, and macros for perf-event probe support.
 */
#ifndef _PERF_C_
#define _PERF_C_
#include <linux/perf_event.h>
#include <linux/workqueue.h>
#include "perf.h"
#ifndef INIT_WORK_ONSTACK
#define INIT_WORK_ONSTACK(_work, _func) INIT_WORK((_work), (_func))
#define destroy_work_on_stack(_work) do { (void)(_work); } while (0)
#endif
/* Set up the perf event(s) for probe STP.
 *
 * Two modes:
 *  - per-thread (!stp->system_wide): create one counter bound to TASK;
 *    called (via the task finder) once per traced thread.
 *  - system-wide: allocate a per-cpu array and create one counter per
 *    online cpu.
 *
 * Returns 0 on success or when there is nothing to do, a negative errno
 * on failure.  "Probe type unsupported by this kernel" errors in
 * per-thread mode are downgraded to a warning and return 0.
 */
static long _stp_perf_init (struct stap_perf_probe *stp, struct task_struct* task)
{
int cpu;
if (!stp->system_wide) {
/* Per-thread mode: no task yet means nothing to attach to. */
if (task == 0) return 0;
else {
/* Counter already created for this probe; don't create a second one. */
if (stp->e.t.per_thread_event != 0) return 0;
/* cpu = -1: count on whichever cpu TASK runs on.  The middle argument's
   type (struct task_struct* vs. pid_t) depends on the kernel version,
   hence the STAPCONF_* conditionals. */
stp->e.t.per_thread_event = perf_event_create_kernel_counter(&stp->attr,
-1,
#if defined(STAPCONF_PERF_STRUCTPID) || defined (STAPCONF_PERF_COUNTER_CONTEXT)
task,
#else
task->pid,
#endif
stp->callback
#ifdef STAPCONF_PERF_COUNTER_CONTEXT
, NULL
#endif
);
if (IS_ERR(stp->e.t.per_thread_event)) {
long rc = PTR_ERR(stp->e.t.per_thread_event);
/* Clear the stale error-pointer so later init/del calls see "no event". */
stp->e.t.per_thread_event = NULL;
/* These errnos mean the event type itself is unsupported here;
   warn once and carry on rather than failing module init. */
if (rc == -EINVAL || rc == -ENOSYS || rc == -ENOENT
|| rc == -EOPNOTSUPP || rc == -ENXIO) {
_stp_warn("perf probe '%s' is not supported by this kernel (%ld).",
#ifdef STP_NEED_PROBE_NAME
stp->probe->pn,
#else
stp->probe->pp,
#endif
rc);
rc = 0;
}
return rc;
}
}
}
else {
/* System-wide mode: one counter per possible cpu. */
stp->e.events = _stp_alloc_percpu (sizeof(struct perf_event*));
if (stp->e.events == NULL) {
return -ENOMEM;
}
for_each_possible_cpu(cpu) {
struct perf_event **event = per_cpu_ptr (stp->e.events, cpu);
/* Offline cpus get a NULL slot; _stp_perf_del skips them. */
if (cpu_is_offline(cpu)) {
*event = NULL;
continue;
}
/* Bind the counter to CPU rather than to a task (NULL / pid -1). */
*event = perf_event_create_kernel_counter(&stp->attr,
cpu,
#if defined(STAPCONF_PERF_STRUCTPID) || defined (STAPCONF_PERF_COUNTER_CONTEXT)
NULL,
#else
-1,
#endif
stp->callback
#ifdef STAPCONF_PERF_COUNTER_CONTEXT
, NULL
#endif
);
if (IS_ERR(*event)) {
long rc = PTR_ERR(*event);
*event = NULL;
/* Roll back the counters already created on other cpus. */
_stp_perf_del(stp);
return rc;
}
}
} return 0;
}
/* Release the perf event(s) owned by probe STP and reset its pointers.
 * Safe to call with a NULL probe, or with one whose events were never
 * (or only partially) created.
 */
static void _stp_perf_del (struct stap_perf_probe *stp)
{
  int c;

  if (stp == NULL || stp->e.events == NULL)
    return;

  if (!stp->system_wide) {
    /* Per-thread mode: at most one counter to release. */
    if (stp->e.t.per_thread_event != NULL)
      perf_event_release_kernel(stp->e.t.per_thread_event);
    stp->e.t.per_thread_event = NULL;
    return;
  }

  /* System-wide mode: release every per-cpu counter that was actually
     created (offline cpus hold NULL), then free the array itself. */
  for_each_possible_cpu(c) {
    struct perf_event **ev = per_cpu_ptr (stp->e.events, c);
    if (*ev != NULL)
      perf_event_release_kernel(*ev);
  }
  _stp_free_percpu (stp->e.events);
  stp->e.events = NULL;
}
/* Release the perf events of the first N entries of PROBES, in reverse
 * order of creation.  Used to roll back a partially completed init.
 */
static void _stp_perf_del_n (struct stap_perf_probe *probes, size_t n)
{
  size_t idx = n;
  while (idx != 0) {
    idx--;
    _stp_perf_del(&probes[idx]);
  }
}
/* Context passed to the workqueue callback that registers perf probes.
 * Lives on the caller's stack (see _stp_perf_init_n). */
struct _stp_perf_work {
struct work_struct work; /* embedded work item; container_of() recovers this struct */
struct stap_perf_probe *probes; /* array of probes to initialize */
size_t nprobes; /* number of entries in probes[] */
const char* probe_point; /* on failure: probe point of the probe that failed */
int rc; /* result: 0 on success, error code otherwise */
};
/* Workqueue callback doing the actual perf probe registration.
 * Runs in process context (perf_event_create_kernel_counter and the
 * task finder may sleep).  On the first failure it records the probe
 * point for diagnostics, rolls back the probes already set up, and
 * stops; the result is reported through pwork->rc.
 */
static void _stp_perf_init_work (struct work_struct *work)
{
  size_t i;
  struct _stp_perf_work *pwork =
    container_of(work, struct _stp_perf_work, work);

  for (i = 0; i < pwork->nprobes; ++i) {
    struct stap_perf_probe* stp = &pwork->probes[i];

    if (stp->system_wide) {
      /* Create the per-cpu counters immediately. */
      pwork->rc = _stp_perf_init(stp, NULL);
    }
    else if (stp->task_finder) {
#ifdef STP_PERF_USE_TASK_FINDER
      /* Per-thread counters are created later, as the task finder
         discovers the target threads. */
      pwork->rc = stap_register_task_finder_target(&stp->e.t.tgt);
#else
      /* Task-finder support not compiled in.  Use a negative errno:
         every other error path in this file (e.g. -ENOMEM, PTR_ERR)
         returns negative codes, and the old positive EINVAL was
         inconsistent with them. */
      pwork->rc = -EINVAL;
#endif
    }

    if (pwork->rc) {
      pwork->probe_point = stp->probe->pp;
      _stp_perf_del_n(pwork->probes, i);
      break;
    }
  }
}
/* Register N perf probes from PROBES via a workqueue item, so the
 * registration runs in a clean process context.  Blocks until the work
 * completes.  Returns 0 on success; on failure returns the error code
 * and stores the failing probe point in *PPFAIL.
 */
static int _stp_perf_init_n (struct stap_perf_probe *probes, size_t n,
			     const char **ppfail)
{
  int rc;
  struct _stp_perf_work pwork = { .probes = probes, .nprobes = n };

  INIT_WORK_ONSTACK(&pwork.work, _stp_perf_init_work);
  schedule_work(&pwork.work);
  flush_work(&pwork.work);   /* wait for _stp_perf_init_work to finish */

  rc = pwork.rc;
  if (rc != 0)
    *ppfail = pwork.probe_point;

  destroy_work_on_stack(&pwork.work);
  return rc;
}
/* Create the per-thread perf counter for probe table entry I bound to
 * TASK; thin wrapper around _stp_perf_init for the task finder. */
static int _stp_perf_read_init (unsigned i, struct task_struct* task)
{
  struct stap_perf_probe* probe = & stap_perf_probes[i];
  return _stp_perf_init (probe, task);
}
/* Read the current value of the per-thread perf counter for probe
 * table entry I.  NCPU is currently unused.  Returns the counter value,
 * or 0 (with an error logged) if I is out of range or the probe has no
 * per-thread event.  May sleep.
 */
long _stp_perf_read (int ncpu, unsigned i)
{
  struct stap_perf_probe* stp;
  u64 enabled, running;

  /* Bounds check: was 'i > count', which let i == count through and
     read one element past the end of the array.  Use >= instead. */
  if (i >= sizeof(stap_perf_probes)/sizeof(struct stap_perf_probe))
    {
      _stp_error ("_stp_perf_read: probe index %u out of range\n", i);
      return 0;
    }
  stp = & stap_perf_probes[i];
  /* (The old 'stp == NULL' test was dead code: the address of an array
     element is never NULL.) */
  if (stp->e.t.per_thread_event == NULL)
    {
      _stp_error ("_stp_perf_read: no per-thread event for probe %u\n", i);
      return 0;
    }
  might_sleep();
  return perf_event_read_value (stp->e.t.per_thread_event, &enabled, &running);
}
#endif