/* -*- linux-c -*-
 * runtime/linux/task_finder2.c - systemtap task finder (utrace-based).
 * Locates running and newly-created user processes matching registered
 * targets and fires registration/vm-event callbacks for them.
 */
#ifndef TASK_FINDER2_C
#define TASK_FINDER2_C
#include "stp_utrace.c"
#include <linux/list.h>
#include <linux/binfmts.h>
#include <linux/mount.h>
#ifndef STAPCONF_TASK_UID
#include <linux/cred.h>
#endif
#include "../uidgid_compatibility.h"
#include "syscall.h"
#include "task_finder_map.c"
#include "task_finder_vma.c"
static LIST_HEAD(__stp_task_finder_list);
struct stap_task_finder_target;
#define __STP_TF_UNITIALIZED 0
#define __STP_TF_STARTING 1
#define __STP_TF_RUNNING 2
#define __STP_TF_STOPPING 3
#define __STP_TF_STOPPED 4
static atomic_t __stp_task_finder_state = ATOMIC_INIT(__STP_TF_UNITIALIZED);
static atomic_t __stp_task_finder_complete = ATOMIC_INIT(0);
static atomic_t __stp_inuse_count = ATOMIC_INIT (0);
#define __stp_tf_handler_start() (atomic_inc(&__stp_inuse_count))
#define __stp_tf_handler_end() (atomic_dec(&__stp_inuse_count))
#ifdef DEBUG_TASK_FINDER
/* Count attached utrace engines so shutdown problems can be diagnosed. */
static atomic_t __stp_attach_count = ATOMIC_INIT (0);
#define debug_task_finder_attach() (atomic_inc(&__stp_attach_count))
#define debug_task_finder_detach() (atomic_dec(&__stp_attach_count))
#define debug_task_finder_report() \
	(printk(KERN_ERR "%s:%d - attach count: %d, inuse count: %d\n", \
		__FUNCTION__, __LINE__, atomic_read(&__stp_attach_count), \
		atomic_read(&__stp_inuse_count)))
#else
/* Debug counters compile away when DEBUG_TASK_FINDER is not defined.
 * (The original source had the three empty definitions and the #endif
 * fused onto a single line, which is not valid preprocessor input.) */
#define debug_task_finder_attach()	/* empty */
#define debug_task_finder_detach()	/* empty */
#define debug_task_finder_report()	/* empty */
#endif
/* Main task callback: invoked when a matching task appears
 * (register_p == 1) or disappears (register_p == 0).  process_p is 1
 * when the task is a thread-group leader.  Returns 0 on success. */
typedef int (*stap_task_finder_callback)(struct stap_task_finder_target *tgt,
struct task_struct *tsk,
int register_p,
int process_p);
/* Called for each file-backed mapping of a matched task; 'path' is the
 * mapped file's pathname, 'dentry' its dentry, and addr/length/offset/
 * vm_flags describe the vma. */
typedef int
(*stap_task_finder_mmap_callback)(struct stap_task_finder_target *tgt,
struct task_struct *tsk,
char *path,
struct dentry *dentry,
unsigned long addr,
unsigned long length,
unsigned long offset,
unsigned long vm_flags);
/* Called when a matched task unmaps [addr, addr+length). */
typedef int
(*stap_task_finder_munmap_callback)(struct stap_task_finder_target *tgt,
struct task_struct *tsk,
unsigned long addr,
unsigned long length);
/* Called when a matched task changes protection of [addr, addr+length)
 * to 'prot'. */
typedef int
(*stap_task_finder_mprotect_callback)(struct stap_task_finder_target *tgt,
struct task_struct *tsk,
unsigned long addr,
unsigned long length,
int prot);
/* Describes one task-finder target: which processes to watch (by
 * executable path 'procname' or by 'pid') and which callbacks to run.
 * Targets matching the same process share one utrace engine; secondary
 * targets are chained onto the primary's callback_list_head. */
struct stap_task_finder_target {
/* internal fields, managed by stap_register_task_finder_target(): */
struct list_head list; struct list_head callback_list_head;
struct list_head callback_list;
struct utrace_engine_ops ops;
size_t pathlen;
unsigned engine_attached:1;
unsigned mmap_events:1;
unsigned munmap_events:1;
unsigned mprotect_events:1;
/* fields filled in by the registering client: */
pid_t pid;
const char *procname;
const char *purpose;
stap_task_finder_callback callback;
stap_task_finder_mmap_callback mmap_callback;
stap_task_finder_munmap_callback munmap_callback;
stap_task_finder_mprotect_callback mprotect_callback;
};
/* Bookkeeping for deferred task_work items, so pending work can be
 * cancelled and its memory reclaimed at module shutdown. */
static LIST_HEAD(__stp_tf_task_work_list);
static STP_DEFINE_SPINLOCK(__stp_tf_task_work_list_lock);
struct __stp_tf_task_work {
struct list_head list;
struct task_struct *task;	/* task the work was queued on */
void *data;			/* opaque payload for the worker fn */
struct task_work work;		/* embedded kernel task_work item */
};
/* Allocate a task_work wrapper for the current task, remembering it on
 * the global pending list so it can be cancelled at shutdown.  Returns
 * the embedded task_work pointer, or NULL on allocation failure. */
static struct task_work *
__stp_tf_alloc_task_work(void *data)
{
	struct __stp_tf_task_work *entry;
	unsigned long irqflags;

	entry = _stp_kmalloc(sizeof(*entry));
	if (entry == NULL) {
		_stp_error("Unable to allocate space for task_work");
		return NULL;
	}
	entry->task = current;
	entry->data = data;

	/* Track the allocation so __stp_tf_cancel_task_work() can
	 * find and reclaim it if it never runs. */
	stp_spin_lock_irqsave(&__stp_tf_task_work_list_lock, irqflags);
	list_add(&entry->list, &__stp_tf_task_work_list);
	stp_spin_unlock_irqrestore(&__stp_tf_task_work_list_lock, irqflags);

	return &entry->work;
}
/* Release a task_work wrapper previously handed out by
 * __stp_tf_alloc_task_work(): unlink it from the pending list (if it
 * is still there) and free it. */
static void __stp_tf_free_task_work(struct task_work *work)
{
	struct __stp_tf_task_work *target, *iter;
	unsigned long irqflags;

	target = container_of(work, struct __stp_tf_task_work, work);

	stp_spin_lock_irqsave(&__stp_tf_task_work_list_lock, irqflags);
	list_for_each_entry(iter, &__stp_tf_task_work_list, list) {
		if (iter == target) {
			list_del(&target->list);
			break;
		}
	}
	stp_spin_unlock_irqrestore(&__stp_tf_task_work_list_lock, irqflags);

	_stp_kfree(target);
}
/* Shutdown helper: cancel every still-pending task_work item and free
 * its bookkeeping entry so nothing fires after the module unloads. */
static void __stp_tf_cancel_task_work(void)
{
	struct __stp_tf_task_work *entry, *next;
	unsigned long irqflags;

	stp_spin_lock_irqsave(&__stp_tf_task_work_list_lock, irqflags);
	list_for_each_entry_safe(entry, next, &__stp_tf_task_work_list, list) {
		list_del(&entry->list);
		stp_task_work_cancel(entry->task, entry->work.func);
		_stp_kfree(entry);
	}
	stp_spin_unlock_irqrestore(&__stp_tf_task_work_list_lock, irqflags);
}
/* Forward declarations of the per-target utrace callbacks wired up in
 * stap_register_task_finder_target(), plus the helper that replays a
 * task's existing mappings through the mmap callbacks. */
static u32
__stp_utrace_task_finder_target_exec(u32 action,
struct utrace_engine *engine,
const struct linux_binfmt *fmt,
const struct linux_binprm *bprm,
struct pt_regs *regs);
static u32
__stp_utrace_task_finder_target_death(struct utrace_engine *engine,
bool group_dead, int signal);
static u32
__stp_utrace_task_finder_target_quiesce(u32 action,
struct utrace_engine *engine,
unsigned long event);
static u32
__stp_utrace_task_finder_target_syscall_entry(u32 action,
struct utrace_engine *engine,
struct pt_regs *regs);
static u32
__stp_utrace_task_finder_target_syscall_exit(u32 action,
struct utrace_engine *engine,
struct pt_regs *regs);
static void
__stp_call_mmap_callbacks_for_task(struct stap_task_finder_target *tgt,
struct task_struct *tsk);
/* Register NEW_TGT with the task finder.  Must be called before
 * stap_start_task_finder().  If an equivalent target already exists
 * (same procname, or same pid for pathless targets), NEW_TGT only adds
 * its callbacks to the existing primary target; otherwise NEW_TGT
 * becomes a primary target itself.  Returns 0 on success, or a
 * positive errno-style value (this file's convention). */
static int
stap_register_task_finder_target(struct stap_task_finder_target *new_tgt)
{
struct list_head *node;
struct stap_task_finder_target *tgt = NULL;
int found_node = 0;
/* No new targets once the finder has started. */
if (atomic_read(&__stp_task_finder_state) != __STP_TF_UNITIALIZED) {
_stp_error("task_finder already started, no new targets allowed");
return EBUSY;
}
if (new_tgt == NULL)
return EFAULT;
/* Cache the procname length for cheap comparisons later. */
if (new_tgt->procname != NULL)
new_tgt->pathlen = strlen(new_tgt->procname);
else
new_tgt->pathlen = 0;
/* Reset internal state and wire up the utrace callback table. */
new_tgt->engine_attached = 0;
new_tgt->mmap_events = 0;
new_tgt->munmap_events = 0;
new_tgt->mprotect_events = 0;
memset(&new_tgt->ops, 0, sizeof(new_tgt->ops));
new_tgt->ops.report_exec = &__stp_utrace_task_finder_target_exec;
new_tgt->ops.report_death = &__stp_utrace_task_finder_target_death;
new_tgt->ops.report_quiesce = &__stp_utrace_task_finder_target_quiesce;
new_tgt->ops.report_syscall_entry = \
&__stp_utrace_task_finder_target_syscall_entry;
new_tgt->ops.report_syscall_exit = \
&__stp_utrace_task_finder_target_syscall_exit;
/* Look for an existing primary target covering the same
 * procname (or, for pathless targets, the same pid). */
list_for_each(node, &__stp_task_finder_list) {
tgt = list_entry(node, struct stap_task_finder_target, list);
if (tgt == new_tgt) {
_stp_error("target already registered");
return EINVAL;
}
if (tgt != NULL
&& ((new_tgt->pathlen > 0
&& tgt->pathlen == new_tgt->pathlen
&& strcmp(tgt->procname, new_tgt->procname) == 0)
|| (new_tgt->pathlen == 0 && tgt->pathlen == 0
&& tgt->pid == new_tgt->pid))) {
found_node = 1;
break;
}
}
/* No match: NEW_TGT becomes a primary target with its own
 * callback list; otherwise 'tgt' is the existing primary. */
if (! found_node) {
INIT_LIST_HEAD(&new_tgt->callback_list_head);
list_add(&new_tgt->list, &__stp_task_finder_list);
tgt = new_tgt;
}
/* Chain NEW_TGT's callbacks onto the primary and record which
 * vm event classes the primary now needs to watch. */
list_add_tail(&new_tgt->callback_list, &tgt->callback_list_head);
if (new_tgt->mmap_callback != NULL)
tgt->mmap_events = 1;
if (new_tgt->munmap_callback != NULL)
tgt->munmap_events = 1;
if (new_tgt->mprotect_callback != NULL)
tgt->mprotect_events = 1;
return 0;
}
/* Detach the utrace engine matching OPS from TSK.  Kernel threads and
 * invalid tasks are skipped silently.  Returns 0 on success (or when
 * the task/engine is already gone), or a positive errno-style value on
 * failure (this file's convention).
 * Fix: removed the unused local 'struct mm_struct *mm'. */
static int
stap_utrace_detach(struct task_struct *tsk,
		   const struct utrace_engine_ops *ops)
{
	struct utrace_engine *engine;
	int rc = 0;

	/* Ignore invalid tasks. */
	if (tsk == NULL || tsk->pid <= 0)
		return 0;

#ifdef PF_KTHREAD
	/* The task finder never attaches to kernel threads. */
	if (tsk->flags & PF_KTHREAD)
		return 0;
#endif

	/* Look up (without creating) the engine attached with OPS. */
	engine = utrace_attach_task(tsk, UTRACE_ATTACH_MATCH_OPS, ops, 0);
	if (IS_ERR(engine)) {
		rc = -PTR_ERR(engine);
		if (rc != ENOENT) {
			_stp_error("utrace_attach_task returned error %d on pid %d",
				   rc, tsk->pid);
		}
		else {
			/* No matching engine: nothing to detach. */
			rc = 0;
		}
	}
	else if (unlikely(engine == NULL)) {
		_stp_error("utrace_attach returned NULL on pid %d",
			   (int)tsk->pid);
		rc = EFAULT;
	}
	else {
		rc = utrace_control(tsk, engine, UTRACE_DETACH);
		switch (rc) {
		case 0:			/* detached cleanly */
			debug_task_finder_detach();
			break;
		case -ESRCH:		/* task already reaped */
		case -EALREADY:		/* death callback already begun */
			rc = 0;
			break;
		case -EINPROGRESS:
			/* A callback is running: wait for it to finish,
			 * retrying when interrupted. */
			do {
				rc = utrace_barrier(tsk, engine);
			} while (rc == -ERESTARTSYS);
			if (rc == 0 || rc == -ESRCH || rc == -EALREADY) {
				rc = 0;
				debug_task_finder_detach();
			} else {
				rc = -rc;
				_stp_error("utrace_barrier returned error %d on pid %d", rc, tsk->pid);
			}
			break;
		default:
			rc = -rc;
			_stp_error("utrace_control returned error %d on pid %d",
				   rc, tsk->pid);
			break;
		}
		/* Drop the reference utrace_attach_task() took. */
		utrace_engine_put(engine);
	}
	return rc;
}
/* Detach the utrace engines registered with OPS from every thread in
 * the system.  Used at shutdown to tear down per-target engines.
 * NOTE(review): the locals 'engine' and 'pid' appear unused here. */
static void
stap_utrace_detach_ops(struct utrace_engine_ops *ops)
{
struct task_struct *grp, *tsk;
struct utrace_engine *engine;
pid_t pid = 0;
int rc = 0;
/* Walk every thread under RCU.  stap_utrace_detach() would skip
 * kernel threads anyway; skipping here avoids useless calls. */
rcu_read_lock();
do_each_thread(grp, tsk) {
#ifdef PF_KTHREAD
if (tsk->flags & PF_KTHREAD)
continue;
#endif
rc = stap_utrace_detach(tsk, ops);
if (rc != 0)
_stp_error("stap_utrace_detach returned error %d on pid %d", rc, tsk->pid);
WARN_ON(rc != 0);
} while_each_thread(grp, tsk);
rcu_read_unlock();
debug_task_finder_report();
}
/* Copy the path of MM's executable into BUF (of size BUFLEN) and
 * return a pointer into BUF, or an ERR_PTR on failure.  Uses
 * down_read_trylock() because this can be called from contexts where
 * blocking on mmap_sem is not safe; a held lock yields -ENOENT.
 * Fix: stap_find_exe_file() returns a referenced struct file (it
 * mirrors get_mm_exe_file()), so drop that reference with fput()
 * after d_path() — the original leaked it. */
static char *
__stp_get_mm_path(struct mm_struct *mm, char *buf, int buflen)
{
	struct file *vm_file;
	char *rc = NULL;

	if (!mm || ! down_read_trylock(&mm->mmap_sem)) {
		*buf = '\0';
		return ERR_PTR(-ENOENT);
	}
	vm_file = stap_find_exe_file(mm);
	if (vm_file) {
#ifdef STAPCONF_DPATH_PATH
		rc = d_path(&(vm_file->f_path), buf, buflen);
#else
		rc = d_path(vm_file->f_dentry, vm_file->f_vfsmnt,
			    buf, buflen);
#endif
		fput(vm_file);
	}
	else {
		*buf = '\0';
		rc = ERR_PTR(-ENOENT);
	}
	up_read(&mm->mmap_sem);
	return rc;
}
/* Events the global task-finder engine watches on every task. */
#define __STP_TASK_FINDER_EVENTS (UTRACE_EVENT(CLONE) \
| UTRACE_EVENT(EXEC) \
| UTRACE_EVENT(DEATH))
/* Steady-state events for an attached target task... */
#define __STP_TASK_BASE_EVENTS (UTRACE_EVENT(DEATH)|UTRACE_EVENT(EXEC))
/* ...plus syscall tracing when vm callbacks were requested. */
#define __STP_TASK_VM_BASE_EVENTS (__STP_TASK_BASE_EVENTS \
| UTRACE_EVENT(SYSCALL_ENTRY)\
| UTRACE_EVENT(SYSCALL_EXIT))
/* Initial events when attaching: QUIESCE fires once the task stops so
 * the quiesce handler can finish setup and narrow the mask. */
#define __STP_ATTACHED_TASK_EVENTS (UTRACE_EVENT(DEATH) \
| UTRACE_EVENT(QUIESCE))
/* Pick the steady-state mask for TGT based on requested vm events. */
#define __STP_ATTACHED_TASK_BASE_EVENTS(tgt) \
(((tgt)->mmap_events || (tgt)->munmap_events \
|| (tgt)->mprotect_events) \
? __STP_TASK_VM_BASE_EVENTS : __STP_TASK_BASE_EVENTS)
/* Attach a utrace engine with OPS/DATA to TSK, enable EVENT_FLAGS, and
 * optionally change the task's resume state (e.g. UTRACE_STOP so the
 * quiesce callback fires).  Returns 0 on success, or a positive
 * errno-style value; EPERM marks tasks that simply can't be attached
 * (invalid, kernel thread, no mm). */
static int
__stp_utrace_attach(struct task_struct *tsk,
const struct utrace_engine_ops *ops, void *data,
unsigned long event_flags,
enum utrace_resume_action action)
{
struct utrace_engine *engine;
int rc = 0;
/* Can't attach to invalid tasks. */
if (tsk == NULL || tsk->pid <= 0)
return EPERM;
#ifdef PF_KTHREAD
/* Can't attach to kernel threads. */
if (tsk->flags & PF_KTHREAD)
return EPERM;
#endif
/* A task without an mm is also a kernel thread. */
if (! tsk->mm)
return EPERM;
engine = utrace_attach_task(tsk, UTRACE_ATTACH_CREATE, ops, data);
if (IS_ERR(engine)) {
int error = -PTR_ERR(engine);
/* ESRCH/ENOENT just mean the task is exiting; not an error. */
if (error != ESRCH && error != ENOENT) {
_stp_error("utrace_attach returned error %d on pid %d",
error, (int)tsk->pid);
rc = error;
}
}
else if (unlikely(engine == NULL)) {
_stp_error("utrace_attach returned NULL on pid %d",
(int)tsk->pid);
rc = EFAULT;
}
else {
rc = utrace_set_events(tsk, engine, event_flags);
if (rc == -EINPROGRESS) {
/* A callback is in progress; wait for it to finish so
 * the event change can take effect, retrying when
 * interrupted. */
do {
rc = utrace_barrier(tsk, engine);
} while (rc == -ERESTARTSYS);
if (rc != 0 && rc != -ESRCH && rc != -EALREADY)
_stp_error("utrace_barrier returned error %d on pid %d",
rc, (int)tsk->pid);
}
if (rc == 0) {
debug_task_finder_attach();
if (action != UTRACE_RESUME) {
/* -EINPROGRESS here means the state change will
 * happen at the task's next safe point. */
rc = utrace_control(tsk, engine, action);
if (rc != 0 && rc != -EINPROGRESS) {
_stp_error("utrace_control returned error %d on pid %d",
rc, (int)tsk->pid);
}
rc = 0;
}
}
else if (rc != -ESRCH && rc != -EALREADY)
_stp_error("utrace_set_events2 returned error %d on pid %d",
rc, (int)tsk->pid);
/* Drop the reference utrace_attach_task() took. */
utrace_engine_put(engine);
}
return rc;
}
/* Convenience wrapper around __stp_utrace_attach(): attach with the
 * given event mask and let the task keep running. */
static int
stap_utrace_attach(struct task_struct *tsk,
		   const struct utrace_engine_ops *ops, void *data,
		   unsigned long event_flags)
{
	return __stp_utrace_attach(tsk, ops, data, event_flags,
				   UTRACE_RESUME);
}
static inline void
__stp_call_callbacks(struct stap_task_finder_target *tgt,
struct task_struct *tsk, int register_p, int process_p)
{
struct list_head *cb_node;
int rc;
if (tgt == NULL || tsk == NULL)
return;
list_for_each(cb_node, &tgt->callback_list_head) {
struct stap_task_finder_target *cb_tgt;
cb_tgt = list_entry(cb_node, struct stap_task_finder_target,
callback_list);
if (cb_tgt == NULL || cb_tgt->callback == NULL)
continue;
rc = cb_tgt->callback(cb_tgt, tsk, register_p, process_p);
if (rc != 0) {
_stp_warn("task_finder %s%scallback for task %d failed: %d",
(cb_tgt->purpose?:""), (cb_tgt->purpose?" ":""),
(int)tsk->pid, rc);
}
}
}
static void
__stp_call_mmap_callbacks(struct stap_task_finder_target *tgt,
struct task_struct *tsk, char *path,
struct dentry *dentry,
unsigned long addr, unsigned long length,
unsigned long offset, unsigned long vm_flags)
{
struct list_head *cb_node;
int rc;
if (tgt == NULL || tsk == NULL)
return;
dbug_task_vma(1,
"pid %d, a/l/o/p/path 0x%lx 0x%lx 0x%lx %c%c%c%c %s\n",
tsk->pid, addr, length, offset,
vm_flags & VM_READ ? 'r' : '-',
vm_flags & VM_WRITE ? 'w' : '-',
vm_flags & VM_EXEC ? 'x' : '-',
vm_flags & VM_MAYSHARE ? 's' : 'p',
path);
list_for_each(cb_node, &tgt->callback_list_head) {
struct stap_task_finder_target *cb_tgt;
cb_tgt = list_entry(cb_node, struct stap_task_finder_target,
callback_list);
if (cb_tgt == NULL || cb_tgt->mmap_callback == NULL)
continue;
rc = cb_tgt->mmap_callback(cb_tgt, tsk, path, dentry,
addr, length, offset, vm_flags);
if (rc != 0) {
_stp_warn("task_finder mmap %s%scallback for task %d failed: %d",
(cb_tgt->purpose?:""), (cb_tgt->purpose?" ":""),
(int)tsk->pid, rc);
}
}
}
/* Return the file-backed vma containing ADDR in MM, or NULL.
 * find_vma() returns the first vma ending above ADDR, so reject hits
 * that start beyond ADDR or have no backing file.
 * Caller must hold mm->mmap_sem. */
static struct vm_area_struct *
__stp_find_file_based_vma(struct mm_struct *mm, unsigned long addr)
{
	struct vm_area_struct *found = find_vma(mm, addr);

	if (found == NULL)
		return NULL;
	if (found->vm_file == NULL)
		return NULL;
	if (found->vm_start > addr)
		return NULL;
	return found;
}
/* Look up the file-backed vma containing ADDR in TSK's mm, resolve its
 * file's path, and run TGT's mmap callbacks on it.  Silently does
 * nothing if the mm or vma can't be found or mmap_sem is contended. */
static void
__stp_call_mmap_callbacks_with_addr(struct stap_task_finder_target *tgt,
struct task_struct *tsk,
unsigned long addr)
{
struct mm_struct *mm;
struct vm_area_struct *vma;
char *mmpath_buf = NULL;
char *mmpath = NULL;
struct dentry *dentry = NULL;
unsigned long length = 0;
unsigned long offset = 0;
unsigned long vm_flags = 0;
mm = tsk->mm;
if (! mm)
return;
/* Non-blocking lock: give up rather than sleep here. */
if (! down_read_trylock(&mm->mmap_sem))
return;
vma = __stp_find_file_based_vma(mm, addr);
if (vma) {
/* Snapshot the vma geometry while the lock is held. */
addr = vma->vm_start;
length = vma->vm_end - vma->vm_start;
offset = (vma->vm_pgoff << PAGE_SHIFT);
vm_flags = vma->vm_flags;
dentry = vma->vm_file->f_dentry;
mmpath_buf = _stp_kmalloc(PATH_MAX);
if (mmpath_buf == NULL) {
up_read(&mm->mmap_sem);
_stp_error("Unable to allocate space for path");
return;
}
else {
#ifdef STAPCONF_DPATH_PATH
mmpath = d_path(&(vma->vm_file->f_path), mmpath_buf,
PATH_MAX);
#else
mmpath = d_path(vma->vm_file->f_dentry,
vma->vm_file->f_vfsmnt, mmpath_buf,
PATH_MAX);
#endif
if (mmpath == NULL || IS_ERR(mmpath)) {
long err = ((mmpath == NULL) ? 0
: -PTR_ERR(mmpath));
_stp_error("Unable to get path (error %ld) for pid %d",
err, (int)tsk->pid);
mmpath = NULL;
}
}
}
up_read(&mm->mmap_sem);
/* Callbacks run after dropping mmap_sem, since they may sleep. */
if (mmpath)
__stp_call_mmap_callbacks(tgt, tsk, mmpath, dentry, addr,
length, offset, vm_flags);
if (mmpath_buf)
_stp_kfree(mmpath_buf);
return;
}
static inline void
__stp_call_munmap_callbacks(struct stap_task_finder_target *tgt,
struct task_struct *tsk, unsigned long addr,
unsigned long length)
{
struct list_head *cb_node;
int rc;
if (tgt == NULL || tsk == NULL)
return;
list_for_each(cb_node, &tgt->callback_list_head) {
struct stap_task_finder_target *cb_tgt;
cb_tgt = list_entry(cb_node, struct stap_task_finder_target,
callback_list);
if (cb_tgt == NULL || cb_tgt->munmap_callback == NULL)
continue;
rc = cb_tgt->munmap_callback(cb_tgt, tsk, addr, length);
if (rc != 0) {
_stp_warn("task_finder munmap %s%scallback for task %d failed: %d",
(cb_tgt->purpose?:""), (cb_tgt->purpose?" ":""),
(int)tsk->pid, rc);
}
}
}
static inline void
__stp_call_mprotect_callbacks(struct stap_task_finder_target *tgt,
struct task_struct *tsk, unsigned long addr,
unsigned long length, int prot)
{
struct list_head *cb_node;
int rc;
if (tgt == NULL || tsk == NULL)
return;
list_for_each(cb_node, &tgt->callback_list_head) {
struct stap_task_finder_target *cb_tgt;
cb_tgt = list_entry(cb_node, struct stap_task_finder_target,
callback_list);
if (cb_tgt == NULL || cb_tgt->mprotect_callback == NULL)
continue;
rc = cb_tgt->mprotect_callback(cb_tgt, tsk, addr, length,
prot);
if (rc != 0) {
_stp_warn("task_finder mprotect %s%scallback for task %d failed: %d",
(cb_tgt->purpose?:""), (cb_tgt->purpose?" ":""),
(int)tsk->pid, rc);
}
}
}
/* Attach per-target engines to TSK for every registered target whose
 * procname matches FILENAME (pid-based targets are skipped here).
 * The task is attached with UTRACE_STOP so the quiesce handler can
 * finish setup.  process_p is currently unused in this function. */
static inline void
__stp_utrace_attach_match_filename(struct task_struct *tsk,
const char * const filename,
int process_p)
{
size_t filelen;
struct list_head *tgt_node;
struct stap_task_finder_target *tgt;
uid_t tsk_euid;
/* Fetch the task's euid; the mechanism varies by kernel version
 * and user-namespace support. */
#ifdef STAPCONF_TASK_UID
tsk_euid = tsk->euid;
#else
#if defined(CONFIG_USER_NS) || (LINUX_VERSION_CODE >= KERNEL_VERSION(3,14,0))
tsk_euid = from_kuid_munged(current_user_ns(), task_euid(tsk));
#else
tsk_euid = task_euid(tsk);
#endif
#endif
filelen = strlen(filename);
list_for_each(tgt_node, &__stp_task_finder_list) {
int rc;
tgt = list_entry(tgt_node, struct stap_task_finder_target,
list);
if (tgt == NULL)
continue;
/* Skip procname-based targets that don't match. */
else if (tgt->pathlen > 0
&& (tgt->pathlen != filelen
|| strcmp(tgt->procname, filename) != 0))
continue;
/* Pid-based targets are handled elsewhere. */
else if (tgt->pid != 0)
continue;
#if ! STP_PRIVILEGE_CONTAINS (STP_PRIVILEGE, STP_PR_STAPDEV) && \
! STP_PRIVILEGE_CONTAINS (STP_PRIVILEGE, STP_PR_STAPSYS)
/* Unprivileged users may only trace their own processes. */
if (_stp_uid != tsk_euid) {
if (tgt->pid != 0) {
_stp_warn("Process %d does not belong to unprivileged user %d",
tsk->pid, _stp_uid);
}
continue;
}
#endif
/* Stop the task so target_quiesce can finish the setup. */
rc = __stp_utrace_attach(tsk, &tgt->ops, tgt,
__STP_ATTACHED_TASK_EVENTS,
UTRACE_STOP);
if (rc != 0 && rc != EPERM)
break;
/* NOTE(review): engine_attached is set even when attach
 * returned EPERM — presumably so shutdown still tries a
 * detach; confirm against upstream intent. */
tgt->engine_attached = 1;
}
}
/* Resolve PATH_TSK's executable path and attach per-target engines to
 * MATCH_TSK for every registered target whose procname matches it.
 * process_p is forwarded to the matcher.
 * Fix: removed two dead, empty '#if 0 ... #endif' stubs that only
 * added clutter. */
static void
__stp_utrace_attach_match_tsk(struct task_struct *path_tsk,
			      struct task_struct *match_tsk, int process_p)
{
	struct mm_struct *mm;
	char *mmpath_buf;
	char *mmpath;

	if (path_tsk == NULL || path_tsk->pid <= 0
	    || match_tsk == NULL || match_tsk->pid <= 0)
		return;

	/* Kernel threads have no mm: nothing to match against. */
	mm = path_tsk->mm;
	if (! mm) {
		return;
	}

	mmpath_buf = _stp_kmalloc(PATH_MAX);
	if (mmpath_buf == NULL) {
		_stp_error("Unable to allocate space for path");
		return;
	}

	mmpath = __stp_get_mm_path(mm, mmpath_buf, PATH_MAX);
	if (mmpath == NULL || IS_ERR(mmpath)) {
		int rc = -PTR_ERR(mmpath);
		/* ENOENT just means the path couldn't be resolved. */
		if (rc != ENOENT)
			_stp_error("Unable to get path (error %d) for pid %d",
				   rc, (int)path_tsk->pid);
	}
	else {
		__stp_utrace_attach_match_filename(match_tsk, mmpath,
						   process_p);
	}

	_stp_kfree(mmpath_buf);
	return;
}
/* task_work handler that runs in the context of a newly-cloned task:
 * attaches the global task-finder engine to it and, if its executable
 * matches a target, the per-target engines too.  tf_work->task is the
 * parent whose executable path is used for matching. */
static void
__stp_tf_clone_worker(struct task_work *work)
{
struct __stp_tf_task_work *tf_work = \
container_of(work, struct __stp_tf_task_work, work);
struct utrace_engine_ops *ops = \
(struct utrace_engine_ops *)tf_work->data;
struct task_struct *parent = tf_work->task;
int rc;
might_sleep();
/* Pull out what we need, then free the bookkeeping entry. */
__stp_tf_free_task_work(work);
/* Bail out quietly if we're shutting down or the task is dying. */
if (atomic_read(&__stp_task_finder_state) != __STP_TF_RUNNING
|| current->flags & PF_EXITING) {
stp_task_work_func_done();
return;
}
__stp_tf_handler_start();
rc = __stp_utrace_attach(current, ops, 0,
__STP_TASK_FINDER_EVENTS, UTRACE_RESUME);
if (rc == 0 || rc == EPERM) {
/* process_p: the new task is a "process" when it is its
 * own thread-group leader. */
__stp_utrace_attach_match_tsk(parent, current,
(current->pid == current->tgid));
}
__stp_tf_handler_end();
stp_task_work_func_done();
return;
}
/* Global-engine clone callback.  We can't attach to the child here (we
 * may not be able to sleep), so queue a task_work item that runs
 * __stp_tf_clone_worker() in the child's own context.
 * Fix: the early return on task_work allocation failure skipped
 * __stp_tf_handler_end(), leaking the __stp_inuse_count increment and
 * stalling the shutdown polling loop in stap_stop_task_finder(). */
static u32
__stp_utrace_task_finder_report_clone(u32 action,
				      struct utrace_engine *engine,
				      unsigned long clone_flags,
				      struct task_struct *child)
{
	int rc;
	struct task_work *work;

	if (atomic_read(&__stp_task_finder_state) != __STP_TF_RUNNING) {
		debug_task_finder_detach();
		return UTRACE_DETACH;
	}

	__stp_tf_handler_start();

	work = __stp_tf_alloc_task_work((void *)(engine->ops));
	if (work == NULL) {
		_stp_error("Unable to allocate space for task_work");
		__stp_tf_handler_end();
		return UTRACE_RESUME;
	}
	stp_init_task_work(work, &__stp_tf_clone_worker);
	rc = stp_task_work_add(child, work);
	/* -ESRCH just means the child is already exiting. */
	if (rc != 0 && rc != -ESRCH) {
		printk(KERN_ERR "%s:%d - stp_task_work_add() returned %d\n",
		       __FUNCTION__, __LINE__, rc);
	}
	__stp_tf_handler_end();
	return UTRACE_RESUME;
}
/* Global-engine exec callback: the process image just changed, so
 * re-match the (new) executable path against all registered targets.
 * An exec'ing task is always a thread-group leader, hence process_p=1.
 * Fix: removed four unused locals (filelen, tgt_node, tgt,
 * found_node). */
static u32
__stp_utrace_task_finder_report_exec(u32 action,
				     struct utrace_engine *engine,
				     const struct linux_binfmt *fmt,
				     const struct linux_binprm *bprm,
				     struct pt_regs *regs)
{
	if (atomic_read(&__stp_task_finder_state) != __STP_TF_RUNNING) {
		debug_task_finder_detach();
		return UTRACE_DETACH;
	}

	__stp_tf_handler_start();
	__stp_utrace_attach_match_tsk(current, current, 1);
	__stp_tf_handler_end();
	return UTRACE_RESUME;
}
/* Global-engine death callback: the traced task is exiting, so simply
 * drop the engine. */
static u32
stap_utrace_task_finder_report_death(struct utrace_engine *engine,
				     bool group_dead, int signal)
{
	debug_task_finder_detach();
	return UTRACE_DETACH;
}
/* Per-target exec callback: the old process image is going away, so
 * report an unregister (register_p=0, process_p=1) for it and detach
 * this engine; the global engine's exec handler re-attaches to the
 * new image if it still matches.
 * Fix: removed the unused local 'rc'. */
static u32
__stp_utrace_task_finder_target_exec(u32 action,
				     struct utrace_engine *engine,
				     const struct linux_binfmt *fmt,
				     const struct linux_binprm *bprm,
				     struct pt_regs *regs)
{
	struct task_struct *tsk = current;
	struct stap_task_finder_target *tgt = engine->data;

	if (atomic_read(&__stp_task_finder_state) != __STP_TF_RUNNING) {
		debug_task_finder_detach();
		return UTRACE_DETACH;
	}

	__stp_tf_handler_start();
	if (tgt != NULL && tsk != NULL) {
		__stp_call_callbacks(tgt, tsk, 0, 1);
	}
	__stp_tf_handler_end();
	debug_task_finder_detach();
	return UTRACE_DETACH;
}
/* Per-target death callback: report an unregister for the dying task.
 * process_p is 1 when this is the last live thread of the group
 * (signal->live has dropped to zero), i.e. the whole process is gone. */
static u32
__stp_utrace_task_finder_target_death(struct utrace_engine *engine,
bool group_dead, int signal)
{
struct task_struct *tsk = current;
struct stap_task_finder_target *tgt = engine->data;
if (atomic_read(&__stp_task_finder_state) != __STP_TF_RUNNING) {
debug_task_finder_detach();
return UTRACE_DETACH;
}
__stp_tf_handler_start();
if (tgt != NULL && tsk != NULL) {
__stp_call_callbacks(tgt, tsk, 0,
((tsk->signal == NULL)
|| (atomic_read(&tsk->signal->live) == 0)));
}
__stp_tf_handler_end();
debug_task_finder_detach();
return UTRACE_DETACH;
}
/* Replay all of TSK's existing file-backed mappings through TGT's
 * mmap callbacks.  Because the callbacks may sleep, we must not hold
 * mmap_sem while running them: pass one takes the lock, counts the
 * file-backed vmas, and snapshots each one's geometry (pinning the
 * path/dentry/vfsmount with get/put refs); pass two runs with the
 * lock dropped, resolving each snapshot's path and invoking the
 * callbacks. */
static void
__stp_call_mmap_callbacks_for_task(struct stap_task_finder_target *tgt,
struct task_struct *tsk)
{
struct mm_struct *mm;
char *mmpath_buf;
char *mmpath;
struct vm_area_struct *vma;
int file_based_vmas = 0;
struct vma_cache_t {
#ifdef STAPCONF_DPATH_PATH
struct path *f_path;
#else
struct vfsmount *f_vfsmnt;
#endif
struct dentry *dentry;
unsigned long addr;
unsigned long length;
unsigned long offset;
unsigned long vm_flags;
};
struct vma_cache_t *vma_cache = NULL;
struct vma_cache_t *vma_cache_p;
mm = tsk->mm;
if (! mm)
return;
mmpath_buf = _stp_kmalloc(PATH_MAX);
if (mmpath_buf == NULL) {
_stp_error("Unable to allocate space for path");
return;
}
/* Non-blocking lock: give up rather than sleep here. */
if (! down_read_trylock(&mm->mmap_sem)) {
_stp_kfree(mmpath_buf);
return;
}
/* Pass 1a: count the file-based vmas to size the cache. */
vma = mm->mmap;
while (vma) {
if (vma->vm_file)
file_based_vmas++;
vma = vma->vm_next;
}
if (file_based_vmas > 0)
vma_cache = _stp_kmalloc(sizeof(struct vma_cache_t)
* file_based_vmas);
/* Pass 1b: snapshot each file-based vma, taking references so
 * the path objects survive after mmap_sem is dropped. */
if (vma_cache != NULL) {
vma = mm->mmap;
vma_cache_p = vma_cache;
while (vma) {
if (vma->vm_file) {
#ifdef STAPCONF_DPATH_PATH
vma_cache_p->f_path = &(vma->vm_file->f_path);
path_get(vma_cache_p->f_path);
#else
vma_cache_p->dentry = vma->vm_file->f_dentry;
dget(vma_cache_p->dentry);
vma_cache_p->f_vfsmnt = vma->vm_file->f_vfsmnt;
mntget(vma_cache_p->f_vfsmnt);
#endif
vma_cache_p->dentry = vma->vm_file->f_dentry;
vma_cache_p->addr = vma->vm_start;
vma_cache_p->length = vma->vm_end - vma->vm_start;
vma_cache_p->offset = (vma->vm_pgoff << PAGE_SHIFT);
vma_cache_p->vm_flags = vma->vm_flags;
vma_cache_p++;
}
vma = vma->vm_next;
}
}
up_read(&mm->mmap_sem);
/* Pass 2: lock dropped — resolve each snapshot's path, drop the
 * references, and run the callbacks. */
if (vma_cache) {
int i;
vma_cache_p = vma_cache;
for (i = 0; i < file_based_vmas; i++) {
#ifdef STAPCONF_DPATH_PATH
mmpath = d_path(vma_cache_p->f_path, mmpath_buf,
PATH_MAX);
path_put(vma_cache_p->f_path);
#else
mmpath = d_path(vma_cache_p->dentry,
vma_cache_p->f_vfsmnt, mmpath_buf,
PATH_MAX);
dput(vma_cache_p->dentry);
mntput(vma_cache_p->f_vfsmnt);
#endif
if (mmpath == NULL || IS_ERR(mmpath)) {
long err = ((mmpath == NULL) ? 0
: -PTR_ERR(mmpath));
_stp_error("Unable to get path (error %ld) for pid %d",
err, (int)tsk->pid);
}
else {
__stp_call_mmap_callbacks(tgt, tsk, mmpath,
vma_cache_p->dentry,
vma_cache_p->addr,
vma_cache_p->length,
vma_cache_p->offset,
vma_cache_p->vm_flags);
}
vma_cache_p++;
}
_stp_kfree(vma_cache);
}
_stp_kfree(mmpath_buf);
}
/* task_work handler used when the quiesce callback fired in atomic
 * context: runs the register callbacks (and the initial mmap replay
 * for group leaders) from the target task's own, sleepable context. */
static void
__stp_tf_quiesce_worker(struct task_work *work)
{
struct __stp_tf_task_work *tf_work = \
container_of(work, struct __stp_tf_task_work, work);
struct stap_task_finder_target *tgt = \
(struct stap_task_finder_target *)tf_work->data;
might_sleep();
/* Pull out the target, then free the bookkeeping entry. */
__stp_tf_free_task_work(work);
/* Bail out quietly if we're shutting down or the task is dying. */
if (atomic_read(&__stp_task_finder_state) != __STP_TF_RUNNING
|| current->flags & PF_EXITING) {
stp_task_work_func_done();
return;
}
__stp_tf_handler_start();
__stp_call_callbacks(tgt, current, 1, (current->pid == current->tgid));
/* Replay existing mappings for the thread-group leader only. */
if (tgt->mmap_events == 1 && current->tgid == current->pid) {
__stp_call_mmap_callbacks_for_task(tgt, current);
}
__stp_tf_handler_end();
stp_task_work_func_done();
return;
}
/* Per-target quiesce callback: the task stopped after attach.  Narrow
 * the event mask to the steady-state set, then run the registration
 * callbacks — directly when we may sleep, otherwise deferred through
 * task_work.
 * Fix: the early return on task_work allocation failure skipped
 * __stp_tf_handler_end(), leaking the __stp_inuse_count increment and
 * stalling the shutdown polling loop. */
static u32
__stp_utrace_task_finder_target_quiesce(u32 action,
					struct utrace_engine *engine,
					unsigned long event)
{
	struct task_struct *tsk = current;
	struct stap_task_finder_target *tgt = engine->data;
	int rc;

	if (atomic_read(&__stp_task_finder_state) != __STP_TF_RUNNING) {
		debug_task_finder_detach();
		return UTRACE_DETACH;
	}
	if (tgt == NULL || tsk == NULL) {
		debug_task_finder_detach();
		return UTRACE_DETACH;
	}

	__stp_tf_handler_start();

	/* Switch to the steady-state mask (syscall events only when vm
	 * callbacks were requested for this target). */
	rc = utrace_set_events(tsk, engine,
			       __STP_ATTACHED_TASK_BASE_EVENTS(tgt));
	if (rc == -EINPROGRESS) {
		/* A callback is in progress: wait it out, then retry. */
		do {
			rc = utrace_barrier(tsk, engine);
		} while (rc == -ERESTARTSYS);
		if (rc == 0)
			rc = utrace_set_events(tsk, engine,
					       __STP_ATTACHED_TASK_BASE_EVENTS(tgt));
		else if (rc != -ESRCH && rc != -EALREADY)
			_stp_error("utrace_barrier returned error %d on pid %d",
				   rc, (int)tsk->pid);
	}
	if (rc != 0)
		_stp_error("utrace_set_events returned error %d on pid %d",
			   rc, (int)tsk->pid);

	if (in_atomic() || irqs_disabled()) {
		struct task_work *work;

		/* Can't sleep here: defer the callbacks to task context. */
		work = __stp_tf_alloc_task_work(tgt);
		if (work == NULL) {
			_stp_error("Unable to allocate space for task_work");
			__stp_tf_handler_end();
			return UTRACE_RESUME;
		}
		stp_init_task_work(work, &__stp_tf_quiesce_worker);
		rc = stp_task_work_add(tsk, work);
		/* -ESRCH just means the task is already exiting. */
		if (rc != 0 && rc != -ESRCH) {
			printk(KERN_ERR "%s:%d - stp_task_work_add() returned %d\n",
			       __FUNCTION__, __LINE__, rc);
		}
	}
	else {
		__stp_call_callbacks(tgt, tsk, 1, (tsk->pid == tsk->tgid));
		/* Replay existing mappings for the group leader only. */
		if (tgt->mmap_events == 1 && tsk->tgid == tsk->pid) {
			__stp_call_mmap_callbacks_for_task(tgt, tsk);
		}
	}
	__stp_tf_handler_end();
	return UTRACE_RESUME;
}
FIXMEstatic u32
__stp_utrace_task_finder_target_syscall_entry(u32 action,
struct utrace_engine *engine,
struct pt_regs *regs)
{
struct task_struct *tsk = current;
struct stap_task_finder_target *tgt = engine->data;
long syscall_no;
unsigned long args[3] = { 0L };
int rc;
int is_mmap_or_mmap2 = 0;
int is_mprotect = 0;
int is_munmap = 0;
if (atomic_read(&__stp_task_finder_state) != __STP_TF_RUNNING) {
debug_task_finder_detach();
return UTRACE_DETACH;
}
if (unlikely(tgt == NULL))
return UTRACE_RESUME;
FIXME syscall_no = syscall_get_nr(tsk, regs);
is_mmap_or_mmap2 = (syscall_no == MMAP_SYSCALL_NO(tsk)
|| syscall_no == MMAP2_SYSCALL_NO(tsk) ? 1 : 0);
if (!is_mmap_or_mmap2) {
is_mprotect = (syscall_no == MPROTECT_SYSCALL_NO(tsk) ? 1 : 0);
if (!is_mprotect) {
is_munmap = (syscall_no == MUNMAP_SYSCALL_NO(tsk)
? 1 : 0);
}
}
if (!is_mmap_or_mmap2 && !is_mprotect && !is_munmap)
return UTRACE_RESUME;
if ((is_mmap_or_mmap2 && tgt->mmap_events == 0)
|| (is_mprotect && tgt->mprotect_events == 0)
|| (is_munmap && tgt->munmap_events == 0))
return UTRACE_RESUME;
__stp_tf_handler_start();
if (is_munmap) {
syscall_get_arguments(tsk, regs, 0, 2, args);
}
else if (is_mprotect) {
syscall_get_arguments(tsk, regs, 0, 3, args);
}
rc = __stp_tf_add_map(tsk, syscall_no, args[0], args[1], args[2]);
if (rc != 0)
_stp_error("__stp_tf_add_map returned error %d on pid %d",
rc, tsk->pid);
__stp_tf_handler_end();
return UTRACE_RESUME;
}
/* task_work handler used when a traced mmap/mprotect/munmap completed
 * in atomic context: re-reads the saved map entry for the current task
 * and dispatches to the matching callback family from sleepable
 * context.  For mmap/mmap2, arg0 was overwritten at syscall exit with
 * the syscall's return value (the mapped address). */
static void
__stp_tf_mmap_worker(struct task_work *work)
{
struct __stp_tf_task_work *tf_work = \
container_of(work, struct __stp_tf_task_work, work);
struct stap_task_finder_target *tgt = \
(struct stap_task_finder_target *)tf_work->data;
struct __stp_tf_map_entry *entry;
might_sleep();
/* Pull out the target, then free the bookkeeping entry. */
__stp_tf_free_task_work(work);
/* Get the saved syscall entry data for this task. */
entry = __stp_tf_get_map_entry(current);
if (entry == NULL) {
stp_task_work_func_done();
return;
}
/* Bail out quietly if we're shutting down or the task is dying. */
if (atomic_read(&__stp_task_finder_state) != __STP_TF_RUNNING
|| current->flags & PF_EXITING) {
__stp_tf_remove_map_entry(entry);
stp_task_work_func_done();
return;
}
__stp_tf_handler_start();
if (entry->syscall_no == MUNMAP_SYSCALL_NO(current)) {
__stp_call_munmap_callbacks(tgt, current, entry->arg0,
entry->arg1);
}
else if (entry->syscall_no == MMAP_SYSCALL_NO(current)
|| entry->syscall_no == MMAP2_SYSCALL_NO(current)) {
__stp_call_mmap_callbacks_with_addr(tgt, current, entry->arg0);
}
else { __stp_call_mprotect_callbacks(tgt, current, entry->arg0,
entry->arg1, entry->arg2);
}
__stp_tf_remove_map_entry(entry);
__stp_tf_handler_end();
stp_task_work_func_done();
return;
}
/* Per-target syscall-exit callback: pair the return value with the
 * entry data saved by the syscall-entry handler and dispatch the
 * munmap/mmap/mprotect callbacks — directly when we may sleep, else
 * deferred through task_work (__stp_tf_mmap_worker). */
static u32
__stp_utrace_task_finder_target_syscall_exit(u32 action,
struct utrace_engine *engine,
struct pt_regs *regs)
{
struct task_struct *tsk = current;
struct stap_task_finder_target *tgt = engine->data;
unsigned long rv;
struct __stp_tf_map_entry *entry;
if (atomic_read(&__stp_task_finder_state) != __STP_TF_RUNNING) {
debug_task_finder_detach();
return UTRACE_DETACH;
}
if (tgt == NULL)
return UTRACE_RESUME;
/* No saved entry means this wasn't a syscall we tracked. */
entry = __stp_tf_get_map_entry(tsk);
if (entry == NULL)
return UTRACE_RESUME;
__stp_tf_handler_start();
rv = syscall_get_return_value(tsk, regs);
dbug_task_vma(1,
"tsk %d found %s(0x%lx), returned 0x%lx\n",
tsk->pid,
((entry->syscall_no == MMAP_SYSCALL_NO(tsk)) ? "mmap"
: ((entry->syscall_no == MMAP2_SYSCALL_NO(tsk)) ? "mmap2"
: ((entry->syscall_no == MPROTECT_SYSCALL_NO(tsk))
? "mprotect"
: ((entry->syscall_no == MUNMAP_SYSCALL_NO(tsk))
? "munmap"
: "UNKNOWN")))),
entry->arg0, rv);
if (in_atomic() || irqs_disabled()) {
struct task_work *work;
int rc;
/* Can't sleep: stash the mmap return address in arg0 and
 * defer the callbacks to task context. */
if (entry->syscall_no == MMAP_SYSCALL_NO(tsk)
|| entry->syscall_no == MMAP2_SYSCALL_NO(tsk)) {
entry->arg0 = rv;
}
work = __stp_tf_alloc_task_work(tgt);
if (work == NULL) {
_stp_error("Unable to allocate space for task_work");
__stp_tf_remove_map_entry(entry);
__stp_tf_handler_end();
return UTRACE_RESUME;
}
stp_init_task_work(work, &__stp_tf_mmap_worker);
rc = stp_task_work_add(tsk, work);
/* -ESRCH just means the task is already exiting. */
if (rc != 0 && rc != -ESRCH) {
printk(KERN_ERR "%s:%d - stp_task_work_add() returned %d\n",
__FUNCTION__, __LINE__, rc);
}
}
else {
if (entry->syscall_no == MUNMAP_SYSCALL_NO(tsk)) {
__stp_call_munmap_callbacks(tgt, tsk, entry->arg0,
entry->arg1);
}
else if (entry->syscall_no == MMAP_SYSCALL_NO(tsk)
|| entry->syscall_no == MMAP2_SYSCALL_NO(tsk)) {
__stp_call_mmap_callbacks_with_addr(tgt, tsk, rv);
}
else { __stp_call_mprotect_callbacks(tgt, tsk, entry->arg0,
entry->arg1, entry->arg2);
}
__stp_tf_remove_map_entry(entry);
}
__stp_tf_handler_end();
return UTRACE_RESUME;
}
/* Callback table for the global task-finder engine attached to every
 * thread: watches clone (new tasks), exec (image changes) and death. */
static struct utrace_engine_ops __stp_utrace_task_finder_ops = {
.report_clone = __stp_utrace_task_finder_report_clone,
.report_exec = __stp_utrace_task_finder_report_exec,
.report_death = stap_utrace_task_finder_report_death,
};
/* Start the task finder: attach the global clone/exec/death engine to
 * every existing thread, and attach per-target engines (stopped, so
 * quiesce finishes the setup) to threads that already match a target.
 * Returns 0 or a positive errno-style value.
 * Fixes: removed a stray "XXX" token fused into the utrace_init error
 * path, and added the missing task_unlock(tsk) before skipping tasks
 * without an mm (the original leaked the task lock there). */
static int
stap_start_task_finder(void)
{
	int rc = 0;
	struct task_struct *grp, *tsk;
	char *mmpath_buf;
	uid_t tsk_euid;

	if (atomic_inc_return(&__stp_task_finder_state) != __STP_TF_STARTING) {
		atomic_dec(&__stp_task_finder_state);
		_stp_error("task_finder already started");
		return EBUSY;
	}

	rc = utrace_init();
	if (rc != 0) {
		atomic_dec(&__stp_task_finder_state);
		_stp_error("Failed to initialize utrace hooks");
		return ENOMEM;
	}

	mmpath_buf = _stp_kmalloc(PATH_MAX);
	if (mmpath_buf == NULL) {
		/* NOTE(review): this path leaves the state counter at
		 * STARTING, matching the original behavior. */
		_stp_error("Unable to allocate space for path");
		return ENOMEM;
	}

	__stp_tf_map_initialize();

	atomic_set(&__stp_task_finder_state, __STP_TF_RUNNING);

	rcu_read_lock();
	do_each_thread(grp, tsk) {
		struct mm_struct *mm;
		char *mmpath;
		size_t mmpathlen;
		struct list_head *tgt_node;

		/* When targeting one process, skip all the others. */
		if (_stp_target && tsk->tgid != _stp_target)
			continue;

		/* Attach the global engine (clone/exec/death). */
		rc = __stp_utrace_attach(tsk, &__stp_utrace_task_finder_ops, 0,
					 __STP_TASK_FINDER_EVENTS,
					 UTRACE_RESUME);
		if (rc == EPERM) {
			/* EPERM marks tasks we simply can't attach to
			 * (e.g. kernel threads): not an error. */
			rc = 0;
			continue;
		}
		else if (rc != 0) {
			goto stf_err;
		}

		task_lock(tsk);
		if (! tsk->mm) {
			/* Kernel thread: release the lock and skip. */
			task_unlock(tsk);
			continue;
		}
		mmpath = __stp_get_mm_path(tsk->mm, mmpath_buf, PATH_MAX);
		task_unlock(tsk);
		if (mmpath == NULL || IS_ERR(mmpath)) {
			rc = -PTR_ERR(mmpath);
			if (rc == ENOENT) {
				rc = 0;
				continue;
			}
			else {
				_stp_error("Unable to get path (error %d) for pid %d",
					   rc, (int)tsk->pid);
				goto stf_err;
			}
		}

		/* Fetch the task's euid; the mechanism varies by kernel
		 * version and user-namespace support. */
#ifdef STAPCONF_TASK_UID
		tsk_euid = tsk->euid;
#else
#if defined(CONFIG_USER_NS) || (LINUX_VERSION_CODE >= KERNEL_VERSION(3,14,0))
		tsk_euid = from_kuid_munged(current_user_ns(), task_euid(tsk));
#else
		tsk_euid = task_euid(tsk);
#endif
#endif
		mmpathlen = strlen(mmpath);
		list_for_each(tgt_node, &__stp_task_finder_list) {
			struct stap_task_finder_target *tgt;

			tgt = list_entry(tgt_node,
					 struct stap_task_finder_target, list);
			if (tgt == NULL)
				continue;
			/* Procname-based target that doesn't match? */
			else if (tgt->pathlen > 0
				 && (tgt->pathlen != mmpathlen
				     || strcmp(tgt->procname, mmpath) != 0))
				continue;
			/* Pid-based target that doesn't match? */
			else if (tgt->pid != 0 && tgt->pid != tsk->pid)
				continue;

#if ! STP_PRIVILEGE_CONTAINS (STP_PRIVILEGE, STP_PR_STAPDEV) && \
    ! STP_PRIVILEGE_CONTAINS (STP_PRIVILEGE, STP_PR_STAPSYS)
			/* Unprivileged users may only trace their own
			 * processes. */
			if (_stp_uid != tsk_euid) {
				if (tgt->pid != 0 || _stp_target) {
					_stp_warn("Process %d does not belong to unprivileged user %d",
						  tsk->pid, _stp_uid);
				}
				continue;
			}
#endif

			/* Attach the per-target engine, stopping the
			 * task so quiesce can finish the setup. */
			rc = __stp_utrace_attach(tsk, &tgt->ops, tgt,
						 __STP_ATTACHED_TASK_EVENTS,
						 UTRACE_STOP);
			if (rc != 0 && rc != EPERM)
				goto stf_err;
			rc = 0;
			tgt->engine_attached = 1;
		}
	} while_each_thread(grp, tsk);
 stf_err:
	rcu_read_unlock();

	_stp_kfree(mmpath_buf);
	debug_task_finder_report();
	return rc;
}
/* After startup, nudge every matched thread-group leader with
 * UTRACE_INTERRUPT so its quiesce callback (and thus the register
 * callbacks) actually runs, then mark the finder "complete". */
static void
stap_task_finder_post_init(void)
{
struct task_struct *grp, *tsk;
if (atomic_read(&__stp_task_finder_state) != __STP_TF_RUNNING) {
_stp_error("task_finder not running?");
return;
}
#ifdef DEBUG_TASK_FINDER
printk(KERN_ERR "%s:%d - entry.\n", __FUNCTION__, __LINE__);
#endif
rcu_read_lock();
do_each_thread(grp, tsk) {
struct list_head *tgt_node;
/* Stop early if a shutdown began while we were iterating. */
if (atomic_read(&__stp_task_finder_state) != __STP_TF_RUNNING) {
#ifdef DEBUG_TASK_FINDER
printk(KERN_ERR "%s:%d - exiting early...\n",
__FUNCTION__, __LINE__);
#endif
break;
}
if (_stp_target && tsk->tgid != _stp_target)
continue;
/* Only thread-group leaders need the nudge. */
if (tsk->tgid != tsk->pid)
continue;
list_for_each(tgt_node, &__stp_task_finder_list) {
struct stap_task_finder_target *tgt;
struct utrace_engine *engine;
tgt = list_entry(tgt_node,
struct stap_task_finder_target, list);
if (tgt == NULL || !tgt->engine_attached)
continue;
/* Look up (without creating) this target's engine. */
engine = utrace_attach_task(tsk,
UTRACE_ATTACH_MATCH_OPS,
&tgt->ops, tgt);
if (engine != NULL && !IS_ERR(engine)) {
/* -EINPROGRESS means the interrupt will take
 * effect at the next safe point. */
int rc = utrace_control(tsk, engine,
UTRACE_INTERRUPT);
if (rc != 0 && rc != -EINPROGRESS) {
_stp_error("utrace_control returned error %d on pid %d",
rc, (int)tsk->pid);
}
utrace_engine_put(engine);
/* One interrupt per task is enough. */
break;
}
}
} while_each_thread(grp, tsk);
rcu_read_unlock();
atomic_set(&__stp_task_finder_complete, 1);
#ifdef DEBUG_TASK_FINDER
printk(KERN_ERR "%s:%d - exit.\n", __FUNCTION__, __LINE__);
#endif
return;
}
/* Return nonzero once stap_task_finder_post_init() has finished. */
static inline int
stap_task_finder_complete(void)
{
	return (atomic_read(&__stp_task_finder_complete) ? 1 : 0);
}
/* Shut the task finder down: flip the state to STOPPING/STOPPED, tear
 * down utrace, wait for in-flight handlers to drain, then cancel any
 * still-pending task_work items and finish the utrace teardown. */
static void
stap_stop_task_finder(void)
{
#ifdef DEBUG_TASK_FINDER
int i = 0;
printk(KERN_ERR "%s:%d - entry\n", __FUNCTION__, __LINE__);
#endif
if (atomic_read(&__stp_task_finder_state) == __STP_TF_UNITIALIZED)
return;
atomic_set(&__stp_task_finder_state, __STP_TF_STOPPING);
debug_task_finder_report();
utrace_shutdown();
debug_task_finder_report();
atomic_set(&__stp_task_finder_state, __STP_TF_STOPPED);
/* Poll until every in-flight handler has left its
 * __stp_tf_handler_start()/__stp_tf_handler_end() section. */
while (atomic_read(&__stp_inuse_count) != 0) {
schedule();
#ifdef DEBUG_TASK_FINDER
i++;
#endif
}
#ifdef DEBUG_TASK_FINDER
if (i > 0)
printk(KERN_ERR "it took %d polling loops to quit.\n", i);
#endif
debug_task_finder_report();
/* NOTE(review): stp_task_work_exit() runs before
 * __stp_tf_cancel_task_work(); presumably it quiesces the
 * task_work machinery so the cancel pass can't race new
 * additions — confirm against stp_utrace.c. */
stp_task_work_exit();
__stp_tf_cancel_task_work();
utrace_exit();
#ifdef DEBUG_TASK_FINDER
printk(KERN_ERR "%s:%d - exit\n", __FUNCTION__, __LINE__);
#endif
}
#endif