/* runtime/task_finder_vma.c - systemtap
 *
 * Tracks vma (memory mapping) information per traced task in a small
 * pid-hashed table.  Global variables, data types, functions and
 * macros for the task-finder vma map are defined below.
 */
#ifndef TASK_FINDER_VMA_C
#define TASK_FINDER_VMA_C
#include <linux/list.h>
#include <linux/jhash.h>
#include <linux/fs.h>
#include <linux/dcache.h>
#include "stp_helper_lock.h"
static STP_DEFINE_RWLOCK(__stp_tf_vma_lock);
#define __STP_TF_HASH_BITS 4
#define __STP_TF_TABLE_SIZE (1 << __STP_TF_HASH_BITS)
#ifndef TASK_FINDER_VMA_ENTRY_PATHLEN
#define TASK_FINDER_VMA_ENTRY_PATHLEN 64
#elif TASK_FINDER_VMA_ENTRY_PATHLEN < 8
#error "gimme a little more TASK_FINDER_VMA_ENTRY_PATHLEN"
#endif
/* One recorded vma (memory mapping) for a traced task.  Entries live in
 * the pid-hashed table __stp_tf_vma_map and are protected by
 * __stp_tf_vma_lock. */
struct __stp_tf_vma_entry {
struct hlist_node hlist;	/* chain link in a hash-table bucket */
pid_t pid;			/* pid of the task owning this mapping */
unsigned long vm_start;	/* mapping start address */
unsigned long vm_end;	/* mapping end address (exclusive: lookups test addr < vm_end) */
char path[TASK_FINDER_VMA_ENTRY_PATHLEN];	/* backing file path; may be "..."-prefixed when truncated */
void *user;	/* opaque per-entry data supplied by the caller of stap_add_vma_map_info() */
};
/* Bucket array of the vma hash table (__STP_TF_TABLE_SIZE buckets,
 * hashed by pid).  Allocated by stap_initialize_vma_map(), freed by
 * stap_destroy_vma_map(); NULL until initialized. */
static struct hlist_head *__stp_tf_vma_map;
/* Allocate an uninitialized vma map entry, or return NULL on failure.
 * Under CONFIG_UTRACE allocation may sleep; otherwise the non-sleeping
 * allocation flags are used. */
static struct __stp_tf_vma_entry *
__stp_tf_vma_new_entry(void)
{
	size_t len = sizeof(struct __stp_tf_vma_entry);
#ifdef CONFIG_UTRACE
	return (struct __stp_tf_vma_entry *) _stp_kmalloc_gfp(len,
						STP_ALLOC_SLEEP_FLAGS);
#else
	return (struct __stp_tf_vma_entry *) _stp_kmalloc_gfp(len,
						STP_ALLOC_FLAGS);
#endif
}
/* Free an entry previously allocated by __stp_tf_vma_new_entry().
 * The entry must already be unlinked from its hash bucket. */
static void
__stp_tf_vma_release_entry(struct __stp_tf_vma_entry *entry)
{
_stp_kfree (entry);
}
/* Allocate and zero the bucket array for the vma hash table.
 * Returns 0 on success, -ENOMEM if the allocation failed. */
static int
stap_initialize_vma_map(void)
{
	struct hlist_head *buckets;
	size_t len = sizeof(struct hlist_head) * __STP_TF_TABLE_SIZE;

	buckets = (struct hlist_head *) _stp_kzalloc_gfp(len,
						STP_ALLOC_SLEEP_FLAGS);
	if (buckets == NULL)
		return -ENOMEM;
	__stp_tf_vma_map = buckets;
	return 0;
}
/* Tear down the vma hash table: unlink and free every entry in every
 * bucket, then free the bucket array itself.  Resets __stp_tf_vma_map
 * to NULL so later lookups (which guard on a NULL map) cannot touch
 * freed memory. */
static void
stap_destroy_vma_map(void)
{
	if (__stp_tf_vma_map != NULL) {
		int i;
		for (i = 0; i < __STP_TF_TABLE_SIZE; i++) {
			struct hlist_head *head = &__stp_tf_vma_map[i];
			struct hlist_node *node;
			struct hlist_node *n;
			struct __stp_tf_vma_entry *entry = NULL;

			if (hlist_empty(head))
				continue;

			/* _safe variant: entries are deleted while iterating. */
			stap_hlist_for_each_entry_safe(entry, node, n, head, hlist) {
				hlist_del(&entry->hlist);
				__stp_tf_vma_release_entry(entry);
			}
		}
		_stp_kfree(__stp_tf_vma_map);
		/* Fix: clear the dangling pointer.  stap_find_vma_map_info()
		 * and friends test __stp_tf_vma_map against NULL, so leaving
		 * it pointing at freed memory risks a use-after-free. */
		__stp_tf_vma_map = NULL;
	}
}
/* Map a task to its hash-table bucket index by hashing its pid and
 * masking down to the table size (a power of two). */
static inline u32
__stp_tf_vma_map_hash(struct task_struct *tsk)
{
	u32 h = jhash_1word(tsk->pid, 0);
	return h & (__STP_TF_TABLE_SIZE - 1);
}
/* Scan tsk's hash bucket for the entry whose mapping starts at
 * vm_start.  Caller must hold __stp_tf_vma_lock.  Returns the entry,
 * or NULL if none matches. */
static struct __stp_tf_vma_entry *
__stp_tf_get_vma_map_entry_internal(struct task_struct *tsk,
				    unsigned long vm_start)
{
	struct __stp_tf_vma_entry *cur;
	struct hlist_node *pos;
	struct hlist_head *bucket;

	bucket = &__stp_tf_vma_map[__stp_tf_vma_map_hash(tsk)];
	stap_hlist_for_each_entry(cur, pos, bucket, hlist) {
		if (cur->pid == tsk->pid && cur->vm_start == vm_start)
			return cur;
	}
	return NULL;
}
/* Like __stp_tf_get_vma_map_entry_internal(), but keyed on the
 * mapping's END address.  Caller must hold __stp_tf_vma_lock.
 * Returns the entry, or NULL if none matches. */
static struct __stp_tf_vma_entry *
__stp_tf_get_vma_map_entry_end_internal(struct task_struct *tsk,
					unsigned long vm_end)
{
	struct __stp_tf_vma_entry *cur;
	struct hlist_node *pos;
	struct hlist_head *bucket;

	bucket = &__stp_tf_vma_map[__stp_tf_vma_map_hash(tsk)];
	stap_hlist_for_each_entry(cur, pos, bucket, hlist) {
		if (cur->pid == tsk->pid && cur->vm_end == vm_end)
			return cur;
	}
	return NULL;
}
/* Record a new vma mapping [vm_start, vm_end) for tsk, tagged with the
 * backing file path and an opaque user pointer.
 *
 * Returns 0 on success, -EBUSY if an entry for (tsk->pid, vm_start)
 * already exists, -ENOMEM if allocation failed. */
static int
stap_add_vma_map_info(struct task_struct *tsk,
unsigned long vm_start, unsigned long vm_end,
const char *path, void *user)
{
struct hlist_head *head;
struct hlist_node *node;
struct __stp_tf_vma_entry *entry;
struct __stp_tf_vma_entry *new_entry;
unsigned long flags;
/* Allocate up front: under CONFIG_UTRACE the allocation may sleep,
 * which is not allowed while holding the irq-disabled write lock. */
new_entry = __stp_tf_vma_new_entry();
stp_write_lock_irqsave(&__stp_tf_vma_lock, flags);
entry = __stp_tf_get_vma_map_entry_internal(tsk, vm_start);
if (entry != NULL) {
stp_write_unlock_irqrestore(&__stp_tf_vma_lock, flags);
/* Duplicate mapping: discard the speculative allocation. */
if (new_entry)
__stp_tf_vma_release_entry(new_entry);
return -EBUSY; }
if (!new_entry) {
stp_write_unlock_irqrestore(&__stp_tf_vma_lock, flags);
return -ENOMEM;
}
entry = new_entry;
entry->pid = tsk->pid;
entry->vm_start = vm_start;
entry->vm_end = vm_end;
/* If path does not fit, keep its tail (the most informative part of a
 * file path) prefixed with "..." to mark the truncation.  The strncpy
 * writes the three dots and NUL-pads the remainder; the strlcpy then
 * overwrites from offset 3 with the path tail, NUL-terminating. */
if (strlen(path) >= TASK_FINDER_VMA_ENTRY_PATHLEN-3)
{
strncpy (entry->path, "...", TASK_FINDER_VMA_ENTRY_PATHLEN);
strlcpy (entry->path+3, &path[strlen(path)-TASK_FINDER_VMA_ENTRY_PATHLEN+4],
TASK_FINDER_VMA_ENTRY_PATHLEN-3);
}
else
{
strlcpy (entry->path, path, TASK_FINDER_VMA_ENTRY_PATHLEN);
}
entry->user = user;
head = &__stp_tf_vma_map[__stp_tf_vma_map_hash(tsk)];
hlist_add_head(&entry->hlist, head);
stp_write_unlock_irqrestore(&__stp_tf_vma_lock, flags);
return 0;
}
/* Extend an existing mapping of tsk: find the recorded entry whose
 * vm_end equals the new region's vm_start (i.e. the new region adjoins
 * the old one) and grow that entry to the new vm_end.
 *
 * Returns 0 on success, -ESRCH if no adjoining entry exists.
 *
 * Fix: dropped the unused locals `head` and `node`, which only
 * generated -Wunused-variable warnings. */
static int
stap_extend_vma_map_info(struct task_struct *tsk,
			 unsigned long vm_start, unsigned long vm_end)
{
	struct __stp_tf_vma_entry *entry;
	unsigned long flags;
	int res = -ESRCH;

	stp_write_lock_irqsave(&__stp_tf_vma_lock, flags);
	/* Intentional: vm_start is matched against an existing entry's
	 * vm_end (adjacency), hence the _end_internal lookup. */
	entry = __stp_tf_get_vma_map_entry_end_internal(tsk, vm_start);
	if (entry != NULL) {
		entry->vm_end = vm_end;
		res = 0;
	}
	stp_write_unlock_irqrestore(&__stp_tf_vma_lock, flags);
	return res;
}
/* Remove and free the mapping entry recorded for (tsk, vm_start).
 *
 * Returns 0 on success, -ESRCH if no such entry exists.
 *
 * Fix: dropped the unused locals `head` and `node`, which only
 * generated -Wunused-variable warnings. */
static int
stap_remove_vma_map_info(struct task_struct *tsk, unsigned long vm_start)
{
	struct __stp_tf_vma_entry *entry;
	int rc = -ESRCH;
	unsigned long flags;

	stp_write_lock_irqsave(&__stp_tf_vma_lock, flags);
	entry = __stp_tf_get_vma_map_entry_internal(tsk, vm_start);
	if (entry != NULL) {
		hlist_del(&entry->hlist);
		__stp_tf_vma_release_entry(entry);
		rc = 0;
	}
	stp_write_unlock_irqrestore(&__stp_tf_vma_lock, flags);
	return rc;
}
/* Look up the recorded mapping of tsk that contains addr.  Any output
 * pointer (vm_start, vm_end, path, user) may be NULL when the caller
 * does not want that field.  The returned *path points into the entry
 * and is only valid while the entry exists.
 * Returns 0 when a mapping was found, -ESRCH otherwise. */
static int
stap_find_vma_map_info(struct task_struct *tsk, unsigned long addr,
		       unsigned long *vm_start, unsigned long *vm_end,
		       const char **path, void **user)
{
	struct __stp_tf_vma_entry *match = NULL;
	struct __stp_tf_vma_entry *cur;
	struct hlist_node *pos;
	struct hlist_head *bucket;
	unsigned long flags;
	int rc = -ESRCH;

	if (__stp_tf_vma_map == NULL)
		return rc;

	stp_read_lock_irqsave(&__stp_tf_vma_lock, flags);
	bucket = &__stp_tf_vma_map[__stp_tf_vma_map_hash(tsk)];
	stap_hlist_for_each_entry(cur, pos, bucket, hlist) {
		if (cur->pid == tsk->pid
		    && cur->vm_start <= addr
		    && cur->vm_end > addr) {
			match = cur;
			break;
		}
	}
	if (match != NULL) {
		if (vm_start != NULL)
			*vm_start = match->vm_start;
		if (vm_end != NULL)
			*vm_end = match->vm_end;
		if (path != NULL)
			*path = match->path;
		if (user != NULL)
			*user = match->user;
		rc = 0;
	}
	stp_read_unlock_irqrestore(&__stp_tf_vma_lock, flags);
	return rc;
}
/* Look up the recorded mapping of tsk tagged with the given opaque
 * user pointer.  Any output pointer (vm_start, vm_end, path) may be
 * NULL when the caller does not want that field.
 * Returns 0 when a mapping was found, -ESRCH otherwise. */
static int
stap_find_vma_map_info_user(struct task_struct *tsk, void *user,
			    unsigned long *vm_start, unsigned long *vm_end,
			    const char **path)
{
	struct __stp_tf_vma_entry *match = NULL;
	struct __stp_tf_vma_entry *cur;
	struct hlist_node *pos;
	struct hlist_head *bucket;
	unsigned long flags;
	int rc = -ESRCH;

	if (__stp_tf_vma_map == NULL)
		return rc;

	stp_read_lock_irqsave(&__stp_tf_vma_lock, flags);
	bucket = &__stp_tf_vma_map[__stp_tf_vma_map_hash(tsk)];
	stap_hlist_for_each_entry(cur, pos, bucket, hlist) {
		if (cur->pid == tsk->pid && cur->user == user) {
			match = cur;
			break;
		}
	}
	if (match != NULL) {
		if (vm_start != NULL)
			*vm_start = match->vm_start;
		if (vm_end != NULL)
			*vm_end = match->vm_end;
		if (path != NULL)
			*path = match->path;
		rc = 0;
	}
	stp_read_unlock_irqrestore(&__stp_tf_vma_lock, flags);
	return rc;
}
/* Remove and free every mapping entry recorded for tsk (e.g. on task
 * exit).  Always returns 0. */
static int
stap_drop_vma_maps(struct task_struct *tsk)
{
	struct __stp_tf_vma_entry *ent;
	struct hlist_node *pos;
	struct hlist_node *tmp;
	struct hlist_head *bucket;
	unsigned long flags;

	stp_write_lock_irqsave(&__stp_tf_vma_lock, flags);
	bucket = &__stp_tf_vma_map[__stp_tf_vma_map_hash(tsk)];
	/* _safe variant: entries are deleted while iterating. */
	stap_hlist_for_each_entry_safe(ent, pos, tmp, bucket, hlist) {
		if (ent->pid == tsk->pid) {
			hlist_del(&ent->hlist);
			__stp_tf_vma_release_entry(ent);
		}
	}
	stp_write_unlock_irqrestore(&__stp_tf_vma_lock, flags);
	return 0;
}
/* Return the struct file backing mm's main executable mapping, or NULL.
 * On older kernels (where VM_EXECUTABLE exists) walk the vma list for
 * the executable mapping; on newer kernels read mm->exe_file directly.
 * NOTE(review): no reference is taken on the returned struct file, so
 * the caller presumably must hold whatever locking keeps mm and its
 * mappings alive while using it — confirm against callers. */
static struct file*
stap_find_exe_file(struct mm_struct* mm)
{
#ifdef VM_EXECUTABLE
struct vm_area_struct *vma;
for (vma = mm->mmap; vma; vma = vma->vm_next)
if ((vma->vm_flags & VM_EXECUTABLE) && vma->vm_file)
return vma->vm_file;
return NULL;
#else
return mm->exe_file;
#endif
}
#endif