47678 (608337), страница 4
Текст из файла (страница 4)
/* Per-type data */
union
{
struct
{
void __user *start;
size_t len;
} munmap;
struct
{
void __user *start;
size_t len;
unsigned long prot, flags;
unsigned long fd, off;
} mmap2;
struct
{
void __user *start[2];
size_t len[2];
unsigned flags;
} mremap;
struct
{
void __user *start;
size_t len;
} mlock, munlock;
struct
{
unsigned long flags;
} mlockall;
struct
{
void __user *addr;
} brk;
struct
{
int fd;
} fsync;
struct
{
void __user *addr;
int write;
} pagefault;
struct
{
char *callname;
long ret;
} callret;
};
};
#define NEVENTS (EVENTS_BUFLEN/sizeof (struct memmon_event))
/*
* Initializes event ringbuffer & creates /proc entry
*/
int init_events(void);
/*
* Destroys ringbuffer & removes /proc entry
*/
void fini_events(void);
/*
* Adds events to ringbuffer tail
*/
void put_event (const struct memmon_event *ev);
#endif // MEMMON_EVENTS_H
-
events.c
/*
* Events ringbuffer.
*/
#include
#include
#include
#include
#include
#include
#include
#include «common.h»
#include «events.h»
/*** Forward declarations ***/
static int events_open (struct inode *i, struct file *filp);
static unsigned events_poll (struct file *filp, struct poll_table_struct *pt);
static void *events_seqstart (struct seq_file *m, loff_t *pos);
static void events_seqstop (struct seq_file *m, void *p);
static void *events_seqnext (struct seq_file *m, void *p, loff_t *pos);
static int events_seqprint (struct seq_file *m, void *p);
/* Default ringbuffer size in bytes */
#define EVENTS_BUFLEN (32*1024)
/* Minimum accepted ringbuffer size in bytes (smaller requests are clamped up) */
#define MIN_EVENTS_BUFLEN (8*1024)
/*** Module parameters ***/
/* Actual ringbuffer size in bytes; settable at module load time,
 * exposed read-only (0444) afterwards.
 * NOTE(review): NEVENTS is computed from the compile-time EVENTS_BUFLEN,
 * not from this runtime value — confirm the two cannot diverge, or the
 * ring indexing in put_event()/events_seqnext() can run past the buffer. */
static int buflen = EVENTS_BUFLEN;
module_param (buflen, int, 0444);
/*** File operations ***/
static const struct file_operations events_fops =
{
owner = THIS_MODULE,
open = events_open,
read = seq_read,
release = seq_release,
poll = events_poll
};
static const struct seq_operations events_seqop =
{
start = events_seqstart,
stop = events_seqstop,
next = events_seqnext,
show = events_seqprint
};
/*** Internal data ***/
/* Ringbuffer storage; allocated in init_events(), freed in fini_events() */
static struct memmon_event *events;
/* Oldest entry left in the ringbuffer
 * (where the 1st read should begin) */
static int ev_start;
/* Current write position */
static int ev_end;
/* Whether a ringbuffer overflow has ever happened */
static int ev_ovf = 0;
/* Readers sleep here until put_event() adds data */
DECLARE_WAIT_QUEUE_HEAD (ev_waitq);
/* Protects events[], ev_start, ev_end and ev_ovf.
 * Fix: 'spinlock_t x = SPIN_LOCK_UNLOCKED' is long deprecated and was
 * removed from the kernel; DEFINE_SPINLOCK() is the replacement and
 * keeps the same (non-static) linkage. */
DEFINE_SPINLOCK (ev_lock);
/* Damn seq_file doesn't update file pos when we return NULL iterator,
 * so we first return this one and then NULL on next seqnext() call */
static void *dummy_ptr = &dummy_ptr;
/*** Entry points ***/
/*
* open() handler
*/
/*
 * open() handler: attaches a seq_file iterator to the descriptor and
 * positions it at the oldest event still present in the ringbuffer.
 */
static int events_open (struct inode *i, struct file *filp)
{
	struct seq_file *m;
	int err;

	/* The ringbuffer is a stream: this file cannot be seeked */
	nonseekable_open (i, filp);

	err = seq_open (filp, &events_seqop);
	if (err)
		return err;

	/* Remember the owning file (needed for O_NONBLOCK checks in
	 * events_seqstart()) and start reading at the oldest entry */
	m = filp->private_data;
	m->private = filp;
	m->index = ev_start;
	return 0;
}
/*
* poll/epoll() handler
*/
/*
 * poll/epoll() handler.
 * Reports POLLIN | POLLRDNORM when this reader's position lags behind
 * the current write position.
 */
static unsigned events_poll (struct file *filp, struct poll_table_struct *pt)
{
	struct seq_file *m = filp->private_data;
	unsigned mask = 0;

	/*
	 * Fix: poll_wait() may allocate poll-table entries and therefore
	 * must not run under a spinlock.  Registering on the wait queue
	 * before checking the condition is safe: a wakeup that races with
	 * the check is still delivered to the poller.
	 */
	poll_wait (filp, &ev_waitq, pt);

	spin_lock (&ev_lock);
	/*
	 * The only poll event we can trigger is normal read event
	 */
	if (m->index != ev_end)
		mask = POLLIN | POLLRDNORM;
	spin_unlock (&ev_lock);
	return mask;
}
/*
* Called by seq_file within read() request
*/
/*
 * seq_file start() callback, invoked at the beginning of every read().
 * Blocks (unless the file was opened O_NONBLOCK) until the ringbuffer
 * holds data beyond *pos, then returns an iterator for that event.
 *
 * Locking protocol: returns with ev_lock HELD — both on success and on
 * the ERR_PTR paths — because seq_read() always calls stop() afterwards,
 * and events_seqstop() is where the lock is released.
 */
static void *events_seqstart (struct seq_file *m, loff_t *pos)
{
	struct file *filp = m->private;
	spin_lock (&ev_lock);
	/*
	 * Wait for data to become available
	 */
	while (*pos == (loff_t) ev_end)
	{
		void *err = NULL;
		/* Can't schedule while atomic: drop the lock before sleeping */
		spin_unlock (&ev_lock);
		if (filp->f_flags & O_NONBLOCK)
			err = ERR_PTR(-EAGAIN);
		else if (wait_event_interruptible (ev_waitq, *pos!= (loff_t) ev_end))
			err = ERR_PTR(-ERESTARTSYS);
		/*
		 * There IS a slim chance that we lose the waiting condition
		 * between awakening and acquiring the spinlock - hence the
		 * while() loop
		 */
		spin_lock (&ev_lock);
		if (err)
			return err;
	}
	return events + *pos;
}
/*
* Finish read() request
*/
/*
 * seq_file stop() callback: finishes a read() request by releasing the
 * ringbuffer lock acquired in events_seqstart().
 */
static void events_seqstop (struct seq_file *m, void *p)
{
	spin_unlock (&ev_lock);
}
/*
* Iterate to next event
*/
/*
 * seq_file next() callback: advances the iterator to the next event.
 * Called with ev_lock held (taken in events_seqstart()).
 *
 * Fixes: restored '-'/'--' where the extraction produced en-dashes, and
 * changed the wrap test from '> NEVENTS' to '>= NEVENTS' — valid indices
 * are 0..NEVENTS-1, so the old test let *pos reach the out-of-bounds
 * slot events[NEVENTS] once before wrapping.
 */
static void *events_seqnext (struct seq_file *m, void *p, loff_t *pos)
{
	struct memmon_event *ev;

	/* Dummy iterator - time to exit */
	if (p == dummy_ptr)
		return NULL;

	++*pos;
	ev = events + *pos;

	/* Overflow: wrap to the start of the ringbuffer */
	if (ev - events >= NEVENTS)
		*pos = 0;

	/*
	 * We reached the end.  Decrement file pos ('coz it will be
	 * incremented back) and return the dummy iterator (otherwise the
	 * file pos won't be updated at all)
	 */
	if (*pos == (loff_t) ev_end)
	{
		--*pos;
		return dummy_ptr;
	}
	return events + *pos;
}
/*
* Actually prints current iterator to read buffer
*/
static int events_seqprint (struct seq_file *m, void *p)
{
struct memmon_event *ev = p;
if (ev == dummy_ptr)
return 0;
seq_printf (m, «%d:», ev->pid);
switch (ev->type)
{
case MMAP2:
seq_printf (m, «mmap (%p,%u,», ev->mmap2.start, ev->mmap2.len);
if (ev->mmap2.prot & PROT_READ)
seq_puts (m, «r»);
else
seq_puts (m, «–»);
if (ev->mmap2.prot & PROT_WRITE)
seq_puts (m, «w»);
else
seq_puts (m, «–»);
if (ev->mmap2.prot & PROT_EXEC)
seq_puts (m, «x,»);
else
seq_puts (m, «-,»);
if (ev->mmap2.flags & MAP_SHARED)
seq_puts (m, «SHARED»);
else if (ev->mmap2.flags & MAP_PRIVATE)
seq_puts (m, «PRIVATE»);
if (ev->mmap2.flags & MAP_LOCKED)
seq_puts (m, «| LOCKED»);
if (ev->mmap2.flags & MAP_ANON)
seq_puts (m, «| ANON»);
if (ev->mmap2.flags & MAP_POPULATE)
seq_puts (m, «| READAHEAD»);
if (ev->mmap2.flags & MAP_ANON)
seq_puts (m,»)\n»);
else
seq_printf (m,», fd% ld, @%p)\n», (long) ev->mmap2.fd,
(void *) ev->mmap2.off);
break;
case MUNMAP:
seq_printf (m, «munmap (%p,%d)\n», ev->munmap.start, ev->munmap.len);
break;
case MREMAP:
seq_printf (m, «mremap (%p,%d ->%p,%d)\n», ev->mremap.start[0], ev->mremap.len[0],
ev->mremap.start[1], ev->mremap.len[1]);
break;
case MLOCK:
seq_printf (m, «mlock (%p,%d)\n», ev->mlock.start, ev->mlock.len);
break;
case MUNLOCK:
seq_printf (m, «munlock (%p,%d)\n», ev->munlock.start, ev->munlock.len);
break;
case MLOCKALL:
seq_puts (m, «mlockall(»);
if (ev->mlockall.flags & MCL_CURRENT)
{
seq_puts (m, «CURRENT»);
if (ev->mlockall.flags & MCL_FUTURE)
seq_puts (m, «| FUTURE»);
}
else if (ev->mlockall.flags & MCL_FUTURE)
seq_puts (m, «FUTURE»);
seq_puts (m,»)\n»);
break;
case MUNLOCKALL:
seq_puts (m, «munlockall()\n»);
break;
case BRK:
seq_printf (m, «brk(%p)\n», ev->brk.addr);
break;
case FSYNC:
seq_printf (m, «fsync(%d)\n», ev->fsync.fd);
break;
case ANON_PF:
seq_printf (m, «anon page @%p (%s)\n», ev->pagefault.addr,
ev->pagefault.write? «W»: «R»);
break;
case SWAP_PF:
seq_printf (m, «swapfile page @%p (%s)\n», ev->pagefault.addr,
ev->pagefault.write? «W»: «R»);
break;
case FILE_PF:
seq_printf (m, «shared file page @%p (%s)\n», ev->pagefault.addr,
ev->pagefault.write? «W»: «R»);
break;
case SYSCALLRET:
seq_printf (m, «%s ->%ld (%p)\n», ev->callret.callname, ev->callret.ret,
(void *) ev->callret.ret);
break;
default:
printk («memmon: Unexpected event% d\n», ev->type);
return 1;
}
return 0;
}
/*** Exported entries ***/
/*
* Initializes event ringbuffer & creates /proc entry
*/
int init_events(void)
{
struct proc_dir_entry *entry;
buflen = max (buflen, MIN_EVENTS_BUFLEN);
events = kzalloc (buflen, GFP_KERNEL);
if (! events)
{
printk («memmon: Event ringbuffer too big!\n»);
return 0;
}
ev_start = ev_end = 0;
entry = create_proc_entry (EVENTS_ENTRY, 0444, procdir);
if (entry)
entry->proc_fops = &events_fops;
else
{
kfree(events);
return 0;
}
return 1;
}
/*
* Destroys ringbuffer & removes /proc entry
*/
/*
 * Destroys the ringbuffer & removes the /proc entry.
 * Must only be called after a successful init_events(); the /proc entry
 * is removed first so no reader can touch the buffer being freed.
 */
void fini_events(void)
{
	remove_proc_entry (EVENTS_ENTRY, procdir);
	kfree(events);
}
/*
* Adds events to ringbuffer tail
*/
/*
 * Adds an event at the ringbuffer tail and wakes up sleeping readers.
 *
 * Fix: the wrap tests used '> NEVENTS'; since valid indices are
 * 0..NEVENTS-1, that allowed one store to the out-of-bounds slot
 * events[NEVENTS] before wrapping.  Changed to '>='.
 */
void put_event (const struct memmon_event *ev)
{
	spin_lock (&ev_lock);
	events [ev_end] = *ev;
	/* Overflow: wrap the write position */
	if (++ev_end >= NEVENTS)
	{
		ev_start = ev_end = 0;
		ev_ovf = 1;
	}
	/*
	 * If overflow happened at least once, ev_start must be next to ev_end.
	 * Otherwise, it remains zero.
	 */
	if (ev_ovf && ++ev_start >= NEVENTS)
		ev_start = 0;
	spin_unlock (&ev_lock);
	wake_up_interruptible_sync (&ev_waitq);
}
-
watch-pids.h
/*
* Selection of PIDs to watch for.
*/
#ifndef MEMMON_WATCH_PIDS_H
#define MEMMON_WATCH_PIDS_H
/*
* Checks whether PID @pid is present in PID set
* Returns 1 if present
*/
int pid_present (pid_t pid);
/*
* Initializes PID set & creates /proc entry
*/
int init_watch_pids(void);
/*
* Destroys PID set & removes /proc entry
*/
void fini_watch_pids(void);
#endif // MEMMON_WATCH_PIDS_H
-
watch-pids.c
/*
* Selection of PIDs to watch for.
*/
#include
#include
#include
#include
#include
#include
#include
#include «common.h»
#include «watch-pids.h»
/*** Forward declarations ***/
static int watch_pids_open (struct inode *i, struct file *filp);
static int watch_pids_release (struct inode *i, struct file *filp);
static ssize_t watch_pids_read (struct file *filp, char __user *buf, size_t count, loff_t *off);
static ssize_t watch_pids_write (struct file *filp, const char __user *buf,
size_t count, loff_t *offp);
/*** Internal data ***/
/* Filename in procfs directory */
#define WATCHPID_ENTRY «watch-pids»
#define PID_COUNT PID_MAX_DEFAULT + 1
/* PIDs are stored in one single bitmap for 8192 entries
* This is VERY RARELY unacceptable */
static DECLARE_BITMAP (watched_pids, PID_COUNT);
/*** File operations ***/
static const struct file_operations watch_pids_fops =
{
owner = THIS_MODULE,
open = watch_pids_open,
read = watch_pids_read,
write = watch_pids_write,
release = watch_pids_release
};
/*** Entry points ***/
/*
* open() handler
*/
static int watch_pids_open (struct inode *i, struct file *filp)
{
try_module_get (THIS_MODULE);
/*
* If file opened for read, print PID set to internal buffer
*/
if (filp->f_mode & FMODE_READ)
{
const int FDATA_SIZ = 32*1024;
char *fdata;
int len;
/*
* Disallow mixed RW-access
*/
if (filp->f_mode & FMODE_WRITE)
return – EINVAL;