#include <linux/utrace.h>
#include <linux/list.h>
#include <linux/binfmts.h>
#include <linux/mount.h>

static LIST_HEAD(__stp_task_finder_list);

struct stap_task_finder_target;

typedef int (*stap_task_finder_callback)(struct task_struct *tsk,
					 int register_p,
					 struct stap_task_finder_target *tgt);

struct stap_task_finder_target {
/* private: */
	struct list_head list;		/* __stp_task_finder_list linkage */
	struct list_head callback_list_head;
	struct list_head callback_list;
	struct utrace_engine_ops ops;
	int engine_attached;
	size_t pathlen;

/* public: */
	const char *pathname;
	pid_t pid;
	stap_task_finder_callback callback;
};

static int
stap_register_task_finder_target(struct stap_task_finder_target *new_tgt)
{
	// Since this __stp_task_finder_list is (currently) only
	// written to in one big setup operation before the task
	// finder process is started, we don't need to lock it.
	struct list_head *node;
	struct stap_task_finder_target *tgt = NULL;
	int found_node = 0;

	if (new_tgt->pathname != NULL)
		new_tgt->pathlen = strlen(new_tgt->pathname);
	else
		new_tgt->pathlen = 0;

	// Search the list for an existing entry for pathname/pid.
	list_for_each(node, &__stp_task_finder_list) {
		tgt = list_entry(node, struct stap_task_finder_target, list);
		if (tgt != NULL
		    /* pathname-based target */
		    && ((new_tgt->pathlen > 0
			 && tgt->pathlen == new_tgt->pathlen
			 && strcmp(tgt->pathname, new_tgt->pathname) == 0)
			/* pid-based target */
			|| (new_tgt->pid != 0 && tgt->pid == new_tgt->pid))) {
			found_node = 1;
			break;
		}
	}

	// If we didn't find a matching existing entry, add the new
	// target to the task list.
	if (! found_node) {
		INIT_LIST_HEAD(&new_tgt->callback_list_head);
		list_add(&new_tgt->list, &__stp_task_finder_list);
		tgt = new_tgt;
	}

	// Add this target to the callback list for this task.
	new_tgt->engine_attached = 0;
	list_add_tail(&new_tgt->callback_list, &tgt->callback_list_head);
	return 0;
}

static void
stap_utrace_detach_ops(struct utrace_engine_ops *ops)
{
	struct task_struct *tsk;
	struct utrace_attached_engine *engine;
	long error = 0;
	pid_t pid = 0;

	rcu_read_lock();
	for_each_process(tsk) {
		struct mm_struct *mm;
		mm = get_task_mm(tsk);
		if (mm) {
			mmput(mm);
			engine = utrace_attach(tsk, UTRACE_ATTACH_MATCH_OPS,
					       ops, 0);
			if (IS_ERR(engine)) {
				error = -PTR_ERR(engine);
				if (error != ENOENT) {
					pid = tsk->pid;
					break;
				}
				error = 0;
			}
			else if (engine != NULL) {
				utrace_detach(tsk, engine);
			}
		}
	}
	rcu_read_unlock();

	if (error != 0) {
		_stp_error("utrace_attach returned error %ld on pid %d",
			   error, pid);
	}
}

static void
__stp_task_finder_cleanup(void)
{
	struct list_head *tgt_node, *tgt_next;
	struct list_head *cb_node, *cb_next;
	struct stap_task_finder_target *tgt;

	// Walk the main list, cleaning up as we go.
	list_for_each_safe(tgt_node, tgt_next, &__stp_task_finder_list) {
		tgt = list_entry(tgt_node, struct stap_task_finder_target,
				 list);
		if (tgt == NULL)
			continue;
		list_for_each_safe(cb_node, cb_next,
				   &tgt->callback_list_head) {
			struct stap_task_finder_target *cb_tgt;
			cb_tgt = list_entry(cb_node,
					    struct stap_task_finder_target,
					    callback_list);
			if (cb_tgt == NULL)
				continue;

			if (cb_tgt->engine_attached) {
				stap_utrace_detach_ops(&cb_tgt->ops);
				cb_tgt->engine_attached = 0;
			}
			list_del(&cb_tgt->callback_list);
		}
		list_del(&tgt->list);
	}
}

static char *
__stp_get_mm_path(struct mm_struct *mm, char *buf, int buflen)
{
	struct vm_area_struct *vma;
	char *rc = NULL;

	down_read(&mm->mmap_sem);
	vma = mm->mmap;
	while (vma) {
		if ((vma->vm_flags & VM_EXECUTABLE) && vma->vm_file)
			break;
		vma = vma->vm_next;
	}
	if (vma) {
		struct vfsmount *mnt = mntget(vma->vm_file->f_path.mnt);
		struct dentry *dentry = dget(vma->vm_file->f_path.dentry);
		rc = d_path(dentry, mnt, buf, buflen);
		dput(dentry);
		mntput(mnt);
	}
	else {
		*buf = '\0';
		/* Use a negative errno so IS_ERR()/PTR_ERR() in the
		 * caller actually recognize the failure. */
		rc = ERR_PTR(-ENOENT);
	}
	up_read(&mm->mmap_sem);
	return rc;
}

#define __STP_UTRACE_TASK_FINDER_EVENTS (UTRACE_EVENT(CLONE)	\
					 | UTRACE_EVENT(EXEC))

#define __STP_UTRACE_ATTACHED_TASK_EVENTS (UTRACE_EVENT(DEATH))

static u32
__stp_utrace_task_finder_clone(struct utrace_attached_engine *engine,
			       struct task_struct *parent,
			       unsigned long clone_flags,
			       struct task_struct *child)
{
	struct utrace_attached_engine *child_engine;
	struct mm_struct *mm;

	// On clone, attach to the child.  Ignore threads with no mm
	// (which are kernel threads).
	mm = get_task_mm(child);
	if (mm) {
		mmput(mm);
		child_engine = utrace_attach(child, UTRACE_ATTACH_CREATE,
					     engine->ops, 0);
		if (IS_ERR(child_engine))
			_stp_error("attach to clone child %d failed: %ld",
				   (int)child->pid, PTR_ERR(child_engine));
		else {
			utrace_set_flags(child, child_engine,
					 __STP_UTRACE_TASK_FINDER_EVENTS);
		}
	}
	return UTRACE_ACTION_RESUME;
}

static u32
__stp_utrace_task_finder_death(struct utrace_attached_engine *engine,
			       struct task_struct *tsk)
{
	struct stap_task_finder_target *tgt = engine->data;

	// The first implementation of this added a
	// UTRACE_EVENT(DEATH) handler to
	// __stp_utrace_task_finder_ops.  However, dead threads don't
	// have a mm_struct, so we can't find the exe's path.  So, we
	// don't know which callback(s) to call.
	//
	// So, now when an "interesting" thread is found, we add a
	// separate UTRACE_EVENT(DEATH) handler for every probe.
	if (tgt != NULL && tgt->callback != NULL) {
		int rc;

		// Call the callback.
		rc = tgt->callback(tsk, 0, tgt);
		if (rc != 0) {
			_stp_error("death callback for %d failed: %d",
				   (int)tsk->pid, rc);
		}
	}
	return UTRACE_ACTION_RESUME;
}

static u32
__stp_utrace_task_finder_exec(struct utrace_attached_engine *engine,
			      struct task_struct *tsk,
			      const struct linux_binprm *bprm,
			      struct pt_regs *regs)
{
	size_t filelen;
	struct list_head *tgt_node;
	struct stap_task_finder_target *tgt;
	int found_node = 0;

	// On exec, check bprm.
	if (bprm->filename == NULL)
		return UTRACE_ACTION_RESUME;

	filelen = strlen(bprm->filename);
	list_for_each(tgt_node, &__stp_task_finder_list) {
		tgt = list_entry(tgt_node, struct stap_task_finder_target,
				 list);
		// Note that we don't bother with looking for pids
		// here, since they are handled at startup.
		if (tgt != NULL && tgt->pathlen > 0
		    && tgt->pathlen == filelen
		    && strcmp(tgt->pathname, bprm->filename) == 0) {
			found_node = 1;
			break;
		}
	}
	if (found_node) {
		struct list_head *cb_node;
		list_for_each(cb_node, &tgt->callback_list_head) {
			struct stap_task_finder_target *cb_tgt;
			cb_tgt = list_entry(cb_node,
					    struct stap_task_finder_target,
					    callback_list);
			if (cb_tgt == NULL)
				continue;

			if (cb_tgt->callback != NULL) {
				int rc = cb_tgt->callback(tsk, 1, cb_tgt);
				if (rc != 0) {
					_stp_error("exec callback for %d failed: %d",
						   (int)tsk->pid, rc);
					break;
				}
			}

			// Set up thread death notification.
			memset(&cb_tgt->ops, 0, sizeof(cb_tgt->ops));
			cb_tgt->ops.report_death =
				&__stp_utrace_task_finder_death;

			engine = utrace_attach(tsk, UTRACE_ATTACH_CREATE,
					       &cb_tgt->ops, cb_tgt);
			if (IS_ERR(engine)) {
				_stp_error("attach to exec'ed %d failed: %ld",
					   (int)tsk->pid, PTR_ERR(engine));
			}
			else {
				utrace_set_flags(tsk, engine,
						 __STP_UTRACE_ATTACHED_TASK_EVENTS);
				cb_tgt->engine_attached = 1;
			}
		}
	}
	return UTRACE_ACTION_RESUME;
}

struct utrace_engine_ops __stp_utrace_task_finder_ops = {
	.report_clone = __stp_utrace_task_finder_clone,
	.report_exec = __stp_utrace_task_finder_exec,
};

int
stap_start_task_finder(void)
{
	int rc = 0;
	struct task_struct *tsk;
	char *mmpath_buf;

	mmpath_buf = _stp_kmalloc(PATH_MAX);
	if (mmpath_buf == NULL) {
		_stp_error("Unable to allocate space for path");
		return ENOMEM;
	}

	rcu_read_lock();
	for_each_process(tsk) {
		struct utrace_attached_engine *engine;
		struct mm_struct *mm;
		char *mmpath;
		size_t mmpathlen;
		struct list_head *tgt_node;

		mm = get_task_mm(tsk);
		if (! mm) {
			/* If the thread doesn't have a mm_struct, it
			 * is a kernel thread which we need to
			 * skip. */
			continue;
		}

		/* Attach to the thread. */
		engine = utrace_attach(tsk, UTRACE_ATTACH_CREATE,
				       &__stp_utrace_task_finder_ops, 0);
		if (IS_ERR(engine)) {
			int error = -PTR_ERR(engine);
			if (error != ENOENT) {
				mmput(mm);
				_stp_error("utrace_attach returned error %d on pid %d",
					   error, (int)tsk->pid);
				rc = error;
				break;
			}
			/* ENOENT means the thread is already dying;
			 * don't call utrace_set_flags() on the error
			 * pointer, just skip the thread. */
			mmput(mm);
			continue;
		}
		else if (unlikely(engine == NULL)) {
			mmput(mm);
			_stp_error("utrace_attach returned NULL on pid %d",
				   (int)tsk->pid);
			rc = EFAULT;
			break;
		}
		utrace_set_flags(tsk, engine,
				 __STP_UTRACE_TASK_FINDER_EVENTS);

		/* Check the thread's exe's path/pid against our list. */
		mmpath = __stp_get_mm_path(mm, mmpath_buf, PATH_MAX);
		mmput(mm);	/* We're done with mm. */
		if (IS_ERR(mmpath)) {
			rc = -PTR_ERR(mmpath);
			_stp_error("Unable to get path (error %d) for pid %d",
				   rc, (int)tsk->pid);
			break;
		}

		mmpathlen = strlen(mmpath);
		list_for_each(tgt_node, &__stp_task_finder_list) {
			struct stap_task_finder_target *tgt;
			struct list_head *cb_node;

			tgt = list_entry(tgt_node,
					 struct stap_task_finder_target,
					 list);
			if (tgt == NULL)
				continue;
			/* pathname-based target */
			else if (tgt->pathlen > 0
				 && (tgt->pathlen != mmpathlen
				     || strcmp(tgt->pathname, mmpath) != 0))
				continue;
			/* pid-based target */
			else if (tgt->pid != 0 && tgt->pid != tsk->pid)
				continue;

			list_for_each(cb_node, &tgt->callback_list_head) {
				struct stap_task_finder_target *cb_tgt;
				cb_tgt = list_entry(cb_node,
						    struct stap_task_finder_target,
						    callback_list);
				if (cb_tgt == NULL
				    || cb_tgt->callback == NULL)
					continue;

				// Call the callback.
				rc = cb_tgt->callback(tsk, 1, cb_tgt);
				if (rc != 0) {
					_stp_error("attach callback for %d failed: %d",
						   (int)tsk->pid, rc);
					break;
				}
			}
		}
	}
	rcu_read_unlock();

	_stp_kfree(mmpath_buf);
	return rc;
}

static void
stap_stop_task_finder(void)
{
	stap_utrace_detach_ops(&__stp_utrace_task_finder_ops);
	__stp_task_finder_cleanup();
}
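/*
 * Minimal usage sketch (an editor's illustration, not part of the
 * original source): a module that wants callbacks for one executable
 * registers a target, then starts the finder.  The callback name, the
 * "/bin/ls" path, and the start/stop wrappers are all hypothetical.
 */
static int
__example_task_cb(struct task_struct *tsk, int register_p,
		  struct stap_task_finder_target *tgt)
{
	/* register_p is 1 when the task is found or exec'd,
	 * 0 when it dies. */
	return 0;
}

static struct stap_task_finder_target __example_tgt = {
	.pathname = "/bin/ls",		/* hypothetical path to watch */
	.pid = 0,			/* 0: match by pathname, not pid */
	.callback = &__example_task_cb,
};

static int __example_start(void)
{
	int rc = stap_register_task_finder_target(&__example_tgt);
	if (rc == 0)
		rc = stap_start_task_finder();
	return rc;
}

static void __example_stop(void)
{
	stap_stop_task_finder();
}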
/*
 * I2O user space accessible structures/APIs
 *
 * (c) Copyright 1999, 2000 Red Hat Software
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 *************************************************************************
 *
 * This header file defines the I2O APIs that are available to both
 * the kernel and user-level applications.  Kernel-specific structures
 * are defined in i2o_osm.h.  OSMs should include _only_ i2o_osm.h,
 * which automatically includes this file.
 *
 */

#ifndef _I2O_DEV_H
#define _I2O_DEV_H

/* How many controllers are we allowing */
#define MAX_I2O_CONTROLLERS	32

#include <linux/ioctl.h>
#include <linux/types.h>

/*
 * I2O Control IOCTLs and structures
 */
#define I2O_MAGIC_NUMBER	'i'
#define I2OGETIOPS		_IOR(I2O_MAGIC_NUMBER,0,__u8[MAX_I2O_CONTROLLERS])
#define I2OHRTGET		_IOWR(I2O_MAGIC_NUMBER,1,struct i2o_cmd_hrtlct)
#define I2OLCTGET		_IOWR(I2O_MAGIC_NUMBER,2,struct i2o_cmd_hrtlct)
#define I2OPARMSET		_IOWR(I2O_MAGIC_NUMBER,3,struct i2o_cmd_psetget)
#define I2OPARMGET		_IOWR(I2O_MAGIC_NUMBER,4,struct i2o_cmd_psetget)
#define I2OSWDL 		_IOWR(I2O_MAGIC_NUMBER,5,struct i2o_sw_xfer)
#define I2OSWUL 		_IOWR(I2O_MAGIC_NUMBER,6,struct i2o_sw_xfer)
#define I2OSWDEL		_IOWR(I2O_MAGIC_NUMBER,7,struct i2o_sw_xfer)
#define I2OVALIDATE		_IOR(I2O_MAGIC_NUMBER,8,__u32)
#define I2OHTML 		_IOWR(I2O_MAGIC_NUMBER,9,struct i2o_html)
#define I2OEVTREG		_IOW(I2O_MAGIC_NUMBER,10,struct i2o_evt_id)
#define I2OEVTGET		_IOR(I2O_MAGIC_NUMBER,11,struct i2o_evt_info)
#define I2OPASSTHRU		_IOR(I2O_MAGIC_NUMBER,12,struct i2o_cmd_passthru)
#define I2OPASSTHRU32		_IOR(I2O_MAGIC_NUMBER,12,struct i2o_cmd_passthru32)

struct i2o_cmd_passthru32 {
	unsigned int iop;	/* IOP unit number */
	__u32 msg;		/* message */
};

struct i2o_cmd_passthru {
	unsigned int iop;	/* IOP unit number */
	void __user *msg;	/* message */
};

struct i2o_cmd_hrtlct {
	unsigned int iop;	/* IOP unit number */
	void __user *resbuf;	/* Buffer for result */
	unsigned int __user *reslen;	/* Buffer length in bytes */
};
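
/*
 * Usage sketch (editor's illustration, not part of the original
 * header): fetching the logical configuration table from user space
 * with I2OLCTGET.  The control node "/dev/i2o/ctl" is an assumption
 * about how the i2o_config driver is exposed on a given system.
 */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/i2o-dev.h>

int main(void)
{
	unsigned char buf[8192];	/* result buffer for the LCT */
	unsigned int len = sizeof(buf);	/* in: buffer size, out: bytes used */
	struct i2o_cmd_hrtlct cmd = {
		.iop = 0,		/* first controller */
		.resbuf = buf,
		.reslen = &len,
	};
	int fd = open("/dev/i2o/ctl", O_RDWR);	/* assumed device node */

	if (fd < 0)
		return 1;
	if (ioctl(fd, I2OLCTGET, &cmd) < 0) {
		close(fd);
		return 1;
	}
	printf("LCT is %u bytes\n", len);
	close(fd);
	return 0;
}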

struct i2o_cmd_psetget {
	unsigned int iop;	/* IOP unit number */
	unsigned int tid;	/* Target device TID */
	void __user *opbuf;	/* Operation List buffer */
	unsigned int oplen;	/* Operation List buffer length in bytes */
	void __user *resbuf;	/* Result List buffer */
	unsigned int __user *reslen;	/* Result List buffer length in bytes */
};

struct i2o_sw_xfer {
	unsigned int iop;	/* IOP unit number */
	unsigned char flags;	/* Flags field */
	unsigned char sw_type;	/* Software type */
	unsigned int sw_id;	/* Software ID */
	void __user *buf;	/* Pointer to software buffer */
	unsigned int __user *swlen;	/* Length of software data */
	unsigned int __user *maxfrag;	/* Maximum fragment count */
	unsigned int __user *curfrag;	/* Current fragment count */
};

struct i2o_html {
	unsigned int iop;	/* IOP unit number */
	unsigned int tid;	/* Target device ID */
	unsigned int page;	/* HTML page */
	void __user *resbuf;	/* Buffer for reply HTML page */
	unsigned int __user *reslen;	/* Length in bytes of reply buffer */
	void __user *qbuf;	/* Pointer to HTTP query string */
	unsigned int qlen;	/* Length in bytes of query string buffer */
};

#define I2O_EVT_Q_LEN 32

struct i2o_evt_id {
	unsigned int iop;
	unsigned int tid;
	unsigned int evt_mask;
};

/* Event data size = frame size - message header + evt indicator */
#define I2O_EVT_DATA_SIZE 88

struct i2o_evt_info {
	struct i2o_evt_id id;
	unsigned char evt_data[I2O_EVT_DATA_SIZE];
	unsigned int data_size;
};

struct i2o_evt_get {
	struct i2o_evt_info info;
	int pending;
	int lost;
};
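
/*
 * Sketch (editor's illustration, reusing the includes from the
 * I2OLCTGET example above): arming event delivery for one device and
 * reading back a single queued event.  The TID value is hypothetical,
 * and "fd" is an already-open descriptor for the I2O control node.
 */
static int watch_events(int fd)
{
	struct i2o_evt_id id = {
		.iop = 0,		/* controller 0 */
		.tid = 0x008,		/* hypothetical device TID */
		.evt_mask = 0xffffffff,	/* request all event indications */
	};
	struct i2o_evt_info info;

	if (ioctl(fd, I2OEVTREG, &id) < 0)	/* register the event mask */
		return -1;
	if (ioctl(fd, I2OEVTGET, &info) < 0)	/* pop one queued event */
		return -1;
	return (int)info.data_size;		/* bytes of event data */
}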

typedef struct i2o_sg_io_hdr {
	unsigned int flags;	/* see I2O_DPT_SG_IO_FLAGS */
} i2o_sg_io_hdr_t;

/**************************************************************************
 * HRT related constants and structures
 **************************************************************************/
#define I2O_BUS_LOCAL	0
#define I2O_BUS_ISA	1
#define I2O_BUS_EISA	2
#define I2O_BUS_MCA	3
#define I2O_BUS_PCI	4
#define I2O_BUS_PCMCIA	5
#define I2O_BUS_NUBUS	6
#define I2O_BUS_CARDBUS 7
#define I2O_BUS_UNKNOWN 0x80

typedef struct _i2o_pci_bus {
	__u8 PciFunctionNumber;
	__u8 PciDeviceNumber;
	__u8 PciBusNumber;
	__u8 reserved;
	__u16 PciVendorID;
	__u16 PciDeviceID;
} i2o_pci_bus;

typedef struct _i2o_local_bus {
	__u16 LbBaseIOPort;
	__u16 reserved;
	__u32 LbBaseMemoryAddress;
} i2o_local_bus;

typedef struct _i2o_isa_bus {
	__u16 IsaBaseIOPort;
	__u8 CSN;
	__u8 reserved;
	__u32 IsaBaseMemoryAddress;
} i2o_isa_bus;

typedef struct _i2o_eisa_bus_info {
	__u16 EisaBaseIOPort;
	__u8 reserved;
	__u8 EisaSlotNumber;
	__u32 EisaBaseMemoryAddress;
} i2o_eisa_bus;

typedef struct _i2o_mca_bus {
	__u16 McaBaseIOPort;
	__u8 reserved;
	__u8 McaSlotNumber;
	__u32 McaBaseMemoryAddress;
} i2o_mca_bus;

typedef struct _i2o_other_bus {
	__u16 BaseIOPort;
	__u16 reserved;
	__u32 BaseMemoryAddress;
} i2o_other_bus;

typedef struct _i2o_hrt_entry {
	__u32 adapter_id;
	__u32 parent_tid:12;
	__u32 state:4;
	__u32 bus_num:8;
	__u32 bus_type:8;
	union {
		i2o_pci_bus pci_bus;
		i2o_local_bus local_bus;
		i2o_isa_bus isa_bus;
		i2o_eisa_bus eisa_bus;
		i2o_mca_bus mca_bus;
		i2o_other_bus other_bus;
	} bus;
} i2o_hrt_entry;

typedef struct _i2o_hrt {
	__u16 num_entries;
	__u8 entry_len;
	__u8 hrt_version;
	__u32 change_ind;
	i2o_hrt_entry hrt_entry[1];
} i2o_hrt;

typedef struct _i2o_lct_entry {
	__u32 entry_size:16;
	__u32 tid:12;
	__u32 reserved:4;
	__u32 change_ind;
	__u32 device_flags;
	__u32 class_id:12;
	__u32 version:4;
	__u32 vendor_id:16;
	__u32 sub_class;
	__u32 user_tid:12;
	__u32 parent_tid:12;
	__u32 bios_info:8;
	__u8 identity_tag[8];
	__u32 event_capabilities;
} i2o_lct_entry;

typedef struct _i2o_lct {
	__u32 table_size:16;
	__u32 boot_tid:12;
	__u32 lct_ver:4;
	__u32 iop_flags;
	__u32 change_ind;
	i2o_lct_entry lct_entry[1];
} i2o_lct;
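
/*
 * Sketch (editor's illustration): walking the variable-sized entries
 * of an LCT returned by I2OLCTGET.  This assumes, per the I2O spec,
 * that table_size and entry_size are counted in 32-bit words and that
 * table_size covers the three header words; verify against real
 * hardware before relying on it.
 */
static void walk_lct(const i2o_lct *lct)
{
	const __u32 *p = (const __u32 *)lct->lct_entry;
	const __u32 *end = (const __u32 *)lct + lct->table_size;

	while (p < end) {
		const i2o_lct_entry *e = (const i2o_lct_entry *)p;
		printf("tid 0x%03x class 0x%03x\n", e->tid, e->class_id);
		if (e->entry_size == 0)		/* guard against a stuck loop */
			break;
		p += e->entry_size;		/* entry_size is in words */
	}
}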

typedef struct _i2o_status_block {
	__u16 org_id;
	__u16 reserved;
	__u16 iop_id:12;
	__u16 reserved1:4;
	__u16 host_unit_id;
	__u16 segment_number:12;
	__u16 i2o_version:4;
	__u8 iop_state;
	__u8 msg_type;