// SPDX-License-Identifier: GPL-2.0-only
/*
 * This file implements the perfmon-2 subsystem which is used
 * to program the IA-64 Performance Monitoring Unit (PMU).
 *
 * The initial version of perfmon.c was written by
 * Ganesh Venkitachalam, IBM Corp.
 *
 * Then it was modified for perfmon-1.x by Stephane Eranian and
 * David Mosberger, Hewlett Packard Co.
 *
 * Version Perfmon-2.x is a rewrite of perfmon-1.x
 * by Stephane Eranian, Hewlett Packard Co.
 *
 * Copyright (C) 1999-2005  Hewlett Packard Co
 *               Stephane Eranian <eranian@hpl.hp.com>
 *               David Mosberger-Tang <davidm@hpl.hp.com>
 *
 * More information about perfmon available at:
 * 	http://www.hpl.hp.com/research/linux/perfmon
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/interrupt.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/sysctl.h>
#include <linux/list.h>
#include <linux/file.h>
#include <linux/poll.h>
#include <linux/vfs.h>
#include <linux/smp.h>
#include <linux/pagemap.h>
#include <linux/mount.h>
#include <linux/pseudo_fs.h>
#include <linux/bitops.h>
#include <linux/capability.h>
#include <linux/rcupdate.h>
#include <linux/completion.h>
#include <linux/tracehook.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <asm/errno.h>
#include <asm/intrinsics.h>
#include <asm/page.h>
#include <asm/perfmon.h>
#include <asm/processor.h>
#include <asm/signal.h>
#include <linux/uaccess.h>
#include <asm/delay.h>

#ifdef CONFIG_PERFMON
/*
 * perfmon context state
 */
#define PFM_CTX_UNLOADED	1	/* context is not loaded onto any task */
#define PFM_CTX_LOADED		2	/* context is loaded onto a task */
#define PFM_CTX_MASKED		3	/* context is loaded but monitoring is masked due to overflow */
#define PFM_CTX_ZOMBIE		4	/* owner of the context is closing it */

#define PFM_INVALID_ACTIVATION	(~0UL)
#define PFM_NUM_PMC_REGS	64	/* PMC save area for ctxsw */
#define PFM_NUM_PMD_REGS	64	/* PMD save area for ctxsw */

/*
 * depth of message queue
 */
#define PFM_MAX_MSGS		32
#define PFM_CTXQ_EMPTY(g)	((g)->ctx_msgq_head == (g)->ctx_msgq_tail)
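
/*
 * The message queue is a fixed-size ring: head == tail means empty, and
 * one slot is sacrificed so that a full queue stays distinguishable from
 * an empty one. Illustrative sketch of the invariants used below:
 *
 *	head == tail                       -> PFM_CTXQ_EMPTY() is true
 *	(tail+1) % PFM_MAX_MSGS == head    -> queue full, pfm_get_new_msg()
 *	                                      returns NULL
 */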
/*
 * type of a PMU register (bitmask).
 * bitmask structure:
 * 	bit0   : register implemented
 * 	bit1   : end marker
 * 	bit2-3 : reserved
 * 	bit4   : pmc has pmc.pm
 * 	bit5   : pmc controls a counter (has pmc.oi), pmd is used as counter
 * 	bit6-7 : register type
 * 	bit8-31: reserved
 */
#define PFM_REG_NOTIMPL		0x0 /* not implemented at all */
#define PFM_REG_IMPL		0x1 /* register implemented */
#define PFM_REG_END		0x2 /* end marker */
#define PFM_REG_MONITOR		(0x1<<4|PFM_REG_IMPL) /* a PMC with a pmc.pm field only */
#define PFM_REG_COUNTING	(0x2<<4|PFM_REG_MONITOR) /* a monitor + pmc.oi + PMD used as a counter */
#define PFM_REG_CONTROL		(0x4<<4|PFM_REG_IMPL) /* PMU control register */
#define PFM_REG_CONFIG		(0x8<<4|PFM_REG_IMPL) /* configuration register */
#define PFM_REG_BUFFER		(0xc<<4|PFM_REG_IMPL) /* PMD used as buffer */
#define PMC_IS_LAST(i)	(pmu_conf->pmc_desc[i].type & PFM_REG_END)
#define PMD_IS_LAST(i)	(pmu_conf->pmd_desc[i].type & PFM_REG_END)

#define PMC_OVFL_NOTIFY(ctx, i)	((ctx)->ctx_pmds[i].flags & PFM_REGFL_OVFL_NOTIFY)

/* i assumed unsigned */
#define PMC_IS_IMPL(i)	  (i < PMU_MAX_PMCS && (pmu_conf->pmc_desc[i].type & PFM_REG_IMPL))
#define PMD_IS_IMPL(i)	  (i < PMU_MAX_PMDS && (pmu_conf->pmd_desc[i].type & PFM_REG_IMPL))
/* XXX: these assume that register i is implemented */
#define PMD_IS_COUNTING(i) ((pmu_conf->pmd_desc[i].type & PFM_REG_COUNTING) == PFM_REG_COUNTING)
#define PMC_IS_COUNTING(i) ((pmu_conf->pmc_desc[i].type & PFM_REG_COUNTING) == PFM_REG_COUNTING)
#define PMC_IS_MONITOR(i)  ((pmu_conf->pmc_desc[i].type & PFM_REG_MONITOR)  == PFM_REG_MONITOR)
#define PMC_IS_CONTROL(i)  ((pmu_conf->pmc_desc[i].type & PFM_REG_CONTROL)  == PFM_REG_CONTROL)
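
/*
 * The type encodings nest: PFM_REG_COUNTING includes the PFM_REG_MONITOR
 * bits, which include PFM_REG_IMPL. That is why the tests above compare
 * with == after masking rather than testing a single bit. Worked example
 * (values follow from the definitions above, shown for illustration):
 *
 *	PFM_REG_MONITOR  = (0x1<<4)|0x1  = 0x11
 *	PFM_REG_COUNTING = (0x2<<4)|0x11 = 0x31
 *
 * so a counting PMC also passes the PMC_IS_MONITOR() test.
 */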
#define PMC_DFL_VAL(i)     pmu_conf->pmc_desc[i].default_value
#define PMC_RSVD_MASK(i)   pmu_conf->pmc_desc[i].reserved_mask
#define PMD_PMD_DEP(i)	   pmu_conf->pmd_desc[i].dep_pmd[0]
#define PMC_PMD_DEP(i)	   pmu_conf->pmc_desc[i].dep_pmd[0]

#define PFM_NUM_IBRS	  IA64_NUM_DBG_REGS
#define PFM_NUM_DBRS	  IA64_NUM_DBG_REGS

#define CTX_OVFL_NOBLOCK(c)	((c)->ctx_fl_block == 0)
#define CTX_HAS_SMPL(c)		((c)->ctx_fl_is_sampling)
#define PFM_CTX_TASK(h)		(h)->ctx_task

#define PMU_PMC_OI		5 /* position of pmc.oi bit */

/* XXX: does not support more than 64 PMDs */
#define CTX_USED_PMD(ctx, mask) (ctx)->ctx_used_pmds[0] |= (mask)
#define CTX_IS_USED_PMD(ctx, c) (((ctx)->ctx_used_pmds[0] & (1UL << (c))) != 0UL)

#define CTX_USED_MONITOR(ctx, mask) (ctx)->ctx_used_monitors[0] |= (mask)

#define CTX_USED_IBR(ctx,n)	(ctx)->ctx_used_ibrs[(n)>>6] |= 1UL<< ((n) % 64)
#define CTX_USED_DBR(ctx,n)	(ctx)->ctx_used_dbrs[(n)>>6] |= 1UL<< ((n) % 64)
#define CTX_USES_DBREGS(ctx)	(((pfm_context_t *)(ctx))->ctx_fl_using_dbreg==1)
#define PFM_CODE_RR	0	/* requesting code range restriction */
#define PFM_DATA_RR	1	/* requesting data range restriction */

#define PFM_CPUINFO_CLEAR(v)	pfm_get_cpu_var(pfm_syst_info) &= ~(v)
#define PFM_CPUINFO_SET(v)	pfm_get_cpu_var(pfm_syst_info) |= (v)
#define PFM_CPUINFO_GET()	pfm_get_cpu_var(pfm_syst_info)

#define RDEP(x)	(1UL<<(x))
/*
 * context protection macros
 *
 * - we need to protect against CPU concurrency (spin_lock)
 * - we need to protect against PMU overflow interrupts (local_irq_disable)
 *
 * spin_lock_irqsave()/spin_unlock_irqrestore():
 * 	in SMP: local_irq_disable + spin_lock
 * 	in UP : local_irq_disable
 *
 * spin_lock()/spin_unlock():
 * 	in UP : removed automatically
 * 	in SMP: protect against context accesses from other CPU. interrupts
 * 	        are not masked. This is useful for the PMU interrupt handler
 * 	        because we know we will not get PMU concurrency in that code.
 */
#define PROTECT_CTX(c, f) \
	do {  \
		DPRINT(("spinlock_irq_save ctx %p by [%d]\n", c, task_pid_nr(current))); \
		spin_lock_irqsave(&(c)->ctx_lock, f); \
		DPRINT(("spinlocked ctx %p by [%d]\n", c, task_pid_nr(current))); \
	} while(0)

#define UNPROTECT_CTX(c, f) \
	do { \
		DPRINT(("spinlock_irq_restore ctx %p by [%d]\n", c, task_pid_nr(current))); \
		spin_unlock_irqrestore(&(c)->ctx_lock, f); \
	} while(0)

#define PROTECT_CTX_NOPRINT(c, f) \
	do { \
		spin_lock_irqsave(&(c)->ctx_lock, f); \
	} while(0)

#define UNPROTECT_CTX_NOPRINT(c, f) \
	do { \
		spin_unlock_irqrestore(&(c)->ctx_lock, f); \
	} while(0)

#define PROTECT_CTX_NOIRQ(c) \
	do { \
		spin_lock(&(c)->ctx_lock); \
	} while(0)

#define UNPROTECT_CTX_NOIRQ(c) \
	do { \
		spin_unlock(&(c)->ctx_lock); \
	} while(0)
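
/*
 * Typical usage of the protection macros (illustrative sketch):
 *
 *	unsigned long flags;
 *
 *	PROTECT_CTX(ctx, flags);
 *	... access or modify ctx state ...
 *	UNPROTECT_CTX(ctx, flags);
 *
 * The _NOIRQ variants are reserved for code paths, such as the PMU
 * interrupt handler, where interrupts are already masked.
 */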
#ifdef CONFIG_SMP

#define GET_ACTIVATION()	pfm_get_cpu_var(pmu_activation_number)
#define INC_ACTIVATION()	pfm_get_cpu_var(pmu_activation_number)++
#define SET_ACTIVATION(c)	(c)->ctx_last_activation = GET_ACTIVATION()

#else /* !CONFIG_SMP */
#define SET_ACTIVATION(t)	do {} while(0)
#define GET_ACTIVATION(t)	do {} while(0)
#define INC_ACTIVATION(t)	do {} while(0)
#endif /* CONFIG_SMP */
#define SET_PMU_OWNER(t, c)	do { pfm_get_cpu_var(pmu_owner) = (t); pfm_get_cpu_var(pmu_ctx) = (c); } while(0)
#define GET_PMU_OWNER()		pfm_get_cpu_var(pmu_owner)
#define GET_PMU_CTX()		pfm_get_cpu_var(pmu_ctx)

#define LOCK_PFS(g)		spin_lock_irqsave(&pfm_sessions.pfs_lock, g)
#define UNLOCK_PFS(g)		spin_unlock_irqrestore(&pfm_sessions.pfs_lock, g)

#define PFM_REG_RETFLAG_SET(flags, val)	do { flags &= ~PFM_REG_RETFL_MASK; flags |= (val); } while(0)
/*
 * cmp0 must be the value of pmc0
 */
#define PMC0_HAS_OVFL(cmp0)  (cmp0 & ~0x1UL)
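
/*
 * On the IA-64 PMU, bit 0 of pmc0 is the freeze bit and the higher bits
 * flag overflowed counters, so masking off bit 0 leaves only overflow
 * information. Illustration (assuming a PMU whose first counter pair is
 * PMC4/PMD4): pmc0 == 0x11 means "frozen, PMD4 overflowed".
 */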
#define PFMFS_MAGIC 0xa0b4d889
/*
 * debugging
 */
#define PFM_DEBUGGING 1
#ifdef PFM_DEBUGGING
#define DPRINT(a) \
	do { \
		if (unlikely(pfm_sysctl.debug > 0)) { printk("%s.%d: CPU%d [%d] ", __func__, __LINE__, smp_processor_id(), task_pid_nr(current)); printk a; } \
	} while (0)

#define DPRINT_ovfl(a) \
	do { \
		if (unlikely(pfm_sysctl.debug > 0 && pfm_sysctl.debug_ovfl > 0)) { printk("%s.%d: CPU%d [%d] ", __func__, __LINE__, smp_processor_id(), task_pid_nr(current)); printk a; } \
	} while (0)
#endif
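
/*
 * Both macros check pfm_sysctl.debug at run time, so the call sites cost
 * one predicted branch when debugging is off. Because the body ends in
 * "printk a", a call site must wrap the printf-style arguments in an
 * extra set of parentheses (illustrative):
 *
 *	DPRINT(("ctx=%p state=%d\n", ctx, ctx->ctx_state));
 */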
/*
 * 64-bit software counter structure
 *
 * the next_reset_type is applied to the next call to pfm_reset_regs()
 */
typedef struct {
	unsigned long	val;		/* virtual 64bit counter value */
	unsigned long	lval;		/* last reset value */
	unsigned long	long_reset;	/* reset value on sampling overflow */
	unsigned long	short_reset;	/* reset value on overflow */
	unsigned long	reset_pmds[4];	/* which other pmds to reset when this counter overflows */
	unsigned long	smpl_pmds[4];	/* which pmds are accessed when counter overflow */
	unsigned long	seed;		/* seed for random-number generator */
	unsigned long	mask;		/* mask for random-number generator */
	unsigned int	flags;		/* notify/do not notify */
	unsigned long	eventid;	/* overflow event identifier */
} pfm_counter_t;
/*
 * context flags
 */
typedef struct {
	unsigned int block:1;		/* when 1, task will block on user notifications */
	unsigned int system:1;		/* do system wide monitoring */
	unsigned int using_dbreg:1;	/* using range restrictions (debug registers) */
	unsigned int is_sampling:1;	/* true if using a custom format */
	unsigned int excl_idle:1;	/* exclude idle task in system wide session */
	unsigned int going_zombie:1;	/* context is zombie (MASKED+blocking) */
	unsigned int trap_reason:2;	/* reason for going into pfm_handle_work() */
	unsigned int no_msg:1;		/* no message sent on overflow */
	unsigned int can_restart:1;	/* allowed to issue a PFM_RESTART */
	unsigned int reserved:22;
} pfm_context_flags_t;
#define PFM_TRAP_REASON_NONE	0x0	/* default value */
#define PFM_TRAP_REASON_BLOCK	0x1	/* we need to block on overflow */
#define PFM_TRAP_REASON_RESET	0x2	/* we need to reset PMDs */
/*
 * perfmon context: encapsulates all the state of a monitoring session
 */

typedef struct pfm_context {
	spinlock_t		ctx_lock;		/* context protection */

	pfm_context_flags_t	ctx_flags;		/* bitmask of flags (block reason incl.) */
	unsigned int		ctx_state;		/* state: active/inactive (no bitfield) */

	struct task_struct	*ctx_task;		/* task to which context is attached */

	unsigned long		ctx_ovfl_regs[4];	/* which registers overflowed (notification) */

	struct completion	ctx_restart_done;	/* use for blocking notification mode */

	unsigned long		ctx_used_pmds[4];	/* bitmask of PMD used */
	unsigned long		ctx_all_pmds[4];	/* bitmask of all accessible PMDs */
	unsigned long		ctx_reload_pmds[4];	/* bitmask of force reload PMD on ctxsw in */

	unsigned long		ctx_all_pmcs[4];	/* bitmask of all accessible PMCs */
	unsigned long		ctx_reload_pmcs[4];	/* bitmask of force reload PMC on ctxsw in */
	unsigned long		ctx_used_monitors[4];	/* bitmask of monitor PMC being used */

	unsigned long		ctx_pmcs[PFM_NUM_PMC_REGS];	/* saved copies of PMC values */

	unsigned int		ctx_used_ibrs[1];	/* bitmask of used IBR (speedup ctxsw in) */
	unsigned int		ctx_used_dbrs[1];	/* bitmask of used DBR (speedup ctxsw in) */
	unsigned long		ctx_dbrs[IA64_NUM_DBG_REGS];	/* DBR values (cache) when not loaded */
	unsigned long		ctx_ibrs[IA64_NUM_DBG_REGS];	/* IBR values (cache) when not loaded */

	pfm_counter_t		ctx_pmds[PFM_NUM_PMD_REGS];	/* software state for PMDS */

	unsigned long		th_pmcs[PFM_NUM_PMC_REGS];	/* PMC thread save state */
	unsigned long		th_pmds[PFM_NUM_PMD_REGS];	/* PMD thread save state */

	unsigned long		ctx_saved_psr_up;	/* only contains psr.up value */

	unsigned long		ctx_last_activation;	/* context last activation number for last_cpu */
	unsigned int		ctx_last_cpu;		/* CPU id of current or last CPU used (SMP only) */
	unsigned int		ctx_cpu;		/* cpu to which perfmon is applied (system wide) */

	int			ctx_fd;			/* file descriptor used by this context */
	pfm_ovfl_arg_t		ctx_ovfl_arg;		/* argument to custom buffer format handler */

	pfm_buffer_fmt_t	*ctx_buf_fmt;		/* buffer format callbacks */
	void			*ctx_smpl_hdr;		/* points to sampling buffer header kernel vaddr */
	unsigned long		ctx_smpl_size;		/* size of sampling buffer */
	void			*ctx_smpl_vaddr;	/* user level virtual address of smpl buffer */

	wait_queue_head_t	ctx_msgq_wait;
	pfm_msg_t		ctx_msgq[PFM_MAX_MSGS];
	int			ctx_msgq_head;
	int			ctx_msgq_tail;
	struct fasync_struct	*ctx_async_queue;

	wait_queue_head_t	ctx_zombieq;		/* termination cleanup wait queue */
} pfm_context_t;
/*
 * magic number used to verify that structure is really
 * a perfmon context
 */
#define PFM_IS_FILE(f)		((f)->f_op == &pfm_file_ops)

#define PFM_GET_CTX(t)		((pfm_context_t *)(t)->thread.pfm_context)
#ifdef CONFIG_SMP
#define SET_LAST_CPU(ctx, v)	(ctx)->ctx_last_cpu = (v)
#define GET_LAST_CPU(ctx)	(ctx)->ctx_last_cpu
#else
#define SET_LAST_CPU(ctx, v)	do {} while(0)
#define GET_LAST_CPU(ctx)	do {} while(0)
#endif
#define ctx_fl_block		ctx_flags.block
#define ctx_fl_system		ctx_flags.system
#define ctx_fl_using_dbreg	ctx_flags.using_dbreg
#define ctx_fl_is_sampling	ctx_flags.is_sampling
#define ctx_fl_excl_idle	ctx_flags.excl_idle
#define ctx_fl_going_zombie	ctx_flags.going_zombie
#define ctx_fl_trap_reason	ctx_flags.trap_reason
#define ctx_fl_no_msg		ctx_flags.no_msg
#define ctx_fl_can_restart	ctx_flags.can_restart
#define PFM_SET_WORK_PENDING(t, v)	do { (t)->thread.pfm_needs_checking = v; } while(0)
#define PFM_GET_WORK_PENDING(t)		(t)->thread.pfm_needs_checking
/*
 * global information about all sessions
 * mostly used to synchronize between system wide and per-process
 */
typedef struct {
	spinlock_t		pfs_lock;		   /* lock the structure */

	unsigned int		pfs_task_sessions;	   /* number of per task sessions */
	unsigned int		pfs_sys_sessions;	   /* number of per system wide sessions */
	unsigned int		pfs_sys_use_dbregs;	   /* incremented when a system wide session uses debug regs */
	unsigned int		pfs_ptrace_use_dbregs;	   /* incremented when a process uses debug regs */
	struct task_struct	*pfs_sys_session[NR_CPUS]; /* point to task owning a system-wide session */
} pfm_session_t;
/*
 * information about a PMC or PMD.
 * dep_pmd[]: a bitmask of dependent PMD registers
 * dep_pmc[]: a bitmask of dependent PMC registers
 */
typedef int (*pfm_reg_check_t)(struct task_struct *task, pfm_context_t *ctx, unsigned int cnum, unsigned long *val, struct pt_regs *regs);
typedef struct {
	unsigned int		type;
	int			pm_pos;
	unsigned long		default_value;	/* power-on default value */
	unsigned long		reserved_mask;	/* bitmask of reserved bits */
	pfm_reg_check_t		read_check;
	pfm_reg_check_t		write_check;
	unsigned long		dep_pmd[4];
	unsigned long		dep_pmc[4];
} pfm_reg_desc_t;

/* assume cnum is a valid monitor */
#define PMC_PM(cnum, val)	(((val) >> (pmu_conf->pmc_desc[cnum].pm_pos)) & 0x1)
/*
 * This structure is initialized at boot time and contains
 * a description of the PMU main characteristics.
 *
 * If the probe function is defined, detection is based
 * on its return value:
 * 	- 0 means recognized PMU
 * 	- anything else means not supported
 * When the probe function is not defined, then the pmu_family field
 * is used and it must match the host CPU family such that:
 * 	- cpu->family & config->pmu_family != 0
 */
typedef struct {
	unsigned long  ovfl_val;	/* overflow value for counters */

	pfm_reg_desc_t *pmc_desc;	/* detailed PMC register dependencies descriptions */
	pfm_reg_desc_t *pmd_desc;	/* detailed PMD register dependencies descriptions */

	unsigned int   num_pmcs;	/* number of PMCS: computed at init time */
	unsigned int   num_pmds;	/* number of PMDS: computed at init time */
	unsigned long  impl_pmcs[4];	/* bitmask of implemented PMCS */
	unsigned long  impl_pmds[4];	/* bitmask of implemented PMDS */

	char	      *pmu_name;	/* PMU family name */
	unsigned int  pmu_family;	/* cpuid family pattern used to identify pmu */
	unsigned int  flags;		/* pmu specific flags */
	unsigned int  num_ibrs;		/* number of IBRS: computed at init time */
	unsigned int  num_dbrs;		/* number of DBRS: computed at init time */
	unsigned int  num_counters;	/* PMC/PMD counting pairs : computed at init time */
	int           (*probe)(void);	/* customized probe routine */
	unsigned int  use_rr_dbregs:1;	/* set if debug registers used for range restriction */
} pmu_config_t;

/*
 * PMU specific flags
 */
#define PFM_PMU_IRQ_RESEND	1	/* PMU needs explicit IRQ resend */
/*
 * debug register related type definitions
 */
typedef struct {
	unsigned long ibr_mask:56;
	unsigned long ibr_plm:4;
	unsigned long ibr_ig:3;
	unsigned long ibr_x:1;
} ibr_mask_reg_t;

typedef struct {
	unsigned long dbr_mask:56;
	unsigned long dbr_plm:4;
	unsigned long dbr_ig:2;
	unsigned long dbr_w:1;
	unsigned long dbr_r:1;
} dbr_mask_reg_t;

typedef union {
	unsigned long  val;
	ibr_mask_reg_t ibr;
	dbr_mask_reg_t dbr;
} dbreg_t;
/*
 * perfmon command descriptions
 */
typedef struct {
	int		(*cmd_func)(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs);
	char		*cmd_name;
	int		cmd_flags;
	unsigned int	cmd_narg;
	size_t		cmd_argsize;
	int		(*cmd_getsize)(void *arg, size_t *sz);
} pfm_cmd_desc_t;

#define PFM_CMD_FD		0x01	/* command requires a file descriptor */
#define PFM_CMD_ARG_READ	0x02	/* command must read argument(s) */
#define PFM_CMD_ARG_RW		0x04	/* command must read/write argument(s) */
#define PFM_CMD_STOP		0x08	/* command does not work on zombie context */

#define PFM_CMD_NAME(cmd)	pfm_cmd_tab[(cmd)].cmd_name
#define PFM_CMD_READ_ARG(cmd)	(pfm_cmd_tab[(cmd)].cmd_flags & PFM_CMD_ARG_READ)
#define PFM_CMD_RW_ARG(cmd)	(pfm_cmd_tab[(cmd)].cmd_flags & PFM_CMD_ARG_RW)
#define PFM_CMD_USE_FD(cmd)	(pfm_cmd_tab[(cmd)].cmd_flags & PFM_CMD_FD)
#define PFM_CMD_STOPPED(cmd)	(pfm_cmd_tab[(cmd)].cmd_flags & PFM_CMD_STOP)

#define PFM_CMD_ARG_MANY	-1 /* cannot be zero */
typedef struct {
	unsigned long pfm_spurious_ovfl_intr_count;	/* keep track of spurious ovfl interrupts */
	unsigned long pfm_replay_ovfl_intr_count;	/* keep track of replayed ovfl interrupts */
	unsigned long pfm_ovfl_intr_count;		/* keep track of ovfl interrupts */
	unsigned long pfm_ovfl_intr_cycles;		/* cycles spent processing ovfl interrupts */
	unsigned long pfm_ovfl_intr_cycles_min;		/* min cycles spent processing ovfl interrupts */
	unsigned long pfm_ovfl_intr_cycles_max;		/* max cycles spent processing ovfl interrupts */
	unsigned long pfm_smpl_handler_calls;
	unsigned long pfm_smpl_handler_cycles;
	char pad[SMP_CACHE_BYTES] ____cacheline_aligned;
} pfm_stats_t;
/*
 * perfmon internal variables
 */
static pfm_stats_t		pfm_stats[NR_CPUS];
static pfm_session_t		pfm_sessions;	/* global sessions information */

static DEFINE_SPINLOCK(pfm_alt_install_check);
static pfm_intr_handler_desc_t	*pfm_alt_intr_handler;

static struct proc_dir_entry	*perfmon_dir;
static pfm_uuid_t		pfm_null_uuid = {0,};

static spinlock_t		pfm_buffer_fmt_lock;
static LIST_HEAD(pfm_buffer_fmt_list);

static pmu_config_t		*pmu_conf;

/* sysctl() controls */
pfm_sysctl_t pfm_sysctl;
EXPORT_SYMBOL(pfm_sysctl);
static struct ctl_table pfm_ctl_table[] = {
	{
		.procname	= "debug",
		.data		= &pfm_sysctl.debug,
		.maxlen		= sizeof(int),
		.mode		= 0666,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "debug_ovfl",
		.data		= &pfm_sysctl.debug_ovfl,
		.maxlen		= sizeof(int),
		.mode		= 0666,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "fastctxsw",
		.data		= &pfm_sysctl.fastctxsw,
		.maxlen		= sizeof(int),
		.mode		= 0600,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "expert_mode",
		.data		= &pfm_sysctl.expert_mode,
		.maxlen		= sizeof(int),
		.mode		= 0600,
		.proc_handler	= proc_dointvec,
	},
	{}
};
static struct ctl_table pfm_sysctl_dir[] = {
	{
		.procname	= "perfmon",
		.mode		= 0555,
		.child		= pfm_ctl_table,
	},
	{}
};
static struct ctl_table pfm_sysctl_root[] = {
	{
		.procname	= "kernel",
		.mode		= 0555,
		.child		= pfm_sysctl_dir,
	},
	{}
};
static struct ctl_table_header *pfm_sysctl_header;
static int pfm_context_unload(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs);

#define pfm_get_cpu_var(v)		__ia64_per_cpu_var(v)
#define pfm_get_cpu_data(a,b)		per_cpu(a, b)
static inline void
pfm_put_task(struct task_struct *task)
{
	if (task != current) put_task_struct(task);
}

static inline unsigned long
pfm_protect_ctx_ctxsw(pfm_context_t *x)
{
	spin_lock(&(x)->ctx_lock);
	return 0UL;
}

static inline void
pfm_unprotect_ctx_ctxsw(pfm_context_t *x, unsigned long f)
{
	spin_unlock(&(x)->ctx_lock);
}
/* forward declaration */
static const struct dentry_operations pfmfs_dentry_operations;

static int pfmfs_init_fs_context(struct fs_context *fc)
{
	struct pseudo_fs_context *ctx = init_pseudo(fc, PFMFS_MAGIC);
	if (!ctx)
		return -ENOMEM;
	ctx->dops = &pfmfs_dentry_operations;
	return 0;
}

static struct file_system_type pfm_fs_type = {
	.name			= "pfmfs",
	.init_fs_context	= pfmfs_init_fs_context,
	.kill_sb		= kill_anon_super,
};
MODULE_ALIAS_FS("pfmfs");
DEFINE_PER_CPU(unsigned long, pfm_syst_info);
DEFINE_PER_CPU(struct task_struct *, pmu_owner);
DEFINE_PER_CPU(pfm_context_t *, pmu_ctx);
DEFINE_PER_CPU(unsigned long, pmu_activation_number);
EXPORT_PER_CPU_SYMBOL_GPL(pfm_syst_info);

/* forward declaration */
static const struct file_operations pfm_file_ops;
/*
 * forward declarations
 */
#ifndef CONFIG_SMP
static void pfm_lazy_save_regs (struct task_struct *ta);
#endif

void dump_pmu_state(const char *);
static int pfm_write_ibr_dbr(int mode, pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs);
#include "perfmon_itanium.h"
#include "perfmon_mckinley.h"
#include "perfmon_montecito.h"
#include "perfmon_generic.h"

static pmu_config_t *pmu_confs[]={
	&pmu_conf_mont,
	&pmu_conf_mck,
	&pmu_conf_ita,
	&pmu_conf_gen, /* must be last */
	NULL
};

static int pfm_end_notify_user(pfm_context_t *ctx);
static inline void
pfm_clear_psr_pp(void)
{
	ia64_rsm(IA64_PSR_PP);
	ia64_srlz_i();
}

static inline void
pfm_set_psr_pp(void)
{
	ia64_ssm(IA64_PSR_PP);
	ia64_srlz_i();
}

static inline void
pfm_clear_psr_up(void)
{
	ia64_rsm(IA64_PSR_UP);
	ia64_srlz_i();
}

static inline void
pfm_set_psr_up(void)
{
	ia64_ssm(IA64_PSR_UP);
	ia64_srlz_i();
}

static inline unsigned long
pfm_get_psr(void)
{
	unsigned long tmp;
	tmp = ia64_getreg(_IA64_REG_PSR);
	ia64_srlz_i();
	return tmp;
}

static inline void
pfm_set_psr_l(unsigned long val)
{
	ia64_setreg(_IA64_REG_PSR_L, val);
	ia64_srlz_i();
}

static inline void
pfm_freeze_pmu(void)
{
	ia64_set_pmc(0,1UL);
	ia64_srlz_d();
}

static inline void
pfm_unfreeze_pmu(void)
{
	ia64_set_pmc(0,0UL);
	ia64_srlz_d();
}
static inline void
pfm_restore_ibrs(unsigned long *ibrs, unsigned int nibrs)
{
	int i;

	for (i=0; i < nibrs; i++) {
		ia64_set_ibr(i, ibrs[i]);
		ia64_dv_serialize_instruction();
	}
	ia64_srlz_i();
}

static inline void
pfm_restore_dbrs(unsigned long *dbrs, unsigned int ndbrs)
{
	int i;

	for (i=0; i < ndbrs; i++) {
		ia64_set_dbr(i, dbrs[i]);
		ia64_dv_serialize_data();
	}
	ia64_srlz_d();
}
/*
 * PMD[i] must be a counter. no check is made
 */
static inline unsigned long
pfm_read_soft_counter(pfm_context_t *ctx, int i)
{
	return ctx->ctx_pmds[i].val + (ia64_get_pmd(i) & pmu_conf->ovfl_val);
}
/*
 * PMD[i] must be a counter. no check is made
 */
static inline void
pfm_write_soft_counter(pfm_context_t *ctx, int i, unsigned long val)
{
	unsigned long ovfl_val = pmu_conf->ovfl_val;

	ctx->ctx_pmds[i].val = val & ~ovfl_val;
	/*
	 * writing to unimplemented part is ignored, so we do not need to
	 * mask off top part
	 */
	ia64_set_pmd(i, val & ovfl_val);
}
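
/*
 * Worked example (illustrative): if the PMU implements 47-bit counters,
 * ovfl_val = (1UL<<47)-1. pfm_write_soft_counter() then keeps bits 47-63
 * of val in ctx->ctx_pmds[i].val and loads bits 0-46 into the hardware
 * PMD; pfm_read_soft_counter() adds the two parts back together. This is
 * how the 64-bit virtual counter of pfm_counter_t is emulated.
 */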
static pfm_msg_t *
pfm_get_new_msg(pfm_context_t *ctx)
{
	int idx, next;

	next = (ctx->ctx_msgq_tail+1) % PFM_MAX_MSGS;

	DPRINT(("ctx=%p head=%d tail=%d\n", ctx, ctx->ctx_msgq_head, ctx->ctx_msgq_tail));
	if (next == ctx->ctx_msgq_head) return NULL;

	idx = ctx->ctx_msgq_tail;
	ctx->ctx_msgq_tail = next;

	DPRINT(("ctx=%p head=%d tail=%d msg=%d\n", ctx, ctx->ctx_msgq_head, ctx->ctx_msgq_tail, idx));

	return ctx->ctx_msgq+idx;
}
static pfm_msg_t *
pfm_get_next_msg(pfm_context_t *ctx)
{
	pfm_msg_t *msg;

	DPRINT(("ctx=%p head=%d tail=%d\n", ctx, ctx->ctx_msgq_head, ctx->ctx_msgq_tail));

	if (PFM_CTXQ_EMPTY(ctx)) return NULL;

	/*
	 * get oldest message
	 */
	msg = ctx->ctx_msgq+ctx->ctx_msgq_head;

	/*
	 * and move forward
	 */
	ctx->ctx_msgq_head = (ctx->ctx_msgq_head+1) % PFM_MAX_MSGS;

	DPRINT(("ctx=%p head=%d tail=%d type=%d\n", ctx, ctx->ctx_msgq_head, ctx->ctx_msgq_tail, msg->pfm_gen_msg.msg_type));

	return msg;
}
static void
pfm_reset_msgq(pfm_context_t *ctx)
{
	ctx->ctx_msgq_head = ctx->ctx_msgq_tail = 0;
	DPRINT(("ctx=%p msgq reset\n", ctx));
}
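
/*
 * The three helpers above implement the notification ring buffer:
 * pfm_get_new_msg() claims the slot at the tail for the producer (the
 * overflow handler), pfm_get_next_msg() consumes from the head (used by
 * pfm_read()), and pfm_reset_msgq() empties the queue. Sketch of the
 * producer side (illustrative only, locking omitted):
 *
 *	pfm_msg_t *msg = pfm_get_new_msg(ctx);
 *	if (msg) {
 *		msg->pfm_ovfl_msg.msg_type = PFM_MSG_OVFL;
 *		wake_up_interruptible(&ctx->ctx_msgq_wait);
 *	}
 */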
static pfm_context_t *
pfm_context_alloc(int ctx_flags)
{
	pfm_context_t *ctx;

	/*
	 * allocate context descriptor
	 * must be able to free with interrupts disabled
	 */
	ctx = kzalloc(sizeof(pfm_context_t), GFP_KERNEL);
	if (ctx) {
		DPRINT(("alloc ctx @%p\n", ctx));

		/*
		 * init context protection lock
		 */
		spin_lock_init(&ctx->ctx_lock);

		/*
		 * context is unloaded
		 */
		ctx->ctx_state = PFM_CTX_UNLOADED;

		/*
		 * initialization of context's flags
		 */
		ctx->ctx_fl_block  = (ctx_flags & PFM_FL_NOTIFY_BLOCK) ? 1 : 0;
		ctx->ctx_fl_system = (ctx_flags & PFM_FL_SYSTEM_WIDE) ? 1: 0;
		ctx->ctx_fl_no_msg = (ctx_flags & PFM_FL_OVFL_NO_MSG) ? 1: 0;
		/*
		 * will move to set properties
		 * ctx->ctx_fl_excl_idle = (ctx_flags & PFM_FL_EXCL_IDLE) ? 1: 0;
		 */

		/*
		 * init restart semaphore to locked
		 */
		init_completion(&ctx->ctx_restart_done);

		/*
		 * activation is used in SMP only
		 */
		ctx->ctx_last_activation = PFM_INVALID_ACTIVATION;
		SET_LAST_CPU(ctx, -1);

		/*
		 * initialize notification message queue
		 */
		ctx->ctx_msgq_head = ctx->ctx_msgq_tail = 0;
		init_waitqueue_head(&ctx->ctx_msgq_wait);
		init_waitqueue_head(&ctx->ctx_zombieq);
	}
	return ctx;
}
static void
pfm_context_free(pfm_context_t *ctx)
{
	if (ctx) {
		DPRINT(("free ctx @%p\n", ctx));
		kfree(ctx);
	}
}
static void
pfm_mask_monitoring(struct task_struct *task)
{
	pfm_context_t *ctx = PFM_GET_CTX(task);
	unsigned long mask, val, ovfl_mask;
	int i;

	DPRINT_ovfl(("masking monitoring for [%d]\n", task_pid_nr(task)));

	ovfl_mask = pmu_conf->ovfl_val;
	/*
	 * monitoring can only be masked as a result of a valid
	 * counter overflow. In UP, it means that the PMU still
	 * has an owner. Note that the owner can be different
	 * from the current task. However the PMU state belongs
	 * to the owner.
	 * In SMP, a valid overflow only happens when task is
	 * current. Therefore if we come here, we know that
	 * the PMU state belongs to the current task, therefore
	 * we can access the live registers.
	 *
	 * So in both cases, the live register contains the owner's
	 * state. We can ONLY touch the PMU registers and NOT the PSR.
	 *
	 * As a consequence to this call, the ctx->th_pmds[] array
	 * contains stale information which must be ignored
	 * when context is reloaded AND monitoring is active (see
	 * pfm_restart).
	 */
	mask = ctx->ctx_used_pmds[0];
	for (i = 0; mask; i++, mask>>=1) {
		/* skip non used pmds */
		if ((mask & 0x1) == 0) continue;
		val = ia64_get_pmd(i);

		if (PMD_IS_COUNTING(i)) {
			/*
			 * we rebuild the full 64 bit value of the counter
			 */
			ctx->ctx_pmds[i].val += (val & ovfl_mask);
		} else {
			ctx->ctx_pmds[i].val = val;
		}
		DPRINT_ovfl(("pmd[%d]=0x%lx hw_pmd=0x%lx\n",
			i,
			ctx->ctx_pmds[i].val,
			val & ovfl_mask));
	}
	/*
	 * mask monitoring by setting the privilege level to 0
	 * we cannot use psr.pp/psr.up for this, it is controlled by
	 * the user
	 *
	 * if task is current, modify actual registers, otherwise modify
	 * thread save state, i.e., what will be restored in pfm_load_regs()
	 */
	mask = ctx->ctx_used_monitors[0] >> PMU_FIRST_COUNTER;
	for(i= PMU_FIRST_COUNTER; mask; i++, mask>>=1) {
		if ((mask & 0x1) == 0UL) continue;
		ia64_set_pmc(i, ctx->th_pmcs[i] & ~0xfUL);
		ctx->th_pmcs[i] &= ~0xfUL;
		DPRINT_ovfl(("pmc[%d]=0x%lx\n", i, ctx->th_pmcs[i]));
	}
	/*
	 * make all of this visible
	 */
	ia64_srlz_d();
}
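
/*
 * Note on the ~0xfUL masking above (illustrative): bits 0-3 of a monitor
 * PMC hold the privilege-level mask (pmc.plm). Clearing them makes the
 * counter match no privilege level at all, which stops it without
 * touching psr.pp/psr.up, exactly as the comment in the function says.
 */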
/*
 * must always be done with task == current
 *
 * context must be in MASKED state when calling
 */
static void
pfm_restore_monitoring(struct task_struct *task)
{
	pfm_context_t *ctx = PFM_GET_CTX(task);
	unsigned long mask, ovfl_mask;
	unsigned long psr, val;
	int i, is_system;

	is_system = ctx->ctx_fl_system;
	ovfl_mask = pmu_conf->ovfl_val;

	if (task != current) {
		printk(KERN_ERR "perfmon.%d: invalid task[%d] current[%d]\n", __LINE__, task_pid_nr(task), task_pid_nr(current));
		return;
	}
	if (ctx->ctx_state != PFM_CTX_MASKED) {
		printk(KERN_ERR "perfmon.%d: task[%d] current[%d] invalid state=%d\n", __LINE__,
			task_pid_nr(task), task_pid_nr(current), ctx->ctx_state);
		return;
	}
	psr = pfm_get_psr();
	/*
	 * monitoring is masked via the PMC.
	 * As we restore their value, we do not want each counter to
	 * restart right away. We stop monitoring using the PSR,
	 * restore the PMC (and PMD) and then re-establish the psr
	 * as it was. Note that there can be no pending overflow at
	 * this point, because monitoring was MASKED.
	 *
	 * system-wide sessions are pinned and self-monitoring
	 */
	if (is_system && (PFM_CPUINFO_GET() & PFM_CPUINFO_DCR_PP)) {
		/* disable dcr pp */
		ia64_setreg(_IA64_REG_CR_DCR, ia64_getreg(_IA64_REG_CR_DCR) & ~IA64_DCR_PP);
		pfm_clear_psr_pp();
	} else {
		pfm_clear_psr_up();
	}
	/*
	 * first, we restore the PMD
	 */
	mask = ctx->ctx_used_pmds[0];
	for (i = 0; mask; i++, mask>>=1) {
		/* skip non used pmds */
		if ((mask & 0x1) == 0) continue;

		if (PMD_IS_COUNTING(i)) {
			/*
			 * we split the 64bit value according to
			 * counter width
			 */
			val = ctx->ctx_pmds[i].val & ovfl_mask;
			ctx->ctx_pmds[i].val &= ~ovfl_mask;
		} else {
			val = ctx->ctx_pmds[i].val;
		}
		ia64_set_pmd(i, val);

		DPRINT(("pmd[%d]=0x%lx hw_pmd=0x%lx\n",
			i,
			ctx->ctx_pmds[i].val,
			val));
	}
	/*
	 * restore the PMCs
	 */
	mask = ctx->ctx_used_monitors[0] >> PMU_FIRST_COUNTER;
	for(i= PMU_FIRST_COUNTER; mask; i++, mask>>=1) {
		if ((mask & 0x1) == 0UL) continue;
		ctx->th_pmcs[i] = ctx->ctx_pmcs[i];
		ia64_set_pmc(i, ctx->th_pmcs[i]);
		DPRINT(("[%d] pmc[%d]=0x%lx\n",
					task_pid_nr(task), i, ctx->th_pmcs[i]));
	}
	ia64_srlz_d();

	/*
	 * must restore DBR/IBR because could be modified while masked
	 * XXX: need to optimize
	 */
	if (ctx->ctx_fl_using_dbreg) {
		pfm_restore_ibrs(ctx->ctx_ibrs, pmu_conf->num_ibrs);
		pfm_restore_dbrs(ctx->ctx_dbrs, pmu_conf->num_dbrs);
	}

	/*
	 * now restore PSR
	 */
	if (is_system && (PFM_CPUINFO_GET() & PFM_CPUINFO_DCR_PP)) {
		/* enable dcr pp */
		ia64_setreg(_IA64_REG_CR_DCR, ia64_getreg(_IA64_REG_CR_DCR) | IA64_DCR_PP);
		ia64_srlz_i();
	}
	pfm_set_psr_l(psr);
}
static inline void
pfm_save_pmds(unsigned long *pmds, unsigned long mask)
{
	int i;

	ia64_srlz_d();

	for (i=0; mask; i++, mask>>=1) {
		if (mask & 0x1) pmds[i] = ia64_get_pmd(i);
	}
}
/*
 * reload from thread state (used for ctxsw only)
 */
static inline void
pfm_restore_pmds(unsigned long *pmds, unsigned long mask)
{
	int i;
	unsigned long val, ovfl_val = pmu_conf->ovfl_val;

	for (i=0; mask; i++, mask>>=1) {
		if ((mask & 0x1) == 0) continue;
		val = PMD_IS_COUNTING(i) ? pmds[i] & ovfl_val : pmds[i];
		ia64_set_pmd(i, val);
	}
	ia64_srlz_d();
}
/*
 * propagate PMD from context to thread-state
 */
static inline void
pfm_copy_pmds(struct task_struct *task, pfm_context_t *ctx)
{
	unsigned long ovfl_val = pmu_conf->ovfl_val;
	unsigned long mask = ctx->ctx_all_pmds[0];
	unsigned long val;
	int i;

	DPRINT(("mask=0x%lx\n", mask));

	for (i=0; mask; i++, mask>>=1) {

		val = ctx->ctx_pmds[i].val;

		/*
		 * We break up the 64 bit value into 2 pieces
		 * the lower bits go to the machine state in the
		 * thread (will be reloaded on ctxsw in).
		 * The upper part stays in the soft-counter.
		 */
		if (PMD_IS_COUNTING(i)) {
			ctx->ctx_pmds[i].val = val & ~ovfl_val;
			val &= ovfl_val;
		}
		ctx->th_pmds[i] = val;

		DPRINT(("pmd[%d]=0x%lx soft_val=0x%lx\n",
			i,
			ctx->th_pmds[i],
			ctx->ctx_pmds[i].val));
	}
}
/*
 * propagate PMC from context to thread-state
 */
static inline void
pfm_copy_pmcs(struct task_struct *task, pfm_context_t *ctx)
{
	unsigned long mask = ctx->ctx_all_pmcs[0];
	int i;

	DPRINT(("mask=0x%lx\n", mask));

	for (i=0; mask; i++, mask>>=1) {
		/* masking 0 with ovfl_val yields 0 */
		ctx->th_pmcs[i] = ctx->ctx_pmcs[i];
		DPRINT(("pmc[%d]=0x%lx\n", i, ctx->th_pmcs[i]));
	}
}
static inline void
pfm_restore_pmcs(unsigned long *pmcs, unsigned long mask)
{
	int i;

	for (i=0; mask; i++, mask>>=1) {
		if ((mask & 0x1) == 0) continue;
		ia64_set_pmc(i, pmcs[i]);
	}
	ia64_srlz_d();
}
static inline int
pfm_uuid_cmp(pfm_uuid_t a, pfm_uuid_t b)
{
	return memcmp(a, b, sizeof(pfm_uuid_t));
}
static inline int
pfm_buf_fmt_exit(pfm_buffer_fmt_t *fmt, struct task_struct *task, void *buf, struct pt_regs *regs)
{
	int ret = 0;
	if (fmt->fmt_exit) ret = (*fmt->fmt_exit)(task, buf, regs);
	return ret;
}

static inline int
pfm_buf_fmt_getsize(pfm_buffer_fmt_t *fmt, struct task_struct *task, unsigned int flags, int cpu, void *arg, unsigned long *size)
{
	int ret = 0;
	if (fmt->fmt_getsize) ret = (*fmt->fmt_getsize)(task, flags, cpu, arg, size);
	return ret;
}

static inline int
pfm_buf_fmt_validate(pfm_buffer_fmt_t *fmt, struct task_struct *task, unsigned int flags,
		     int cpu, void *arg)
{
	int ret = 0;
	if (fmt->fmt_validate) ret = (*fmt->fmt_validate)(task, flags, cpu, arg);
	return ret;
}

static inline int
pfm_buf_fmt_init(pfm_buffer_fmt_t *fmt, struct task_struct *task, void *buf, unsigned int flags,
		 int cpu, void *arg)
{
	int ret = 0;
	if (fmt->fmt_init) ret = (*fmt->fmt_init)(task, buf, flags, cpu, arg);
	return ret;
}

static inline int
pfm_buf_fmt_restart(pfm_buffer_fmt_t *fmt, struct task_struct *task, pfm_ovfl_ctrl_t *ctrl, void *buf, struct pt_regs *regs)
{
	int ret = 0;
	if (fmt->fmt_restart) ret = (*fmt->fmt_restart)(task, ctrl, buf, regs);
	return ret;
}

static inline int
pfm_buf_fmt_restart_active(pfm_buffer_fmt_t *fmt, struct task_struct *task, pfm_ovfl_ctrl_t *ctrl, void *buf, struct pt_regs *regs)
{
	int ret = 0;
	if (fmt->fmt_restart_active) ret = (*fmt->fmt_restart_active)(task, ctrl, buf, regs);
	return ret;
}
static pfm_buffer_fmt_t *
__pfm_find_buffer_fmt(pfm_uuid_t uuid)
{
	struct list_head * pos;
	pfm_buffer_fmt_t * entry;

	list_for_each(pos, &pfm_buffer_fmt_list) {
		entry = list_entry(pos, pfm_buffer_fmt_t, fmt_list);
		if (pfm_uuid_cmp(uuid, entry->fmt_uuid) == 0)
			return entry;
	}
	return NULL;
}

/*
 * find a buffer format based on its uuid
 */
static pfm_buffer_fmt_t *
pfm_find_buffer_fmt(pfm_uuid_t uuid)
{
	pfm_buffer_fmt_t * fmt;
	spin_lock(&pfm_buffer_fmt_lock);
	fmt = __pfm_find_buffer_fmt(uuid);
	spin_unlock(&pfm_buffer_fmt_lock);
	return fmt;
}
int
pfm_register_buffer_fmt(pfm_buffer_fmt_t *fmt)
{
	int ret = 0;

	/* some sanity checks */
	if (fmt == NULL || fmt->fmt_name == NULL) return -EINVAL;

	/* we need at least a handler */
	if (fmt->fmt_handler == NULL) return -EINVAL;

	/*
	 * XXX: need check validity of fmt_arg_size
	 */

	spin_lock(&pfm_buffer_fmt_lock);

	if (__pfm_find_buffer_fmt(fmt->fmt_uuid)) {
		printk(KERN_ERR "perfmon: duplicate sampling format: %s\n", fmt->fmt_name);
		ret = -EBUSY;
		goto out;
	}
	list_add(&fmt->fmt_list, &pfm_buffer_fmt_list);
	printk(KERN_INFO "perfmon: added sampling format %s\n", fmt->fmt_name);

out:
	spin_unlock(&pfm_buffer_fmt_lock);
	return ret;
}
EXPORT_SYMBOL(pfm_register_buffer_fmt);
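
/*
 * Sketch of how a sampling-format module uses this interface
 * (illustrative only; MY_FMT_UUID and the handler are hypothetical, the
 * in-tree example is perfmon_default_smpl.c). Field names match the
 * checks performed in pfm_register_buffer_fmt() above:
 *
 *	static pfm_buffer_fmt_t my_fmt = {
 *		.fmt_name    = "my-sampling-format",
 *		.fmt_uuid    = MY_FMT_UUID,
 *		.fmt_handler = my_overflow_handler,
 *	};
 *
 *	pfm_register_buffer_fmt(&my_fmt);           ... on module init
 *	pfm_unregister_buffer_fmt(my_fmt.fmt_uuid); ... on module exit
 */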
int
pfm_unregister_buffer_fmt(pfm_uuid_t uuid)
{
	pfm_buffer_fmt_t *fmt;
	int ret = 0;

	spin_lock(&pfm_buffer_fmt_lock);

	fmt = __pfm_find_buffer_fmt(uuid);
	if (!fmt) {
		printk(KERN_ERR "perfmon: cannot unregister format, not found\n");
		ret = -EINVAL;
		goto out;
	}
	list_del_init(&fmt->fmt_list);
	printk(KERN_INFO "perfmon: removed sampling format: %s\n", fmt->fmt_name);

out:
	spin_unlock(&pfm_buffer_fmt_lock);
	return ret;
}
EXPORT_SYMBOL(pfm_unregister_buffer_fmt);
static int
pfm_reserve_session(struct task_struct *task, int is_syswide, unsigned int cpu)
{
	unsigned long flags;
	/*
	 * validity checks on cpu_mask have been done upstream
	 */
	LOCK_PFS(flags);

	DPRINT(("in sys_sessions=%u task_sessions=%u dbregs=%u syswide=%d cpu=%u\n",
		pfm_sessions.pfs_sys_sessions,
		pfm_sessions.pfs_task_sessions,
		pfm_sessions.pfs_sys_use_dbregs,
		is_syswide,
		cpu));

	if (is_syswide) {
		/*
		 * cannot mix system wide and per-task sessions
		 */
		if (pfm_sessions.pfs_task_sessions > 0UL) {
			DPRINT(("system wide not possible, %u conflicting task_sessions\n",
				pfm_sessions.pfs_task_sessions));
			goto abort;
		}

		if (pfm_sessions.pfs_sys_session[cpu]) goto error_conflict;

		DPRINT(("reserving system wide session on CPU%u currently on CPU%u\n", cpu, smp_processor_id()));

		pfm_sessions.pfs_sys_session[cpu] = task;

		pfm_sessions.pfs_sys_sessions++ ;

	} else {
		if (pfm_sessions.pfs_sys_sessions) goto abort;
		pfm_sessions.pfs_task_sessions++;
	}

	DPRINT(("out sys_sessions=%u task_sessions=%u dbregs=%u syswide=%d cpu=%u\n",
		pfm_sessions.pfs_sys_sessions,
		pfm_sessions.pfs_task_sessions,
		pfm_sessions.pfs_sys_use_dbregs,
		is_syswide,
		cpu));

	/*
	 * Force idle() into poll mode
	 */
	cpu_idle_poll_ctrl(true);

	UNLOCK_PFS(flags);

	return 0;

error_conflict:
	DPRINT(("system wide not possible, conflicting session [%d] on CPU%d\n",
		task_pid_nr(pfm_sessions.pfs_sys_session[cpu]),
		cpu));
abort:
	UNLOCK_PFS(flags);

	return -EBUSY;
}
static int
pfm_unreserve_session(pfm_context_t *ctx, int is_syswide, unsigned int cpu)
{
	unsigned long flags;
	/*
	 * validity checks on cpu_mask have been done upstream
	 */
	LOCK_PFS(flags);

	DPRINT(("in sys_sessions=%u task_sessions=%u dbregs=%u syswide=%d cpu=%u\n",
		pfm_sessions.pfs_sys_sessions,
		pfm_sessions.pfs_task_sessions,
		pfm_sessions.pfs_sys_use_dbregs,
		is_syswide,
		cpu));

	if (is_syswide) {
		pfm_sessions.pfs_sys_session[cpu] = NULL;
		/*
		 * would not work with perfmon+more than one bit in cpu_mask
		 */
		if (ctx && ctx->ctx_fl_using_dbreg) {
			if (pfm_sessions.pfs_sys_use_dbregs == 0) {
				printk(KERN_ERR "perfmon: invalid release for ctx %p sys_use_dbregs=0\n", ctx);
			} else {
				pfm_sessions.pfs_sys_use_dbregs--;
			}
		}
		pfm_sessions.pfs_sys_sessions--;
	} else {
		pfm_sessions.pfs_task_sessions--;
	}
	DPRINT(("out sys_sessions=%u task_sessions=%u dbregs=%u syswide=%d cpu=%u\n",
		pfm_sessions.pfs_sys_sessions,
		pfm_sessions.pfs_task_sessions,
		pfm_sessions.pfs_sys_use_dbregs,
		is_syswide,
		cpu));

	/* Undo forced polling. Last session reenables pal_halt */
	cpu_idle_poll_ctrl(false);

	UNLOCK_PFS(flags);

	return 0;
}
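
/*
 * The reserve/unreserve pair enforces the global exclusion rules
 * (illustrative summary of the checks above): system-wide sessions and
 * per-task sessions never coexist, and at most one system-wide session
 * may own a given CPU. A context creation that violates either rule
 * sees pfm_reserve_session() fail with -EBUSY.
 */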
/*
 * removes virtual mapping of the sampling buffer.
 * IMPORTANT: cannot be called with interrupts disabled, e.g. inside
 * a PROTECT_CTX() section.
 */
static int
pfm_remove_smpl_mapping(void *vaddr, unsigned long size)
{
	struct task_struct *task = current;
	int r;

	/* sanity checks */
	if (task->mm == NULL || size == 0UL || vaddr == NULL) {
		printk(KERN_ERR "perfmon: pfm_remove_smpl_mapping [%d] invalid context mm=%p\n", task_pid_nr(task), task->mm);
		return -EINVAL;
	}

	DPRINT(("smpl_vaddr=%p size=%lu\n", vaddr, size));

	/*
	 * does the actual unmapping
	 */
	r = vm_munmap((unsigned long)vaddr, size);

	if (r !=0) {
		printk(KERN_ERR "perfmon: [%d] unable to unmap sampling buffer @%p size=%lu\n", task_pid_nr(task), vaddr, size);
	}

	DPRINT(("do_unmap(%p, %lu)=%d\n", vaddr, size, r));

	return 0;
}
/*
 * free actual physical storage used by sampling buffer
 */
#if 0
static int
pfm_free_smpl_buffer(pfm_context_t *ctx)
{
	pfm_buffer_fmt_t *fmt;

	if (ctx->ctx_smpl_hdr == NULL) goto invalid_free;

	/*
	 * we won't use the buffer format anymore
	 */
	fmt = ctx->ctx_buf_fmt;

	DPRINT(("sampling buffer @%p size %lu vaddr=%p\n",
		ctx->ctx_smpl_hdr,
		ctx->ctx_smpl_size,
		ctx->ctx_smpl_vaddr));

	pfm_buf_fmt_exit(fmt, current, NULL, NULL);

	/*
	 * free the buffer
	 */
	vfree(ctx->ctx_smpl_hdr);

	ctx->ctx_smpl_hdr  = NULL;
	ctx->ctx_smpl_size = 0UL;

	return 0;

invalid_free:
	printk(KERN_ERR "perfmon: pfm_free_smpl_buffer [%d] no buffer\n", task_pid_nr(current));
	return -EINVAL;
}
#endif
static inline void
pfm_exit_smpl_buffer(pfm_buffer_fmt_t *fmt)
{
	if (fmt == NULL) return;

	pfm_buf_fmt_exit(fmt, current, NULL, NULL);
}
/*
 * pfmfs should _never_ be mounted by userland - too much of security hassle,
 * no real gain from having the whole whorehouse mounted. So we don't need
 * any operations on the root directory. However, we need a non-trivial
 * d_name - pfm: will go nicely and kill the special-casing in procfs.
 */
static struct vfsmount *pfmfs_mnt __read_mostly;

static int __init
init_pfm_fs(void)
{
	int err = register_filesystem(&pfm_fs_type);
	if (!err) {
		pfmfs_mnt = kern_mount(&pfm_fs_type);
		err = PTR_ERR(pfmfs_mnt);
		if (IS_ERR(pfmfs_mnt))
			unregister_filesystem(&pfm_fs_type);
		else
			err = 0;
	}
	return err;
}
static ssize_t
pfm_read(struct file *filp, char __user *buf, size_t size, loff_t *ppos)
{
	pfm_context_t *ctx;
	pfm_msg_t *msg;
	ssize_t ret;
	unsigned long flags;
	DECLARE_WAITQUEUE(wait, current);
	if (PFM_IS_FILE(filp) == 0) {
		printk(KERN_ERR "perfmon: pfm_read: bad magic [%d]\n", task_pid_nr(current));
		return -EINVAL;
	}

	ctx = filp->private_data;
	if (ctx == NULL) {
		printk(KERN_ERR "perfmon: pfm_read: NULL ctx [%d]\n", task_pid_nr(current));
		return -EINVAL;
	}

	/*
	 * check even when there is no message
	 */
	if (size < sizeof(pfm_msg_t)) {
		DPRINT(("message is too small ctx=%p (>=%ld)\n", ctx, sizeof(pfm_msg_t)));
		return -EINVAL;
	}

	PROTECT_CTX(ctx, flags);

	/*
	 * put ourselves on the wait queue
	 */
	add_wait_queue(&ctx->ctx_msgq_wait, &wait);

	for(;;) {
		/*
		 * check wait queue
		 */

		set_current_state(TASK_INTERRUPTIBLE);

		DPRINT(("head=%d tail=%d\n", ctx->ctx_msgq_head, ctx->ctx_msgq_tail));

		ret = 0;
		if(PFM_CTXQ_EMPTY(ctx) == 0) break;

		UNPROTECT_CTX(ctx, flags);

		/*
		 * check non-blocking read
		 */
		ret = -EAGAIN;
		if(filp->f_flags & O_NONBLOCK) break;

		/*
		 * check pending signals
		 */
		if(signal_pending(current)) {
			ret = -EINTR;
			break;
		}
		/*
		 * no message, so wait
		 */
		schedule();

		PROTECT_CTX(ctx, flags);
	}
	DPRINT(("[%d] back to running ret=%ld\n", task_pid_nr(current), ret));
	set_current_state(TASK_RUNNING);
	remove_wait_queue(&ctx->ctx_msgq_wait, &wait);

	if (ret < 0) goto abort;

	ret = -EINVAL;
	msg = pfm_get_next_msg(ctx);
	if (msg == NULL) {
		printk(KERN_ERR "perfmon: pfm_read no msg for ctx=%p [%d]\n", ctx, task_pid_nr(current));
		goto abort_locked;
	}

	DPRINT(("fd=%d type=%d\n", msg->pfm_gen_msg.msg_ctx_fd, msg->pfm_gen_msg.msg_type));

	ret = -EFAULT;
	if(copy_to_user(buf, msg, sizeof(pfm_msg_t)) == 0) ret = sizeof(pfm_msg_t);

abort_locked:
	UNPROTECT_CTX(ctx, flags);
abort:
	return ret;
}
static ssize_t
pfm_write(struct file *file, const char __user *ubuf,
	  size_t size, loff_t *ppos)
{
	DPRINT(("pfm_write called\n"));
	return -EINVAL;
}
static __poll_t
pfm_poll(struct file *filp, poll_table * wait)
{
	pfm_context_t *ctx;
	unsigned long flags;
	__poll_t mask = 0;

	if (PFM_IS_FILE(filp) == 0) {
		printk(KERN_ERR "perfmon: pfm_poll: bad magic [%d]\n", task_pid_nr(current));
		return 0;
	}

	ctx = filp->private_data;
	if (ctx == NULL) {
		printk(KERN_ERR "perfmon: pfm_poll: NULL ctx [%d]\n", task_pid_nr(current));
		return 0;
	}

	DPRINT(("pfm_poll ctx_fd=%d before poll_wait\n", ctx->ctx_fd));

	poll_wait(filp, &ctx->ctx_msgq_wait, wait);

	PROTECT_CTX(ctx, flags);

	if (PFM_CTXQ_EMPTY(ctx) == 0)
		mask = EPOLLIN | EPOLLRDNORM;

	UNPROTECT_CTX(ctx, flags);

	DPRINT(("pfm_poll ctx_fd=%d mask=0x%x\n", ctx->ctx_fd, mask));

	return mask;
}
static long
pfm_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	DPRINT(("pfm_ioctl called\n"));
	return -EINVAL;
}
/*
 * interrupt cannot be masked when coming here
 */
static inline int
pfm_do_fasync(int fd, struct file *filp, pfm_context_t *ctx, int on)
{
	int ret;

	ret = fasync_helper (fd, filp, on, &ctx->ctx_async_queue);

	DPRINT(("pfm_fasync called by [%d] on ctx_fd=%d on=%d async_queue=%p ret=%d\n",
		task_pid_nr(current),
		fd,
		on,
		ctx->ctx_async_queue, ret));

	return ret;
}
static int
pfm_fasync(int fd, struct file *filp, int on)
{
	pfm_context_t *ctx;
	int ret;

	if (PFM_IS_FILE(filp) == 0) {
		printk(KERN_ERR "perfmon: pfm_fasync bad magic [%d]\n", task_pid_nr(current));
		return -EBADF;
	}

	ctx = filp->private_data;
	if (ctx == NULL) {
		printk(KERN_ERR "perfmon: pfm_fasync NULL ctx [%d]\n", task_pid_nr(current));
		return -EBADF;
	}
	/*
	 * we cannot mask interrupts during this call because this
	 * may go to sleep if memory is not readily available.
	 *
	 * We are protected from the context disappearing by the get_fd()/put_fd()
	 * done in caller. Serialization of this function is ensured by caller.
	 */
	ret = pfm_do_fasync(fd, filp, ctx, on);

	DPRINT(("pfm_fasync called on ctx_fd=%d on=%d async_queue=%p ret=%d\n",
		fd,
		on,
		ctx->ctx_async_queue, ret));

	return ret;
}
#ifdef CONFIG_SMP
/*
 * this function is exclusively called from pfm_close().
 * The context is not protected at that time, nor are interrupts
 * on the remote CPU. That's necessary to avoid deadlocks.
 */
static void
pfm_syswide_force_stop(void *info)
{
	pfm_context_t *ctx = (pfm_context_t *)info;
	struct pt_regs *regs = task_pt_regs(current);
	struct task_struct *owner;
	unsigned long flags;
	int ret;

	if (ctx->ctx_cpu != smp_processor_id()) {
		printk(KERN_ERR "perfmon: pfm_syswide_force_stop for CPU%d but on CPU%d\n",
			ctx->ctx_cpu,
			smp_processor_id());
		return;
	}
	owner = GET_PMU_OWNER();
	if (owner != ctx->ctx_task) {
		printk(KERN_ERR "perfmon: pfm_syswide_force_stop CPU%d unexpected owner [%d] instead of [%d]\n",
			smp_processor_id(),
			task_pid_nr(owner), task_pid_nr(ctx->ctx_task));
		return;
	}
	if (GET_PMU_CTX() != ctx) {
		printk(KERN_ERR "perfmon: pfm_syswide_force_stop CPU%d unexpected ctx %p instead of %p\n",
			smp_processor_id(),
			GET_PMU_CTX(), ctx);
		return;
	}

	DPRINT(("on CPU%d forcing system wide stop for [%d]\n", smp_processor_id(), task_pid_nr(ctx->ctx_task)));
	/*
	 * the context is already protected in pfm_close(), we simply
	 * need to mask interrupts to avoid a PMU interrupt race on
	 * this CPU
	 */
	local_irq_save(flags);

	ret = pfm_context_unload(ctx, NULL, 0, regs);
	if (ret) {
		DPRINT(("context_unload returned %d\n", ret));
	}

	/*
	 * unmask interrupts, PMU interrupts are now spurious here
	 */
	local_irq_restore(flags);
}

static void
pfm_syswide_cleanup_other_cpu(pfm_context_t *ctx)
{
	int ret;

	DPRINT(("calling CPU%d for cleanup\n", ctx->ctx_cpu));
	ret = smp_call_function_single(ctx->ctx_cpu, pfm_syswide_force_stop, ctx, 1);
	DPRINT(("called CPU%d for cleanup ret=%d\n", ctx->ctx_cpu, ret));
}
#endif /* CONFIG_SMP */
/*
 * called for each close(). Partially free resources.
 * When caller is self-monitoring, the context is unloaded.
 */
static int
pfm_flush(struct file *filp, fl_owner_t id)
{
	pfm_context_t *ctx;
	struct task_struct *task;
	struct pt_regs *regs;
	unsigned long flags;
	unsigned long smpl_buf_size = 0UL;
	void *smpl_buf_vaddr = NULL;
	int state, is_system;

	if (PFM_IS_FILE(filp) == 0) {
		DPRINT(("bad magic\n"));
		return -EBADF;
	}

	ctx = filp->private_data;
	if (ctx == NULL) {
		printk(KERN_ERR "perfmon: pfm_flush: NULL ctx [%d]\n", task_pid_nr(current));
		return -EBADF;
	}

	/*
	 * remove our file from the async queue, if we use this mode.
	 * This can be done without the context being protected. We come
	 * here when the context has become unreachable by other tasks.
	 *
	 * We may still have active monitoring at this point and we may
	 * end up in pfm_overflow_handler(). However, fasync_helper()
	 * operates with interrupts disabled and it cleans up the
	 * queue. If the PMU handler is called prior to entering
	 * fasync_helper() then it will send a signal. If it is
	 * invoked after, it will find an empty queue and no
	 * signal will be sent. In both case, we are safe
	 */
	PROTECT_CTX(ctx, flags);

	state     = ctx->ctx_state;
	is_system = ctx->ctx_fl_system;

	task = PFM_CTX_TASK(ctx);
	regs = task_pt_regs(task);

	DPRINT(("ctx_state=%d is_current=%d\n",
		state,
		task == current ? 1 : 0));

	/*
	 * if state == UNLOADED, then task is NULL
	 */

	/*
	 * we must stop and unload because we are losing access to the context.
	 */
	if (task == current) {
#ifdef CONFIG_SMP
		/*
		 * the task IS the owner but it migrated to another CPU: that's bad
		 * but we must handle this cleanly. Unfortunately, the kernel does
		 * not provide a mechanism to block migration (while the context is loaded).
		 *
		 * We need to release the resource on the ORIGINAL cpu.
		 */
		if (is_system && ctx->ctx_cpu != smp_processor_id()) {

			DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu));
			/*
			 * keep context protected but unmask interrupt for IPI
			 */
			local_irq_restore(flags);

			pfm_syswide_cleanup_other_cpu(ctx);

			/*
			 * restore interrupt masking
			 */
			local_irq_save(flags);

			/*
			 * context is unloaded at this point
			 */
		} else
#endif /* CONFIG_SMP */
		{

			DPRINT(("forcing unload\n"));
			/*
			 * stop and unload, returning with state UNLOADED
			 * and session unreserved.
			 */
			pfm_context_unload(ctx, NULL, 0, regs);

			DPRINT(("ctx_state=%d\n", ctx->ctx_state));
		}
	}

	/*
	 * remove virtual mapping, if any, for the calling task.
	 * cannot reset ctx field until last user is calling close().
	 *
	 * ctx_smpl_vaddr must never be cleared because it is needed
	 * by every task with access to the context
	 *
	 * When called from do_exit(), the mm context is gone already, therefore
	 * mm is NULL, i.e., the VMA is already gone and we do not have to
	 * do anything here
	 */
	if (ctx->ctx_smpl_vaddr && current->mm) {
		smpl_buf_vaddr = ctx->ctx_smpl_vaddr;
		smpl_buf_size  = ctx->ctx_smpl_size;
	}

	UNPROTECT_CTX(ctx, flags);

	/*
	 * if there was a mapping, then we systematically remove it
	 * at this point. Cannot be done inside critical section
	 * because some VM function reenables interrupts.
	 */
	if (smpl_buf_vaddr) pfm_remove_smpl_mapping(smpl_buf_vaddr, smpl_buf_size);

	return 0;
}
/*
 * called either on explicit close() or from exit_files().
 * Only the LAST user of the file gets to this point, i.e., it is
 * called only ONCE.
 *
 * IMPORTANT: we get called ONLY when the refcnt on the file gets to zero
 * (fput()), i.e., last task to access the file. Nobody else can access the
 * file at this point.
 *
 * When called from exit_files(), the VMA has been freed because exit_mm()
 * is executed before exit_files().
 *
 * When called from exit_files(), the current task is not yet ZOMBIE but we
 * flush the PMU state to the context.
 */
static int
pfm_close(struct inode *inode, struct file *filp)
{
	pfm_context_t *ctx;
	struct task_struct *task;
	struct pt_regs *regs;
	DECLARE_WAITQUEUE(wait, current);
	unsigned long flags;
	unsigned long smpl_buf_size = 0UL;
	void *smpl_buf_addr = NULL;
	int free_possible = 1;
	int state, is_system;

	DPRINT(("pfm_close called private=%p\n", filp->private_data));

	if (PFM_IS_FILE(filp) == 0) {
		DPRINT(("bad magic\n"));
		return -EBADF;
	}

	ctx = filp->private_data;
	if (ctx == NULL) {
		printk(KERN_ERR "perfmon: pfm_close: NULL ctx [%d]\n", task_pid_nr(current));
		return -EBADF;
	}

	PROTECT_CTX(ctx, flags);

	state     = ctx->ctx_state;
	is_system = ctx->ctx_fl_system;

	task = PFM_CTX_TASK(ctx);
	regs = task_pt_regs(task);

	DPRINT(("ctx_state=%d is_current=%d\n",
		state,
		task == current ? 1 : 0));

	/*
	 * if task == current, then pfm_flush() unloaded the context
	 */
	if (state == PFM_CTX_UNLOADED) goto doit;

	/*
	 * context is loaded/masked and task != current, we need to
	 * either force an unload or go zombie
	 */

	/*
	 * The task is currently blocked or will block after an overflow.
	 * we must force it to wakeup to get out of the
	 * MASKED state and transition to the unloaded state by itself.
	 *
	 * This situation is only possible for per-task mode
	 */
	if (state == PFM_CTX_MASKED && CTX_OVFL_NOBLOCK(ctx) == 0) {

		/*
		 * set a "partial" zombie state to be checked
		 * upon return from down() in pfm_handle_work().
		 *
		 * We cannot use the ZOMBIE state, because it is checked
		 * by pfm_load_regs() which is called upon wakeup from down().
		 * In such case, it would free the context and then we would
		 * return to pfm_handle_work() which would access the
		 * stale context. Instead, we set a flag invisible to pfm_load_regs()
		 * but visible to pfm_handle_work().
		 *
		 * For some window of time, we have a zombie context with
		 * ctx_state = MASKED and not ZOMBIE
		 */
		ctx->ctx_fl_going_zombie = 1;

		/*
		 * force task to wake up from MASKED state
		 */
		complete(&ctx->ctx_restart_done);

		DPRINT(("waking up ctx_state=%d\n", state));

		/*
		 * put ourself to sleep waiting for the other
		 * task to report completion
		 *
		 * the context is protected by mutex, therefore there
		 * is no risk of being notified of completion before
		 * being actually on the waitq.
		 */
		set_current_state(TASK_INTERRUPTIBLE);
		add_wait_queue(&ctx->ctx_zombieq, &wait);

		UNPROTECT_CTX(ctx, flags);

		/*
		 * XXX: check for signals :
		 * 	- ok for explicit close
		 * 	- not ok when coming from exit_files()
		 */
		schedule();

		PROTECT_CTX(ctx, flags);

		remove_wait_queue(&ctx->ctx_zombieq, &wait);
		set_current_state(TASK_RUNNING);

		/*
		 * context is unloaded at this point
		 */
		DPRINT(("after zombie wakeup ctx_state=%d\n", state));
	}
	else if (task != current) {
#ifdef CONFIG_SMP
		/*
		 * switch context to zombie state
		 */
		ctx->ctx_state = PFM_CTX_ZOMBIE;

		DPRINT(("zombie ctx for [%d]\n", task_pid_nr(task)));
		/*
		 * cannot free the context on the spot. deferred until
		 * the task notices the ZOMBIE state
		 */
		free_possible = 0;
#else
		pfm_context_unload(ctx, NULL, 0, regs);
#endif
	}

doit:
	/* reload state, may have changed during opening of critical section */
	state = ctx->ctx_state;

	/*
	 * the context is still attached to a task (possibly current)
	 * we cannot destroy it right now
	 */

	/*
	 * we must free the sampling buffer right here because
	 * we cannot rely on it being cleaned up later by the
	 * monitored task. It is not possible to free vmalloc'ed
	 * memory in pfm_load_regs(). Instead, we remove the buffer
	 * now. should there be subsequent PMU overflows originally
	 * meant for sampling, they will be converted to spurious
	 * and that's fine because the monitoring tool is gone anyway.
	 */
	if (ctx->ctx_smpl_hdr) {
		smpl_buf_addr = ctx->ctx_smpl_hdr;
		smpl_buf_size = ctx->ctx_smpl_size;
		/* no more sampling */
		ctx->ctx_smpl_hdr = NULL;
		ctx->ctx_fl_is_sampling = 0;
	}

	DPRINT(("ctx_state=%d free_possible=%d addr=%p size=%lu\n",
		state,
		free_possible,
		smpl_buf_addr,
		smpl_buf_size));

	if (smpl_buf_addr) pfm_exit_smpl_buffer(ctx->ctx_buf_fmt);

	/*
	 * when state is UNLOADED, the session has already been unreserved.
	 */
	if (state == PFM_CTX_ZOMBIE) {
		pfm_unreserve_session(ctx, ctx->ctx_fl_system, ctx->ctx_cpu);
	}

	/*
	 * disconnect file descriptor from context must be done
	 * before we unlock.
	 */
	filp->private_data = NULL;

	/*
	 * if we free on the spot, the context is now completely unreachable
	 * from the callers side. The monitored task side is also cut, so we
	 * can freely cut.
	 *
	 * If we have a deferred free, only the caller side is disconnected.
	 */
	UNPROTECT_CTX(ctx, flags);

	/*
	 * All memory free operations (especially for vmalloc'ed memory)
	 * MUST be done with interrupts ENABLED.
	 */
	vfree(smpl_buf_addr);

	/*
	 * return the memory used by the context
	 */
	if (free_possible) pfm_context_free(ctx);

	return 0;
}
static const struct file_operations pfm_file_ops = {
	.llseek		= no_llseek,
	.read		= pfm_read,
	.write		= pfm_write,
	.poll		= pfm_poll,
	.unlocked_ioctl = pfm_ioctl,
	.fasync		= pfm_fasync,
	.release	= pfm_close,
	.flush		= pfm_flush
};
static char *pfmfs_dname(struct dentry *dentry, char *buffer, int buflen)
{
	return dynamic_dname(dentry, buffer, buflen, "pfm:[%lu]",
			     d_inode(dentry)->i_ino);
}

static const struct dentry_operations pfmfs_dentry_operations = {
	.d_delete = always_delete_dentry,
	.d_dname = pfmfs_dname,
};
static struct file *
pfm_alloc_file(pfm_context_t *ctx)
{
	struct file *file;
	struct inode *inode;
	struct path path;
	struct qstr this = { .name = "" };

	/*
	 * allocate a new inode
	 */
	inode = new_inode(pfmfs_mnt->mnt_sb);
	if (!inode)
		return ERR_PTR(-ENOMEM);

	DPRINT(("new inode ino=%ld @%p\n", inode->i_ino, inode));

	inode->i_mode = S_IFCHR|S_IRUGO;
	inode->i_uid  = current_fsuid();
	inode->i_gid  = current_fsgid();

	/*
	 * allocate a new dcache entry
	 */
	path.dentry = d_alloc(pfmfs_mnt->mnt_root, &this);
	if (!path.dentry) {
		iput(inode);
		return ERR_PTR(-ENOMEM);
	}
	path.mnt = mntget(pfmfs_mnt);

	d_add(path.dentry, inode);

	file = alloc_file(&path, FMODE_READ, &pfm_file_ops);
	if (IS_ERR(file)) {
		path_put(&path);
		return file;
	}

	file->f_flags = O_RDONLY;
	file->private_data = ctx;

	return file;
}
static int
pfm_remap_buffer(struct vm_area_struct *vma, unsigned long buf, unsigned long addr, unsigned long size)
{
	DPRINT(("CPU%d buf=0x%lx addr=0x%lx size=%ld\n", smp_processor_id(), buf, addr, size));

	while (size > 0) {
		unsigned long pfn = ia64_tpa(buf) >> PAGE_SHIFT;

		if (remap_pfn_range(vma, addr, pfn, PAGE_SIZE, PAGE_READONLY))
			return -ENOMEM;

		addr += PAGE_SIZE;
		buf  += PAGE_SIZE;
		size -= PAGE_SIZE;
	}
	return 0;
}
/*
 * allocate a sampling buffer and remaps it into the user address space of the task
 */
static int
pfm_smpl_buffer_alloc(struct task_struct *task, struct file *filp, pfm_context_t *ctx, unsigned long rsize, void **user_vaddr)
{
	struct mm_struct *mm = task->mm;
	struct vm_area_struct *vma = NULL;
	unsigned long size;
	void *smpl_buf;

	/*
	 * the fixed header + requested size and align to page boundary
	 */
	size = PAGE_ALIGN(rsize);

	DPRINT(("sampling buffer rsize=%lu size=%lu bytes\n", rsize, size));

	/*
	 * check requested size to avoid Denial-of-service attacks
	 * XXX: may have to refine this test
	 * Check against address space limit.
	 *
	 * if ((mm->total_vm << PAGE_SHIFT) + len > task->rlim[RLIMIT_AS].rlim_cur)
	 * 	return -ENOMEM;
	 */
	if (size > task_rlimit(task, RLIMIT_MEMLOCK))
		return -ENOMEM;

	/*
	 * We do the easy to undo allocations first.
	 */
	smpl_buf = vzalloc(size);
	if (smpl_buf == NULL) {
		DPRINT(("Can't allocate sampling buffer\n"));
		return -ENOMEM;
	}

	DPRINT(("smpl_buf @%p\n", smpl_buf));

	/* allocate vma */
	vma = vm_area_alloc(mm);
	if (!vma) {
		DPRINT(("Cannot allocate vma\n"));
		goto error_kmem;
	}

	/*
	 * partially initialize the vma for the sampling buffer
	 */
	vma->vm_file	  = get_file(filp);
	vma->vm_flags	  = VM_READ|VM_MAYREAD|VM_DONTEXPAND|VM_DONTDUMP;
	vma->vm_page_prot = PAGE_READONLY; /* XXX may need to change */

	/*
	 * Now we have everything we need and we can initialize
	 * and connect all the data structures
	 */

	ctx->ctx_smpl_hdr  = smpl_buf;
	ctx->ctx_smpl_size = size; /* aligned size */

	/*
	 * Let's do the difficult operations next.
	 *
	 * now we atomically find some area in the address space and
	 * remap the buffer in it.
	 */
	mmap_write_lock(task->mm);

	/* find some free area in address space, must have mmap sem held */
	vma->vm_start = get_unmapped_area(NULL, 0, size, 0, MAP_PRIVATE|MAP_ANONYMOUS);
	if (IS_ERR_VALUE(vma->vm_start)) {
		DPRINT(("Cannot find unmapped area for size %ld\n", size));
		mmap_write_unlock(task->mm);
		goto error;
	}
	vma->vm_end = vma->vm_start + size;
	vma->vm_pgoff = vma->vm_start >> PAGE_SHIFT;

	DPRINT(("aligned size=%ld, hdr=%p mapped @0x%lx\n", size, ctx->ctx_smpl_hdr, vma->vm_start));

	/* can only be applied to current task, need to have the mm semaphore held when called */
	if (pfm_remap_buffer(vma, (unsigned long)smpl_buf, vma->vm_start, size)) {
		DPRINT(("Can't remap buffer\n"));
		mmap_write_unlock(task->mm);
		goto error;
	}

	/*
	 * now insert the vma in the vm list for the process, must be
	 * done with mmap lock held
	 */
	insert_vm_struct(mm, vma);

	vm_stat_account(vma->vm_mm, vma->vm_flags, vma_pages(vma));
	mmap_write_unlock(task->mm);

	/*
	 * keep track of user level virtual address
	 */
	ctx->ctx_smpl_vaddr = (void *)vma->vm_start;
	*(unsigned long *)user_vaddr = vma->vm_start;

	return 0;

error:
	vm_area_free(vma);
error_kmem:
	vfree(smpl_buf);

	return -ENOMEM;
}
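
/*
 * End result (illustrative): the vzalloc'ed kernel buffer is now visible
 * read-only in the monitored task's address space at ctx_smpl_vaddr, so
 * the overflow handler can log samples from interrupt context while the
 * monitoring tool reads them directly, with no copy_to_user() on the
 * fast path.
 */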
/*
 * XXX: do something better here
 */
static int
pfm_bad_permissions(struct task_struct *task)
{
	const struct cred *tcred;
	kuid_t uid = current_uid();
	kgid_t gid = current_gid();
	int ret;

	rcu_read_lock();
	tcred = __task_cred(task);

	/* inspired by ptrace_attach() */
	DPRINT(("cur: uid=%d gid=%d task: euid=%d suid=%d uid=%d egid=%d sgid=%d\n",
		from_kuid(&init_user_ns, uid),
		from_kgid(&init_user_ns, gid),
		from_kuid(&init_user_ns, tcred->euid),
		from_kuid(&init_user_ns, tcred->suid),
		from_kuid(&init_user_ns, tcred->uid),
		from_kgid(&init_user_ns, tcred->egid),
		from_kgid(&init_user_ns, tcred->sgid)));

	ret = ((!uid_eq(uid, tcred->euid))
	       || (!uid_eq(uid, tcred->suid))
	       || (!uid_eq(uid, tcred->uid))
	       || (!gid_eq(gid, tcred->egid))
	       || (!gid_eq(gid, tcred->sgid))
	       || (!gid_eq(gid, tcred->gid))) && !capable(CAP_SYS_PTRACE);

	rcu_read_unlock();
	return ret;
}
static int
pfarg_is_sane(struct task_struct *task, pfarg_context_t *pfx)
{
	int ctx_flags;

	ctx_flags = pfx->ctx_flags;

	if (ctx_flags & PFM_FL_SYSTEM_WIDE) {

		/*
		 * cannot block in this mode
		 */
		if (ctx_flags & PFM_FL_NOTIFY_BLOCK) {
			DPRINT(("cannot use blocking mode when in system wide monitoring\n"));
			return -EINVAL;
		}
	}
	/* probably more to add here */

	return 0;
}
2370 pfm_setup_buffer_fmt(struct task_struct *task, struct file *filp, pfm_context_t *ctx, unsigned int ctx_flags,
2371 unsigned int cpu, pfarg_context_t *arg)
2373 pfm_buffer_fmt_t *fmt = NULL;
2374 unsigned long size = 0UL;
2376 void *fmt_arg = NULL;
2378 #define PFM_CTXARG_BUF_ARG(a) (pfm_buffer_fmt_t *)(a+1)
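/*
 * Layout sketch (added for clarity): the format-specific argument, when
 * present, immediately follows the pfarg_context_t in the user buffer, so
 * the pointer arithmetic in (a+1) lands on the first byte past the fixed
 * argument:
 *
 *	+-----------------+---------------------------------+
 *	| pfarg_context_t | fmt_arg_size bytes of fmt_arg   |
 *	+-----------------+---------------------------------+
 *	^ arg             ^ PFM_CTXARG_BUF_ARG(arg)
 */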
2380 /* invoke and lock buffer format, if found */
2381 fmt = pfm_find_buffer_fmt(arg->ctx_smpl_buf_id);
2383 DPRINT(("[%d] cannot find buffer format\n", task_pid_nr(task)));
2388 * buffer argument MUST be contiguous to pfarg_context_t
2390 if (fmt->fmt_arg_size) fmt_arg = PFM_CTXARG_BUF_ARG(arg);
2392 ret = pfm_buf_fmt_validate(fmt, task, ctx_flags, cpu, fmt_arg);
2394 DPRINT(("[%d] after validate(0x%x,%d,%p)=%d\n", task_pid_nr(task), ctx_flags, cpu, fmt_arg, ret));
2396 if (ret) goto error;
2398 /* link buffer format and context */
2399 ctx->ctx_buf_fmt = fmt;
2400 ctx->ctx_fl_is_sampling = 1; /* assume record() is defined */
2403 * check if buffer format wants to use perfmon buffer allocation/mapping service
2405 ret = pfm_buf_fmt_getsize(fmt, task, ctx_flags, cpu, fmt_arg, &size);
2406 if (ret) goto error;
2410 * buffer is always remapped into the caller's address space
2412 ret = pfm_smpl_buffer_alloc(current, filp, ctx, size, &uaddr);
2413 if (ret) goto error;
2415 /* keep track of user address of buffer */
2416 arg->ctx_smpl_vaddr = uaddr;
2418 ret = pfm_buf_fmt_init(fmt, task, ctx->ctx_smpl_hdr, ctx_flags, cpu, fmt_arg);
2425 pfm_reset_pmu_state(pfm_context_t *ctx)
2430 * install reset values for PMC.
2432 for (i=1; PMC_IS_LAST(i) == 0; i++) {
2433 if (PMC_IS_IMPL(i) == 0) continue;
2434 ctx->ctx_pmcs[i] = PMC_DFL_VAL(i);
2435 DPRINT(("pmc[%d]=0x%lx\n", i, ctx->ctx_pmcs[i]));
2438	 * PMD registers are set to 0UL when the context is memset()
2442	 * On context switch restore, we must restore ALL pmc and ALL pmd even
2443 * when they are not actively used by the task. In UP, the incoming process
2444 * may otherwise pick up left over PMC, PMD state from the previous process.
2445 * As opposed to PMD, stale PMC can cause harm to the incoming
2446 * process because they may change what is being measured.
2447 * Therefore, we must systematically reinstall the entire
2448	 * PMC state. In SMP, the same thing is possible on the
2449	 * same CPU but also between 2 CPUs.
2451 * The problem with PMD is information leaking especially
2452 * to user level when psr.sp=0
2454 * There is unfortunately no easy way to avoid this problem
2455	 * on either UP or SMP. This definitely slows down the
2456 * pfm_load_regs() function.
2460 * bitmask of all PMCs accessible to this context
2462 * PMC0 is treated differently.
2464 ctx->ctx_all_pmcs[0] = pmu_conf->impl_pmcs[0] & ~0x1;
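/*
 * Added example: impl_pmcs[0] has one bit set per implemented PMC, and the
 * & ~0x1 withholds PMC0, which the PMU uses as the overflow status
 * register; e.g. impl_pmcs[0] = 0x1fff would yield ctx_all_pmcs[0] = 0x1ffe.
 */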
2467 * bitmask of all PMDs that are accessible to this context
2469 ctx->ctx_all_pmds[0] = pmu_conf->impl_pmds[0];
2471 DPRINT(("<%d> all_pmcs=0x%lx all_pmds=0x%lx\n", ctx->ctx_fd, ctx->ctx_all_pmcs[0],ctx->ctx_all_pmds[0]));
2474 * useful in case of re-enable after disable
2476 ctx->ctx_used_ibrs[0] = 0UL;
2477 ctx->ctx_used_dbrs[0] = 0UL;
2481 pfm_ctx_getsize(void *arg, size_t *sz)
2483 pfarg_context_t *req = (pfarg_context_t *)arg;
2484 pfm_buffer_fmt_t *fmt;
2488 if (!pfm_uuid_cmp(req->ctx_smpl_buf_id, pfm_null_uuid)) return 0;
2490 fmt = pfm_find_buffer_fmt(req->ctx_smpl_buf_id);
2492 DPRINT(("cannot find buffer format\n"));
2495 /* get just enough to copy in user parameters */
2496 *sz = fmt->fmt_arg_size;
2497 DPRINT(("arg_size=%lu\n", *sz));
2505	 * cannot attach if:
2507 * - task not owned by caller
2508 * - task incompatible with context mode
2511 pfm_task_incompatible(pfm_context_t *ctx, struct task_struct *task)
2514	 * no kernel task or task not owned by caller
2516 if (task->mm == NULL) {
2517	DPRINT(("task [%d] has no memory context (kernel thread)\n", task_pid_nr(task)));
2520 if (pfm_bad_permissions(task)) {
2521 DPRINT(("no permission to attach to [%d]\n", task_pid_nr(task)));
2525 * cannot block in self-monitoring mode
2527 if (CTX_OVFL_NOBLOCK(ctx) == 0 && task == current) {
2528 DPRINT(("cannot load a blocking context on self for [%d]\n", task_pid_nr(task)));
2532 if (task->exit_state == EXIT_ZOMBIE) {
2533 DPRINT(("cannot attach to zombie task [%d]\n", task_pid_nr(task)));
2538 * always ok for self
2540 if (task == current) return 0;
2542 if (!task_is_stopped_or_traced(task)) {
2543 DPRINT(("cannot attach to non-stopped task [%d] state=%ld\n", task_pid_nr(task), task->state));
2547 * make sure the task is off any CPU
2549 wait_task_inactive(task, 0);
2551 /* more to come... */
2557 pfm_get_task(pfm_context_t *ctx, pid_t pid, struct task_struct **task)
2559 struct task_struct *p = current;
2562 /* XXX: need to add more checks here */
2563 if (pid < 2) return -EPERM;
2565 if (pid != task_pid_vnr(current)) {
2566 /* make sure task cannot go away while we operate on it */
2567 p = find_get_task_by_vpid(pid);
2572 ret = pfm_task_incompatible(ctx, p);
2575 } else if (p != current) {
2584 pfm_context_create(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
2586 pfarg_context_t *req = (pfarg_context_t *)arg;
2593 /* let's check the arguments first */
2594 ret = pfarg_is_sane(current, req);
2598 ctx_flags = req->ctx_flags;
2602 fd = get_unused_fd_flags(0);
2606 ctx = pfm_context_alloc(ctx_flags);
2610 filp = pfm_alloc_file(ctx);
2612 ret = PTR_ERR(filp);
2616 req->ctx_fd = ctx->ctx_fd = fd;
2619 * does the user want to sample?
2621 if (pfm_uuid_cmp(req->ctx_smpl_buf_id, pfm_null_uuid)) {
2622 ret = pfm_setup_buffer_fmt(current, filp, ctx, ctx_flags, 0, req);
2627 DPRINT(("ctx=%p flags=0x%x system=%d notify_block=%d excl_idle=%d no_msg=%d ctx_fd=%d\n",
2632 ctx->ctx_fl_excl_idle,
2637 * initialize soft PMU state
2639 pfm_reset_pmu_state(ctx);
2641 fd_install(fd, filp);
2646 path = filp->f_path;
2650 if (ctx->ctx_buf_fmt) {
2651 pfm_buf_fmt_exit(ctx->ctx_buf_fmt, current, NULL, regs);
2654 pfm_context_free(ctx);
2661 static inline unsigned long
2662 pfm_new_counter_value (pfm_counter_t *reg, int is_long_reset)
2664 unsigned long val = is_long_reset ? reg->long_reset : reg->short_reset;
2665 unsigned long new_seed, old_seed = reg->seed, mask = reg->mask;
2666 extern unsigned long carta_random32 (unsigned long seed);
2668 if (reg->flags & PFM_REGFL_RANDOM) {
2669 new_seed = carta_random32(old_seed);
2670 val -= (old_seed & mask); /* counter values are negative numbers! */
2671 if ((mask >> 32) != 0)
2672 /* construct a full 64-bit random value: */
2673 new_seed |= carta_random32(old_seed >> 32) << 32;
2674 reg->seed = new_seed;
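/*
 * Worked example (added; numbers illustrative): the counters count UP and
 * interrupt on overflow, so periods are programmed as negative values.
 * With a 47-bit counter (mask 0x7fffffffffff), sampling every 1000 events
 * means a reset value of (1UL << 47) - 1000. With PFM_REGFL_RANDOM, the
 * code above additionally subtracts (old_seed & mask), lengthening each
 * period by up to `mask' events so that samples do not lock onto periodic
 * program behavior.
 */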
2681 pfm_reset_regs_masked(pfm_context_t *ctx, unsigned long *ovfl_regs, int is_long_reset)
2683 unsigned long mask = ovfl_regs[0];
2684 unsigned long reset_others = 0UL;
2689	 * now restore reset values on the counters that overflowed
2691 mask >>= PMU_FIRST_COUNTER;
2692 for(i = PMU_FIRST_COUNTER; mask; i++, mask >>= 1) {
2694 if ((mask & 0x1UL) == 0UL) continue;
2696	ctx->ctx_pmds[i].val = val = pfm_new_counter_value(ctx->ctx_pmds + i, is_long_reset);
2697 reset_others |= ctx->ctx_pmds[i].reset_pmds[0];
2699 DPRINT_ovfl((" %s reset ctx_pmds[%d]=%lx\n", is_long_reset ? "long" : "short", i, val));
2703 * Now take care of resetting the other registers
2705 for(i = 0; reset_others; i++, reset_others >>= 1) {
2707 if ((reset_others & 0x1) == 0) continue;
2709 ctx->ctx_pmds[i].val = val = pfm_new_counter_value(ctx->ctx_pmds + i, is_long_reset);
2711 DPRINT_ovfl(("%s reset_others pmd[%d]=%lx\n",
2712 is_long_reset ? "long" : "short", i, val));
2717 pfm_reset_regs(pfm_context_t *ctx, unsigned long *ovfl_regs, int is_long_reset)
2719 unsigned long mask = ovfl_regs[0];
2720 unsigned long reset_others = 0UL;
2724 DPRINT_ovfl(("ovfl_regs=0x%lx is_long_reset=%d\n", ovfl_regs[0], is_long_reset));
2726 if (ctx->ctx_state == PFM_CTX_MASKED) {
2727 pfm_reset_regs_masked(ctx, ovfl_regs, is_long_reset);
2732	 * now restore reset values on the counters that overflowed
2734 mask >>= PMU_FIRST_COUNTER;
2735 for(i = PMU_FIRST_COUNTER; mask; i++, mask >>= 1) {
2737 if ((mask & 0x1UL) == 0UL) continue;
2739	val = pfm_new_counter_value(ctx->ctx_pmds + i, is_long_reset);
2740 reset_others |= ctx->ctx_pmds[i].reset_pmds[0];
2742 DPRINT_ovfl((" %s reset ctx_pmds[%d]=%lx\n", is_long_reset ? "long" : "short", i, val));
2744 pfm_write_soft_counter(ctx, i, val);
2748 * Now take care of resetting the other registers
2750 for(i = 0; reset_others; i++, reset_others >>= 1) {
2752 if ((reset_others & 0x1) == 0) continue;
2754 val = pfm_new_counter_value(ctx->ctx_pmds + i, is_long_reset);
2756 if (PMD_IS_COUNTING(i)) {
2757 pfm_write_soft_counter(ctx, i, val);
2759 ia64_set_pmd(i, val);
2761 DPRINT_ovfl(("%s reset_others pmd[%d]=%lx\n",
2762 is_long_reset ? "long" : "short", i, val));
2768 pfm_write_pmcs(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
2770 struct task_struct *task;
2771 pfarg_reg_t *req = (pfarg_reg_t *)arg;
2772 unsigned long value, pmc_pm;
2773 unsigned long smpl_pmds, reset_pmds, impl_pmds;
2774 unsigned int cnum, reg_flags, flags, pmc_type;
2775 int i, can_access_pmu = 0, is_loaded, is_system, expert_mode;
2776 int is_monitor, is_counting, state;
2778 pfm_reg_check_t wr_func;
2779 #define PFM_CHECK_PMC_PM(x, y, z) ((x)->ctx_fl_system ^ PMC_PM(y, z))
2781 state = ctx->ctx_state;
2782 is_loaded = state == PFM_CTX_LOADED ? 1 : 0;
2783 is_system = ctx->ctx_fl_system;
2784 task = ctx->ctx_task;
2785 impl_pmds = pmu_conf->impl_pmds[0];
2787 if (state == PFM_CTX_ZOMBIE) return -EINVAL;
2791 * In system wide and when the context is loaded, access can only happen
2792 * when the caller is running on the CPU being monitored by the session.
2793 * It does not have to be the owner (ctx_task) of the context per se.
2795 if (is_system && ctx->ctx_cpu != smp_processor_id()) {
2796 DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu));
2799 can_access_pmu = GET_PMU_OWNER() == task || is_system ? 1 : 0;
2801 expert_mode = pfm_sysctl.expert_mode;
2803 for (i = 0; i < count; i++, req++) {
2805 cnum = req->reg_num;
2806 reg_flags = req->reg_flags;
2807 value = req->reg_value;
2808 smpl_pmds = req->reg_smpl_pmds[0];
2809 reset_pmds = req->reg_reset_pmds[0];
2813 if (cnum >= PMU_MAX_PMCS) {
2814 DPRINT(("pmc%u is invalid\n", cnum));
2818 pmc_type = pmu_conf->pmc_desc[cnum].type;
2819 pmc_pm = (value >> pmu_conf->pmc_desc[cnum].pm_pos) & 0x1;
2820 is_counting = (pmc_type & PFM_REG_COUNTING) == PFM_REG_COUNTING ? 1 : 0;
2821 is_monitor = (pmc_type & PFM_REG_MONITOR) == PFM_REG_MONITOR ? 1 : 0;
2824	 * we reject all non-implemented PMCs as well
2825 * as attempts to modify PMC[0-3] which are used
2826 * as status registers by the PMU
2828 if ((pmc_type & PFM_REG_IMPL) == 0 || (pmc_type & PFM_REG_CONTROL) == PFM_REG_CONTROL) {
2829 DPRINT(("pmc%u is unimplemented or no-access pmc_type=%x\n", cnum, pmc_type));
2832 wr_func = pmu_conf->pmc_desc[cnum].write_check;
2834 * If the PMC is a monitor, then if the value is not the default:
2835 * - system-wide session: PMCx.pm=1 (privileged monitor)
2836 * - per-task : PMCx.pm=0 (user monitor)
2838 if (is_monitor && value != PMC_DFL_VAL(cnum) && is_system ^ pmc_pm) {
2839 DPRINT(("pmc%u pmc_pm=%lu is_system=%d\n",
2848	 * enforce generation of the overflow interrupt. Necessary on all CPUs.
2851 value |= 1 << PMU_PMC_OI;
2853 if (reg_flags & PFM_REGFL_OVFL_NOTIFY) {
2854 flags |= PFM_REGFL_OVFL_NOTIFY;
2857 if (reg_flags & PFM_REGFL_RANDOM) flags |= PFM_REGFL_RANDOM;
2859 /* verify validity of smpl_pmds */
2860 if ((smpl_pmds & impl_pmds) != smpl_pmds) {
2861 DPRINT(("invalid smpl_pmds 0x%lx for pmc%u\n", smpl_pmds, cnum));
2865 /* verify validity of reset_pmds */
2866 if ((reset_pmds & impl_pmds) != reset_pmds) {
2867 DPRINT(("invalid reset_pmds 0x%lx for pmc%u\n", reset_pmds, cnum));
2871 if (reg_flags & (PFM_REGFL_OVFL_NOTIFY|PFM_REGFL_RANDOM)) {
2872 DPRINT(("cannot set ovfl_notify or random on pmc%u\n", cnum));
2875	/* eventid on non-counting monitors is ignored */
2879 * execute write checker, if any
2881 if (likely(expert_mode == 0 && wr_func)) {
2882 ret = (*wr_func)(task, ctx, cnum, &value, regs);
2883 if (ret) goto error;
2888 * no error on this register
2890 PFM_REG_RETFLAG_SET(req->reg_flags, 0);
2893 * Now we commit the changes to the software state
2897 * update overflow information
2901 * full flag update each time a register is programmed
2903 ctx->ctx_pmds[cnum].flags = flags;
2905 ctx->ctx_pmds[cnum].reset_pmds[0] = reset_pmds;
2906 ctx->ctx_pmds[cnum].smpl_pmds[0] = smpl_pmds;
2907 ctx->ctx_pmds[cnum].eventid = req->reg_smpl_eventid;
2910	 * Mark all PMDs to be accessed as used.
2912 * We do not keep track of PMC because we have to
2913 * systematically restore ALL of them.
2915	 * We do not update the used_monitors mask, because
2916	 * if we have not programmed them, then they will be in
2917	 * a quiescent state, therefore we will not need to
2918	 * mask/restore them when the context is MASKED.
2920 CTX_USED_PMD(ctx, reset_pmds);
2921 CTX_USED_PMD(ctx, smpl_pmds);
2923 * make sure we do not try to reset on
2924 * restart because we have established new values
2926	if (state == PFM_CTX_MASKED) ctx->ctx_ovfl_regs[0] &= ~(1UL << cnum);
2929 * Needed in case the user does not initialize the equivalent
2930 * PMD. Clearing is done indirectly via pfm_reset_pmu_state() so there is no
2931 * possible leak here.
2933 CTX_USED_PMD(ctx, pmu_conf->pmc_desc[cnum].dep_pmd[0]);
2936 * keep track of the monitor PMC that we are using.
2937 * we save the value of the pmc in ctx_pmcs[] and if
2938 * the monitoring is not stopped for the context we also
2939 * place it in the saved state area so that it will be
2940 * picked up later by the context switch code.
2942 * The value in ctx_pmcs[] can only be changed in pfm_write_pmcs().
2944 * The value in th_pmcs[] may be modified on overflow, i.e., when
2945 * monitoring needs to be stopped.
2947 if (is_monitor) CTX_USED_MONITOR(ctx, 1UL << cnum);
2950 * update context state
2952 ctx->ctx_pmcs[cnum] = value;
2956 * write thread state
2958 if (is_system == 0) ctx->th_pmcs[cnum] = value;
2961 * write hardware register if we can
2963 if (can_access_pmu) {
2964 ia64_set_pmc(cnum, value);
2969 * per-task SMP only here
2971 * we are guaranteed that the task is not running on the other CPU,
2972	 * we indicate that this PMC will need to be reloaded if the task
2973 * is rescheduled on the CPU it ran last on.
2975 ctx->ctx_reload_pmcs[0] |= 1UL << cnum;
2980 DPRINT(("pmc[%u]=0x%lx ld=%d apmu=%d flags=0x%x all_pmcs=0x%lx used_pmds=0x%lx eventid=%ld smpl_pmds=0x%lx reset_pmds=0x%lx reloads_pmcs=0x%lx used_monitors=0x%lx ovfl_regs=0x%lx\n",
2986 ctx->ctx_all_pmcs[0],
2987 ctx->ctx_used_pmds[0],
2988 ctx->ctx_pmds[cnum].eventid,
2991 ctx->ctx_reload_pmcs[0],
2992 ctx->ctx_used_monitors[0],
2993 ctx->ctx_ovfl_regs[0]));
2997 * make sure the changes are visible
2999 if (can_access_pmu) ia64_srlz_d();
3003 PFM_REG_RETFLAG_SET(req->reg_flags, PFM_REG_RETFL_EINVAL);
3008 pfm_write_pmds(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
3010 struct task_struct *task;
3011 pfarg_reg_t *req = (pfarg_reg_t *)arg;
3012 unsigned long value, hw_value, ovfl_mask;
3014 int i, can_access_pmu = 0, state;
3015 int is_counting, is_loaded, is_system, expert_mode;
3017 pfm_reg_check_t wr_func;
3020 state = ctx->ctx_state;
3021 is_loaded = state == PFM_CTX_LOADED ? 1 : 0;
3022 is_system = ctx->ctx_fl_system;
3023 ovfl_mask = pmu_conf->ovfl_val;
3024 task = ctx->ctx_task;
3026 if (unlikely(state == PFM_CTX_ZOMBIE)) return -EINVAL;
3029	 * on both UP and SMP, we can only write to the PMD when the task is
3030 * the owner of the local PMU.
3032 if (likely(is_loaded)) {
3034 * In system wide and when the context is loaded, access can only happen
3035 * when the caller is running on the CPU being monitored by the session.
3036 * It does not have to be the owner (ctx_task) of the context per se.
3038 if (unlikely(is_system && ctx->ctx_cpu != smp_processor_id())) {
3039 DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu));
3042 can_access_pmu = GET_PMU_OWNER() == task || is_system ? 1 : 0;
3044 expert_mode = pfm_sysctl.expert_mode;
3046 for (i = 0; i < count; i++, req++) {
3048 cnum = req->reg_num;
3049 value = req->reg_value;
3051 if (!PMD_IS_IMPL(cnum)) {
3052 DPRINT(("pmd[%u] is unimplemented or invalid\n", cnum));
3055 is_counting = PMD_IS_COUNTING(cnum);
3056 wr_func = pmu_conf->pmd_desc[cnum].write_check;
3059 * execute write checker, if any
3061 if (unlikely(expert_mode == 0 && wr_func)) {
3062 unsigned long v = value;
3064 ret = (*wr_func)(task, ctx, cnum, &v, regs);
3065 if (ret) goto abort_mission;
3072 * no error on this register
3074 PFM_REG_RETFLAG_SET(req->reg_flags, 0);
3077 * now commit changes to software state
3082 * update virtualized (64bits) counter
3086 * write context state
3088 ctx->ctx_pmds[cnum].lval = value;
3091	 * when the context is loaded, we use the split value
3094 hw_value = value & ovfl_mask;
3095 value = value & ~ovfl_mask;
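/*
 * Added note on the split (illustrative): with a 47-bit wide hardware
 * counter, ovfl_mask = 0x7fffffffffff. The low 47 bits (hw_value) go to
 * the hardware PMD; the upper bits stay in ctx_pmds[].val. The full
 * 64-bit count is later rebuilt, as pfm_read_pmds() does, via:
 *
 *	full = ctx->ctx_pmds[cnum].val + (pmd_val & ovfl_mask);
 *
 * so overflows of the narrow hardware counter only ever bump the
 * software-held upper bits.
 */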
3099 * update reset values (not just for counters)
3101 ctx->ctx_pmds[cnum].long_reset = req->reg_long_reset;
3102 ctx->ctx_pmds[cnum].short_reset = req->reg_short_reset;
3105 * update randomization parameters (not just for counters)
3107 ctx->ctx_pmds[cnum].seed = req->reg_random_seed;
3108 ctx->ctx_pmds[cnum].mask = req->reg_random_mask;
3111 * update context value
3113 ctx->ctx_pmds[cnum].val = value;
3116 * Keep track of what we use
3118 * We do not keep track of PMC because we have to
3119 * systematically restore ALL of them.
3121 CTX_USED_PMD(ctx, PMD_PMD_DEP(cnum));
3124 * mark this PMD register used as well
3126 CTX_USED_PMD(ctx, RDEP(cnum));
3129 * make sure we do not try to reset on
3130 * restart because we have established new values
3132 if (is_counting && state == PFM_CTX_MASKED) {
3133	ctx->ctx_ovfl_regs[0] &= ~(1UL << cnum);
3138 * write thread state
3140 if (is_system == 0) ctx->th_pmds[cnum] = hw_value;
3143 * write hardware register if we can
3145 if (can_access_pmu) {
3146 ia64_set_pmd(cnum, hw_value);
3150 * we are guaranteed that the task is not running on the other CPU,
3151 * we indicate that this PMD will need to be reloaded if the task
3152 * is rescheduled on the CPU it ran last on.
3154 ctx->ctx_reload_pmds[0] |= 1UL << cnum;
3159 DPRINT(("pmd[%u]=0x%lx ld=%d apmu=%d, hw_value=0x%lx ctx_pmd=0x%lx short_reset=0x%lx "
3160 "long_reset=0x%lx notify=%c seed=0x%lx mask=0x%lx used_pmds=0x%lx reset_pmds=0x%lx reload_pmds=0x%lx all_pmds=0x%lx ovfl_regs=0x%lx\n",
3166 ctx->ctx_pmds[cnum].val,
3167 ctx->ctx_pmds[cnum].short_reset,
3168 ctx->ctx_pmds[cnum].long_reset,
3169 PMC_OVFL_NOTIFY(ctx, cnum) ? 'Y':'N',
3170 ctx->ctx_pmds[cnum].seed,
3171 ctx->ctx_pmds[cnum].mask,
3172 ctx->ctx_used_pmds[0],
3173 ctx->ctx_pmds[cnum].reset_pmds[0],
3174 ctx->ctx_reload_pmds[0],
3175 ctx->ctx_all_pmds[0],
3176 ctx->ctx_ovfl_regs[0]));
3180 * make changes visible
3182 if (can_access_pmu) ia64_srlz_d();
3188 * for now, we have only one possibility for error
3190 PFM_REG_RETFLAG_SET(req->reg_flags, PFM_REG_RETFL_EINVAL);
3195	 * By way of PROTECT_CONTEXT(), interrupts are masked while we are in this function.
3196	 * Therefore we know we do not have to worry about the PMU overflow interrupt. If an
3197	 * interrupt is delivered during the call, it will be kept pending until we leave, making
3198	 * it appear as if it had been generated at UNPROTECT_CONTEXT() time. At least we are
3199	 * guaranteed to return consistent data to the user: it may simply be stale. It is not
3200	 * trivial to handle the overflow while inside the call because we may end up in
3201	 * some module's sampling buffer code, causing deadlocks.
3204 pfm_read_pmds(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
3206 struct task_struct *task;
3207 unsigned long val = 0UL, lval, ovfl_mask, sval;
3208 pfarg_reg_t *req = (pfarg_reg_t *)arg;
3209 unsigned int cnum, reg_flags = 0;
3210 int i, can_access_pmu = 0, state;
3211 int is_loaded, is_system, is_counting, expert_mode;
3213 pfm_reg_check_t rd_func;
3216 * access is possible when loaded only for
3217 * self-monitoring tasks or in UP mode
3220 state = ctx->ctx_state;
3221 is_loaded = state == PFM_CTX_LOADED ? 1 : 0;
3222 is_system = ctx->ctx_fl_system;
3223 ovfl_mask = pmu_conf->ovfl_val;
3224 task = ctx->ctx_task;
3226 if (state == PFM_CTX_ZOMBIE) return -EINVAL;
3228 if (likely(is_loaded)) {
3230 * In system wide and when the context is loaded, access can only happen
3231 * when the caller is running on the CPU being monitored by the session.
3232 * It does not have to be the owner (ctx_task) of the context per se.
3234 if (unlikely(is_system && ctx->ctx_cpu != smp_processor_id())) {
3235 DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu));
3239	 * this can be true even when not self-monitoring, but only in UP
3241 can_access_pmu = GET_PMU_OWNER() == task || is_system ? 1 : 0;
3243 if (can_access_pmu) ia64_srlz_d();
3245 expert_mode = pfm_sysctl.expert_mode;
3247 DPRINT(("ld=%d apmu=%d ctx_state=%d\n",
3253 * on both UP and SMP, we can only read the PMD from the hardware register when
3254 * the task is the owner of the local PMU.
3257 for (i = 0; i < count; i++, req++) {
3259 cnum = req->reg_num;
3260 reg_flags = req->reg_flags;
3262 if (unlikely(!PMD_IS_IMPL(cnum))) goto error;
3264	 * we can only read the registers that we use. That includes
3265	 * the ones we explicitly initialize AND the ones we want included
3266 * in the sampling buffer (smpl_regs).
3268 * Having this restriction allows optimization in the ctxsw routine
3269 * without compromising security (leaks)
3271 if (unlikely(!CTX_IS_USED_PMD(ctx, cnum))) goto error;
3273 sval = ctx->ctx_pmds[cnum].val;
3274 lval = ctx->ctx_pmds[cnum].lval;
3275 is_counting = PMD_IS_COUNTING(cnum);
3278 * If the task is not the current one, then we check if the
3279	 * PMU state is still in the local live registers due to lazy ctxsw.
3280 * If true, then we read directly from the registers.
3282	if (can_access_pmu) {
3283 val = ia64_get_pmd(cnum);
3286 * context has been saved
3287 * if context is zombie, then task does not exist anymore.
3288 * In this case, we use the full value saved in the context (pfm_flush_regs()).
3290 val = is_loaded ? ctx->th_pmds[cnum] : 0UL;
3292 rd_func = pmu_conf->pmd_desc[cnum].read_check;
3296 * XXX: need to check for overflow when loaded
3303 * execute read checker, if any
3305 if (unlikely(expert_mode == 0 && rd_func)) {
3306 unsigned long v = val;
3307 ret = (*rd_func)(ctx->ctx_task, ctx, cnum, &v, regs);
3308 if (ret) goto error;
3313 PFM_REG_RETFLAG_SET(reg_flags, 0);
3315 DPRINT(("pmd[%u]=0x%lx\n", cnum, val));
3318 * update register return value, abort all if problem during copy.
3319 * we only modify the reg_flags field. no check mode is fine because
3320 * access has been verified upfront in sys_perfmonctl().
3322 req->reg_value = val;
3323 req->reg_flags = reg_flags;
3324 req->reg_last_reset_val = lval;
3330 PFM_REG_RETFLAG_SET(req->reg_flags, PFM_REG_RETFL_EINVAL);
3335 pfm_mod_write_pmcs(struct task_struct *task, void *req, unsigned int nreq, struct pt_regs *regs)
3339 if (req == NULL) return -EINVAL;
3341 ctx = GET_PMU_CTX();
3343 if (ctx == NULL) return -EINVAL;
3346 * for now limit to current task, which is enough when calling
3347 * from overflow handler
3349 if (task != current && ctx->ctx_fl_system == 0) return -EBUSY;
3351 return pfm_write_pmcs(ctx, req, nreq, regs);
3353 EXPORT_SYMBOL(pfm_mod_write_pmcs);
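/*
 * Usage sketch (added; the handler signature below is an assumption based
 * on the pfm_buffer_fmt_t interface): a sampling-format module would call
 * the pfm_mod_*() wrappers from its overflow handler, where current owns
 * the PMU, e.g. to reprogram a control register:
 *
 *	static int my_fmt_handler(struct task_struct *task, void *buf,
 *				  pfm_ovfl_arg_t *arg, struct pt_regs *regs,
 *				  unsigned long stamp)
 *	{
 *		pfarg_reg_t req = { .reg_num = 4, .reg_value = 0UL };
 *		return pfm_mod_write_pmcs(task, &req, 1, regs);
 *	}
 *
 * The task == current restriction enforced above is what makes this safe
 * without additional locking.
 */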
3356 pfm_mod_read_pmds(struct task_struct *task, void *req, unsigned int nreq, struct pt_regs *regs)
3360 if (req == NULL) return -EINVAL;
3362 ctx = GET_PMU_CTX();
3364 if (ctx == NULL) return -EINVAL;
3367 * for now limit to current task, which is enough when calling
3368 * from overflow handler
3370 if (task != current && ctx->ctx_fl_system == 0) return -EBUSY;
3372 return pfm_read_pmds(ctx, req, nreq, regs);
3374 EXPORT_SYMBOL(pfm_mod_read_pmds);
3377	 * Only call this function when a process is trying to
3378 * write the debug registers (reading is always allowed)
3381 pfm_use_debug_registers(struct task_struct *task)
3383 pfm_context_t *ctx = task->thread.pfm_context;
3384 unsigned long flags;
3387 if (pmu_conf->use_rr_dbregs == 0) return 0;
3389 DPRINT(("called for [%d]\n", task_pid_nr(task)));
3394 if (task->thread.flags & IA64_THREAD_DBG_VALID) return 0;
3397 * Even on SMP, we do not need to use an atomic here because
3398 * the only way in is via ptrace() and this is possible only when the
3399 * process is stopped. Even in the case where the ctxsw out is not totally
3400 * completed by the time we come here, there is no way the 'stopped' process
3401 * could be in the middle of fiddling with the pfm_write_ibr_dbr() routine.
3402 * So this is always safe.
3404 if (ctx && ctx->ctx_fl_using_dbreg == 1) return -1;
3409 * We cannot allow setting breakpoints when system wide monitoring
3410 * sessions are using the debug registers.
3412	if (pfm_sessions.pfs_sys_use_dbregs > 0)
3415 pfm_sessions.pfs_ptrace_use_dbregs++;
3417 DPRINT(("ptrace_use_dbregs=%u sys_use_dbregs=%u by [%d] ret = %d\n",
3418 pfm_sessions.pfs_ptrace_use_dbregs,
3419 pfm_sessions.pfs_sys_use_dbregs,
3420 task_pid_nr(task), ret));
3428 * This function is called for every task that exits with the
3429 * IA64_THREAD_DBG_VALID set. This indicates a task which was
3430 * able to use the debug registers for debugging purposes via
3431 * ptrace(). Therefore we know it was not using them for
3432 * performance monitoring, so we only decrement the number
3433 * of "ptraced" debug register users to keep the count up to date
3436 pfm_release_debug_registers(struct task_struct *task)
3438 unsigned long flags;
3441 if (pmu_conf->use_rr_dbregs == 0) return 0;
3444 if (pfm_sessions.pfs_ptrace_use_dbregs == 0) {
3445 printk(KERN_ERR "perfmon: invalid release for [%d] ptrace_use_dbregs=0\n", task_pid_nr(task));
3448 pfm_sessions.pfs_ptrace_use_dbregs--;
3457 pfm_restart(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
3459 struct task_struct *task;
3460 pfm_buffer_fmt_t *fmt;
3461 pfm_ovfl_ctrl_t rst_ctrl;
3462 int state, is_system;
3465 state = ctx->ctx_state;
3466 fmt = ctx->ctx_buf_fmt;
3467 is_system = ctx->ctx_fl_system;
3468 task = PFM_CTX_TASK(ctx);
3471 case PFM_CTX_MASKED:
3473 case PFM_CTX_LOADED:
3474 if (CTX_HAS_SMPL(ctx) && fmt->fmt_restart_active) break;
3476 case PFM_CTX_UNLOADED:
3477 case PFM_CTX_ZOMBIE:
3478 DPRINT(("invalid state=%d\n", state));
3481 DPRINT(("state=%d, cannot operate (no active_restart handler)\n", state));
3486 * In system wide and when the context is loaded, access can only happen
3487 * when the caller is running on the CPU being monitored by the session.
3488 * It does not have to be the owner (ctx_task) of the context per se.
3490 if (is_system && ctx->ctx_cpu != smp_processor_id()) {
3491 DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu));
3496 if (unlikely(task == NULL)) {
3497 printk(KERN_ERR "perfmon: [%d] pfm_restart no task\n", task_pid_nr(current));
3501 if (task == current || is_system) {
3503 fmt = ctx->ctx_buf_fmt;
3505 DPRINT(("restarting self %d ovfl=0x%lx\n",
3507 ctx->ctx_ovfl_regs[0]));
3509 if (CTX_HAS_SMPL(ctx)) {
3511 prefetch(ctx->ctx_smpl_hdr);
3513 rst_ctrl.bits.mask_monitoring = 0;
3514 rst_ctrl.bits.reset_ovfl_pmds = 0;
3516 if (state == PFM_CTX_LOADED)
3517 ret = pfm_buf_fmt_restart_active(fmt, task, &rst_ctrl, ctx->ctx_smpl_hdr, regs);
3519 ret = pfm_buf_fmt_restart(fmt, task, &rst_ctrl, ctx->ctx_smpl_hdr, regs);
3521 rst_ctrl.bits.mask_monitoring = 0;
3522 rst_ctrl.bits.reset_ovfl_pmds = 1;
3526 if (rst_ctrl.bits.reset_ovfl_pmds)
3527 pfm_reset_regs(ctx, ctx->ctx_ovfl_regs, PFM_PMD_LONG_RESET);
3529 if (rst_ctrl.bits.mask_monitoring == 0) {
3530 DPRINT(("resuming monitoring for [%d]\n", task_pid_nr(task)));
3532 if (state == PFM_CTX_MASKED) pfm_restore_monitoring(task);
3534 DPRINT(("keeping monitoring stopped for [%d]\n", task_pid_nr(task)));
3536 // cannot use pfm_stop_monitoring(task, regs);
3540 * clear overflowed PMD mask to remove any stale information
3542 ctx->ctx_ovfl_regs[0] = 0UL;
3545 * back to LOADED state
3547 ctx->ctx_state = PFM_CTX_LOADED;
3550 * XXX: not really useful for self monitoring
3552 ctx->ctx_fl_can_restart = 0;
3558 * restart another task
3562 * When PFM_CTX_MASKED, we cannot issue a restart before the previous
3563 * one is seen by the task.
3565 if (state == PFM_CTX_MASKED) {
3566 if (ctx->ctx_fl_can_restart == 0) return -EINVAL;
3568 * will prevent subsequent restart before this one is
3569	 * seen by the other task
3571 ctx->ctx_fl_can_restart = 0;
3575	 * if blocking, then post the semaphore if PFM_CTX_MASKED, i.e.
3576 * the task is blocked or on its way to block. That's the normal
3577 * restart path. If the monitoring is not masked, then the task
3578 * can be actively monitoring and we cannot directly intervene.
3579 * Therefore we use the trap mechanism to catch the task and
3580 * force it to reset the buffer/reset PMDs.
3582 * if non-blocking, then we ensure that the task will go into
3583 * pfm_handle_work() before returning to user mode.
3585	 * We cannot explicitly reset another task; it MUST always
3586 * be done by the task itself. This works for system wide because
3587 * the tool that is controlling the session is logically doing
3588 * "self-monitoring".
3590 if (CTX_OVFL_NOBLOCK(ctx) == 0 && state == PFM_CTX_MASKED) {
3591 DPRINT(("unblocking [%d]\n", task_pid_nr(task)));
3592 complete(&ctx->ctx_restart_done);
3594 DPRINT(("[%d] armed exit trap\n", task_pid_nr(task)));
3596 ctx->ctx_fl_trap_reason = PFM_TRAP_REASON_RESET;
3598 PFM_SET_WORK_PENDING(task, 1);
3600 set_notify_resume(task);
3603 * XXX: send reschedule if task runs on another CPU
3610 pfm_debug(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
3612 unsigned int m = *(unsigned int *)arg;
3614 pfm_sysctl.debug = m == 0 ? 0 : 1;
3616 printk(KERN_INFO "perfmon debugging %s (timing reset)\n", pfm_sysctl.debug ? "on" : "off");
3619 memset(pfm_stats, 0, sizeof(pfm_stats));
3620 for(m=0; m < NR_CPUS; m++) pfm_stats[m].pfm_ovfl_intr_cycles_min = ~0UL;
3626 * arg can be NULL and count can be zero for this function
3629 pfm_write_ibr_dbr(int mode, pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
3631 struct thread_struct *thread = NULL;
3632 struct task_struct *task;
3633 pfarg_dbreg_t *req = (pfarg_dbreg_t *)arg;
3634 unsigned long flags;
3639 int i, can_access_pmu = 0;
3640 int is_system, is_loaded;
3642 if (pmu_conf->use_rr_dbregs == 0) return -EINVAL;
3644 state = ctx->ctx_state;
3645 is_loaded = state == PFM_CTX_LOADED ? 1 : 0;
3646 is_system = ctx->ctx_fl_system;
3647 task = ctx->ctx_task;
3649 if (state == PFM_CTX_ZOMBIE) return -EINVAL;
3652	 * on both UP and SMP, we can only write to the debug registers when the task is
3653 * the owner of the local PMU.
3656 thread = &task->thread;
3658 * In system wide and when the context is loaded, access can only happen
3659 * when the caller is running on the CPU being monitored by the session.
3660 * It does not have to be the owner (ctx_task) of the context per se.
3662 if (unlikely(is_system && ctx->ctx_cpu != smp_processor_id())) {
3663 DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu));
3666 can_access_pmu = GET_PMU_OWNER() == task || is_system ? 1 : 0;
3670 * we do not need to check for ipsr.db because we do clear ibr.x, dbr.r, and dbr.w
3671 * ensuring that no real breakpoint can be installed via this call.
3673 * IMPORTANT: regs can be NULL in this function
3676 first_time = ctx->ctx_fl_using_dbreg == 0;
3679 * don't bother if we are loaded and task is being debugged
3681 if (is_loaded && (thread->flags & IA64_THREAD_DBG_VALID) != 0) {
3682 DPRINT(("debug registers already in use for [%d]\n", task_pid_nr(task)));
3687 * check for debug registers in system wide mode
3689	 * Even though a check is done in pfm_context_load(),
3690 * we must repeat it here, in case the registers are
3691 * written after the context is loaded
3696 if (first_time && is_system) {
3697 if (pfm_sessions.pfs_ptrace_use_dbregs)
3700 pfm_sessions.pfs_sys_use_dbregs++;
3705 if (ret != 0) return ret;
3708	 * mark ourselves as a user of the debug registers for
3711 ctx->ctx_fl_using_dbreg = 1;
3714 * clear hardware registers to make sure we don't
3715 * pick up stale state.
3717 * for a system wide session, we do not use
3718 * thread.dbr, thread.ibr because this process
3719 * never leaves the current CPU and the state
3720 * is shared by all processes running on it
3722 if (first_time && can_access_pmu) {
3723 DPRINT(("[%d] clearing ibrs, dbrs\n", task_pid_nr(task)));
3724 for (i=0; i < pmu_conf->num_ibrs; i++) {
3725 ia64_set_ibr(i, 0UL);
3726 ia64_dv_serialize_instruction();
3729 for (i=0; i < pmu_conf->num_dbrs; i++) {
3730 ia64_set_dbr(i, 0UL);
3731 ia64_dv_serialize_data();
3737 * Now install the values into the registers
3739 for (i = 0; i < count; i++, req++) {
3741 rnum = req->dbreg_num;
3742 dbreg.val = req->dbreg_value;
3746 if ((mode == PFM_CODE_RR && rnum >= PFM_NUM_IBRS) || ((mode == PFM_DATA_RR) && rnum >= PFM_NUM_DBRS)) {
3747 DPRINT(("invalid register %u val=0x%lx mode=%d i=%d count=%d\n",
3748 rnum, dbreg.val, mode, i, count));
3754	 * make sure we do not install an enabled breakpoint
3757 if (mode == PFM_CODE_RR)
3758 dbreg.ibr.ibr_x = 0;
3760 dbreg.dbr.dbr_r = dbreg.dbr.dbr_w = 0;
3763 PFM_REG_RETFLAG_SET(req->dbreg_flags, 0);
3766 * Debug registers, just like PMC, can only be modified
3767	 * by a kernel call. Moreover, perfmon() accesses to those
3768 * registers are centralized in this routine. The hardware
3769 * does not modify the value of these registers, therefore,
3770 * if we save them as they are written, we can avoid having
3771 * to save them on context switch out. This is made possible
3772 * by the fact that when perfmon uses debug registers, ptrace()
3773 * won't be able to modify them concurrently.
3775 if (mode == PFM_CODE_RR) {
3776 CTX_USED_IBR(ctx, rnum);
3778 if (can_access_pmu) {
3779 ia64_set_ibr(rnum, dbreg.val);
3780 ia64_dv_serialize_instruction();
3783 ctx->ctx_ibrs[rnum] = dbreg.val;
3785 DPRINT(("write ibr%u=0x%lx used_ibrs=0x%x ld=%d apmu=%d\n",
3786 rnum, dbreg.val, ctx->ctx_used_ibrs[0], is_loaded, can_access_pmu));
3788 CTX_USED_DBR(ctx, rnum);
3790 if (can_access_pmu) {
3791 ia64_set_dbr(rnum, dbreg.val);
3792 ia64_dv_serialize_data();
3794 ctx->ctx_dbrs[rnum] = dbreg.val;
3796 DPRINT(("write dbr%u=0x%lx used_dbrs=0x%x ld=%d apmu=%d\n",
3797 rnum, dbreg.val, ctx->ctx_used_dbrs[0], is_loaded, can_access_pmu));
3805 * in case it was our first attempt, we undo the global modifications
3809 if (ctx->ctx_fl_system) {
3810 pfm_sessions.pfs_sys_use_dbregs--;
3813 ctx->ctx_fl_using_dbreg = 0;
3816 * install error return flag
3818 PFM_REG_RETFLAG_SET(req->dbreg_flags, PFM_REG_RETFL_EINVAL);
3824 pfm_write_ibrs(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
3826 return pfm_write_ibr_dbr(PFM_CODE_RR, ctx, arg, count, regs);
3830 pfm_write_dbrs(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
3832 return pfm_write_ibr_dbr(PFM_DATA_RR, ctx, arg, count, regs);
3836 pfm_mod_write_ibrs(struct task_struct *task, void *req, unsigned int nreq, struct pt_regs *regs)
3840 if (req == NULL) return -EINVAL;
3842 ctx = GET_PMU_CTX();
3844 if (ctx == NULL) return -EINVAL;
3847 * for now limit to current task, which is enough when calling
3848 * from overflow handler
3850 if (task != current && ctx->ctx_fl_system == 0) return -EBUSY;
3852 return pfm_write_ibrs(ctx, req, nreq, regs);
3854 EXPORT_SYMBOL(pfm_mod_write_ibrs);
3857 pfm_mod_write_dbrs(struct task_struct *task, void *req, unsigned int nreq, struct pt_regs *regs)
3861 if (req == NULL) return -EINVAL;
3863 ctx = GET_PMU_CTX();
3865 if (ctx == NULL) return -EINVAL;
3868 * for now limit to current task, which is enough when calling
3869 * from overflow handler
3871 if (task != current && ctx->ctx_fl_system == 0) return -EBUSY;
3873 return pfm_write_dbrs(ctx, req, nreq, regs);
3875 EXPORT_SYMBOL(pfm_mod_write_dbrs);
3879 pfm_get_features(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
3881 pfarg_features_t *req = (pfarg_features_t *)arg;
3883 req->ft_version = PFM_VERSION;
3888 pfm_stop(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
3890 struct pt_regs *tregs;
3891 struct task_struct *task = PFM_CTX_TASK(ctx);
3892 int state, is_system;
3894 state = ctx->ctx_state;
3895 is_system = ctx->ctx_fl_system;
3898	 * context must be attached to issue the stop command (includes LOADED, MASKED, ZOMBIE)
3900 if (state == PFM_CTX_UNLOADED) return -EINVAL;
3903 * In system wide and when the context is loaded, access can only happen
3904 * when the caller is running on the CPU being monitored by the session.
3905 * It does not have to be the owner (ctx_task) of the context per se.
3907 if (is_system && ctx->ctx_cpu != smp_processor_id()) {
3908 DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu));
3911 DPRINT(("task [%d] ctx_state=%d is_system=%d\n",
3912 task_pid_nr(PFM_CTX_TASK(ctx)),
3916 * in system mode, we need to update the PMU directly
3917 * and the user level state of the caller, which may not
3918 * necessarily be the creator of the context.
3922 * Update local PMU first
3926 ia64_setreg(_IA64_REG_CR_DCR, ia64_getreg(_IA64_REG_CR_DCR) & ~IA64_DCR_PP);
3930 * update local cpuinfo
3932 PFM_CPUINFO_CLEAR(PFM_CPUINFO_DCR_PP);
3935 * stop monitoring, does srlz.i
3940 * stop monitoring in the caller
3942 ia64_psr(regs)->pp = 0;
3950 if (task == current) {
3951 /* stop monitoring at kernel level */
3955 * stop monitoring at the user level
3957 ia64_psr(regs)->up = 0;
3959 tregs = task_pt_regs(task);
3962 * stop monitoring at the user level
3964 ia64_psr(tregs)->up = 0;
3967 * monitoring disabled in kernel at next reschedule
3969 ctx->ctx_saved_psr_up = 0;
3970 DPRINT(("task=[%d]\n", task_pid_nr(task)));
3977 pfm_start(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
3979 struct pt_regs *tregs;
3980 int state, is_system;
3982 state = ctx->ctx_state;
3983 is_system = ctx->ctx_fl_system;
3985 if (state != PFM_CTX_LOADED) return -EINVAL;
3988 * In system wide and when the context is loaded, access can only happen
3989 * when the caller is running on the CPU being monitored by the session.
3990 * It does not have to be the owner (ctx_task) of the context per se.
3992 if (is_system && ctx->ctx_cpu != smp_processor_id()) {
3993 DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu));
3998 * in system mode, we need to update the PMU directly
3999 * and the user level state of the caller, which may not
4000 * necessarily be the creator of the context.
4005 * set user level psr.pp for the caller
4007 ia64_psr(regs)->pp = 1;
4010 * now update the local PMU and cpuinfo
4012 PFM_CPUINFO_SET(PFM_CPUINFO_DCR_PP);
4015 * start monitoring at kernel level
4020 ia64_setreg(_IA64_REG_CR_DCR, ia64_getreg(_IA64_REG_CR_DCR) | IA64_DCR_PP);
4030 if (ctx->ctx_task == current) {
4032 /* start monitoring at kernel level */
4036 * activate monitoring at user level
4038 ia64_psr(regs)->up = 1;
4041 tregs = task_pt_regs(ctx->ctx_task);
4044 * start monitoring at the kernel level the next
4045 * time the task is scheduled
4047 ctx->ctx_saved_psr_up = IA64_PSR_UP;
4050 * activate monitoring at user level
4052 ia64_psr(tregs)->up = 1;
4058 pfm_get_pmc_reset(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
4060 pfarg_reg_t *req = (pfarg_reg_t *)arg;
4065 for (i = 0; i < count; i++, req++) {
4067 cnum = req->reg_num;
4069 if (!PMC_IS_IMPL(cnum)) goto abort_mission;
4071 req->reg_value = PMC_DFL_VAL(cnum);
4073 PFM_REG_RETFLAG_SET(req->reg_flags, 0);
4075 DPRINT(("pmc_reset_val pmc[%u]=0x%lx\n", cnum, req->reg_value));
4080 PFM_REG_RETFLAG_SET(req->reg_flags, PFM_REG_RETFL_EINVAL);
4085 pfm_check_task_exist(pfm_context_t *ctx)
4087 struct task_struct *g, *t;
4090 read_lock(&tasklist_lock);
4092 do_each_thread (g, t) {
4093 if (t->thread.pfm_context == ctx) {
4097 } while_each_thread (g, t);
4099 read_unlock(&tasklist_lock);
4101 DPRINT(("pfm_check_task_exist: ret=%d ctx=%p\n", ret, ctx));
4107 pfm_context_load(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
4109 struct task_struct *task;
4110 struct thread_struct *thread;
4111 struct pfm_context_t *old;
4112 unsigned long flags;
4114 struct task_struct *owner_task = NULL;
4116 pfarg_load_t *req = (pfarg_load_t *)arg;
4117 unsigned long *pmcs_source, *pmds_source;
4120 int state, is_system, set_dbregs = 0;
4122 state = ctx->ctx_state;
4123 is_system = ctx->ctx_fl_system;
4125 * can only load from unloaded or terminated state
4127 if (state != PFM_CTX_UNLOADED) {
4128 DPRINT(("cannot load to [%d], invalid ctx_state=%d\n",
4134 DPRINT(("load_pid [%d] using_dbreg=%d\n", req->load_pid, ctx->ctx_fl_using_dbreg));
4136 if (CTX_OVFL_NOBLOCK(ctx) == 0 && req->load_pid == current->pid) {
4137 DPRINT(("cannot use blocking mode on self\n"));
4141 ret = pfm_get_task(ctx, req->load_pid, &task);
4143 DPRINT(("load_pid [%d] get_task=%d\n", req->load_pid, ret));
4150 * system wide is self monitoring only
4152 if (is_system && task != current) {
4153 DPRINT(("system wide is self monitoring only load_pid=%d\n",
4158 thread = &task->thread;
4162	 * cannot load a context which is using range restrictions
4163	 * into a task that is being debugged.
4165 if (ctx->ctx_fl_using_dbreg) {
4166 if (thread->flags & IA64_THREAD_DBG_VALID) {
4168 DPRINT(("load_pid [%d] task is debugged, cannot load range restrictions\n", req->load_pid));
4174 if (pfm_sessions.pfs_ptrace_use_dbregs) {
4175 DPRINT(("cannot load [%d] dbregs in use\n",
4176 task_pid_nr(task)));
4179 pfm_sessions.pfs_sys_use_dbregs++;
4180 DPRINT(("load [%d] increased sys_use_dbreg=%u\n", task_pid_nr(task), pfm_sessions.pfs_sys_use_dbregs));
4187 if (ret) goto error;
4191 * SMP system-wide monitoring implies self-monitoring.
4193 * The programming model expects the task to
4194 * be pinned on a CPU throughout the session.
4195 * Here we take note of the current CPU at the
4196 * time the context is loaded. No call from
4197 * another CPU will be allowed.
4199	 * The pinning via sched_setaffinity()
4200	 * must be done by the calling task prior to this call.
4203 * systemwide: keep track of CPU this session is supposed to run on
4205 the_cpu = ctx->ctx_cpu = smp_processor_id();
4209 * now reserve the session
4211 ret = pfm_reserve_session(current, is_system, the_cpu);
4212 if (ret) goto error;
4215 * task is necessarily stopped at this point.
4217 * If the previous context was zombie, then it got removed in
4218 * pfm_save_regs(). Therefore we should not see it here.
4219 * If we see a context, then this is an active context
4221 * XXX: needs to be atomic
4223 DPRINT(("before cmpxchg() old_ctx=%p new_ctx=%p\n",
4224 thread->pfm_context, ctx));
4227 old = ia64_cmpxchg(acq, &thread->pfm_context, NULL, ctx, sizeof(pfm_context_t *));
4229 DPRINT(("load_pid [%d] already has a context\n", req->load_pid));
4233 pfm_reset_msgq(ctx);
4235 ctx->ctx_state = PFM_CTX_LOADED;
4238 * link context to task
4240 ctx->ctx_task = task;
4244 * we load as stopped
4246 PFM_CPUINFO_SET(PFM_CPUINFO_SYST_WIDE);
4247 PFM_CPUINFO_CLEAR(PFM_CPUINFO_DCR_PP);
4249 if (ctx->ctx_fl_excl_idle) PFM_CPUINFO_SET(PFM_CPUINFO_EXCL_IDLE);
4251 thread->flags |= IA64_THREAD_PM_VALID;
4255 * propagate into thread-state
4257 pfm_copy_pmds(task, ctx);
4258 pfm_copy_pmcs(task, ctx);
4260 pmcs_source = ctx->th_pmcs;
4261 pmds_source = ctx->th_pmds;
4264 * always the case for system-wide
4266 if (task == current) {
4268 if (is_system == 0) {
4270 /* allow user level control */
4271 ia64_psr(regs)->sp = 0;
4272 DPRINT(("clearing psr.sp for [%d]\n", task_pid_nr(task)));
4274 SET_LAST_CPU(ctx, smp_processor_id());
4276 SET_ACTIVATION(ctx);
4279 * push the other task out, if any
4281 owner_task = GET_PMU_OWNER();
4282 if (owner_task) pfm_lazy_save_regs(owner_task);
4286 * load all PMD from ctx to PMU (as opposed to thread state)
4287 * restore all PMC from ctx to PMU
4289 pfm_restore_pmds(pmds_source, ctx->ctx_all_pmds[0]);
4290 pfm_restore_pmcs(pmcs_source, ctx->ctx_all_pmcs[0]);
4292 ctx->ctx_reload_pmcs[0] = 0UL;
4293 ctx->ctx_reload_pmds[0] = 0UL;
4296 * guaranteed safe by earlier check against DBG_VALID
4298 if (ctx->ctx_fl_using_dbreg) {
4299 pfm_restore_ibrs(ctx->ctx_ibrs, pmu_conf->num_ibrs);
4300 pfm_restore_dbrs(ctx->ctx_dbrs, pmu_conf->num_dbrs);
4305 SET_PMU_OWNER(task, ctx);
4307 DPRINT(("context loaded on PMU for [%d]\n", task_pid_nr(task)));
4310 * when not current, task MUST be stopped, so this is safe
4312 regs = task_pt_regs(task);
4314 /* force a full reload */
4315 ctx->ctx_last_activation = PFM_INVALID_ACTIVATION;
4316 SET_LAST_CPU(ctx, -1);
4318 /* initial saved psr (stopped) */
4319 ctx->ctx_saved_psr_up = 0UL;
4320 ia64_psr(regs)->up = ia64_psr(regs)->pp = 0;
4326 if (ret) pfm_unreserve_session(ctx, ctx->ctx_fl_system, the_cpu);
4329 * we must undo the dbregs setting (for system-wide)
4331 if (ret && set_dbregs) {
4333 pfm_sessions.pfs_sys_use_dbregs--;
4337 * release task, there is now a link with the context
4339 if (is_system == 0 && task != current) {
4343 ret = pfm_check_task_exist(ctx);
4345 ctx->ctx_state = PFM_CTX_UNLOADED;
4346 ctx->ctx_task = NULL;
4354 * in this function, we do not need to increase the use count
4355 * for the task via get_task_struct(), because we hold the
4356 * context lock. If the task were to disappear while having
4357 * a context attached, it would go through pfm_exit_thread()
4358	 * which also grabs the context lock and would therefore be blocked
4359	 * until we are done here.
4361 static void pfm_flush_pmds(struct task_struct *, pfm_context_t *ctx);
4364 pfm_context_unload(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
4366 struct task_struct *task = PFM_CTX_TASK(ctx);
4367 struct pt_regs *tregs;
4368 int prev_state, is_system;
4371 DPRINT(("ctx_state=%d task [%d]\n", ctx->ctx_state, task ? task_pid_nr(task) : -1));
4373 prev_state = ctx->ctx_state;
4374 is_system = ctx->ctx_fl_system;
4377 * unload only when necessary
4379 if (prev_state == PFM_CTX_UNLOADED) {
4380 DPRINT(("ctx_state=%d, nothing to do\n", prev_state));
4385 * clear psr and dcr bits
4387 ret = pfm_stop(ctx, NULL, 0, regs);
4388 if (ret) return ret;
4390 ctx->ctx_state = PFM_CTX_UNLOADED;
4393 * in system mode, we need to update the PMU directly
4394 * and the user level state of the caller, which may not
4395 * necessarily be the creator of the context.
4402 * local PMU is taken care of in pfm_stop()
4404 PFM_CPUINFO_CLEAR(PFM_CPUINFO_SYST_WIDE);
4405 PFM_CPUINFO_CLEAR(PFM_CPUINFO_EXCL_IDLE);
4408 * save PMDs in context
4411 pfm_flush_pmds(current, ctx);
4414 * at this point we are done with the PMU
4415 * so we can unreserve the resource.
4417 if (prev_state != PFM_CTX_ZOMBIE)
4418 pfm_unreserve_session(ctx, 1 , ctx->ctx_cpu);
4421 * disconnect context from task
4423 task->thread.pfm_context = NULL;
4425 * disconnect task from context
4427 ctx->ctx_task = NULL;
4430	 * There is nothing more to clean up here.
4438 tregs = task == current ? regs : task_pt_regs(task);
4440 if (task == current) {
4442 * cancel user level control
4444 ia64_psr(regs)->sp = 1;
4446 DPRINT(("setting psr.sp for [%d]\n", task_pid_nr(task)));
4449 * save PMDs to context
4452 pfm_flush_pmds(task, ctx);
4455 * at this point we are done with the PMU
4456 * so we can unreserve the resource.
4458 * when state was ZOMBIE, we have already unreserved.
4460 if (prev_state != PFM_CTX_ZOMBIE)
4461 pfm_unreserve_session(ctx, 0 , ctx->ctx_cpu);
4464 * reset activation counter and psr
4466 ctx->ctx_last_activation = PFM_INVALID_ACTIVATION;
4467 SET_LAST_CPU(ctx, -1);
4470 * PMU state will not be restored
4472 task->thread.flags &= ~IA64_THREAD_PM_VALID;
4475 * break links between context and task
4477 task->thread.pfm_context = NULL;
4478 ctx->ctx_task = NULL;
4480 PFM_SET_WORK_PENDING(task, 0);
4482 ctx->ctx_fl_trap_reason = PFM_TRAP_REASON_NONE;
4483 ctx->ctx_fl_can_restart = 0;
4484 ctx->ctx_fl_going_zombie = 0;
4486 DPRINT(("disconnected [%d] from context\n", task_pid_nr(task)));
4493 * called only from exit_thread()
4494 * we come here only if the task has a context attached (loaded or masked)
4497 pfm_exit_thread(struct task_struct *task)
4500 unsigned long flags;
4501 struct pt_regs *regs = task_pt_regs(task);
4505 ctx = PFM_GET_CTX(task);
4507 PROTECT_CTX(ctx, flags);
4509 DPRINT(("state=%d task [%d]\n", ctx->ctx_state, task_pid_nr(task)));
4511 state = ctx->ctx_state;
4513 case PFM_CTX_UNLOADED:
4515 * only comes to this function if pfm_context is not NULL, i.e., cannot
4516 * be in unloaded state
4518 printk(KERN_ERR "perfmon: pfm_exit_thread [%d] ctx unloaded\n", task_pid_nr(task));
4520 case PFM_CTX_LOADED:
4521 case PFM_CTX_MASKED:
4522 ret = pfm_context_unload(ctx, NULL, 0, regs);
4524 printk(KERN_ERR "perfmon: pfm_exit_thread [%d] state=%d unload failed %d\n", task_pid_nr(task), state, ret);
4526 DPRINT(("ctx unloaded for current state was %d\n", state));
4528 pfm_end_notify_user(ctx);
4530 case PFM_CTX_ZOMBIE:
4531 ret = pfm_context_unload(ctx, NULL, 0, regs);
4533 printk(KERN_ERR "perfmon: pfm_exit_thread [%d] state=%d unload failed %d\n", task_pid_nr(task), state, ret);
4538 printk(KERN_ERR "perfmon: pfm_exit_thread [%d] unexpected state=%d\n", task_pid_nr(task), state);
4541 UNPROTECT_CTX(ctx, flags);
4543 { u64 psr = pfm_get_psr();
4544 BUG_ON(psr & (IA64_PSR_UP|IA64_PSR_PP));
4545 BUG_ON(GET_PMU_OWNER());
4546 BUG_ON(ia64_psr(regs)->up);
4547 BUG_ON(ia64_psr(regs)->pp);
4551 * All memory free operations (especially for vmalloc'ed memory)
4552 * MUST be done with interrupts ENABLED.
4554 if (free_ok) pfm_context_free(ctx);
4558	 * functions MUST be listed in increasing order of their index (see perfmon.h)
4560 #define PFM_CMD(name, flags, arg_count, arg_type, getsz) { name, #name, flags, arg_count, sizeof(arg_type), getsz }
4561 #define PFM_CMD_S(name, flags) { name, #name, flags, 0, 0, NULL }
4562 #define PFM_CMD_PCLRWS (PFM_CMD_FD|PFM_CMD_ARG_RW|PFM_CMD_STOP)
4563 #define PFM_CMD_PCLRW (PFM_CMD_FD|PFM_CMD_ARG_RW)
4564 #define PFM_CMD_NONE { NULL, "no-cmd", 0, 0, 0, NULL}
4566 static pfm_cmd_desc_t pfm_cmd_tab[]={
4567 /* 0 */PFM_CMD_NONE,
4568 /* 1 */PFM_CMD(pfm_write_pmcs, PFM_CMD_PCLRWS, PFM_CMD_ARG_MANY, pfarg_reg_t, NULL),
4569 /* 2 */PFM_CMD(pfm_write_pmds, PFM_CMD_PCLRWS, PFM_CMD_ARG_MANY, pfarg_reg_t, NULL),
4570 /* 3 */PFM_CMD(pfm_read_pmds, PFM_CMD_PCLRWS, PFM_CMD_ARG_MANY, pfarg_reg_t, NULL),
4571 /* 4 */PFM_CMD_S(pfm_stop, PFM_CMD_PCLRWS),
4572 /* 5 */PFM_CMD_S(pfm_start, PFM_CMD_PCLRWS),
4573 /* 6 */PFM_CMD_NONE,
4574 /* 7 */PFM_CMD_NONE,
4575 /* 8 */PFM_CMD(pfm_context_create, PFM_CMD_ARG_RW, 1, pfarg_context_t, pfm_ctx_getsize),
4576 /* 9 */PFM_CMD_NONE,
4577 /* 10 */PFM_CMD_S(pfm_restart, PFM_CMD_PCLRW),
4578 /* 11 */PFM_CMD_NONE,
4579 /* 12 */PFM_CMD(pfm_get_features, PFM_CMD_ARG_RW, 1, pfarg_features_t, NULL),
4580 /* 13 */PFM_CMD(pfm_debug, 0, 1, unsigned int, NULL),
4581 /* 14 */PFM_CMD_NONE,
4582 /* 15 */PFM_CMD(pfm_get_pmc_reset, PFM_CMD_ARG_RW, PFM_CMD_ARG_MANY, pfarg_reg_t, NULL),
4583 /* 16 */PFM_CMD(pfm_context_load, PFM_CMD_PCLRWS, 1, pfarg_load_t, NULL),
4584 /* 17 */PFM_CMD_S(pfm_context_unload, PFM_CMD_PCLRWS),
4585 /* 18 */PFM_CMD_NONE,
4586 /* 19 */PFM_CMD_NONE,
4587 /* 20 */PFM_CMD_NONE,
4588 /* 21 */PFM_CMD_NONE,
4589 /* 22 */PFM_CMD_NONE,
4590 /* 23 */PFM_CMD_NONE,
4591 /* 24 */PFM_CMD_NONE,
4592 /* 25 */PFM_CMD_NONE,
4593 /* 26 */PFM_CMD_NONE,
4594 /* 27 */PFM_CMD_NONE,
4595 /* 28 */PFM_CMD_NONE,
4596 /* 29 */PFM_CMD_NONE,
4597 /* 30 */PFM_CMD_NONE,
4598 /* 31 */PFM_CMD_NONE,
4599 /* 32 */PFM_CMD(pfm_write_ibrs, PFM_CMD_PCLRWS, PFM_CMD_ARG_MANY, pfarg_dbreg_t, NULL),
4600 /* 33 */PFM_CMD(pfm_write_dbrs, PFM_CMD_PCLRWS, PFM_CMD_ARG_MANY, pfarg_dbreg_t, NULL)
4602 #define PFM_CMD_COUNT (sizeof(pfm_cmd_tab)/sizeof(pfm_cmd_desc_t))
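/*
 * Expansion example (added for clarity): entry 8 above,
 *
 *	PFM_CMD(pfm_context_create, PFM_CMD_ARG_RW, 1, pfarg_context_t, pfm_ctx_getsize)
 *
 * expands to
 *
 *	{ pfm_context_create, "pfm_context_create", PFM_CMD_ARG_RW, 1,
 *	  sizeof(pfarg_context_t), pfm_ctx_getsize }
 *
 * which lets sys_perfmonctl() size-check and copy the user argument
 * generically before dispatching to the handler.
 */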
4605 pfm_check_task_state(pfm_context_t *ctx, int cmd, unsigned long flags)
4607 struct task_struct *task;
4608 int state, old_state;
4611 state = ctx->ctx_state;
4612 task = ctx->ctx_task;
4615 DPRINT(("context %d no task, state=%d\n", ctx->ctx_fd, state));
4619 DPRINT(("context %d state=%d [%d] task_state=%ld must_stop=%d\n",
4623 task->state, PFM_CMD_STOPPED(cmd)));
4626 * self-monitoring always ok.
4628	 * for system-wide, the caller can either be the creator of the
4629	 * context (the one to which the context is attached) OR
4630	 * a task running on the same CPU as the session.
4632 if (task == current || ctx->ctx_fl_system) return 0;
4635 * we are monitoring another thread
4638 case PFM_CTX_UNLOADED:
4640 * if context is UNLOADED we are safe to go
4643 case PFM_CTX_ZOMBIE:
4645 * no command can operate on a zombie context
4647 DPRINT(("cmd %d state zombie cannot operate on context\n", cmd));
4649 case PFM_CTX_MASKED:
4651 * PMU state has been saved to software even though
4652 * the thread may still be running.
4654 if (cmd != PFM_UNLOAD_CONTEXT) return 0;
4658	 * context is LOADED or MASKED. Some commands may need to have the task stopped.
4661 * We could lift this restriction for UP but it would mean that
4662 * the user has no guarantee the task would not run between
4663 * two successive calls to perfmonctl(). That's probably OK.
4664	 * If the user wants to ensure the task does not run, then
4665 * the task must be stopped.
4667 if (PFM_CMD_STOPPED(cmd)) {
4668 if (!task_is_stopped_or_traced(task)) {
4669 DPRINT(("[%d] task not in stopped state\n", task_pid_nr(task)));
4673 * task is now stopped, wait for ctxsw out
4675 * This is an interesting point in the code.
4676 * We need to unprotect the context because
4677	 * the pfm_save_regs() routine needs to grab
4678	 * the same lock. There is danger in doing
4679	 * this because it leaves a window open for
4680 * another task to get access to the context
4681 * and possibly change its state. The one thing
4682 * that is not possible is for the context to disappear
4683 * because we are protected by the VFS layer, i.e.,
4684 * get_fd()/put_fd().
4688 UNPROTECT_CTX(ctx, flags);
4690 wait_task_inactive(task, 0);
4692 PROTECT_CTX(ctx, flags);
4695 * we must recheck to verify if state has changed
4697 if (ctx->ctx_state != old_state) {
4698 DPRINT(("old_state=%d new_state=%d\n", old_state, ctx->ctx_state));
4706 * system-call entry point (must return long)
4709 sys_perfmonctl (int fd, int cmd, void __user *arg, int count)
4711 struct fd f = {NULL, 0};
4712 pfm_context_t *ctx = NULL;
4713 unsigned long flags = 0UL;
4714 void *args_k = NULL;
4715 long ret; /* will expand int return types */
4716 size_t base_sz, sz, xtra_sz = 0;
4717 int narg, completed_args = 0, call_made = 0, cmd_flags;
4718 int (*func)(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs);
4719 int (*getsize)(void *arg, size_t *sz);
4720 #define PFM_MAX_ARGSIZE 4096
4723 * reject any call if perfmon was disabled at initialization
4725 if (unlikely(pmu_conf == NULL)) return -ENOSYS;
4727 if (unlikely(cmd < 0 || cmd >= PFM_CMD_COUNT)) {
4728 DPRINT(("invalid cmd=%d\n", cmd));
4732 func = pfm_cmd_tab[cmd].cmd_func;
4733 narg = pfm_cmd_tab[cmd].cmd_narg;
4734 base_sz = pfm_cmd_tab[cmd].cmd_argsize;
4735 getsize = pfm_cmd_tab[cmd].cmd_getsize;
4736 cmd_flags = pfm_cmd_tab[cmd].cmd_flags;
4738 if (unlikely(func == NULL)) {
4739 DPRINT(("invalid cmd=%d\n", cmd));
4743 DPRINT(("cmd=%s idx=%d narg=0x%x argsz=%lu count=%d\n",
4751 * check if number of arguments matches what the command expects
4753 if (unlikely((narg == PFM_CMD_ARG_MANY && count <= 0) || (narg > 0 && narg != count)))
4757 sz = xtra_sz + base_sz*count;
4759 * limit abuse to min page size
4761 if (unlikely(sz > PFM_MAX_ARGSIZE)) {
4762 printk(KERN_ERR "perfmon: [%d] argument too big %lu\n", task_pid_nr(current), sz);
4767 * allocate default-sized argument buffer
4769 if (likely(count && args_k == NULL)) {
4770 args_k = kmalloc(PFM_MAX_ARGSIZE, GFP_KERNEL);
4771 if (args_k == NULL) return -ENOMEM;
4779	 * assume sz = 0 for commands without parameters
4781 if (sz && copy_from_user(args_k, arg, sz)) {
4782 DPRINT(("cannot copy_from_user %lu bytes @%p\n", sz, arg));
4787 * check if command supports extra parameters
4789 if (completed_args == 0 && getsize) {
4791 * get extra parameters size (based on main argument)
4793 ret = (*getsize)(args_k, &xtra_sz);
4794 if (ret) goto error_args;
4798 DPRINT(("restart_args sz=%lu xtra_sz=%lu\n", sz, xtra_sz));
4800 /* retry if necessary */
4801 if (likely(xtra_sz)) goto restart_args;
4804 if (unlikely((cmd_flags & PFM_CMD_FD) == 0)) goto skip_fd;
4809 if (unlikely(f.file == NULL)) {
4810 DPRINT(("invalid fd %d\n", fd));
4813 if (unlikely(PFM_IS_FILE(f.file) == 0)) {
4814 DPRINT(("fd %d not related to perfmon\n", fd));
4818 ctx = f.file->private_data;
4819 if (unlikely(ctx == NULL)) {
4820 DPRINT(("no context for fd %d\n", fd));
4823 prefetch(&ctx->ctx_state);
4825 PROTECT_CTX(ctx, flags);
4828 * check task is stopped
4830 ret = pfm_check_task_state(ctx, cmd, flags);
4831 if (unlikely(ret)) goto abort_locked;
4834 ret = (*func)(ctx, args_k, count, task_pt_regs(current));
4840 DPRINT(("context unlocked\n"));
4841 UNPROTECT_CTX(ctx, flags);
4844 /* copy argument back to user, if needed */
4845 if (call_made && PFM_CMD_RW_ARG(cmd) && copy_to_user(arg, args_k, base_sz*count)) ret = -EFAULT;
4853 DPRINT(("cmd=%s ret=%ld\n", PFM_CMD_NAME(cmd), ret));
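/*
 * Illustrative sketch (not part of the original file): how a monitoring
 * tool reaches this entry point from user level. On ia64 libc exposed
 * the call as perfmonctl(fd, cmd, arg, narg); the raw syscall() spelling
 * below is an assumption for illustration.
 */
#if 0
	pfarg_load_t load = { .load_pid = getpid() };

	/* attach a previously created context (ctx_fd) to the caller */
	if (syscall(__NR_perfmonctl, ctx_fd, PFM_LOAD_CONTEXT, &load, 1) < 0)
		perror("PFM_LOAD_CONTEXT");
#endif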
4859 pfm_resume_after_ovfl(pfm_context_t *ctx, unsigned long ovfl_regs, struct pt_regs *regs)
4861 pfm_buffer_fmt_t *fmt = ctx->ctx_buf_fmt;
4862 pfm_ovfl_ctrl_t rst_ctrl;
4866 state = ctx->ctx_state;
4868 * Unlock sampling buffer and reset index atomically
4869 * XXX: not really needed when blocking
4871 if (CTX_HAS_SMPL(ctx)) {
4873 rst_ctrl.bits.mask_monitoring = 0;
4874 rst_ctrl.bits.reset_ovfl_pmds = 0;
4876 if (state == PFM_CTX_LOADED)
4877 ret = pfm_buf_fmt_restart_active(fmt, current, &rst_ctrl, ctx->ctx_smpl_hdr, regs);
4879 ret = pfm_buf_fmt_restart(fmt, current, &rst_ctrl, ctx->ctx_smpl_hdr, regs);
4881 rst_ctrl.bits.mask_monitoring = 0;
4882 rst_ctrl.bits.reset_ovfl_pmds = 1;
4886 if (rst_ctrl.bits.reset_ovfl_pmds) {
4887 pfm_reset_regs(ctx, &ovfl_regs, PFM_PMD_LONG_RESET);
4889 if (rst_ctrl.bits.mask_monitoring == 0) {
4890 DPRINT(("resuming monitoring\n"));
4891 if (ctx->ctx_state == PFM_CTX_MASKED) pfm_restore_monitoring(current);
4893 DPRINT(("stopping monitoring\n"));
4894 //pfm_stop_monitoring(current, regs);
4896 ctx->ctx_state = PFM_CTX_LOADED;
4901 * context MUST BE LOCKED when calling
4902 * can only be called for current
4905 pfm_context_force_terminate(pfm_context_t *ctx, struct pt_regs *regs)
4909 DPRINT(("entering for [%d]\n", task_pid_nr(current)));
4911 ret = pfm_context_unload(ctx, NULL, 0, regs);
4913 printk(KERN_ERR "pfm_context_force_terminate: [%d] unload failed with %d\n", task_pid_nr(current), ret);
4917 * and wakeup controlling task, indicating we are now disconnected
4919 wake_up_interruptible(&ctx->ctx_zombieq);
4922 * given that context is still locked, the controlling
4923 * task will only get access when we return from
4924 * pfm_handle_work().
4928 static int pfm_ovfl_notify_user(pfm_context_t *ctx, unsigned long ovfl_pmds);
4931 * pfm_handle_work() can be called with interrupts enabled
4932 * (TIF_NEED_RESCHED) or disabled. The down_interruptible
4933 * call may sleep, therefore we must re-enable interrupts
4934 * to avoid deadlocks. It is safe to do so because this function
4935 * is called ONLY when returning to user level (pUStk=1), in which case
4936 * there is no risk of kernel stack overflow due to deep
4937 * interrupt nesting.
4940 pfm_handle_work(void)
4943 struct pt_regs *regs;
4944 unsigned long flags, dummy_flags;
4945 unsigned long ovfl_regs;
4946 unsigned int reason;
4949 ctx = PFM_GET_CTX(current);
4951 printk(KERN_ERR "perfmon: [%d] has no PFM context\n",
4952 task_pid_nr(current));
4956 PROTECT_CTX(ctx, flags);
4958 PFM_SET_WORK_PENDING(current, 0);
4960 regs = task_pt_regs(current);
4963 * extract reason for being here and clear
4965 reason = ctx->ctx_fl_trap_reason;
4966 ctx->ctx_fl_trap_reason = PFM_TRAP_REASON_NONE;
4967 ovfl_regs = ctx->ctx_ovfl_regs[0];
4969 DPRINT(("reason=%d state=%d\n", reason, ctx->ctx_state));
4972 * must be done before we check for simple-reset mode
4974 if (ctx->ctx_fl_going_zombie || ctx->ctx_state == PFM_CTX_ZOMBIE)
4977 //if (CTX_OVFL_NOBLOCK(ctx)) goto skip_blocking;
4978 if (reason == PFM_TRAP_REASON_RESET)
4982 * restore interrupt mask to what it was on entry.
4983 * Could be enabled/disabled.
4985 UNPROTECT_CTX(ctx, flags);
4988 * force interrupt enable because of down_interruptible()
4992 DPRINT(("before block sleeping\n"));
4995 * may go through without blocking on SMP systems
4996 * if restart has been received already by the time we call down()
4998 ret = wait_for_completion_interruptible(&ctx->ctx_restart_done);
5000 DPRINT(("after block sleeping ret=%d\n", ret));
5003 * lock context and mask interrupts again
5004 * We save flags into a dummy because we may have
5005 * altered the interrupt mask compared to entry in this function.
5008 PROTECT_CTX(ctx, dummy_flags);
5011 * we need to read the ovfl_regs only after wake-up
5012 * because we may have had pfm_write_pmds() in between
5013 * and that can change PMD values, and therefore
5014 * ovfl_regs is reset for these new PMD values.
5016 ovfl_regs = ctx->ctx_ovfl_regs[0];
5018 if (ctx->ctx_fl_going_zombie) {
5020 DPRINT(("context is zombie, bailing out\n"));
5021 pfm_context_force_terminate(ctx, regs);
5025 * in case of interruption of down() we don't restart anything
5031 pfm_resume_after_ovfl(ctx, ovfl_regs, regs);
5032 ctx->ctx_ovfl_regs[0] = 0UL;
5036 * restore flags as they were upon entry
5038 UNPROTECT_CTX(ctx, flags);
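/*
 * Illustrative sketch (not part of the original file): the blocking
 * handshake above reduced to its two halves. The monitored task parks
 * on the ctx_restart_done completion with interrupts re-enabled; the
 * controlling task's PFM_RESTART command fires it.
 */
#if 0
	/* monitored task, on its way back to user level */
	local_irq_enable();			/* the wait may sleep */
	ret = wait_for_completion_interruptible(&ctx->ctx_restart_done);

	/* controlling task, in the PFM_RESTART handler */
	complete(&ctx->ctx_restart_done);	/* unblock the monitored task */
#endif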
5042 pfm_notify_user(pfm_context_t *ctx, pfm_msg_t *msg)
5044 if (ctx->ctx_state == PFM_CTX_ZOMBIE) {
5045 DPRINT(("ignoring overflow notification, owner is zombie\n"));
5049 DPRINT(("waking up somebody\n"));
5051 if (msg) wake_up_interruptible(&ctx->ctx_msgq_wait);
5054 * safe, we are not in intr handler, nor in ctxsw when we come here
5057 kill_fasync (&ctx->ctx_async_queue, SIGIO, POLL_IN);
5063 pfm_ovfl_notify_user(pfm_context_t *ctx, unsigned long ovfl_pmds)
5065 pfm_msg_t *msg = NULL;
5067 if (ctx->ctx_fl_no_msg == 0) {
5068 msg = pfm_get_new_msg(ctx);
5070 printk(KERN_ERR "perfmon: pfm_ovfl_notify_user no more notification msgs\n");
5074 msg->pfm_ovfl_msg.msg_type = PFM_MSG_OVFL;
5075 msg->pfm_ovfl_msg.msg_ctx_fd = ctx->ctx_fd;
5076 msg->pfm_ovfl_msg.msg_active_set = 0;
5077 msg->pfm_ovfl_msg.msg_ovfl_pmds[0] = ovfl_pmds;
5078 msg->pfm_ovfl_msg.msg_ovfl_pmds[1] = 0UL;
5079 msg->pfm_ovfl_msg.msg_ovfl_pmds[2] = 0UL;
5080 msg->pfm_ovfl_msg.msg_ovfl_pmds[3] = 0UL;
5081 msg->pfm_ovfl_msg.msg_tstamp = 0UL;
5084 DPRINT(("ovfl msg: msg=%p no_msg=%d fd=%d ovfl_pmds=0x%lx\n",
5090 return pfm_notify_user(ctx, msg);
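/*
 * Illustrative sketch (not part of the original file): the consumer side
 * of the queue filled by pfm_get_new_msg(). Messages are drained by
 * read()ing the context file descriptor; the loop below is an assumed
 * fragment of a typical monitoring tool.
 */
#if 0
	pfm_msg_t msg;

	while (read(ctx_fd, &msg, sizeof(msg)) == sizeof(msg)) {
		if (msg.pfm_gen_msg.msg_type == PFM_MSG_OVFL)
			process_ovfl(msg.pfm_ovfl_msg.msg_ovfl_pmds[0]);
	}
#endif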
5094 pfm_end_notify_user(pfm_context_t *ctx)
5098 msg = pfm_get_new_msg(ctx);
5100 printk(KERN_ERR "perfmon: pfm_end_notify_user no more notification msgs\n");
5104 memset(msg, 0, sizeof(*msg));
5106 msg->pfm_end_msg.msg_type = PFM_MSG_END;
5107 msg->pfm_end_msg.msg_ctx_fd = ctx->ctx_fd;
5108 msg->pfm_ovfl_msg.msg_tstamp = 0UL;
5110 DPRINT(("end msg: msg=%p no_msg=%d ctx_fd=%d\n",
5115 return pfm_notify_user(ctx, msg);
5119 * main overflow processing routine.
5120 * it can be called from the interrupt path or explicitly during the context switch code
5122 static void pfm_overflow_handler(struct task_struct *task, pfm_context_t *ctx,
5123 unsigned long pmc0, struct pt_regs *regs)
5125 pfm_ovfl_arg_t *ovfl_arg;
5127 unsigned long old_val, ovfl_val, new_val;
5128 unsigned long ovfl_notify = 0UL, ovfl_pmds = 0UL, smpl_pmds = 0UL, reset_pmds;
5129 unsigned long tstamp;
5130 pfm_ovfl_ctrl_t ovfl_ctrl;
5131 unsigned int i, has_smpl;
5132 int must_notify = 0;
5134 if (unlikely(ctx->ctx_state == PFM_CTX_ZOMBIE)) goto stop_monitoring;
5137 * sanity test. Should never happen
5139 if (unlikely((pmc0 & 0x1) == 0)) goto sanity_check;
5141 tstamp = ia64_get_itc();
5142 mask = pmc0 >> PMU_FIRST_COUNTER;
5143 ovfl_val = pmu_conf->ovfl_val;
5144 has_smpl = CTX_HAS_SMPL(ctx);
5146 DPRINT_ovfl(("pmc0=0x%lx pid=%d iip=0x%lx, %s "
5147 "used_pmds=0x%lx\n",
5149 task ? task_pid_nr(task): -1,
5150 (regs ? regs->cr_iip : 0),
5151 CTX_OVFL_NOBLOCK(ctx) ? "nonblocking" : "blocking",
5152 ctx->ctx_used_pmds[0]));
5156 * first we update the virtual counters
5157 * assume there was a prior ia64_srlz_d() issued
5159 for (i = PMU_FIRST_COUNTER; mask ; i++, mask >>= 1) {
5161 /* skip pmd which did not overflow */
5162 if ((mask & 0x1) == 0) continue;
5165 * Note that the pmd is not necessarily 0 at this point as qualified events
5166 * may have happened before the PMU was frozen. The residual count is not
5167 * taken into consideration here but will be with any read of the pmd via pfm_read_pmds().
5170 old_val = new_val = ctx->ctx_pmds[i].val;
5171 new_val += 1 + ovfl_val;
5172 ctx->ctx_pmds[i].val = new_val;
5175 * check for overflow condition
5177 if (likely(old_val > new_val)) {
5178 ovfl_pmds |= 1UL << i;
5179 if (PMC_OVFL_NOTIFY(ctx, i)) ovfl_notify |= 1UL << i;
5182 DPRINT_ovfl(("ctx_pmd[%d].val=0x%lx old_val=0x%lx pmd=0x%lx ovfl_pmds=0x%lx ovfl_notify=0x%lx\n",
5186 ia64_get_pmd(i) & ovfl_val,
5192 * there was no 64-bit overflow, nothing else to do
5194 if (ovfl_pmds == 0UL) return;
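/*
 * Worked example (not part of the original file), assuming 47-bit
 * hardware counters so that ovfl_val = (1UL << 47) - 1:
 *
 *   a hardware overflow freezes the PMU and sets the pmd's bit in pmc0;
 *   the update above then computes
 *
 *       new_val = old_val + 1 + ovfl_val = old_val + 2^47
 *
 *   i.e. the 64-bit software counter advances by one full hardware
 *   period. Only when that addition itself wraps (old_val > new_val)
 *   has a genuine 64-bit overflow occurred, and only then does the pmd
 *   enter ovfl_pmds.
 */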
5197 * reset all control bits
5203 * if a sampling format module exists, then we "cache" the overflow by
5204 * calling the module's handler() routine.
5207 unsigned long start_cycles, end_cycles;
5208 unsigned long pmd_mask;
5210 int this_cpu = smp_processor_id();
5212 pmd_mask = ovfl_pmds >> PMU_FIRST_COUNTER;
5213 ovfl_arg = &ctx->ctx_ovfl_arg;
5215 prefetch(ctx->ctx_smpl_hdr);
5217 for(i=PMU_FIRST_COUNTER; pmd_mask && ret == 0; i++, pmd_mask >>=1) {
5221 if ((pmd_mask & 0x1) == 0) continue;
5223 ovfl_arg->ovfl_pmd = (unsigned char )i;
5224 ovfl_arg->ovfl_notify = ovfl_notify & mask ? 1 : 0;
5225 ovfl_arg->active_set = 0;
5226 ovfl_arg->ovfl_ctrl.val = 0; /* module must fill in all fields */
5227 ovfl_arg->smpl_pmds[0] = smpl_pmds = ctx->ctx_pmds[i].smpl_pmds[0];
5229 ovfl_arg->pmd_value = ctx->ctx_pmds[i].val;
5230 ovfl_arg->pmd_last_reset = ctx->ctx_pmds[i].lval;
5231 ovfl_arg->pmd_eventid = ctx->ctx_pmds[i].eventid;
5234 * copy values of pmds of interest. Sampling format may copy them
5235 * into sampling buffer.
5238 for(j=0, k=0; smpl_pmds; j++, smpl_pmds >>=1) {
5239 if ((smpl_pmds & 0x1) == 0) continue;
5240 ovfl_arg->smpl_pmds_values[k++] = PMD_IS_COUNTING(j) ? pfm_read_soft_counter(ctx, j) : ia64_get_pmd(j);
5241 DPRINT_ovfl(("smpl_pmd[%d]=pmd%u=0x%lx\n", k-1, j, ovfl_arg->smpl_pmds_values[k-1]));
5245 pfm_stats[this_cpu].pfm_smpl_handler_calls++;
5247 start_cycles = ia64_get_itc();
5250 * call custom buffer format record (handler) routine
5252 ret = (*ctx->ctx_buf_fmt->fmt_handler)(task, ctx->ctx_smpl_hdr, ovfl_arg, regs, tstamp);
5254 end_cycles = ia64_get_itc();
5257 * For those controls, we take the union because they have
5258 * an all or nothing behavior.
5260 ovfl_ctrl.bits.notify_user |= ovfl_arg->ovfl_ctrl.bits.notify_user;
5261 ovfl_ctrl.bits.block_task |= ovfl_arg->ovfl_ctrl.bits.block_task;
5262 ovfl_ctrl.bits.mask_monitoring |= ovfl_arg->ovfl_ctrl.bits.mask_monitoring;
5264 * build the bitmask of pmds to reset now
5266 if (ovfl_arg->ovfl_ctrl.bits.reset_ovfl_pmds) reset_pmds |= mask;
5268 pfm_stats[this_cpu].pfm_smpl_handler_cycles += end_cycles - start_cycles;
5271 * when the module cannot handle the rest of the overflows, we abort right here
5273 if (ret && pmd_mask) {
5274 DPRINT(("handler aborts leftover ovfl_pmds=0x%lx\n",
5275 pmd_mask<<PMU_FIRST_COUNTER));
5278 * remove the pmds we reset now from the set of pmds to reset in pfm_restart()
5280 ovfl_pmds &= ~reset_pmds;
5283 * when no sampling module is used, then the default
5284 * is to notify on overflow if requested by user
5286 ovfl_ctrl.bits.notify_user = ovfl_notify ? 1 : 0;
5287 ovfl_ctrl.bits.block_task = ovfl_notify ? 1 : 0;
5288 ovfl_ctrl.bits.mask_monitoring = ovfl_notify ? 1 : 0; /* XXX: change for saturation */
5289 ovfl_ctrl.bits.reset_ovfl_pmds = ovfl_notify ? 0 : 1;
5291 * if needed, we reset all overflowed pmds
5293 if (ovfl_notify == 0) reset_pmds = ovfl_pmds;
5296 DPRINT_ovfl(("ovfl_pmds=0x%lx reset_pmds=0x%lx\n", ovfl_pmds, reset_pmds));
5299 * reset the requested PMD registers using the short reset values
5302 unsigned long bm = reset_pmds;
5303 pfm_reset_regs(ctx, &bm, PFM_PMD_SHORT_RESET);
5306 if (ovfl_notify && ovfl_ctrl.bits.notify_user) {
5308 * keep track of what to reset when unblocking
5310 ctx->ctx_ovfl_regs[0] = ovfl_pmds;
5313 * check for blocking context
5315 if (CTX_OVFL_NOBLOCK(ctx) == 0 && ovfl_ctrl.bits.block_task) {
5317 ctx->ctx_fl_trap_reason = PFM_TRAP_REASON_BLOCK;
5320 * set the perfmon-specific pending work flag for the task
5322 PFM_SET_WORK_PENDING(task, 1);
5325 * when coming from ctxsw, current still points to the
5326 * previous task, therefore we must work with task and not current.
5328 set_notify_resume(task);
5331 * defer until state is changed (shorten spin window). the context is locked
5332 * anyway, so the signal receiver would just spin for nothing.
5337 DPRINT_ovfl(("owner [%d] pending=%ld reason=%u ovfl_pmds=0x%lx ovfl_notify=0x%lx masked=%d\n",
5338 GET_PMU_OWNER() ? task_pid_nr(GET_PMU_OWNER()) : -1,
5339 PFM_GET_WORK_PENDING(task),
5340 ctx->ctx_fl_trap_reason,
5343 ovfl_ctrl.bits.mask_monitoring ? 1 : 0));
5345 * in case monitoring must be stopped, we toggle the psr bits
5347 if (ovfl_ctrl.bits.mask_monitoring) {
5348 pfm_mask_monitoring(task);
5349 ctx->ctx_state = PFM_CTX_MASKED;
5350 ctx->ctx_fl_can_restart = 1;
5354 * send notification now
5356 if (must_notify) pfm_ovfl_notify_user(ctx, ovfl_notify);
5361 printk(KERN_ERR "perfmon: CPU%d overflow handler [%d] pmc0=0x%lx\n",
5363 task ? task_pid_nr(task) : -1,
5369 * in SMP, zombie context is never restored but reclaimed in pfm_load_regs().
5370 * Moreover, zombies are also reclaimed in pfm_save_regs(). Therefore we can
5371 * come here as zombie only if the task is the current task. In which case, we
5372 * can access the PMU hardware directly.
5374 * Note that zombies do have PM_VALID set. So here we do the minimum.
5376 * In case the context was zombified it could not be reclaimed at the time
5377 * the monitoring program exited. At this point, the PMU reservation has been
5378 * returned, the sampling buffer has been freed. We must convert this call
5379 * into a spurious interrupt. However, we must also avoid infinite overflows
5380 * by stopping monitoring for this task. We can only come here for a per-task
5381 * context. All we need to do is to stop monitoring using the psr bits which
5382 * are always task private. By re-enabling secure monitoring, we ensure that
5383 * the monitored task will not be able to re-activate monitoring.
5384 * The task will eventually be context switched out, at which point the context
5385 * will be reclaimed (that includes releasing ownership of the PMU).
5387 * So there might be a window of time where the number of per-task sessions is zero
5388 * yet one PMU might have an owner and get at most one overflow interrupt for a zombie
5389 * context. This is safe because if a per-task session comes in, it will push this one
5390 * out and, by virtue of pfm_save_regs(), this one will disappear. If a system-wide
5391 * session is forced on that CPU, given that we use task pinning, pfm_save_regs() will
5392 * also push our zombie context out.
5394 * Overall pretty hairy stuff....
5396 DPRINT(("ctx is zombie for [%d], converted to spurious\n", task ? task_pid_nr(task): -1));
5398 ia64_psr(regs)->up = 0;
5399 ia64_psr(regs)->sp = 1;
5404 pfm_do_interrupt_handler(void *arg, struct pt_regs *regs)
5406 struct task_struct *task;
5408 unsigned long flags;
5410 int this_cpu = smp_processor_id();
5413 pfm_stats[this_cpu].pfm_ovfl_intr_count++;
5416 * srlz.d done before arriving here
5418 pmc0 = ia64_get_pmc(0);
5420 task = GET_PMU_OWNER();
5421 ctx = GET_PMU_CTX();
5424 * if we have some pending bits set
5425 * assumes : if any PMC0.bit[63-1] is set, then PMC0.fr = 1
5427 if (PMC0_HAS_OVFL(pmc0) && task) {
5429 * we assume that pmc0.fr is always set here
5433 if (!ctx) goto report_spurious1;
5435 if (ctx->ctx_fl_system == 0 && (task->thread.flags & IA64_THREAD_PM_VALID) == 0)
5436 goto report_spurious2;
5438 PROTECT_CTX_NOPRINT(ctx, flags);
5440 pfm_overflow_handler(task, ctx, pmc0, regs);
5442 UNPROTECT_CTX_NOPRINT(ctx, flags);
5445 pfm_stats[this_cpu].pfm_spurious_ovfl_intr_count++;
5449 * keep it unfrozen at all times
5456 printk(KERN_INFO "perfmon: spurious overflow interrupt on CPU%d: process %d has no PFM context\n",
5457 this_cpu, task_pid_nr(task));
5461 printk(KERN_INFO "perfmon: spurious overflow interrupt on CPU%d: process %d, invalid flag\n",
5469 pfm_interrupt_handler(int irq, void *arg)
5471 unsigned long start_cycles, total_cycles;
5472 unsigned long min, max;
5475 struct pt_regs *regs = get_irq_regs();
5477 this_cpu = get_cpu();
5478 if (likely(!pfm_alt_intr_handler)) {
5479 min = pfm_stats[this_cpu].pfm_ovfl_intr_cycles_min;
5480 max = pfm_stats[this_cpu].pfm_ovfl_intr_cycles_max;
5482 start_cycles = ia64_get_itc();
5484 ret = pfm_do_interrupt_handler(arg, regs);
5486 total_cycles = ia64_get_itc();
5489 * don't measure spurious interrupts
5491 if (likely(ret == 0)) {
5492 total_cycles -= start_cycles;
5494 if (total_cycles < min) pfm_stats[this_cpu].pfm_ovfl_intr_cycles_min = total_cycles;
5495 if (total_cycles > max) pfm_stats[this_cpu].pfm_ovfl_intr_cycles_max = total_cycles;
5497 pfm_stats[this_cpu].pfm_ovfl_intr_cycles += total_cycles;
5501 (*pfm_alt_intr_handler->handler)(irq, arg, regs);
5509 * /proc/perfmon interface, for debug only
5512 #define PFM_PROC_SHOW_HEADER ((void *)(long)nr_cpu_ids+1)
5515 pfm_proc_start(struct seq_file *m, loff_t *pos)
5518 return PFM_PROC_SHOW_HEADER;
5521 while (*pos <= nr_cpu_ids) {
5522 if (cpu_online(*pos - 1)) {
5523 return (void *)*pos;
5531 pfm_proc_next(struct seq_file *m, void *v, loff_t *pos)
5534 return pfm_proc_start(m, pos);
5538 pfm_proc_stop(struct seq_file *m, void *v)
5543 pfm_proc_show_header(struct seq_file *m)
5545 struct list_head * pos;
5546 pfm_buffer_fmt_t * entry;
5547 unsigned long flags;
5550 "perfmon version : %u.%u\n"
5553 "expert mode : %s\n"
5554 "ovfl_mask : 0x%lx\n"
5555 "PMU flags : 0x%x\n",
5556 PFM_VERSION_MAJ, PFM_VERSION_MIN,
5558 pfm_sysctl.fastctxsw > 0 ? "Yes": "No",
5559 pfm_sysctl.expert_mode > 0 ? "Yes": "No",
5566 "proc_sessions : %u\n"
5567 "sys_sessions : %u\n"
5568 "sys_use_dbregs : %u\n"
5569 "ptrace_use_dbregs : %u\n",
5570 pfm_sessions.pfs_task_sessions,
5571 pfm_sessions.pfs_sys_sessions,
5572 pfm_sessions.pfs_sys_use_dbregs,
5573 pfm_sessions.pfs_ptrace_use_dbregs);
5577 spin_lock(&pfm_buffer_fmt_lock);
5579 list_for_each(pos, &pfm_buffer_fmt_list) {
5580 entry = list_entry(pos, pfm_buffer_fmt_t, fmt_list);
5581 seq_printf(m, "format : %16phD %s\n",
5582 entry->fmt_uuid, entry->fmt_name);
5584 spin_unlock(&pfm_buffer_fmt_lock);
5589 pfm_proc_show(struct seq_file *m, void *v)
5595 if (v == PFM_PROC_SHOW_HEADER) {
5596 pfm_proc_show_header(m);
5600 /* show info for CPU (v - 1) */
5604 "CPU%-2d overflow intrs : %lu\n"
5605 "CPU%-2d overflow cycles : %lu\n"
5606 "CPU%-2d overflow min : %lu\n"
5607 "CPU%-2d overflow max : %lu\n"
5608 "CPU%-2d smpl handler calls : %lu\n"
5609 "CPU%-2d smpl handler cycles : %lu\n"
5610 "CPU%-2d spurious intrs : %lu\n"
5611 "CPU%-2d replay intrs : %lu\n"
5612 "CPU%-2d syst_wide : %d\n"
5613 "CPU%-2d dcr_pp : %d\n"
5614 "CPU%-2d exclude idle : %d\n"
5615 "CPU%-2d owner : %d\n"
5616 "CPU%-2d context : %p\n"
5617 "CPU%-2d activations : %lu\n",
5618 cpu, pfm_stats[cpu].pfm_ovfl_intr_count,
5619 cpu, pfm_stats[cpu].pfm_ovfl_intr_cycles,
5620 cpu, pfm_stats[cpu].pfm_ovfl_intr_cycles_min,
5621 cpu, pfm_stats[cpu].pfm_ovfl_intr_cycles_max,
5622 cpu, pfm_stats[cpu].pfm_smpl_handler_calls,
5623 cpu, pfm_stats[cpu].pfm_smpl_handler_cycles,
5624 cpu, pfm_stats[cpu].pfm_spurious_ovfl_intr_count,
5625 cpu, pfm_stats[cpu].pfm_replay_ovfl_intr_count,
5626 cpu, pfm_get_cpu_data(pfm_syst_info, cpu) & PFM_CPUINFO_SYST_WIDE ? 1 : 0,
5627 cpu, pfm_get_cpu_data(pfm_syst_info, cpu) & PFM_CPUINFO_DCR_PP ? 1 : 0,
5628 cpu, pfm_get_cpu_data(pfm_syst_info, cpu) & PFM_CPUINFO_EXCL_IDLE ? 1 : 0,
5629 cpu, pfm_get_cpu_data(pmu_owner, cpu) ? pfm_get_cpu_data(pmu_owner, cpu)->pid: -1,
5630 cpu, pfm_get_cpu_data(pmu_ctx, cpu),
5631 cpu, pfm_get_cpu_data(pmu_activation_number, cpu));
5633 if (num_online_cpus() == 1 && pfm_sysctl.debug > 0) {
5635 psr = pfm_get_psr();
5640 "CPU%-2d psr : 0x%lx\n"
5641 "CPU%-2d pmc0 : 0x%lx\n",
5643 cpu, ia64_get_pmc(0));
5645 for (i=0; PMC_IS_LAST(i) == 0; i++) {
5646 if (PMC_IS_COUNTING(i) == 0) continue;
5648 "CPU%-2d pmc%u : 0x%lx\n"
5649 "CPU%-2d pmd%u : 0x%lx\n",
5650 cpu, i, ia64_get_pmc(i),
5651 cpu, i, ia64_get_pmd(i));
5657 const struct seq_operations pfm_seq_ops = {
5658 .start = pfm_proc_start,
5659 .next = pfm_proc_next,
5660 .stop = pfm_proc_stop,
5661 .show = pfm_proc_show
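/*
 * Note (not part of the original file): the iterator above maps seq_file
 * positions to CPUs. Position 0 yields PFM_PROC_SHOW_HEADER; for pos >= 1
 * the cookie is the 1-based CPU number, so pfm_proc_show() reports on
 * CPU (v - 1), with offline CPUs skipped in pfm_proc_start()/next().
 */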
5665 * we come here as soon as local_cpu_data->pfm_syst_wide is set. this happens
5666 * during pfm_enable() hence before pfm_start(). We cannot assume monitoring
5667 * is active or inactive based on mode. We must rely on the value in
5668 * local_cpu_data->pfm_syst_info
5671 pfm_syst_wide_update_task(struct task_struct *task, unsigned long info, int is_ctxswin)
5673 struct pt_regs *regs;
5675 unsigned long dcr_pp;
5677 dcr_pp = info & PFM_CPUINFO_DCR_PP ? 1 : 0;
5680 * pid 0 is guaranteed to be the idle task. There is one such task with pid 0
5681 * on every CPU, so we can rely on the pid to identify the idle task.
5683 if ((info & PFM_CPUINFO_EXCL_IDLE) == 0 || task->pid) {
5684 regs = task_pt_regs(task);
5685 ia64_psr(regs)->pp = is_ctxswin ? dcr_pp : 0;
5689 * if monitoring has started
5692 dcr = ia64_getreg(_IA64_REG_CR_DCR);
5694 * context switching in?
5697 /* mask monitoring for the idle task */
5698 ia64_setreg(_IA64_REG_CR_DCR, dcr & ~IA64_DCR_PP);
5704 * context switching out
5705 * restore monitoring for next task
5707 * Due to inlining this odd if-then-else construction generates better code.
5710 ia64_setreg(_IA64_REG_CR_DCR, dcr | IA64_DCR_PP);
5719 pfm_force_cleanup(pfm_context_t *ctx, struct pt_regs *regs)
5721 struct task_struct *task = ctx->ctx_task;
5723 ia64_psr(regs)->up = 0;
5724 ia64_psr(regs)->sp = 1;
5726 if (GET_PMU_OWNER() == task) {
5727 DPRINT(("cleared ownership for [%d]\n",
5728 task_pid_nr(ctx->ctx_task)));
5729 SET_PMU_OWNER(NULL, NULL);
5733 * disconnect the task from the context and vice-versa
5735 PFM_SET_WORK_PENDING(task, 0);
5737 task->thread.pfm_context = NULL;
5738 task->thread.flags &= ~IA64_THREAD_PM_VALID;
5740 DPRINT(("force cleanup for [%d]\n", task_pid_nr(task)));
5745 * in 2.6, interrupts are masked when we come here and the runqueue lock is held
5748 pfm_save_regs(struct task_struct *task)
5751 unsigned long flags;
5755 ctx = PFM_GET_CTX(task);
5756 if (ctx == NULL) return;
5759 * we always come here with interrupts ALREADY disabled by
5760 * the scheduler. So we simply need to protect against concurrent
5761 * access, not CPU concurrency.
5763 flags = pfm_protect_ctx_ctxsw(ctx);
5765 if (ctx->ctx_state == PFM_CTX_ZOMBIE) {
5766 struct pt_regs *regs = task_pt_regs(task);
5770 pfm_force_cleanup(ctx, regs);
5772 BUG_ON(ctx->ctx_smpl_hdr);
5774 pfm_unprotect_ctx_ctxsw(ctx, flags);
5776 pfm_context_free(ctx);
5781 * save current PSR: needed because we modify it
5784 psr = pfm_get_psr();
5786 BUG_ON(psr & (IA64_PSR_I));
5790 * This is the last instruction which may generate an overflow
5792 * We do not need to set psr.sp because it is irrelevant in the kernel.
5793 * It will be restored from ipsr when going back to user level
5798 * keep a copy of psr.up (for reload)
5800 ctx->ctx_saved_psr_up = psr & IA64_PSR_UP;
5803 * release ownership of this PMU.
5804 * PM interrupts are masked, so nothing can happen.
5807 SET_PMU_OWNER(NULL, NULL);
5810 * we systematically save the PMD as we have no
5811 * guarantee we will be scheduled on that same CPU again.
5814 pfm_save_pmds(ctx->th_pmds, ctx->ctx_used_pmds[0]);
5817 * save pmc0 ia64_srlz_d() done in pfm_save_pmds()
5818 * we will need it on the restore path to check
5819 * for pending overflow.
5821 ctx->th_pmcs[0] = ia64_get_pmc(0);
5824 * unfreeze the PMU if it had pending overflows
5826 if (ctx->th_pmcs[0] & ~0x1UL) pfm_unfreeze_pmu();
5829 * finally, allow context access.
5830 * interrupts will still be masked after this call.
5832 pfm_unprotect_ctx_ctxsw(ctx, flags);
5835 #else /* !CONFIG_SMP */
5837 pfm_save_regs(struct task_struct *task)
5842 ctx = PFM_GET_CTX(task);
5843 if (ctx == NULL) return;
5846 * save current PSR: needed because we modify it
5848 psr = pfm_get_psr();
5850 BUG_ON(psr & (IA64_PSR_I));
5854 * This is the last instruction which may generate an overflow
5856 * We do not need to set psr.sp because it is irrelevant in the kernel.
5857 * It will be restored from ipsr when going back to user level
5862 * keep a copy of psr.up (for reload)
5864 ctx->ctx_saved_psr_up = psr & IA64_PSR_UP;
5868 pfm_lazy_save_regs (struct task_struct *task)
5871 unsigned long flags;
5873 { u64 psr = pfm_get_psr();
5874 BUG_ON(psr & IA64_PSR_UP);
5877 ctx = PFM_GET_CTX(task);
5880 * we need to mask PMU overflow here to
5881 * make sure that we maintain pmc0 until
5882 * we save it. overflow interrupts are
5883 * treated as spurious if there is no owner.
5886 * XXX: I don't think this is necessary
5888 PROTECT_CTX(ctx,flags);
5891 * release ownership of this PMU.
5892 * must be done before we save the registers.
5894 * after this call any PMU interrupt is treated as spurious.
5897 SET_PMU_OWNER(NULL, NULL);
5900 * save all the pmds we use
5902 pfm_save_pmds(ctx->th_pmds, ctx->ctx_used_pmds[0]);
5905 * save pmc0 ia64_srlz_d() done in pfm_save_pmds()
5906 * it is needed to check for pending overflow
5907 * on the restore path
5909 ctx->th_pmcs[0] = ia64_get_pmc(0);
5912 * unfreeze the PMU if it had pending overflows
5914 if (ctx->th_pmcs[0] & ~0x1UL) pfm_unfreeze_pmu();
5917 * we can now unmask PMU interrupts, they will
5918 * be treated as purely spurious and we will not
5919 * lose any information
5921 UNPROTECT_CTX(ctx,flags);
5923 #endif /* CONFIG_SMP */
5927 * in 2.6, interrupts are masked when we come here and the runqueue lock is held
5930 pfm_load_regs (struct task_struct *task)
5933 unsigned long pmc_mask = 0UL, pmd_mask = 0UL;
5934 unsigned long flags;
5936 int need_irq_resend;
5938 ctx = PFM_GET_CTX(task);
5939 if (unlikely(ctx == NULL)) return;
5941 BUG_ON(GET_PMU_OWNER());
5944 * possible on unload
5946 if (unlikely((task->thread.flags & IA64_THREAD_PM_VALID) == 0)) return;
5949 * we always come here with interrupts ALREADY disabled by
5950 * the scheduler. So we simply need to protect against concurrent
5951 * access, not CPU concurrency.
5953 flags = pfm_protect_ctx_ctxsw(ctx);
5954 psr = pfm_get_psr();
5956 need_irq_resend = pmu_conf->flags & PFM_PMU_IRQ_RESEND;
5958 BUG_ON(psr & (IA64_PSR_UP|IA64_PSR_PP));
5959 BUG_ON(psr & IA64_PSR_I);
5961 if (unlikely(ctx->ctx_state == PFM_CTX_ZOMBIE)) {
5962 struct pt_regs *regs = task_pt_regs(task);
5964 BUG_ON(ctx->ctx_smpl_hdr);
5966 pfm_force_cleanup(ctx, regs);
5968 pfm_unprotect_ctx_ctxsw(ctx, flags);
5971 * this one (kmalloc'ed) is fine with interrupts disabled
5973 pfm_context_free(ctx);
5979 * we restore ALL the debug registers to avoid picking up stale state.
5982 if (ctx->ctx_fl_using_dbreg) {
5983 pfm_restore_ibrs(ctx->ctx_ibrs, pmu_conf->num_ibrs);
5984 pfm_restore_dbrs(ctx->ctx_dbrs, pmu_conf->num_dbrs);
5987 * retrieve saved psr.up
5989 psr_up = ctx->ctx_saved_psr_up;
5992 * if we were the last user of the PMU on that CPU,
5993 * then nothing to do except restore psr
5995 if (GET_LAST_CPU(ctx) == smp_processor_id() && ctx->ctx_last_activation == GET_ACTIVATION()) {
5998 * retrieve partial reload masks (due to user modifications)
6000 pmc_mask = ctx->ctx_reload_pmcs[0];
6001 pmd_mask = ctx->ctx_reload_pmds[0];
6005 * To avoid leaking information to the user level when psr.sp=0,
6006 * we must reload ALL implemented pmds (even the ones we don't use).
6007 * In the kernel we only allow PFM_READ_PMDS on registers which
6008 * we initialized or requested (sampling) so there is no risk there.
6010 pmd_mask = pfm_sysctl.fastctxsw ? ctx->ctx_used_pmds[0] : ctx->ctx_all_pmds[0];
6013 * ALL accessible PMCs are systematically reloaded, unused registers
6014 * get their default (from pfm_reset_pmu_state()) values to avoid picking
6015 * up stale configuration.
6017 * PMC0 is never in the mask. It is always restored separately.
6019 pmc_mask = ctx->ctx_all_pmcs[0];
6022 * when context is MASKED, we will restore PMC with plm=0
6023 * and PMD with stale information, but that's ok, nothing is being monitored.
6026 * XXX: optimize here
6028 if (pmd_mask) pfm_restore_pmds(ctx->th_pmds, pmd_mask);
6029 if (pmc_mask) pfm_restore_pmcs(ctx->th_pmcs, pmc_mask);
6032 * check for pending overflow at the time the state was saved
6035 if (unlikely(PMC0_HAS_OVFL(ctx->th_pmcs[0]))) {
6037 * reload pmc0 with the overflow information
6038 * On McKinley PMU, this will trigger a PMU interrupt
6040 ia64_set_pmc(0, ctx->th_pmcs[0]);
6042 ctx->th_pmcs[0] = 0UL;
6045 * will replay the PMU interrupt
6047 if (need_irq_resend) ia64_resend_irq(IA64_PERFMON_VECTOR);
6049 pfm_stats[smp_processor_id()].pfm_replay_ovfl_intr_count++;
6053 * we just did a reload, so we reset the partial reload fields
6055 ctx->ctx_reload_pmcs[0] = 0UL;
6056 ctx->ctx_reload_pmds[0] = 0UL;
6058 SET_LAST_CPU(ctx, smp_processor_id());
6061 * bump activation value for this PMU
6065 * record current activation for this context
6067 SET_ACTIVATION(ctx);
6070 * establish new ownership.
6072 SET_PMU_OWNER(task, ctx);
6075 * restore the psr.up bit; measurement is active again.
6077 * no PMU interrupt can happen at this point
6078 * because we still have interrupts disabled.
6080 if (likely(psr_up)) pfm_set_psr_up();
6083 * allow concurrent access to context
6085 pfm_unprotect_ctx_ctxsw(ctx, flags);
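/*
 * Illustrative sketch (not part of the original file): the activation
 * check guarding the short reload path above. Each CPU keeps a
 * monotonically increasing activation number; a context records the CPU
 * and activation current when it last ran. The saved PMU state can be
 * trusted only if both still match, i.e. no other context used this PMU
 * in between. The helper name below is hypothetical.
 */
#if 0
static int pmu_state_still_ours(pfm_context_t *ctx)
{
	return GET_LAST_CPU(ctx) == smp_processor_id() &&
	       ctx->ctx_last_activation == GET_ACTIVATION();
}
#endif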
6087 #else /* !CONFIG_SMP */
6089 * reload PMU state for UP kernels
6090 * in 2.5 we come here with interrupts disabled
6093 pfm_load_regs (struct task_struct *task)
6096 struct task_struct *owner;
6097 unsigned long pmd_mask, pmc_mask;
6099 int need_irq_resend;
6101 owner = GET_PMU_OWNER();
6102 ctx = PFM_GET_CTX(task);
6103 psr = pfm_get_psr();
6105 BUG_ON(psr & (IA64_PSR_UP|IA64_PSR_PP));
6106 BUG_ON(psr & IA64_PSR_I);
6109 * we restore ALL the debug registers to avoid picking up stale state.
6112 * This must be done even when the task is still the owner
6113 * as the registers may have been modified via ptrace()
6114 * (not perfmon) by the previous task.
6116 if (ctx->ctx_fl_using_dbreg) {
6117 pfm_restore_ibrs(ctx->ctx_ibrs, pmu_conf->num_ibrs);
6118 pfm_restore_dbrs(ctx->ctx_dbrs, pmu_conf->num_dbrs);
6122 * retrieve saved psr.up
6124 psr_up = ctx->ctx_saved_psr_up;
6125 need_irq_resend = pmu_conf->flags & PFM_PMU_IRQ_RESEND;
6128 * short path, our state is still there, just
6129 * need to restore psr and we go
6131 * we do not touch either PMC nor PMD. the psr is not touched
6132 * by the overflow_handler. So we are safe w.r.t. interrupt
6133 * concurrency even without interrupt masking.
6135 if (likely(owner == task)) {
6136 if (likely(psr_up)) pfm_set_psr_up();
6141 * someone else is still using the PMU, first push it out and
6142 * then we'll be able to install our stuff!
6144 * Upon return, there will be no owner for the current PMU
6146 if (owner) pfm_lazy_save_regs(owner);
6149 * To avoid leaking information to the user level when psr.sp=0,
6150 * we must reload ALL implemented pmds (even the ones we don't use).
6151 * In the kernel we only allow PFM_READ_PMDS on registers which
6152 * we initialized or requested (sampling) so there is no risk there.
6154 pmd_mask = pfm_sysctl.fastctxsw ? ctx->ctx_used_pmds[0] : ctx->ctx_all_pmds[0];
6157 * ALL accessible PMCs are systematically reloaded, unused registers
6158 * get their default (from pfm_reset_pmu_state()) values to avoid picking
6159 * up stale configuration.
6161 * PMC0 is never in the mask. It is always restored separately
6163 pmc_mask = ctx->ctx_all_pmcs[0];
6165 pfm_restore_pmds(ctx->th_pmds, pmd_mask);
6166 pfm_restore_pmcs(ctx->th_pmcs, pmc_mask);
6169 * check for pending overflow at the time the state was saved
6172 if (unlikely(PMC0_HAS_OVFL(ctx->th_pmcs[0]))) {
6174 * reload pmc0 with the overflow information
6175 * On McKinley PMU, this will trigger a PMU interrupt
6177 ia64_set_pmc(0, ctx->th_pmcs[0]);
6180 ctx->th_pmcs[0] = 0UL;
6183 * will replay the PMU interrupt
6185 if (need_irq_resend) ia64_resend_irq(IA64_PERFMON_VECTOR);
6187 pfm_stats[smp_processor_id()].pfm_replay_ovfl_intr_count++;
6191 * establish new ownership.
6193 SET_PMU_OWNER(task, ctx);
6196 * restore the psr.up bit; measurement is active again.
6198 * no PMU interrupt can happen at this point
6199 * because we still have interrupts disabled.
6201 if (likely(psr_up)) pfm_set_psr_up();
6203 #endif /* CONFIG_SMP */
6206 * this function assumes monitoring is stopped
6209 pfm_flush_pmds(struct task_struct *task, pfm_context_t *ctx)
6212 unsigned long mask2, val, pmd_val, ovfl_val;
6213 int i, can_access_pmu = 0;
6217 * is the caller the task being monitored (or which initiated the
6218 * session for system wide measurements)
6220 is_self = ctx->ctx_task == task ? 1 : 0;
6223 * can access PMU if task is the owner of the PMU state on the current CPU
6224 * or if we are running on the CPU bound to the context in system-wide mode
6225 * (that is not necessarily the task the context is attached to in this mode).
6226 * In system-wide we always have can_access_pmu true because a task running on an
6227 * invalid processor is flagged earlier in the call stack (see pfm_stop).
6229 can_access_pmu = (GET_PMU_OWNER() == task) || (ctx->ctx_fl_system && ctx->ctx_cpu == smp_processor_id());
6230 if (can_access_pmu) {
6232 * Mark the PMU as not owned
6233 * This will cause the interrupt handler to do nothing in case an overflow
6234 * interrupt was in-flight
6235 * This also guarantees that pmc0 will contain the final state
6236 * It virtually gives us full control on overflow processing from that point
6239 SET_PMU_OWNER(NULL, NULL);
6240 DPRINT(("releasing ownership\n"));
6243 * read current overflow status:
6245 * we are guaranteed to read the final stable state
6248 pmc0 = ia64_get_pmc(0); /* slow */
6251 * reset freeze bit, overflow status information destroyed
6255 pmc0 = ctx->th_pmcs[0];
6257 * clear whatever overflow status bits there were
6259 ctx->th_pmcs[0] = 0;
6261 ovfl_val = pmu_conf->ovfl_val;
6263 * we save all the used pmds
6264 * we take care of overflows for counting PMDs
6266 * XXX: sampling situation is not taken into account here
6268 mask2 = ctx->ctx_used_pmds[0];
6270 DPRINT(("is_self=%d ovfl_val=0x%lx mask2=0x%lx\n", is_self, ovfl_val, mask2));
6272 for (i = 0; mask2; i++, mask2>>=1) {
6274 /* skip unused pmds */
6275 if ((mask2 & 0x1) == 0) continue;
6278 * can access PMU always true in system wide mode
6280 val = pmd_val = can_access_pmu ? ia64_get_pmd(i) : ctx->th_pmds[i];
6282 if (PMD_IS_COUNTING(i)) {
6283 DPRINT(("[%d] pmd[%d] ctx_pmd=0x%lx hw_pmd=0x%lx\n",
6286 ctx->ctx_pmds[i].val,
6290 * we rebuild the full 64 bit value of the counter
6292 val = ctx->ctx_pmds[i].val + (val & ovfl_val);
6295 * now everything is in ctx_pmds[] and we need
6296 * to clear the saved context from save_regs() such that
6297 * pfm_read_pmds() gets the correct value
6302 * take care of overflow inline
6304 if (pmc0 & (1UL << i)) {
6305 val += 1 + ovfl_val;
6306 DPRINT(("[%d] pmd[%d] overflowed\n", task_pid_nr(task), i));
6310 DPRINT(("[%d] ctx_pmd[%d]=0x%lx pmd_val=0x%lx\n", task_pid_nr(task), i, val, pmd_val));
6312 if (is_self) ctx->th_pmds[i] = pmd_val;
6314 ctx->ctx_pmds[i].val = val;
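/*
 * Worked example (not part of the original file), again assuming 47-bit
 * hardware counters (ovfl_val = 2^47 - 1):
 *
 *   ctx_pmds[i].val = 3 * 2^47    (three hardware overflows so far)
 *   hw pmd          = 0x1234      (residual count since the last wrap)
 *
 *   val = 3 * 2^47 + (0x1234 & ovfl_val) = 3 * 2^47 + 0x1234
 *
 * and if pmc0 shows one more overflow caught at freeze time:
 *
 *   val += 1 + ovfl_val           ->  4 * 2^47 + 0x1234
 */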
6319 pfm_alt_save_pmu_state(void *data)
6321 struct pt_regs *regs;
6323 regs = task_pt_regs(current);
6325 DPRINT(("called\n"));
6328 * should not be necessary but
6329 * let's take no risk
6333 ia64_psr(regs)->pp = 0;
6336 * This call is required
6337 * May cause a spurious interrupt on some processors
6345 pfm_alt_restore_pmu_state(void *data)
6347 struct pt_regs *regs;
6349 regs = task_pt_regs(current);
6351 DPRINT(("called\n"));
6354 * put PMU back in the state expected by perfmon
6359 ia64_psr(regs)->pp = 0;
6362 * perfmon runs with PMU unfrozen at all times
6370 pfm_install_alt_pmu_interrupt(pfm_intr_handler_desc_t *hdl)
6375 /* some sanity checks */
6376 if (hdl == NULL || hdl->handler == NULL) return -EINVAL;
6378 /* do the easy test first */
6379 if (pfm_alt_intr_handler) return -EBUSY;
6381 /* one at a time in the install or remove, just fail the others */
6382 if (!spin_trylock(&pfm_alt_install_check)) {
6386 /* reserve our session */
6387 for_each_online_cpu(reserve_cpu) {
6388 ret = pfm_reserve_session(NULL, 1, reserve_cpu);
6389 if (ret) goto cleanup_reserve;
6392 /* save the current system wide pmu states */
6393 on_each_cpu(pfm_alt_save_pmu_state, NULL, 1);
6395 /* officially change to the alternate interrupt handler */
6396 pfm_alt_intr_handler = hdl;
6398 spin_unlock(&pfm_alt_install_check);
6403 for_each_online_cpu(i) {
6404 /* don't unreserve more than we reserved */
6405 if (i >= reserve_cpu) break;
6407 pfm_unreserve_session(NULL, 1, i);
6410 spin_unlock(&pfm_alt_install_check);
6414 EXPORT_SYMBOL_GPL(pfm_install_alt_pmu_interrupt);
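/*
 * Illustrative sketch (not part of the original file): how a client such
 * as the ia64 OProfile glue would take over the PMU interrupt. The
 * my_ovfl_handler name is hypothetical.
 */
#if 0
static int my_ovfl_handler(int irq, void *arg, struct pt_regs *regs);

static pfm_intr_handler_desc_t my_desc = {
	.handler = my_ovfl_handler,
};

	if (pfm_install_alt_pmu_interrupt(&my_desc) == 0) {
		/* ... all PMU overflow interrupts now reach my_ovfl_handler ... */
		pfm_remove_alt_pmu_interrupt(&my_desc);
	}
#endif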
6417 pfm_remove_alt_pmu_interrupt(pfm_intr_handler_desc_t *hdl)
6421 if (hdl == NULL) return -EINVAL;
6423 /* cannot remove someone else's handler! */
6424 if (pfm_alt_intr_handler != hdl) return -EINVAL;
6426 /* one at a time in the install or remove, just fail the others */
6427 if (!spin_trylock(&pfm_alt_install_check)) {
6431 pfm_alt_intr_handler = NULL;
6433 on_each_cpu(pfm_alt_restore_pmu_state, NULL, 1);
6435 for_each_online_cpu(i) {
6436 pfm_unreserve_session(NULL, 1, i);
6439 spin_unlock(&pfm_alt_install_check);
6443 EXPORT_SYMBOL_GPL(pfm_remove_alt_pmu_interrupt);
6446 * perfmon initialization routine, called from the initcall() table
6448 static int init_pfm_fs(void);
6456 family = local_cpu_data->family;
6461 if ((*p)->probe() == 0) goto found;
6462 } else if ((*p)->pmu_family == family || (*p)->pmu_family == 0xff) {
6476 unsigned int n, n_counters, i;
6478 printk("perfmon: version %u.%u IRQ %u\n",
6481 IA64_PERFMON_VECTOR);
6483 if (pfm_probe_pmu()) {
6484 printk(KERN_INFO "perfmon: disabled, there is no support for processor family %d\n",
6485 local_cpu_data->family);
6490 * compute the number of implemented PMD/PMC from the
6491 * description tables
6494 for (i=0; PMC_IS_LAST(i) == 0; i++) {
6495 if (PMC_IS_IMPL(i) == 0) continue;
6496 pmu_conf->impl_pmcs[i>>6] |= 1UL << (i&63);
6499 pmu_conf->num_pmcs = n;
6501 n = 0; n_counters = 0;
6502 for (i=0; PMD_IS_LAST(i) == 0; i++) {
6503 if (PMD_IS_IMPL(i) == 0) continue;
6504 pmu_conf->impl_pmds[i>>6] |= 1UL << (i&63);
6506 if (PMD_IS_COUNTING(i)) n_counters++;
6508 pmu_conf->num_pmds = n;
6509 pmu_conf->num_counters = n_counters;
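/*
 * Worked example (not part of the original file): the i>>6 / i&63
 * indexing above packs one bit per register into 64-bit words. For
 * i = 70: word index 70>>6 = 1, bit index 70&63 = 6, so register 70 is
 * recorded in impl_pmds[1], bit 6.
 */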
6512 * sanity checks on the number of debug registers
6514 if (pmu_conf->use_rr_dbregs) {
6515 if (pmu_conf->num_ibrs > IA64_NUM_DBG_REGS) {
6516 printk(KERN_INFO "perfmon: unsupported number of code debug registers (%u)\n", pmu_conf->num_ibrs);
6520 if (pmu_conf->num_dbrs > IA64_NUM_DBG_REGS) {
6521 printk(KERN_INFO "perfmon: unsupported number of data debug registers (%u)\n", pmu_conf->num_dbrs);
6527 printk("perfmon: %s PMU detected, %u PMCs, %u PMDs, %u counters (%lu bits)\n",
6531 pmu_conf->num_counters,
6532 ffz(pmu_conf->ovfl_val));
6535 if (pmu_conf->num_pmds >= PFM_NUM_PMD_REGS || pmu_conf->num_pmcs >= PFM_NUM_PMC_REGS) {
6536 printk(KERN_ERR "perfmon: not enough pmc/pmd, perfmon disabled\n");
6542 * create /proc/perfmon (mostly for debugging purposes)
6544 perfmon_dir = proc_create_seq("perfmon", S_IRUGO, NULL, &pfm_seq_ops);
6545 if (perfmon_dir == NULL) {
6546 printk(KERN_ERR "perfmon: cannot create /proc entry, perfmon disabled\n");
6552 * create /proc/sys/kernel/perfmon (for debugging purposes)
6554 pfm_sysctl_header = register_sysctl_table(pfm_sysctl_root);
6557 * initialize all our spinlocks
6559 spin_lock_init(&pfm_sessions.pfs_lock);
6560 spin_lock_init(&pfm_buffer_fmt_lock);
6564 for(i=0; i < NR_CPUS; i++) pfm_stats[i].pfm_ovfl_intr_cycles_min = ~0UL;
6569 __initcall(pfm_init);
6572 * this function is called before pfm_init()
6575 pfm_init_percpu (void)
6577 static int first_time=1;
6579 * make sure no measurement is active
6580 * (may inherit programmed PMCs from EFI).
6586 * we run with the PMU not frozen at all times
6591 register_percpu_irq(IA64_PERFMON_VECTOR, pfm_interrupt_handler,
6596 ia64_setreg(_IA64_REG_CR_PMV, IA64_PERFMON_VECTOR);
6601 * used for debug purposes only
6604 dump_pmu_state(const char *from)
6606 struct task_struct *task;
6607 struct pt_regs *regs;
6609 unsigned long psr, dcr, info, flags;
6612 local_irq_save(flags);
6614 this_cpu = smp_processor_id();
6615 regs = task_pt_regs(current);
6616 info = PFM_CPUINFO_GET();
6617 dcr = ia64_getreg(_IA64_REG_CR_DCR);
6619 if (info == 0 && ia64_psr(regs)->pp == 0 && (dcr & IA64_DCR_PP) == 0) {
6620 local_irq_restore(flags);
6624 printk("CPU%d from %s() current [%d] iip=0x%lx %s\n",
6627 task_pid_nr(current),
6631 task = GET_PMU_OWNER();
6632 ctx = GET_PMU_CTX();
6634 printk("->CPU%d owner [%d] ctx=%p\n", this_cpu, task ? task_pid_nr(task) : -1, ctx);
6636 psr = pfm_get_psr();
6638 printk("->CPU%d pmc0=0x%lx psr.pp=%d psr.up=%d dcr.pp=%d syst_info=0x%lx user_psr.up=%d user_psr.pp=%d\n",
6641 psr & IA64_PSR_PP ? 1 : 0,
6642 psr & IA64_PSR_UP ? 1 : 0,
6643 dcr & IA64_DCR_PP ? 1 : 0,
6646 ia64_psr(regs)->pp);
6648 ia64_psr(regs)->up = 0;
6649 ia64_psr(regs)->pp = 0;
6651 for (i=1; PMC_IS_LAST(i) == 0; i++) {
6652 if (PMC_IS_IMPL(i) == 0) continue;
6653 printk("->CPU%d pmc[%d]=0x%lx thread_pmc[%d]=0x%lx\n", this_cpu, i, ia64_get_pmc(i), i, ctx->th_pmcs[i]);
6656 for (i=1; PMD_IS_LAST(i) == 0; i++) {
6657 if (PMD_IS_IMPL(i) == 0) continue;
6658 printk("->CPU%d pmd[%d]=0x%lx thread_pmd[%d]=0x%lx\n", this_cpu, i, ia64_get_pmd(i), i, ctx->th_pmds[i]);
6662 printk("->CPU%d ctx_state=%d vaddr=%p addr=%p fd=%d ctx_task=[%d] saved_psr_up=0x%lx\n",
6665 ctx->ctx_smpl_vaddr,
6669 ctx->ctx_saved_psr_up);
6671 local_irq_restore(flags);
6675 * called from process.c:copy_thread(). task is new child.
6678 pfm_inherit(struct task_struct *task, struct pt_regs *regs)
6680 struct thread_struct *thread;
6682 DPRINT(("perfmon: pfm_inherit clearing state for [%d]\n", task_pid_nr(task)));
6684 thread = &task->thread;
6687 * cut links inherited from parent (current)
6689 thread->pfm_context = NULL;
6691 PFM_SET_WORK_PENDING(task, 0);
6694 * the psr bits are already set properly in copy_thread()
6697 #else /* !CONFIG_PERFMON */
6699 sys_perfmonctl (int fd, int cmd, void *arg, int count)
6703 #endif /* CONFIG_PERFMON */