Merge branch 'akpm' (patches from Andrew)
[linux-2.6-microblaze.git] / kernel / events / core.c
index 1c5e324..4649170 100644 (file)
@@ -132,6 +132,7 @@ task_function_call(struct task_struct *p, remote_function_f func, void *info)
 
 /**
  * cpu_function_call - call a function on the cpu
+ * @cpu:       target cpu to queue this function
  * @func:      the function to be called
  * @info:      the function call argument
  *
@@ -3821,9 +3822,16 @@ static void perf_event_context_sched_in(struct perf_event_context *ctx,
                                        struct task_struct *task)
 {
        struct perf_cpu_context *cpuctx;
-       struct pmu *pmu = ctx->pmu;
+       struct pmu *pmu;
 
        cpuctx = __get_cpu_context(ctx);
+
+       /*
+        * HACK: for HETEROGENEOUS the task context might have switched to a
+        * different PMU, force (re)set the context.
+        */
+       pmu = ctx->pmu = cpuctx->ctx.pmu;
+
        if (cpuctx->task_ctx == ctx) {
                if (cpuctx->sched_cb_usage)
                        __perf_pmu_sched_task(cpuctx, true);
@@ -6669,10 +6677,10 @@ out:
        return data->aux_size;
 }
 
-long perf_pmu_snapshot_aux(struct perf_buffer *rb,
-                          struct perf_event *event,
-                          struct perf_output_handle *handle,
-                          unsigned long size)
+static long perf_pmu_snapshot_aux(struct perf_buffer *rb,
+                                 struct perf_event *event,
+                                 struct perf_output_handle *handle,
+                                 unsigned long size)
 {
        unsigned long flags;
        long ret;
@@ -8680,13 +8688,12 @@ static void perf_event_switch(struct task_struct *task,
                },
        };
 
-       if (!sched_in && task->state == TASK_RUNNING)
+       if (!sched_in && task->on_rq) {
                switch_event.event_id.header.misc |=
                                PERF_RECORD_MISC_SWITCH_OUT_PREEMPT;
+       }
 
-       perf_iterate_sb(perf_event_switch_output,
-                      &switch_event,
-                      NULL);
+       perf_iterate_sb(perf_event_switch_output, &switch_event, NULL);
 }
 
 /*
@@ -11917,6 +11924,7 @@ again:
  * @pid:               target pid
  * @cpu:               target cpu
  * @group_fd:          group leader event fd
+ * @flags:             perf event open flags
  */
 SYSCALL_DEFINE5(perf_event_open,
                struct perf_event_attr __user *, attr_uptr,
@@ -12373,6 +12381,8 @@ err_fd:
  * @attr: attributes of the counter to create
  * @cpu: cpu in which the counter is bound
  * @task: task to profile (NULL for percpu)
+ * @overflow_handler: callback to trigger when we hit the event
+ * @context: context data that can be used by the overflow_handler callback
  */
 struct perf_event *
 perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu,