chain->nr = i;
 }
 
+/*
+ * Hardware sample records, created some time after the event occurred, need to
+ * have subsequent addresses removed from the call chain.
+ */
+void thread_stack__sample_late(struct thread *thread, int cpu,
+                              struct ip_callchain *chain, size_t sz,
+                              u64 sample_ip, u64 kernel_start)
+{
+       struct thread_stack *ts = thread__stack(thread, cpu);
+       u64 sample_context = callchain_context(sample_ip, kernel_start);
+       u64 last_context, context, ip;
+       size_t nr = 0, j;
+
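+       /* Need room for at least a context marker and the sample IP */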
+       if (sz < 2) {
+               chain->nr = 0;
+               return;
+       }
+
+       if (!ts)
+               goto out;
+
+       /*
+        * When tracing kernel space, kernel addresses occur at the top of the
+        * call chain after the event occurred but before tracing stopped.
+        * Skip them.
+        */
+       for (j = 1; j <= ts->cnt; j++) {
+               ip = ts->stack[ts->cnt - j].ret_addr;
+               context = callchain_context(ip, kernel_start);
+               if (context == PERF_CONTEXT_USER ||
+                   (context == sample_context && ip == sample_ip))
+                       break;
+       }
+
+       last_context = sample_ip; /* Use sample_ip as an invalid context */
+
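+       /*
+        * Copy the remaining entries, prefixing each run of kernel or user
+        * addresses with a PERF_CONTEXT_KERNEL or PERF_CONTEXT_USER marker
+        * whenever the context changes.
+        */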
+       for (; nr < sz && j <= ts->cnt; nr++, j++) {
+               ip = ts->stack[ts->cnt - j].ret_addr;
+               context = callchain_context(ip, kernel_start);
+               if (context != last_context) {
+                       if (nr >= sz - 1)
+                               break;
+                       chain->ips[nr++] = context;
+                       last_context = context;
+               }
+               chain->ips[nr] = ip;
+       }
+out:
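+       /*
+        * If nothing was copied, fall back to a 2-entry chain of just the
+        * sample context and the sample IP.
+        */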
+       if (nr) {
+               chain->nr = nr;
+       } else {
+               chain->ips[0] = sample_context;
+               chain->ips[1] = sample_ip;
+               chain->nr = 2;
+       }
+}
+
 struct call_return_processor *
 call_return_processor__new(int (*process)(struct call_return *cr, u64 *parent_db_id, void *data),
                           void *data)
 
 void thread_stack__set_trace_nr(struct thread *thread, int cpu, u64 trace_nr);
 void thread_stack__sample(struct thread *thread, int cpu, struct ip_callchain *chain,
                          size_t sz, u64 ip, u64 kernel_start);
+void thread_stack__sample_late(struct thread *thread, int cpu,
+                              struct ip_callchain *chain, size_t sz, u64 ip,
+                              u64 kernel_start);
 int thread_stack__flush(struct thread *thread);
 void thread_stack__free(struct thread *thread);
 size_t thread_stack__depth(struct thread *thread, int cpu);