/*
 * Copyright © 2014-2017 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */
#include <linux/debugfs.h>

#include "intel_guc_log.h"
#include "i915_drv.h"

static void guc_log_capture_logs(struct intel_guc_log *log);
/**
 * DOC: GuC firmware log
 *
 * Firmware log is enabled by setting i915.guc_log_level to a positive level.
 * Log data is printed out via reading debugfs i915_guc_log_dump. Reading from
 * i915_guc_load_status will print out firmware loading status and scratch
 * register values.
 */
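/*
 * A sketch of typical usage (paths assume debugfs mounted at
 * /sys/kernel/debug and the device being DRM card 0; both depend on the
 * system configuration):
 *
 *   # boot with i915.guc_log_level=3, then:
 *   cat /sys/kernel/debug/dri/0/i915_guc_log_dump
 *
 * The "guc_log" relay file created by guc_log_relay_create() below is
 * placed in the same debugfs directory and can be drained with a plain
 * read() loop while relay logging is open.
 */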
static int guc_action_flush_log_complete(struct intel_guc *guc)
{
        u32 action[] = {
                INTEL_GUC_ACTION_LOG_BUFFER_FILE_FLUSH_COMPLETE
        };

        return intel_guc_send(guc, action, ARRAY_SIZE(action));
}
static int guc_action_flush_log(struct intel_guc *guc)
{
        u32 action[] = {
                INTEL_GUC_ACTION_FORCE_LOG_BUFFER_FLUSH,
                0
        };

        return intel_guc_send(guc, action, ARRAY_SIZE(action));
}
static int guc_action_control_log(struct intel_guc *guc, bool enable,
                                  bool default_logging, u32 verbosity)
{
        u32 action[] = {
                INTEL_GUC_ACTION_UK_LOG_ENABLE_LOGGING,
                (enable ? GUC_LOG_CONTROL_LOGGING_ENABLED : 0) |
                (verbosity << GUC_LOG_CONTROL_VERBOSITY_SHIFT) |
                (default_logging ? GUC_LOG_CONTROL_DEFAULT_LOGGING : 0)
        };

        GEM_BUG_ON(verbosity > GUC_LOG_VERBOSITY_MAX);

        return intel_guc_send(guc, action, ARRAY_SIZE(action));
}
static inline struct intel_guc *log_to_guc(struct intel_guc_log *log)
{
        return container_of(log, struct intel_guc, log);
}
static void guc_log_enable_flush_events(struct intel_guc_log *log)
{
        intel_guc_enable_msg(log_to_guc(log),
                             INTEL_GUC_RECV_MSG_FLUSH_LOG_BUFFER |
                             INTEL_GUC_RECV_MSG_CRASH_DUMP_POSTED);
}

static void guc_log_disable_flush_events(struct intel_guc_log *log)
{
        intel_guc_disable_msg(log_to_guc(log),
                              INTEL_GUC_RECV_MSG_FLUSH_LOG_BUFFER |
                              INTEL_GUC_RECV_MSG_CRASH_DUMP_POSTED);
}
/*
 * Sub buffer switch callback. Called whenever relay has to switch to a new
 * sub buffer; relay stays on the same sub buffer if 0 is returned.
 */
static int subbuf_start_callback(struct rchan_buf *buf,
                                 void *subbuf,
                                 void *prev_subbuf,
                                 size_t prev_padding)
{
        /*
         * Use no-overwrite mode by default, where relay will stop accepting
         * new data if there are no empty sub buffers left.
         * There is no strict synchronization enforced by relay between
         * Consumer and Producer. In overwrite mode, there is a possibility of
         * getting inconsistent/garbled data: the producer could be writing to
         * the same sub buffer from which the Consumer is reading. This can't
         * be avoided unless the Consumer is fast enough and can always run in
         * tandem with the Producer.
         */
        if (relay_buf_full(buf))
                return 0;

        return 1;
}
/*
 * file_create() callback. Creates relay file in debugfs.
 */
static struct dentry *create_buf_file_callback(const char *filename,
                                               struct dentry *parent,
                                               umode_t mode,
                                               struct rchan_buf *buf,
                                               int *is_global)
{
        struct dentry *buf_file;

        /*
         * This is to enable the use of a single buffer for the relay channel
         * and correspondingly have a single file exposed to User, through
         * which it can collect the logs in order without any post-processing.
         * Need to set 'is_global' even if parent is NULL for early logging.
         */
        *is_global = 1;

        if (!parent)
                return NULL;

        buf_file = debugfs_create_file(filename, mode,
                                       parent, buf, &relay_file_operations);
        if (IS_ERR(buf_file))
                return NULL;

        return buf_file;
}
/*
 * file_remove() default callback. Removes relay file in debugfs.
 */
static int remove_buf_file_callback(struct dentry *dentry)
{
        debugfs_remove(dentry);
        return 0;
}
/* relay channel callbacks */
static struct rchan_callbacks relay_callbacks = {
        .subbuf_start = subbuf_start_callback,
        .create_buf_file = create_buf_file_callback,
        .remove_buf_file = remove_buf_file_callback,
};
static void guc_move_to_next_buf(struct intel_guc_log *log)
{
        /*
         * Make sure the updates made in the sub buffer are visible when
         * Consumer sees the following update to offset inside the sub buffer.
         */
        smp_wmb();

        /* All data has been written, so now move the offset of sub buffer. */
        relay_reserve(log->relay.channel, log->vma->obj->base.size);

        /* Switch to the next sub buffer */
        relay_flush(log->relay.channel);
}
static void *guc_get_write_buffer(struct intel_guc_log *log)
{
        /*
         * Just get the base address of a new sub buffer and copy data into it
         * ourselves. NULL will be returned in no-overwrite mode, if all sub
         * buffers are full. Could have used relay_write() to indirectly copy
         * the data, but that would have been a bit convoluted, as we need to
         * write to only certain locations inside a sub buffer, which cannot
         * be done without using relay_reserve() along with relay_write(). So
         * it's better to use relay_reserve() alone.
         */
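        /*
         * A zero-length relay_reserve() doesn't consume any space; it just
         * returns the current write position, i.e. the base of the current
         * sub buffer. guc_move_to_next_buf() later reserves the full sub
         * buffer size to advance past the snapshot written by hand here.
         */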
        return relay_reserve(log->relay.channel, 0);
}
static bool guc_check_log_buf_overflow(struct intel_guc_log *log,
                                       enum guc_log_buffer_type type,
                                       unsigned int full_cnt)
{
        unsigned int prev_full_cnt = log->stats[type].sampled_overflow;
        bool overflow = false;

        if (full_cnt != prev_full_cnt) {
                overflow = true;

                log->stats[type].overflow = full_cnt;
                log->stats[type].sampled_overflow += full_cnt - prev_full_cnt;

                if (full_cnt < prev_full_cnt) {
                        /* buffer_full_cnt is a 4 bit counter */
                        log->stats[type].sampled_overflow += 16;
                }
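                /*
                 * Worked example: if the previous sample was 14 and the new
                 * full_cnt is 2, the 4 bit hardware counter wrapped
                 * (14 -> 15 -> 0 -> 1 -> 2, i.e. 4 new overflows). The
                 * unsigned "full_cnt - prev_full_cnt" above contributes -12
                 * modulo 2^32, and the +16 correction lands the total at +4.
                 */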
                DRM_ERROR_RATELIMITED("GuC log buffer overflow\n");
        }

        return overflow;
}
static unsigned int guc_get_log_buffer_size(enum guc_log_buffer_type type)
{
        switch (type) {
        case GUC_ISR_LOG_BUFFER:
                return ISR_BUFFER_SIZE;
        case GUC_DPC_LOG_BUFFER:
                return DPC_BUFFER_SIZE;
        case GUC_CRASH_DUMP_LOG_BUFFER:
                return CRASH_BUFFER_SIZE;
        default:
                MISSING_CASE(type);
        }

        return 0;
}
static void guc_read_update_log_buffer(struct intel_guc_log *log)
{
        unsigned int buffer_size, read_offset, write_offset, bytes_to_copy, full_cnt;
        struct guc_log_buffer_state *log_buf_state, *log_buf_snapshot_state;
        struct guc_log_buffer_state log_buf_state_local;
        enum guc_log_buffer_type type;
        void *src_data, *dst_data;
        bool new_overflow;
        mutex_lock(&log->relay.lock);

        if (WARN_ON(!intel_guc_log_relay_enabled(log)))
                goto out_unlock;
        /* Get the pointer to shared GuC log buffer */
        log_buf_state = src_data = log->relay.buf_addr;

        /* Get the pointer to local buffer to store the logs */
        log_buf_snapshot_state = dst_data = guc_get_write_buffer(log);

        if (unlikely(!log_buf_snapshot_state)) {
                /*
                 * Rate limited to avoid a deluge of messages; logs might be
                 * getting consumed by User at a slow rate.
                 */
                DRM_ERROR_RATELIMITED("no sub-buffer to capture logs\n");
                log->relay.full_count++;

                goto out_unlock;
        }
        /* Actual logs are present from the 2nd page */
        src_data += PAGE_SIZE;
        dst_data += PAGE_SIZE;
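        /*
         * The first page holds the guc_log_buffer_state headers for all
         * three buffer types; the per-type data regions follow in the same
         * order (see the layout diagram in intel_guc_log_create()), so the
         * source and destination pointers advance in lockstep below.
         */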
        for (type = GUC_ISR_LOG_BUFFER; type < GUC_MAX_LOG_BUFFER; type++) {
                /*
                 * Make a copy of the state structure, inside GuC log buffer
                 * (which is uncached mapped), on the stack to avoid reading
                 * from it multiple times.
                 */
                memcpy(&log_buf_state_local, log_buf_state,
                       sizeof(struct guc_log_buffer_state));
                buffer_size = guc_get_log_buffer_size(type);
                read_offset = log_buf_state_local.read_ptr;
                write_offset = log_buf_state_local.sampled_write_ptr;
                full_cnt = log_buf_state_local.buffer_full_cnt;

                /* Bookkeeping stuff */
                log->stats[type].flush += log_buf_state_local.flush_to_file;
                new_overflow = guc_check_log_buf_overflow(log, type, full_cnt);

                /* Update the state of shared log buffer */
                log_buf_state->read_ptr = write_offset;
                log_buf_state->flush_to_file = 0;
                log_buf_state++;
                /* First copy the state structure in snapshot buffer */
                memcpy(log_buf_snapshot_state, &log_buf_state_local,
                       sizeof(struct guc_log_buffer_state));

                /*
                 * The write pointer could have been updated by the GuC
                 * firmware after it sent the flush interrupt to the Host;
                 * for consistency, set the write pointer in the snapshot to
                 * the sampled_write_ptr value captured above.
                 */
                log_buf_snapshot_state->write_ptr = write_offset;
                log_buf_snapshot_state++;
                /* Now copy the actual logs. */
                if (unlikely(new_overflow)) {
                        /* copy the whole buffer in case of overflow */
                        read_offset = 0;
                        write_offset = buffer_size;
                } else if (unlikely((read_offset > buffer_size) ||
                                    (write_offset > buffer_size))) {
                        DRM_ERROR("invalid log buffer state\n");
                        /* copy whole buffer as offsets are unreliable */
                        read_offset = 0;
                        write_offset = buffer_size;
                }
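                /*
                 * The data region is circular: when the write pointer has
                 * wrapped past the read pointer (read_offset > write_offset),
                 * the new data spans two chunks, [0, write_offset) and
                 * [read_offset, buffer_size). For example, with a 0x1000 byte
                 * buffer, read at 0xc00 and write at 0x200, the first memcpy
                 * below takes 0x200 bytes from the start and the second takes
                 * 0x400 bytes from 0xc00 upwards.
                 */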
                /* Just copy the newly written data */
                if (read_offset > write_offset) {
                        i915_memcpy_from_wc(dst_data, src_data, write_offset);
                        bytes_to_copy = buffer_size - read_offset;
                } else {
                        bytes_to_copy = write_offset - read_offset;
                }
                i915_memcpy_from_wc(dst_data + read_offset,
                                    src_data + read_offset, bytes_to_copy);

                src_data += buffer_size;
                dst_data += buffer_size;
        }

        guc_move_to_next_buf(log);

out_unlock:
        mutex_unlock(&log->relay.lock);
}
static void capture_logs_work(struct work_struct *work)
{
        struct intel_guc_log *log =
                container_of(work, struct intel_guc_log, relay.flush_work);

        guc_log_capture_logs(log);
}
static int guc_log_map(struct intel_guc_log *log)
{
        struct intel_guc *guc = log_to_guc(log);
        struct drm_i915_private *dev_priv = guc_to_i915(guc);
        void *vaddr;
        int ret;

        lockdep_assert_held(&log->relay.lock);

        if (!log->vma)
                return -ENODEV;

        mutex_lock(&dev_priv->drm.struct_mutex);
        ret = i915_gem_object_set_to_wc_domain(log->vma->obj, true);
        mutex_unlock(&dev_priv->drm.struct_mutex);
        if (ret)
                return ret;

        /*
         * Create a WC (Uncached for read) vmalloc mapping of log
         * buffer pages, so that we can directly get the data
         * (up-to-date) from memory.
         */
        vaddr = i915_gem_object_pin_map(log->vma->obj, I915_MAP_WC);
        if (IS_ERR(vaddr)) {
                DRM_ERROR("Couldn't map log buffer pages %ld\n",
                          PTR_ERR(vaddr));
                return PTR_ERR(vaddr);
        }

        log->relay.buf_addr = vaddr;

        return 0;
}
static void guc_log_unmap(struct intel_guc_log *log)
{
        lockdep_assert_held(&log->relay.lock);

        i915_gem_object_unpin_map(log->vma->obj);
        log->relay.buf_addr = NULL;
}
void intel_guc_log_init_early(struct intel_guc_log *log)
{
        mutex_init(&log->relay.lock);
        INIT_WORK(&log->relay.flush_work, capture_logs_work);
}
static int guc_log_relay_create(struct intel_guc_log *log)
{
        struct intel_guc *guc = log_to_guc(log);
        struct drm_i915_private *dev_priv = guc_to_i915(guc);
        struct rchan *guc_log_relay_chan;
        size_t n_subbufs, subbuf_size;
        int ret;

        lockdep_assert_held(&log->relay.lock);

        /* Keep the size of sub buffers same as shared log buffer */
        subbuf_size = log->vma->size;

        /*
         * Store up to 8 snapshots, which is large enough to buffer sufficient
         * boot time logs and provides enough leeway to User, in terms of
         * latency, for consuming the logs from relay. Also doesn't take
         * up too much memory.
         */
        n_subbufs = 8;

        guc_log_relay_chan = relay_open("guc_log",
                                        dev_priv->drm.primary->debugfs_root,
                                        subbuf_size, n_subbufs,
                                        &relay_callbacks, dev_priv);
        if (!guc_log_relay_chan) {
                DRM_ERROR("Couldn't create relay chan for GuC logging\n");

                ret = -ENOMEM;
                return ret;
        }

        GEM_BUG_ON(guc_log_relay_chan->subbuf_size < subbuf_size);
        log->relay.channel = guc_log_relay_chan;

        return 0;
}
static void guc_log_relay_destroy(struct intel_guc_log *log)
{
        lockdep_assert_held(&log->relay.lock);

        relay_close(log->relay.channel);
        log->relay.channel = NULL;
}
static void guc_log_capture_logs(struct intel_guc_log *log)
{
        struct intel_guc *guc = log_to_guc(log);
        struct drm_i915_private *dev_priv = guc_to_i915(guc);
        intel_wakeref_t wakeref;

        guc_read_update_log_buffer(log);
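        /*
         * Ack the flush with LOG_BUFFER_FILE_FLUSH_COMPLETE so the firmware
         * knows the notification has been serviced and may raise the next
         * one (see the flush-on-open comment in intel_guc_log_relay_open()).
         */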
        /*
         * Generally device is expected to be active only at this
         * time, so get/put should be really quick.
         */
        with_intel_runtime_pm(dev_priv, wakeref)
                guc_action_flush_log_complete(guc);
}
int intel_guc_log_create(struct intel_guc_log *log)
{
        struct intel_guc *guc = log_to_guc(log);
        struct i915_vma *vma;
        u32 guc_log_size;
        int ret;

        GEM_BUG_ON(log->vma);

        /*
         *  GuC Log buffer Layout
         *
         *  +===============================+ 00B
         *  |    Crash dump state header    |
         *  +-------------------------------+ 32B
         *  |       DPC state header        |
         *  +-------------------------------+ 64B
         *  |       ISR state header        |
         *  +-------------------------------+ 96B
         *  |                               |
         *  +===============================+ PAGE_SIZE (4KB)
         *  |        Crash Dump logs        |
         *  +===============================+ + CRASH_SIZE
         *  |           DPC logs            |
         *  +===============================+ + DPC_SIZE
         *  |           ISR logs            |
         *  +===============================+ + ISR_SIZE
         */
        guc_log_size = PAGE_SIZE + CRASH_BUFFER_SIZE + DPC_BUFFER_SIZE +
                       ISR_BUFFER_SIZE;
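        /*
         * With the non-debug buffer sizes defined in intel_guc_log.h at the
         * time of writing (8K crash, 32K DPC, 32K ISR), this works out to a
         * 4K state page plus 72K of log data, 76K in total.
         */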
        vma = intel_guc_allocate_vma(guc, guc_log_size);
        if (IS_ERR(vma)) {
                ret = PTR_ERR(vma);
                goto err;
        }

        log->vma = vma;

        log->level = i915_modparams.guc_log_level;

        return 0;

err:
        DRM_ERROR("Failed to allocate GuC log buffer. %d\n", ret);
        return ret;
}
void intel_guc_log_destroy(struct intel_guc_log *log)
{
        i915_vma_unpin_and_release(&log->vma, 0);
}
int intel_guc_log_set_level(struct intel_guc_log *log, u32 level)
{
        struct intel_guc *guc = log_to_guc(log);
        struct drm_i915_private *dev_priv = guc_to_i915(guc);
        intel_wakeref_t wakeref;
        int ret = 0;

        BUILD_BUG_ON(GUC_LOG_VERBOSITY_MIN != 0);
        GEM_BUG_ON(!log->vma);

        /*
         * GuC recognizes log levels starting from 0 up to max; we use 0 as an
         * indication that logging should be disabled.
         */
        if (level < GUC_LOG_LEVEL_DISABLED || level > GUC_LOG_LEVEL_MAX)
                return -EINVAL;
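        /*
         * Per the GUC_LOG_LEVEL_* helpers, level 0 disables logging, level 1
         * enables non-verbose logging, and higher levels enable verbose
         * logging with a verbosity of (level - 2); e.g. level 3 is sent
         * below with verbosity 1.
         */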
        mutex_lock(&dev_priv->drm.struct_mutex);

        if (log->level == level)
                goto out_unlock;

        with_intel_runtime_pm(dev_priv, wakeref)
                ret = guc_action_control_log(guc,
                                             GUC_LOG_LEVEL_IS_VERBOSE(level),
                                             GUC_LOG_LEVEL_IS_ENABLED(level),
                                             GUC_LOG_LEVEL_TO_VERBOSITY(level));
        if (ret) {
                DRM_DEBUG_DRIVER("guc_log_control action failed %d\n", ret);
                goto out_unlock;
        }

        log->level = level;

out_unlock:
        mutex_unlock(&dev_priv->drm.struct_mutex);

        return ret;
}
bool intel_guc_log_relay_enabled(const struct intel_guc_log *log)
{
        return log->relay.buf_addr;
}
int intel_guc_log_relay_open(struct intel_guc_log *log)
{
        int ret;

        mutex_lock(&log->relay.lock);

        if (intel_guc_log_relay_enabled(log)) {
                ret = -EEXIST;
                goto out_unlock;
        }

        /*
         * We require SSE 4.1 for fast reads from the GuC log buffer and
         * it should be present on the chipsets supporting GuC based
         * submission.
         */
        if (!i915_has_memcpy_from_wc()) {
                ret = -ENXIO;
                goto out_unlock;
        }

        ret = guc_log_relay_create(log);
        if (ret)
                goto out_unlock;

        ret = guc_log_map(log);
        if (ret)
                goto out_relay;

        mutex_unlock(&log->relay.lock);

        guc_log_enable_flush_events(log);

        /*
         * When GuC is logging without us relaying to userspace, we're ignoring
         * the flush notification. This means that we need to unconditionally
         * flush on relay enabling, since GuC only notifies us once.
         */
        queue_work(log->relay.flush_wq, &log->relay.flush_work);

        return 0;

out_relay:
        guc_log_relay_destroy(log);
out_unlock:
        mutex_unlock(&log->relay.lock);

        return ret;
}
void intel_guc_log_relay_flush(struct intel_guc_log *log)
{
        struct intel_guc *guc = log_to_guc(log);
        struct drm_i915_private *i915 = guc_to_i915(guc);
        intel_wakeref_t wakeref;

        /*
         * Before initiating the forceful flush, wait for any pending/ongoing
         * flush to complete, otherwise the forceful flush may not actually
         * happen.
         */
        flush_work(&log->relay.flush_work);

        with_intel_runtime_pm(i915, wakeref)
                guc_action_flush_log(guc);

        /* GuC would have updated the log buffer by now, so capture it */
        guc_log_capture_logs(log);
}
void intel_guc_log_relay_close(struct intel_guc_log *log)
{
        guc_log_disable_flush_events(log);
        flush_work(&log->relay.flush_work);

        mutex_lock(&log->relay.lock);
        GEM_BUG_ON(!intel_guc_log_relay_enabled(log));
        guc_log_unmap(log);
        guc_log_relay_destroy(log);
        mutex_unlock(&log->relay.lock);
}
void intel_guc_log_handle_flush_event(struct intel_guc_log *log)
{
        queue_work(log->relay.flush_wq, &log->relay.flush_work);
}