/*
 * Copyright © 2014-2017 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <linux/debugfs.h>

#include "intel_guc_log.h"
#include "i915_drv.h"

static void guc_log_capture_logs(struct intel_guc_log *log);

/**
 * DOC: GuC firmware log
 *
 * Firmware log is enabled by setting i915.guc_log_level to a positive level.
 * Log data can be read from the debugfs file i915_guc_log_dump. Reading from
 * i915_guc_load_status will print out the firmware loading status and scratch
 * registers value.
 */
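
/*
 * Example usage (illustrative only; the debugfs path and the chosen level are
 * assumptions about a typical setup, not guarantees):
 *
 *	# boot with verbose GuC logging, e.g. via the kernel command line
 *	i915.guc_log_level=3
 *
 *	# then dump the accumulated firmware log
 *	cat /sys/kernel/debug/dri/0/i915_guc_log_dump
 */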

static int guc_action_flush_log_complete(struct intel_guc *guc)
{
	u32 action[] = {
		INTEL_GUC_ACTION_LOG_BUFFER_FILE_FLUSH_COMPLETE
	};

	return intel_guc_send(guc, action, ARRAY_SIZE(action));
}

static int guc_action_flush_log(struct intel_guc *guc)
{
	u32 action[] = {
		INTEL_GUC_ACTION_FORCE_LOG_BUFFER_FLUSH,
		0
	};

	return intel_guc_send(guc, action, ARRAY_SIZE(action));
}

static int guc_action_control_log(struct intel_guc *guc, bool enable,
				  bool default_logging, u32 verbosity)
{
	u32 action[] = {
		INTEL_GUC_ACTION_UK_LOG_ENABLE_LOGGING,
		(enable ? GUC_LOG_CONTROL_LOGGING_ENABLED : 0) |
		(verbosity << GUC_LOG_CONTROL_VERBOSITY_SHIFT) |
		(default_logging ? GUC_LOG_CONTROL_DEFAULT_LOGGING : 0)
	};

	GEM_BUG_ON(verbosity > GUC_LOG_VERBOSITY_MAX);

	return intel_guc_send(guc, action, ARRAY_SIZE(action));
}

static inline struct intel_guc *log_to_guc(struct intel_guc_log *log)
{
	return container_of(log, struct intel_guc, log);
}

static void guc_log_enable_flush_events(struct intel_guc_log *log)
{
	intel_guc_enable_msg(log_to_guc(log),
			     INTEL_GUC_RECV_MSG_FLUSH_LOG_BUFFER |
			     INTEL_GUC_RECV_MSG_CRASH_DUMP_POSTED);
}

static void guc_log_disable_flush_events(struct intel_guc_log *log)
{
	intel_guc_disable_msg(log_to_guc(log),
			      INTEL_GUC_RECV_MSG_FLUSH_LOG_BUFFER |
			      INTEL_GUC_RECV_MSG_CRASH_DUMP_POSTED);
}

/*
 * Sub buffer switch callback. Called whenever relay has to switch to a new
 * sub buffer; relay stays on the same sub buffer if 0 is returned.
 */
static int subbuf_start_callback(struct rchan_buf *buf,
				 void *subbuf,
				 void *prev_subbuf,
				 size_t prev_padding)
{
	/*
	 * Use no-overwrite mode by default, where relay will stop accepting
	 * new data if there are no empty sub buffers left.
	 * There is no strict synchronization enforced by relay between Consumer
	 * and Producer. In overwrite mode, there is a possibility of getting
	 * inconsistent/garbled data, as the producer could be writing on to the
	 * same sub buffer from which the Consumer is reading. This can't be
	 * avoided unless the Consumer is fast enough and can always run in
	 * tandem with the Producer.
	 */
	if (relay_buf_full(buf))
		return 0;

	return 1;
}

/*
 * file_create() callback. Creates relay file in debugfs.
 */
static struct dentry *create_buf_file_callback(const char *filename,
					       struct dentry *parent,
					       umode_t mode,
					       struct rchan_buf *buf,
					       int *is_global)
{
	struct dentry *buf_file;

	/*
	 * This is to enable the use of a single buffer for the relay channel
	 * and correspondingly have a single file exposed to User, through
	 * which it can collect the logs in order without any post-processing.
	 * Need to set 'is_global' even if parent is NULL for early logging.
	 */
	*is_global = 1;

	if (!parent)
		return NULL;

	buf_file = debugfs_create_file(filename, mode,
				       parent, buf, &relay_file_operations);
	if (IS_ERR(buf_file))
		return NULL;

	return buf_file;
}

/*
 * file_remove() default callback. Removes relay file in debugfs.
 */
static int remove_buf_file_callback(struct dentry *dentry)
{
	debugfs_remove(dentry);
	return 0;
}

/* relay channel callbacks */
static struct rchan_callbacks relay_callbacks = {
	.subbuf_start = subbuf_start_callback,
	.create_buf_file = create_buf_file_callback,
	.remove_buf_file = remove_buf_file_callback,
};

static void guc_move_to_next_buf(struct intel_guc_log *log)
{
	/*
	 * Make sure the updates made in the sub buffer are visible when
	 * Consumer sees the following update to offset inside the sub buffer.
	 */
	smp_wmb();

	/* All data has been written, so now move the offset of sub buffer. */
	relay_reserve(log->relay.channel, log->vma->obj->base.size);

	/* Switch to the next sub buffer */
	relay_flush(log->relay.channel);
}

static void *guc_get_write_buffer(struct intel_guc_log *log)
{
	/*
	 * Just get the base address of a new sub buffer and copy data into it
	 * ourselves. NULL will be returned in no-overwrite mode, if all sub
	 * buffers are full. Could have used relay_write() to indirectly copy
	 * the data, but that would have been a bit convoluted, as we need to
	 * write to only certain locations inside a sub buffer, which cannot be
	 * done without using relay_reserve() along with relay_write(). So it's
	 * better to use relay_reserve() alone.
	 */
	return relay_reserve(log->relay.channel, 0);
}

static bool guc_check_log_buf_overflow(struct intel_guc_log *log,
				       enum guc_log_buffer_type type,
				       unsigned int full_cnt)
{
	unsigned int prev_full_cnt = log->stats[type].sampled_overflow;
	bool overflow = false;

	if (full_cnt != prev_full_cnt) {
		overflow = true;

		log->stats[type].overflow = full_cnt;
		log->stats[type].sampled_overflow += full_cnt - prev_full_cnt;

		if (full_cnt < prev_full_cnt) {
			/* buffer_full_cnt is a 4 bit counter */
			log->stats[type].sampled_overflow += 16;
		}
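
		/*
		 * Worked example (values assumed purely for illustration): if
		 * prev_full_cnt was 15 and the 4 bit hardware counter wrapped
		 * around to full_cnt == 1, the unsigned subtraction above
		 * added (1 - 15) modulo 2^32; the += 16 correction makes the
		 * net increment of sampled_overflow the true delta of 2.
		 */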

		dev_notice_ratelimited(guc_to_i915(log_to_guc(log))->drm.dev,
				       "GuC log buffer overflow\n");
	}

	return overflow;
}

static unsigned int guc_get_log_buffer_size(enum guc_log_buffer_type type)
{
	switch (type) {
	case GUC_ISR_LOG_BUFFER:
		return ISR_BUFFER_SIZE;
	case GUC_DPC_LOG_BUFFER:
		return DPC_BUFFER_SIZE;
	case GUC_CRASH_DUMP_LOG_BUFFER:
		return CRASH_BUFFER_SIZE;
	default:
		MISSING_CASE(type);
	}

	return 0;
}

static void guc_read_update_log_buffer(struct intel_guc_log *log)
{
	unsigned int buffer_size, read_offset, write_offset, bytes_to_copy, full_cnt;
	struct guc_log_buffer_state *log_buf_state, *log_buf_snapshot_state;
	struct guc_log_buffer_state log_buf_state_local;
	enum guc_log_buffer_type type;
	void *src_data, *dst_data;
	bool new_overflow;

	mutex_lock(&log->relay.lock);

	if (WARN_ON(!intel_guc_log_relay_enabled(log)))
		goto out_unlock;

	/* Get the pointer to shared GuC log buffer */
	log_buf_state = src_data = log->relay.buf_addr;

	/* Get the pointer to local buffer to store the logs */
	log_buf_snapshot_state = dst_data = guc_get_write_buffer(log);

	if (unlikely(!log_buf_snapshot_state)) {
		/*
		 * Used rate limited to avoid deluge of messages, logs might be
		 * getting consumed by User at a slow rate.
		 */
		DRM_ERROR_RATELIMITED("no sub-buffer to capture logs\n");
		log->relay.full_count++;

		goto out_unlock;
	}

	/* Actual logs are present from the 2nd page */
	src_data += PAGE_SIZE;
	dst_data += PAGE_SIZE;

	for (type = GUC_ISR_LOG_BUFFER; type < GUC_MAX_LOG_BUFFER; type++) {
		/*
		 * Make a copy of the state structure, inside GuC log buffer
		 * (which is uncached mapped), on the stack to avoid reading
		 * from it multiple times.
		 */
		memcpy(&log_buf_state_local, log_buf_state,
		       sizeof(struct guc_log_buffer_state));
		buffer_size = guc_get_log_buffer_size(type);
		read_offset = log_buf_state_local.read_ptr;
		write_offset = log_buf_state_local.sampled_write_ptr;
		full_cnt = log_buf_state_local.buffer_full_cnt;

		/* Bookkeeping stuff */
		log->stats[type].flush += log_buf_state_local.flush_to_file;
		new_overflow = guc_check_log_buf_overflow(log, type, full_cnt);

		/* Update the state of shared log buffer */
		log_buf_state->read_ptr = write_offset;
		log_buf_state->flush_to_file = 0;
		log_buf_state++;

		/* First copy the state structure in snapshot buffer */
		memcpy(log_buf_snapshot_state, &log_buf_state_local,
		       sizeof(struct guc_log_buffer_state));

		/*
		 * The write pointer could have been updated by GuC firmware
		 * after sending the flush interrupt to Host; for consistency
		 * set the write pointer to the same value as sampled_write_ptr
		 * in the snapshot buffer.
		 */
		log_buf_snapshot_state->write_ptr = write_offset;
		log_buf_snapshot_state++;

		/* Now copy the actual logs. */
		if (unlikely(new_overflow)) {
			/* copy the whole buffer in case of overflow */
			read_offset = 0;
			write_offset = buffer_size;
		} else if (unlikely((read_offset > buffer_size) ||
				    (write_offset > buffer_size))) {
			DRM_ERROR("invalid log buffer state\n");
			/* copy whole buffer as offsets are unreliable */
			read_offset = 0;
			write_offset = buffer_size;
		}

		/* Just copy the newly written data */
		if (read_offset > write_offset) {
			i915_memcpy_from_wc(dst_data, src_data, write_offset);
			bytes_to_copy = buffer_size - read_offset;
		} else {
			bytes_to_copy = write_offset - read_offset;
		}
		i915_memcpy_from_wc(dst_data + read_offset,
				    src_data + read_offset, bytes_to_copy);
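
		/*
		 * Worked example (numbers assumed for illustration only): with
		 * buffer_size == 4096, read_offset == 3000 and write_offset ==
		 * 100, the branch above first copies the wrapped head [0, 100)
		 * and the call above then copies the tail [3000, 4096), i.e.
		 * only the data written since the last read.
		 */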

		src_data += buffer_size;
		dst_data += buffer_size;
	}

	guc_move_to_next_buf(log);

out_unlock:
	mutex_unlock(&log->relay.lock);
}

static void capture_logs_work(struct work_struct *work)
{
	struct intel_guc_log *log =
		container_of(work, struct intel_guc_log, relay.flush_work);

	guc_log_capture_logs(log);
}

static int guc_log_map(struct intel_guc_log *log)
{
	void *vaddr;

	lockdep_assert_held(&log->relay.lock);

	if (!log->vma)
		return -ENODEV;

	/*
	 * Create a WC (Uncached for read) vmalloc mapping of log
	 * buffer pages, so that we can directly get the data
	 * (up-to-date) from memory.
	 */
	vaddr = i915_gem_object_pin_map(log->vma->obj, I915_MAP_WC);
	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);

	log->relay.buf_addr = vaddr;

	return 0;
}

static void guc_log_unmap(struct intel_guc_log *log)
{
	lockdep_assert_held(&log->relay.lock);

	i915_gem_object_unpin_map(log->vma->obj);
	log->relay.buf_addr = NULL;
}

void intel_guc_log_init_early(struct intel_guc_log *log)
{
	mutex_init(&log->relay.lock);
	INIT_WORK(&log->relay.flush_work, capture_logs_work);
}

static int guc_log_relay_create(struct intel_guc_log *log)
{
	struct intel_guc *guc = log_to_guc(log);
	struct drm_i915_private *dev_priv = guc_to_i915(guc);
	struct rchan *guc_log_relay_chan;
	size_t n_subbufs, subbuf_size;
	int ret;

	lockdep_assert_held(&log->relay.lock);

	/* Keep the size of sub buffers same as shared log buffer */
	subbuf_size = log->vma->size;

	/*
	 * Store up to 8 snapshots, which is large enough to buffer sufficient
	 * boot time logs and provides enough leeway to User, in terms of
	 * latency, for consuming the logs from relay. Also doesn't take
	 * up too much memory.
	 */
	n_subbufs = 8;

	guc_log_relay_chan = relay_open("guc_log",
					dev_priv->drm.primary->debugfs_root,
					subbuf_size, n_subbufs,
					&relay_callbacks, dev_priv);
	if (!guc_log_relay_chan) {
		DRM_ERROR("Couldn't create relay chan for GuC logging\n");

		ret = -ENOMEM;
		return ret;
	}

	GEM_BUG_ON(guc_log_relay_chan->subbuf_size < subbuf_size);
	log->relay.channel = guc_log_relay_chan;

	return 0;
}

static void guc_log_relay_destroy(struct intel_guc_log *log)
{
	lockdep_assert_held(&log->relay.lock);

	relay_close(log->relay.channel);
	log->relay.channel = NULL;
}

static void guc_log_capture_logs(struct intel_guc_log *log)
{
	struct intel_guc *guc = log_to_guc(log);
	struct drm_i915_private *dev_priv = guc_to_i915(guc);
	intel_wakeref_t wakeref;

	guc_read_update_log_buffer(log);

	/*
	 * Generally device is expected to be active only at this
	 * time, so get/put should be really quick.
	 */
	with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref)
		guc_action_flush_log_complete(guc);
}

int intel_guc_log_create(struct intel_guc_log *log)
{
	struct intel_guc *guc = log_to_guc(log);
	struct i915_vma *vma;
	u32 guc_log_size;
	int ret;

	GEM_BUG_ON(log->vma);

	/*
	 *  GuC Log buffer Layout
	 *
	 *  +===============================+ 00B
	 *  |    Crash dump state header    |
	 *  +-------------------------------+ 32B
	 *  |       DPC state header        |
	 *  +-------------------------------+ 64B
	 *  |       ISR state header        |
	 *  +-------------------------------+ 96B
	 *  |                               |
	 *  +===============================+ PAGE_SIZE (4KB)
	 *  |        Crash Dump logs        |
	 *  +===============================+ + CRASH_SIZE
	 *  |            DPC logs           |
	 *  +===============================+ + DPC_SIZE
	 *  |            ISR logs           |
	 *  +===============================+ + ISR_SIZE
	 */
	guc_log_size = PAGE_SIZE + CRASH_BUFFER_SIZE + DPC_BUFFER_SIZE +
		       ISR_BUFFER_SIZE;
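
	/*
	 * Worked example (sizes assumed for illustration; the authoritative
	 * CRASH/DPC/ISR buffer sizes are defined in intel_guc_log.h and vary
	 * with the kernel configuration): with an 8K crash dump region and
	 * 32K DPC and ISR regions, guc_log_size = 4K + 8K + 32K + 32K = 76K.
	 */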

	vma = intel_guc_allocate_vma(guc, guc_log_size);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto err;
	}

	log->vma = vma;

	log->level = i915_modparams.guc_log_level;

	return 0;

err:
	DRM_ERROR("Failed to allocate GuC log buffer. %d\n", ret);
	return ret;
}

void intel_guc_log_destroy(struct intel_guc_log *log)
{
	i915_vma_unpin_and_release(&log->vma, 0);
}

int intel_guc_log_set_level(struct intel_guc_log *log, u32 level)
{
	struct intel_guc *guc = log_to_guc(log);
	struct drm_i915_private *dev_priv = guc_to_i915(guc);
	intel_wakeref_t wakeref;
	int ret = 0;

	BUILD_BUG_ON(GUC_LOG_VERBOSITY_MIN != 0);
	GEM_BUG_ON(!log->vma);

	/*
	 * GuC recognizes log levels starting from 0 up to max; we use 0 as an
	 * indication that logging should be disabled.
	 */
	if (level < GUC_LOG_LEVEL_DISABLED || level > GUC_LOG_LEVEL_MAX)
		return -EINVAL;

	mutex_lock(&dev_priv->drm.struct_mutex);

	if (log->level == level)
		goto out_unlock;

	with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref)
		ret = guc_action_control_log(guc,
					     GUC_LOG_LEVEL_IS_VERBOSE(level),
					     GUC_LOG_LEVEL_IS_ENABLED(level),
					     GUC_LOG_LEVEL_TO_VERBOSITY(level));
	if (ret) {
		DRM_DEBUG_DRIVER("guc_log_control action failed %d\n", ret);
		goto out_unlock;
	}

	log->level = level;

out_unlock:
	mutex_unlock(&dev_priv->drm.struct_mutex);

	return ret;
}

bool intel_guc_log_relay_enabled(const struct intel_guc_log *log)
{
	return log->relay.buf_addr;
}

int intel_guc_log_relay_open(struct intel_guc_log *log)
{
	int ret;

	mutex_lock(&log->relay.lock);

	if (intel_guc_log_relay_enabled(log)) {
		ret = -EEXIST;
		goto out_unlock;
	}

	/*
	 * We require SSE 4.1 for fast reads from the GuC log buffer and
	 * it should be present on the chipsets supporting GuC based
	 * submission.
	 */
	if (!i915_has_memcpy_from_wc()) {
		ret = -ENXIO;
		goto out_unlock;
	}

	ret = guc_log_relay_create(log);
	if (ret)
		goto out_unlock;

	ret = guc_log_map(log);
	if (ret)
		goto out_relay;

	mutex_unlock(&log->relay.lock);

	guc_log_enable_flush_events(log);

	/*
	 * When GuC is logging without us relaying to userspace, we're ignoring
	 * the flush notification. This means that we need to unconditionally
	 * flush on relay enabling, since GuC only notifies us once.
	 */
	queue_work(log->relay.flush_wq, &log->relay.flush_work);

	return 0;

out_relay:
	guc_log_relay_destroy(log);
out_unlock:
	mutex_unlock(&log->relay.lock);

	return ret;
}
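
/*
 * Illustrative usage (a sketch of a hypothetical caller, not code from this
 * file): a debugfs hook that exposes the relay to userspace could drive this
 * interface roughly as follows, reading the captured data from the "guc_log"
 * relay file created by create_buf_file_callback():
 *
 *	err = intel_guc_log_relay_open(log);	// on file open
 *	...
 *	intel_guc_log_relay_flush(log);		// force out buffered log data
 *	...
 *	intel_guc_log_relay_close(log);		// on file release
 */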

void intel_guc_log_relay_flush(struct intel_guc_log *log)
{
	struct intel_guc *guc = log_to_guc(log);
	struct drm_i915_private *i915 = guc_to_i915(guc);
	intel_wakeref_t wakeref;

	/*
	 * Before initiating the forceful flush, wait for any pending/ongoing
	 * flush to complete, otherwise the forceful flush may not actually
	 * happen.
	 */
	flush_work(&log->relay.flush_work);

	with_intel_runtime_pm(&i915->runtime_pm, wakeref)
		guc_action_flush_log(guc);

	/* GuC would have updated log buffer by now, so capture it */
	guc_log_capture_logs(log);
}

void intel_guc_log_relay_close(struct intel_guc_log *log)
{
	struct intel_guc *guc = log_to_guc(log);
	struct drm_i915_private *i915 = guc_to_i915(guc);

	guc_log_disable_flush_events(log);
	synchronize_irq(i915->drm.irq);

	flush_work(&log->relay.flush_work);

	mutex_lock(&log->relay.lock);
	GEM_BUG_ON(!intel_guc_log_relay_enabled(log));
	guc_log_unmap(log);
	guc_log_relay_destroy(log);
	mutex_unlock(&log->relay.lock);
}

void intel_guc_log_handle_flush_event(struct intel_guc_log *log)
{
	queue_work(log->relay.flush_wq, &log->relay.flush_work);
}