// SPDX-License-Identifier: MIT
/*
 * Copyright © 2016-2019 Intel Corporation
 */

#include <linux/circ_buf.h>
#include <linux/ktime.h>
#include <linux/time64.h>
#include <linux/timekeeping.h>

#include "i915_drv.h"
#include "intel_guc_ct.h"
#include "gt/intel_gt.h"

static inline struct intel_guc *ct_to_guc(struct intel_guc_ct *ct)
{
	return container_of(ct, struct intel_guc, ct);
}

static inline struct intel_gt *ct_to_gt(struct intel_guc_ct *ct)
{
	return guc_to_gt(ct_to_guc(ct));
}

static inline struct drm_i915_private *ct_to_i915(struct intel_guc_ct *ct)
{
	return ct_to_gt(ct)->i915;
}

static inline struct drm_device *ct_to_drm(struct intel_guc_ct *ct)
{
	return &ct_to_i915(ct)->drm;
}

#define CT_ERROR(_ct, _fmt, ...) \
	drm_err(ct_to_drm(_ct), "CT: " _fmt, ##__VA_ARGS__)
#ifdef CONFIG_DRM_I915_DEBUG_GUC
#define CT_DEBUG(_ct, _fmt, ...) \
	drm_dbg(ct_to_drm(_ct), "CT: " _fmt, ##__VA_ARGS__)
#else
#define CT_DEBUG(...)	do { } while (0)
#endif
#define CT_PROBE_ERROR(_ct, _fmt, ...) \
	i915_probe_error(ct_to_i915(_ct), "CT: " _fmt, ##__VA_ARGS__)

/**
 * DOC: CTB Blob
 *
 * We allocate a single blob to hold both CTB descriptors and buffers:
 *
 *      +--------+-----------------------------------------------+------+
 *      | offset |                     contents                  | size |
 *      +========+===============================================+======+
 *      | 0x0000 | H2G `CTB Descriptor`_ (send)                  |      |
 *      +--------+-----------------------------------------------+  4K  |
 *      | 0x0800 | G2H `CTB Descriptor`_ (recv)                  |      |
 *      +--------+-----------------------------------------------+------+
 *      | 0x1000 | H2G `CT Buffer`_ (send)                       | n*4K |
 *      |        |                                               |      |
 *      +--------+-----------------------------------------------+------+
 *      | 0x1000 | G2H `CT Buffer`_ (recv)                       | m*4K |
 *      | + n*4K |                                               |      |
 *      +--------+-----------------------------------------------+------+
 *
 * Size of each `CT Buffer`_ must be a multiple of 4K.
 * We don't expect too many messages in flight at any time, unless we are
 * using GuC submission. In that case each request requires a minimum of
 * 2 dwords, which gives us a maximum of 256 queued requests. Hopefully
 * this is enough space to avoid backpressure on the driver. We increase
 * the size of the receive buffer (relative to the send) to ensure a G2H
 * response CTB has a landing spot.
 */
#define CTB_DESC_SIZE		ALIGN(sizeof(struct guc_ct_buffer_desc), SZ_2K)
#define CTB_H2G_BUFFER_SIZE	(SZ_4K)
#define CTB_G2H_BUFFER_SIZE	(4 * CTB_H2G_BUFFER_SIZE)
#define G2H_ROOM_BUFFER_SIZE	(CTB_G2H_BUFFER_SIZE / 4)

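/*
 * Worked example of the blob layout with the sizes above (a sketch; it
 * assumes sizeof(struct guc_ct_buffer_desc) <= 2K, so CTB_DESC_SIZE is 2K):
 *
 *	send desc: offset 0x0000 (2K)
 *	recv desc: offset 0x0800 (2K)
 *	H2G cmds:  offset 0x1000, CTB_H2G_BUFFER_SIZE = 4K  (n = 1)
 *	G2H cmds:  offset 0x2000, CTB_G2H_BUFFER_SIZE = 16K (m = 4)
 *
 * Total blob size: 2 * 2K + 4K + 16K = 24K, matching the blob_size
 * computation in intel_guc_ct_init(). G2H_ROOM_BUFFER_SIZE reserves a
 * quarter of the G2H buffer (4K) for unsolicited G2H messages.
 */
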
struct ct_request {
	struct list_head link;
	u32 fence;
	u32 status;
	u32 response_len;
	u32 *response_buf;
};

struct ct_incoming_msg {
	struct list_head link;
	u32 size;
	u32 msg[];
};

enum { CTB_SEND = 0, CTB_RECV = 1 };

enum { CTB_OWNER_HOST = 0 };

static void ct_receive_tasklet_func(struct tasklet_struct *t);
static void ct_incoming_request_worker_func(struct work_struct *w);

/**
 * intel_guc_ct_init_early - Initialize CT state without requiring device access
 * @ct: pointer to CT struct
 */
void intel_guc_ct_init_early(struct intel_guc_ct *ct)
{
	spin_lock_init(&ct->ctbs.send.lock);
	spin_lock_init(&ct->ctbs.recv.lock);
	spin_lock_init(&ct->requests.lock);
	INIT_LIST_HEAD(&ct->requests.pending);
	INIT_LIST_HEAD(&ct->requests.incoming);
	INIT_WORK(&ct->requests.worker, ct_incoming_request_worker_func);
	tasklet_setup(&ct->receive_tasklet, ct_receive_tasklet_func);
	init_waitqueue_head(&ct->wq);
}

static inline const char *guc_ct_buffer_type_to_str(u32 type)
{
	switch (type) {
	case GUC_CTB_TYPE_HOST2GUC:
		return "SEND";
	case GUC_CTB_TYPE_GUC2HOST:
		return "RECV";
	default:
		return "<invalid>";
	}
}

static void guc_ct_buffer_desc_init(struct guc_ct_buffer_desc *desc)
{
	memset(desc, 0, sizeof(*desc));
}

static void guc_ct_buffer_reset(struct intel_guc_ct_buffer *ctb)
{
	u32 space;

	ctb->broken = false;
	ctb->tail = 0;
	ctb->head = 0;
	space = CIRC_SPACE(ctb->tail, ctb->head, ctb->size) - ctb->resv_space;
	atomic_set(&ctb->space, space);

	guc_ct_buffer_desc_init(ctb->desc);
}

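/*
 * Example of the initial space accounting (a sketch based on the
 * CIRC_SPACE() definition in <linux/circ_buf.h>, which yields size - 1
 * free slots for an empty ring): the H2G buffer holds 4K / 4 = 1024
 * dwords with no reserved space, so it starts with 1023 dwords of credit;
 * the G2H buffer holds 16K / 4 = 4096 dwords minus 1024 reserved, so it
 * starts with 3071.
 */
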
static void guc_ct_buffer_init(struct intel_guc_ct_buffer *ctb,
			       struct guc_ct_buffer_desc *desc,
			       u32 *cmds, u32 size_in_bytes, u32 resv_space)
{
	GEM_BUG_ON(size_in_bytes % 4);

	ctb->desc = desc;
	ctb->cmds = cmds;
	ctb->size = size_in_bytes / 4;
	ctb->resv_space = resv_space / 4;

	guc_ct_buffer_reset(ctb);
}

static int guc_action_register_ct_buffer(struct intel_guc *guc, u32 type,
					 u32 desc_addr, u32 buff_addr, u32 size)
{
	u32 request[HOST2GUC_REGISTER_CTB_REQUEST_MSG_LEN] = {
		FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) |
		FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_REQUEST) |
		FIELD_PREP(GUC_HXG_REQUEST_MSG_0_ACTION, GUC_ACTION_HOST2GUC_REGISTER_CTB),
		FIELD_PREP(HOST2GUC_REGISTER_CTB_REQUEST_MSG_1_SIZE, size / SZ_4K - 1) |
		FIELD_PREP(HOST2GUC_REGISTER_CTB_REQUEST_MSG_1_TYPE, type),
		FIELD_PREP(HOST2GUC_REGISTER_CTB_REQUEST_MSG_2_DESC_ADDR, desc_addr),
		FIELD_PREP(HOST2GUC_REGISTER_CTB_REQUEST_MSG_3_BUFF_ADDR, buff_addr),
	};
	int ret;

	GEM_BUG_ON(type != GUC_CTB_TYPE_HOST2GUC && type != GUC_CTB_TYPE_GUC2HOST);
	GEM_BUG_ON(size % SZ_4K);

	/* CT registration must go over MMIO */
	ret = intel_guc_send_mmio(guc, request, ARRAY_SIZE(request), NULL, 0);

	return ret > 0 ? -EPROTO : ret;
}

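/*
 * Example encoding of the SIZE field above (derived from the FIELD_PREP
 * arguments, which pass size / SZ_4K - 1): the field carries the number
 * of 4K pages minus one, so the 4K H2G buffer registers as 0 and the 16K
 * G2H buffer registers as 3.
 */
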
static int ct_register_buffer(struct intel_guc_ct *ct, u32 type,
			      u32 desc_addr, u32 buff_addr, u32 size)
{
	int err;

	err = i915_inject_probe_error(guc_to_gt(ct_to_guc(ct))->i915, -ENXIO);
	if (unlikely(err))
		return err;

	err = guc_action_register_ct_buffer(ct_to_guc(ct), type,
					    desc_addr, buff_addr, size);
	if (unlikely(err))
		CT_ERROR(ct, "Failed to register %s buffer (%pe)\n",
			 guc_ct_buffer_type_to_str(type), ERR_PTR(err));
	return err;
}

static int guc_action_deregister_ct_buffer(struct intel_guc *guc, u32 type)
{
	u32 request[HOST2GUC_DEREGISTER_CTB_REQUEST_MSG_LEN] = {
		FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) |
		FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_REQUEST) |
		FIELD_PREP(GUC_HXG_REQUEST_MSG_0_ACTION, GUC_ACTION_HOST2GUC_DEREGISTER_CTB),
		FIELD_PREP(HOST2GUC_DEREGISTER_CTB_REQUEST_MSG_1_TYPE, type),
	};
	int ret;

	GEM_BUG_ON(type != GUC_CTB_TYPE_HOST2GUC && type != GUC_CTB_TYPE_GUC2HOST);

	/* CT deregistration must go over MMIO */
	ret = intel_guc_send_mmio(guc, request, ARRAY_SIZE(request), NULL, 0);

	return ret > 0 ? -EPROTO : ret;
}

static int ct_deregister_buffer(struct intel_guc_ct *ct, u32 type)
{
	int err = guc_action_deregister_ct_buffer(ct_to_guc(ct), type);

	if (unlikely(err))
		CT_ERROR(ct, "Failed to deregister %s buffer (%pe)\n",
			 guc_ct_buffer_type_to_str(type), ERR_PTR(err));
	return err;
}

/**
 * intel_guc_ct_init - Init buffer-based communication
 * @ct: pointer to CT struct
 *
 * Allocate memory required for buffer-based communication.
 *
 * Return: 0 on success, a negative errno code on failure.
 */
int intel_guc_ct_init(struct intel_guc_ct *ct)
{
	struct intel_guc *guc = ct_to_guc(ct);
	struct guc_ct_buffer_desc *desc;
	u32 blob_size;
	u32 cmds_size;
	u32 resv_space;
	void *blob;
	u32 *cmds;
	int err;

	err = i915_inject_probe_error(guc_to_gt(guc)->i915, -ENXIO);
	if (err)
		return err;

	GEM_BUG_ON(ct->vma);

	blob_size = 2 * CTB_DESC_SIZE + CTB_H2G_BUFFER_SIZE + CTB_G2H_BUFFER_SIZE;
	err = intel_guc_allocate_and_map_vma(guc, blob_size, &ct->vma, &blob);
	if (unlikely(err)) {
		CT_PROBE_ERROR(ct, "Failed to allocate %u for CTB data (%pe)\n",
			       blob_size, ERR_PTR(err));
		return err;
	}

	CT_DEBUG(ct, "base=%#x size=%u\n", intel_guc_ggtt_offset(guc, ct->vma), blob_size);

	/* store pointers to desc and cmds for send ctb */
	desc = blob;
	cmds = blob + 2 * CTB_DESC_SIZE;
	cmds_size = CTB_H2G_BUFFER_SIZE;
	resv_space = 0;
	CT_DEBUG(ct, "%s desc %#tx cmds %#tx size %u/%u\n", "send",
		 ptrdiff(desc, blob), ptrdiff(cmds, blob), cmds_size,
		 resv_space);

	guc_ct_buffer_init(&ct->ctbs.send, desc, cmds, cmds_size, resv_space);

	/* store pointers to desc and cmds for recv ctb */
	desc = blob + CTB_DESC_SIZE;
	cmds = blob + 2 * CTB_DESC_SIZE + CTB_H2G_BUFFER_SIZE;
	cmds_size = CTB_G2H_BUFFER_SIZE;
	resv_space = G2H_ROOM_BUFFER_SIZE;
	CT_DEBUG(ct, "%s desc %#tx cmds %#tx size %u/%u\n", "recv",
		 ptrdiff(desc, blob), ptrdiff(cmds, blob), cmds_size,
		 resv_space);

	guc_ct_buffer_init(&ct->ctbs.recv, desc, cmds, cmds_size, resv_space);

	return 0;
}

/**
 * intel_guc_ct_fini - Fini buffer-based communication
 * @ct: pointer to CT struct
 *
 * Deallocate memory required for buffer-based communication.
 */
void intel_guc_ct_fini(struct intel_guc_ct *ct)
{
	GEM_BUG_ON(ct->enabled);

	tasklet_kill(&ct->receive_tasklet);
	i915_vma_unpin_and_release(&ct->vma, I915_VMA_RELEASE_MAP);
	memset(ct, 0, sizeof(*ct));
}

/**
 * intel_guc_ct_enable - Enable buffer based command transport.
 * @ct: pointer to CT struct
 *
 * Return: 0 on success, a negative errno code on failure.
 */
int intel_guc_ct_enable(struct intel_guc_ct *ct)
{
	struct intel_guc *guc = ct_to_guc(ct);
	u32 base, desc, cmds;
	void *blob;
	int err;

	GEM_BUG_ON(ct->enabled);

	/* vma should be already allocated and mapped */
	GEM_BUG_ON(!ct->vma);
	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(ct->vma->obj));
	base = intel_guc_ggtt_offset(guc, ct->vma);

	/* blob should start with send descriptor */
	blob = __px_vaddr(ct->vma->obj);
	GEM_BUG_ON(blob != ct->ctbs.send.desc);

	/* (re)initialize descriptors */
	guc_ct_buffer_reset(&ct->ctbs.send);
	guc_ct_buffer_reset(&ct->ctbs.recv);

	/*
	 * Register both CT buffers starting with RECV buffer.
	 * Descriptors are in the first half of the blob.
	 */
	desc = base + ptrdiff(ct->ctbs.recv.desc, blob);
	cmds = base + ptrdiff(ct->ctbs.recv.cmds, blob);
	err = ct_register_buffer(ct, GUC_CTB_TYPE_GUC2HOST,
				 desc, cmds, ct->ctbs.recv.size * 4);
	if (unlikely(err))
		goto err_out;

	desc = base + ptrdiff(ct->ctbs.send.desc, blob);
	cmds = base + ptrdiff(ct->ctbs.send.cmds, blob);
	err = ct_register_buffer(ct, GUC_CTB_TYPE_HOST2GUC,
				 desc, cmds, ct->ctbs.send.size * 4);
	if (unlikely(err))
		goto err_deregister;

	ct->enabled = true;
	ct->stall_time = KTIME_MAX;

	return 0;

err_deregister:
	ct_deregister_buffer(ct, GUC_CTB_TYPE_GUC2HOST);
err_out:
	CT_PROBE_ERROR(ct, "Failed to enable CTB (%pe)\n", ERR_PTR(err));
	return err;
}

/**
 * intel_guc_ct_disable - Disable buffer based command transport.
 * @ct: pointer to CT struct
 */
void intel_guc_ct_disable(struct intel_guc_ct *ct)
{
	struct intel_guc *guc = ct_to_guc(ct);

	GEM_BUG_ON(!ct->enabled);

	ct->enabled = false;

	if (intel_guc_is_fw_running(guc)) {
		ct_deregister_buffer(ct, GUC_CTB_TYPE_HOST2GUC);
		ct_deregister_buffer(ct, GUC_CTB_TYPE_GUC2HOST);
	}
}

static u32 ct_get_next_fence(struct intel_guc_ct *ct)
{
	/* For now it's trivial */
	return ++ct->requests.last_fence;
}

static int ct_write(struct intel_guc_ct *ct,
		    const u32 *action,
		    u32 len /* in dwords */,
		    u32 fence, u32 flags)
{
	struct intel_guc_ct_buffer *ctb = &ct->ctbs.send;
	struct guc_ct_buffer_desc *desc = ctb->desc;
	u32 tail = ctb->tail;
	u32 size = ctb->size;
	u32 header;
	u32 hxg;
	u32 type;
	u32 *cmds = ctb->cmds;
	unsigned int i;

	if (unlikely(desc->status))
		goto corrupted;

	GEM_BUG_ON(tail > size);

#ifdef CONFIG_DRM_I915_DEBUG_GUC
	if (unlikely(tail != READ_ONCE(desc->tail))) {
		CT_ERROR(ct, "Tail was modified %u != %u\n",
			 desc->tail, tail);
		desc->status |= GUC_CTB_STATUS_MISMATCH;
		goto corrupted;
	}
	if (unlikely(READ_ONCE(desc->head) >= size)) {
		CT_ERROR(ct, "Invalid head offset %u >= %u\n",
			 READ_ONCE(desc->head), size);
		desc->status |= GUC_CTB_STATUS_OVERFLOW;
		goto corrupted;
	}
#endif

	/*
	 * dw0: CT header (including fence)
	 * dw1: HXG header (including action code)
	 * dw2+: action data
	 */
	header = FIELD_PREP(GUC_CTB_MSG_0_FORMAT, GUC_CTB_FORMAT_HXG) |
		 FIELD_PREP(GUC_CTB_MSG_0_NUM_DWORDS, len) |
		 FIELD_PREP(GUC_CTB_MSG_0_FENCE, fence);

	type = (flags & INTEL_GUC_CT_SEND_NB) ? GUC_HXG_TYPE_EVENT :
		GUC_HXG_TYPE_REQUEST;
	hxg = FIELD_PREP(GUC_HXG_MSG_0_TYPE, type) |
	      FIELD_PREP(GUC_HXG_EVENT_MSG_0_ACTION |
			 GUC_HXG_EVENT_MSG_0_DATA0, action[0]);

	CT_DEBUG(ct, "writing (tail %u) %*ph %*ph %*ph\n",
		 tail, 4, &header, 4, &hxg, 4 * (len - 1), &action[1]);

	cmds[tail] = header;
	tail = (tail + 1) % size;

	cmds[tail] = hxg;
	tail = (tail + 1) % size;

	for (i = 1; i < len; i++) {
		cmds[tail] = action[i];
		tail = (tail + 1) % size;
	}
	GEM_BUG_ON(tail > size);

	/*
	 * make sure H2G buffer update and LRC tail update (if this is
	 * triggering a submission) are visible before updating the
	 * descriptor tail
	 */
	intel_guc_write_barrier(ct_to_guc(ct));

	/* update local copies */
	ctb->tail = tail;
	GEM_BUG_ON(atomic_read(&ctb->space) < len + GUC_CTB_HDR_LEN);
	atomic_sub(len + GUC_CTB_HDR_LEN, &ctb->space);

	/* now update descriptor */
	WRITE_ONCE(desc->tail, tail);

	return 0;

corrupted:
	CT_ERROR(ct, "Corrupted descriptor head=%u tail=%u status=%#x\n",
		 desc->head, desc->tail, desc->status);
	ctb->broken = true;
	return -EPIPE;
}

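/*
 * Example of the wire format produced by ct_write() (a sketch): sending a
 * 3-dword action {A, D1, D2} with fence F consumes
 * len + GUC_CTB_HDR_LEN = 4 ring dwords:
 *
 *	dw0: CTB header (format = HXG, num_dwords = 3, fence = F)
 *	dw1: HXG header (type, action/data0 = A)
 *	dw2: D1
 *	dw3: D2
 */
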
/**
 * wait_for_ct_request_update - Wait for CT request state update.
 * @req:	pointer to pending request
 * @status:	placeholder for status
 *
 * For each sent request, GuC shall send back CT response message.
 * Our message handler will update status of tracked request once
 * response message with given fence is received. Wait here and
 * check for valid response status value.
 *
 * Return:
 * *	0 response received (status is valid)
 * *	-ETIMEDOUT no response within hardcoded timeout
 */
static int wait_for_ct_request_update(struct ct_request *req, u32 *status)
{
	int err;

	/*
	 * Fast commands should complete in less than 10us, so sample quickly
	 * up to that length of time, then switch to a slower sleep-wait loop.
	 * No GuC command should ever take longer than 10ms but many GuC
	 * commands can be in flight at a time, so use a 1s timeout on the
	 * slower sleep-wait loop.
	 */
#define GUC_CTB_RESPONSE_TIMEOUT_SHORT_MS 10
#define GUC_CTB_RESPONSE_TIMEOUT_LONG_MS 1000
#define done \
	(FIELD_GET(GUC_HXG_MSG_0_ORIGIN, READ_ONCE(req->status)) == \
	 GUC_HXG_ORIGIN_GUC)
	err = wait_for_us(done, GUC_CTB_RESPONSE_TIMEOUT_SHORT_MS);
	if (err)
		err = wait_for(done, GUC_CTB_RESPONSE_TIMEOUT_LONG_MS);
#undef done

	*status = req->status;
	return err;
}

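/*
 * The "done" condition above works because ct_send() starts each request
 * with status == 0 (ORIGIN field == GUC_HXG_ORIGIN_HOST) and
 * ct_handle_response() overwrites req->status with the received HXG
 * header, whose ORIGIN field is GUC_HXG_ORIGIN_GUC.
 */
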
#define GUC_CTB_TIMEOUT_MS	1500
static inline bool ct_deadlocked(struct intel_guc_ct *ct)
{
	long timeout = GUC_CTB_TIMEOUT_MS;
	bool ret = ktime_ms_delta(ktime_get(), ct->stall_time) > timeout;

	if (unlikely(ret)) {
		struct guc_ct_buffer_desc *send = ct->ctbs.send.desc;
		struct guc_ct_buffer_desc *recv = ct->ctbs.recv.desc;

		CT_ERROR(ct, "Communication stalled for %lld ms, desc status=%#x,%#x\n",
			 ktime_ms_delta(ktime_get(), ct->stall_time),
			 send->status, recv->status);
		ct->ctbs.send.broken = true;
	}

	return ret;
}

static inline bool g2h_has_room(struct intel_guc_ct *ct, u32 g2h_len_dw)
{
	struct intel_guc_ct_buffer *ctb = &ct->ctbs.recv;

	/*
	 * We leave a certain amount of space in the G2H CTB buffer for
	 * unexpected G2H CTBs (e.g. logging, engine hang, etc...)
	 */
	return !g2h_len_dw || atomic_read(&ctb->space) >= g2h_len_dw;
}

static inline void g2h_reserve_space(struct intel_guc_ct *ct, u32 g2h_len_dw)
{
	lockdep_assert_held(&ct->ctbs.send.lock);

	GEM_BUG_ON(!g2h_has_room(ct, g2h_len_dw));

	atomic_sub(g2h_len_dw, &ct->ctbs.recv.space);
}

static inline void g2h_release_space(struct intel_guc_ct *ct, u32 g2h_len_dw)
{
	atomic_add(g2h_len_dw, &ct->ctbs.recv.space);
}

static inline bool h2g_has_room(struct intel_guc_ct *ct, u32 len_dw)
{
	struct intel_guc_ct_buffer *ctb = &ct->ctbs.send;
	struct guc_ct_buffer_desc *desc = ctb->desc;
	u32 head;
	u32 space;

	if (atomic_read(&ctb->space) >= len_dw)
		return true;

	head = READ_ONCE(desc->head);
	if (unlikely(head > ctb->size)) {
		CT_ERROR(ct, "Invalid head offset %u >= %u\n",
			 head, ctb->size);
		desc->status |= GUC_CTB_STATUS_OVERFLOW;
		ctb->broken = true;
		return false;
	}

	space = CIRC_SPACE(ctb->tail, head, ctb->size);
	atomic_set(&ctb->space, space);

	return space >= len_dw;
}

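/*
 * Space accounting sketch for the slow path above: with a local tail of
 * 10, a GuC-updated head of 500 and a 1024-dword ring,
 * CIRC_SPACE(10, 500, 1024) = (500 - 10 - 1) & 1023 = 489 free dwords.
 * The cached ctb->space fast path means desc->head, which is shared with
 * the GuC, is only re-read when the cache says there is not enough room.
 */
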
static int has_room_nb(struct intel_guc_ct *ct, u32 h2g_dw, u32 g2h_dw)
{
	lockdep_assert_held(&ct->ctbs.send.lock);

	if (unlikely(!h2g_has_room(ct, h2g_dw) || !g2h_has_room(ct, g2h_dw))) {
		if (ct->stall_time == KTIME_MAX)
			ct->stall_time = ktime_get();

		if (unlikely(ct_deadlocked(ct)))
			return -EPIPE;
		else
			return -EBUSY;
	}

	ct->stall_time = KTIME_MAX;
	return 0;
}

#define G2H_LEN_DW(f) ({ \
	typeof(f) f_ = (f); \
	FIELD_GET(INTEL_GUC_CT_SEND_G2H_DW_MASK, f_) ? \
	FIELD_GET(INTEL_GUC_CT_SEND_G2H_DW_MASK, f_) + \
	GUC_CTB_HXG_MSG_MIN_LEN : 0; \
})

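/*
 * Example: a non-blocking send whose flags encode a 2-dword expected G2H
 * response via INTEL_GUC_CT_SEND_G2H_DW_MASK reserves
 * 2 + GUC_CTB_HXG_MSG_MIN_LEN dwords of G2H credit; flags without any
 * G2H length bits reserve nothing.
 */
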
static int ct_send_nb(struct intel_guc_ct *ct,
		      const u32 *action,
		      u32 len,
		      u32 flags)
{
	struct intel_guc_ct_buffer *ctb = &ct->ctbs.send;
	unsigned long spin_flags;
	u32 g2h_len_dw = G2H_LEN_DW(flags);
	u32 fence;
	int ret;

	spin_lock_irqsave(&ctb->lock, spin_flags);

	ret = has_room_nb(ct, len + GUC_CTB_HDR_LEN, g2h_len_dw);
	if (unlikely(ret))
		goto out;

	fence = ct_get_next_fence(ct);
	ret = ct_write(ct, action, len, fence, flags);
	if (unlikely(ret))
		goto out;

	g2h_reserve_space(ct, g2h_len_dw);
	intel_guc_notify(ct_to_guc(ct));

out:
	spin_unlock_irqrestore(&ctb->lock, spin_flags);

	return ret;
}

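/*
 * Note on the non-blocking path above: G2H credits taken by
 * g2h_reserve_space() here are not returned locally; they are released by
 * ct_handle_event() when the matching *_DONE event arrives.
 */
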
static int ct_send(struct intel_guc_ct *ct,
		   const u32 *action,
		   u32 len,
		   u32 *response_buf,
		   u32 response_buf_size,
		   u32 *status)
{
	struct intel_guc_ct_buffer *ctb = &ct->ctbs.send;
	struct ct_request request;
	unsigned long flags;
	unsigned int sleep_period_ms = 1;
	u32 fence;
	int err;

	GEM_BUG_ON(!ct->enabled);
	GEM_BUG_ON(!len);
	GEM_BUG_ON(len & ~GUC_CT_MSG_LEN_MASK);
	GEM_BUG_ON(!response_buf && response_buf_size);
	might_sleep();

	/*
	 * We use a lazy spin wait loop here as we believe that if the CT
	 * buffers are sized correctly the flow control condition should be
	 * rare. Reserve the maximum size in the G2H credits as we don't know
	 * how big the response is going to be.
	 */
retry:
	spin_lock_irqsave(&ctb->lock, flags);
	if (unlikely(!h2g_has_room(ct, len + GUC_CTB_HDR_LEN) ||
		     !g2h_has_room(ct, GUC_CTB_HXG_MSG_MAX_LEN))) {
		if (ct->stall_time == KTIME_MAX)
			ct->stall_time = ktime_get();
		spin_unlock_irqrestore(&ctb->lock, flags);

		if (unlikely(ct_deadlocked(ct)))
			return -EPIPE;

		if (msleep_interruptible(sleep_period_ms))
			return -EINTR;
		sleep_period_ms = sleep_period_ms << 1;

		goto retry;
	}

	ct->stall_time = KTIME_MAX;

	fence = ct_get_next_fence(ct);
	request.fence = fence;
	request.status = 0;
	request.response_len = response_buf_size;
	request.response_buf = response_buf;

	spin_lock(&ct->requests.lock);
	list_add_tail(&request.link, &ct->requests.pending);
	spin_unlock(&ct->requests.lock);

	err = ct_write(ct, action, len, fence, 0);
	g2h_reserve_space(ct, GUC_CTB_HXG_MSG_MAX_LEN);

	spin_unlock_irqrestore(&ctb->lock, flags);

	if (unlikely(err))
		goto unlink;

	intel_guc_notify(ct_to_guc(ct));

	err = wait_for_ct_request_update(&request, status);
	g2h_release_space(ct, GUC_CTB_HXG_MSG_MAX_LEN);
	if (unlikely(err)) {
		CT_ERROR(ct, "No response for request %#x (fence %u)\n",
			 action[0], request.fence);
		goto unlink;
	}

	if (FIELD_GET(GUC_HXG_MSG_0_TYPE, *status) != GUC_HXG_TYPE_RESPONSE_SUCCESS) {
		err = -EIO;
		goto unlink;
	}

	if (response_buf) {
		/* There shall be no data in the status */
		WARN_ON(FIELD_GET(GUC_HXG_RESPONSE_MSG_0_DATA0, request.status));
		/* Return actual response len */
		err = request.response_len;
	} else {
		/* There shall be no response payload */
		WARN_ON(request.response_len);
		/* Return data decoded from the status dword */
		err = FIELD_GET(GUC_HXG_RESPONSE_MSG_0_DATA0, *status);
	}

unlink:
	spin_lock_irqsave(&ct->requests.lock, flags);
	list_del(&request.link);
	spin_unlock_irqrestore(&ct->requests.lock, flags);

	return err;
}

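/*
 * Return value convention for ct_send() (as implemented above): on success
 * it returns the number of response dwords copied into @response_buf when
 * one was supplied, or the DATA0 field decoded from the response status
 * otherwise; both are >= 0. Negative values are errno codes from the send
 * or wait paths.
 */
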
/*
 * Command Transport (CT) buffer based GuC send function.
 */
int intel_guc_ct_send(struct intel_guc_ct *ct, const u32 *action, u32 len,
		      u32 *response_buf, u32 response_buf_size, u32 flags)
{
	u32 status = ~0; /* undefined */
	int ret;

	if (unlikely(!ct->enabled)) {
		struct intel_guc *guc = ct_to_guc(ct);
		struct intel_uc *uc = container_of(guc, struct intel_uc, guc);

		WARN(!uc->reset_in_progress, "Unexpected send: action=%#x\n", *action);
		return -ENODEV;
	}

	if (unlikely(ct->ctbs.send.broken))
		return -EPIPE;

	if (flags & INTEL_GUC_CT_SEND_NB)
		return ct_send_nb(ct, action, len, flags);

	ret = ct_send(ct, action, len, response_buf, response_buf_size, &status);
	if (unlikely(ret < 0)) {
		CT_ERROR(ct, "Sending action %#x failed (%pe) status=%#X\n",
			 action[0], ERR_PTR(ret), status);
	} else if (unlikely(ret)) {
		CT_DEBUG(ct, "send action %#x returned %d (%#x)\n",
			 action[0], ret, ret);
	}

	return ret;
}

static struct ct_incoming_msg *ct_alloc_msg(u32 num_dwords)
{
	struct ct_incoming_msg *msg;

	msg = kmalloc(sizeof(*msg) + sizeof(u32) * num_dwords, GFP_ATOMIC);
	if (msg)
		msg->size = num_dwords;
	return msg;
}

static void ct_free_msg(struct ct_incoming_msg *msg)
{
	kfree(msg);
}

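/*
 * GFP_ATOMIC is required above: ct_alloc_msg() is called from ct_read()
 * while holding ctbs.recv.lock with interrupts disabled, from the receive
 * tasklet or the CT event handler, so it must not sleep.
 */
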
/*
 * Return: number available remaining dwords to read (0 if empty)
 * or a negative error code on failure
 */
static int ct_read(struct intel_guc_ct *ct, struct ct_incoming_msg **msg)
{
	struct intel_guc_ct_buffer *ctb = &ct->ctbs.recv;
	struct guc_ct_buffer_desc *desc = ctb->desc;
	u32 head = ctb->head;
	u32 tail = READ_ONCE(desc->tail);
	u32 size = ctb->size;
	u32 *cmds = ctb->cmds;
	s32 available;
	unsigned int len;
	unsigned int i;
	u32 header;

	if (unlikely(ctb->broken))
		return -EPIPE;

	if (unlikely(desc->status))
		goto corrupted;

	GEM_BUG_ON(head > size);

#ifdef CONFIG_DRM_I915_DEBUG_GUC
	if (unlikely(head != READ_ONCE(desc->head))) {
		CT_ERROR(ct, "Head was modified %u != %u\n",
			 desc->head, head);
		desc->status |= GUC_CTB_STATUS_MISMATCH;
		goto corrupted;
	}
#endif
	if (unlikely(tail >= size)) {
		CT_ERROR(ct, "Invalid tail offset %u >= %u\n",
			 tail, size);
		desc->status |= GUC_CTB_STATUS_OVERFLOW;
		goto corrupted;
	}

	/* tail == head condition indicates empty */
	available = tail - head;
	if (unlikely(available == 0)) {
		*msg = NULL;
		return 0;
	}

	/* beware of buffer wrap case */
	if (unlikely(available < 0))
		available += size;
	CT_DEBUG(ct, "available %d (%u:%u:%u)\n", available, head, tail, size);
	GEM_BUG_ON(available < 0);

	header = cmds[head];
	head = (head + 1) % size;

	/* message len with header */
	len = FIELD_GET(GUC_CTB_MSG_0_NUM_DWORDS, header) + GUC_CTB_MSG_MIN_LEN;
	if (unlikely(len > (u32)available)) {
		CT_ERROR(ct, "Incomplete message %*ph %*ph %*ph\n",
			 4, &header,
			 4 * (head + available - 1 > size ?
			      size - head : available - 1), &cmds[head],
			 4 * (head + available - 1 > size ?
			      available - 1 - size + head : 0), &cmds[0]);
		desc->status |= GUC_CTB_STATUS_UNDERFLOW;
		goto corrupted;
	}

	*msg = ct_alloc_msg(len);
	if (!*msg) {
		CT_ERROR(ct, "No memory for message %*ph %*ph %*ph\n",
			 4, &header,
			 4 * (head + available - 1 > size ?
			      size - head : available - 1), &cmds[head],
			 4 * (head + available - 1 > size ?
			      available - 1 - size + head : 0), &cmds[0]);
		return available;
	}

	(*msg)->msg[0] = header;

	for (i = 1; i < len; i++) {
		(*msg)->msg[i] = cmds[head];
		head = (head + 1) % size;
	}
	CT_DEBUG(ct, "received %*ph\n", 4 * len, (*msg)->msg);

	/* update local copies */
	ctb->head = head;

	/* now update descriptor */
	WRITE_ONCE(desc->head, head);

	return available - len;

corrupted:
	CT_ERROR(ct, "Corrupted descriptor head=%u tail=%u status=%#x\n",
		 desc->head, desc->tail, desc->status);
	ctb->broken = true;
	return -EPIPE;
}

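/*
 * Wrap-around example for the read loop above (a sketch): with a
 * 4096-dword ring, head == 4094 and a 4-dword message, the dwords are
 * taken from cmds[4094], cmds[4095], cmds[0] and cmds[1], leaving
 * head == 2. The three-part %*ph dumps in the error paths split the
 * hexdump at exactly that wrap point.
 */
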
static int ct_handle_response(struct intel_guc_ct *ct, struct ct_incoming_msg *response)
{
	u32 len = FIELD_GET(GUC_CTB_MSG_0_NUM_DWORDS, response->msg[0]);
	u32 fence = FIELD_GET(GUC_CTB_MSG_0_FENCE, response->msg[0]);
	const u32 *hxg = &response->msg[GUC_CTB_MSG_MIN_LEN];
	const u32 *data = &hxg[GUC_HXG_MSG_MIN_LEN];
	u32 datalen = len - GUC_HXG_MSG_MIN_LEN;
	struct ct_request *req;
	unsigned long flags;
	bool found = false;
	int err = 0;

	GEM_BUG_ON(len < GUC_HXG_MSG_MIN_LEN);
	GEM_BUG_ON(FIELD_GET(GUC_HXG_MSG_0_ORIGIN, hxg[0]) != GUC_HXG_ORIGIN_GUC);
	GEM_BUG_ON(FIELD_GET(GUC_HXG_MSG_0_TYPE, hxg[0]) != GUC_HXG_TYPE_RESPONSE_SUCCESS &&
		   FIELD_GET(GUC_HXG_MSG_0_TYPE, hxg[0]) != GUC_HXG_TYPE_RESPONSE_FAILURE);

	CT_DEBUG(ct, "response fence %u status %#x\n", fence, hxg[0]);

	spin_lock_irqsave(&ct->requests.lock, flags);
	list_for_each_entry(req, &ct->requests.pending, link) {
		if (unlikely(fence != req->fence)) {
			CT_DEBUG(ct, "request %u awaits response\n",
				 req->fence);
			continue;
		}
		if (unlikely(datalen > req->response_len)) {
			CT_ERROR(ct, "Response %u too long (datalen %u > %u)\n",
				 req->fence, datalen, req->response_len);
			datalen = min(datalen, req->response_len);
			err = -EMSGSIZE;
		}
		if (datalen)
			memcpy(req->response_buf, data, 4 * datalen);
		req->response_len = datalen;
		WRITE_ONCE(req->status, hxg[0]);
		found = true;
		break;
	}
	if (!found) {
		CT_ERROR(ct, "Unsolicited response (fence %u)\n", fence);
		CT_ERROR(ct, "Could not find fence=%u, last_fence=%u\n", fence,
			 ct->requests.last_fence);
		list_for_each_entry(req, &ct->requests.pending, link)
			CT_ERROR(ct, "request %u awaits response\n",
				 req->fence);
		err = -ENOKEY;
	}
	spin_unlock_irqrestore(&ct->requests.lock, flags);

	if (unlikely(err))
		return err;

	ct_free_msg(response);
	return 0;
}

static int ct_process_request(struct intel_guc_ct *ct, struct ct_incoming_msg *request)
{
	struct intel_guc *guc = ct_to_guc(ct);
	const u32 *hxg;
	const u32 *payload;
	u32 hxg_len, action, len;
	int ret;

	hxg = &request->msg[GUC_CTB_MSG_MIN_LEN];
	hxg_len = request->size - GUC_CTB_MSG_MIN_LEN;
	payload = &hxg[GUC_HXG_MSG_MIN_LEN];
	action = FIELD_GET(GUC_HXG_EVENT_MSG_0_ACTION, hxg[0]);
	len = hxg_len - GUC_HXG_MSG_MIN_LEN;

	CT_DEBUG(ct, "request %x %*ph\n", action, 4 * len, payload);

	switch (action) {
	case INTEL_GUC_ACTION_DEFAULT:
		ret = intel_guc_to_host_process_recv_msg(guc, payload, len);
		break;
	case INTEL_GUC_ACTION_DEREGISTER_CONTEXT_DONE:
		ret = intel_guc_deregister_done_process_msg(guc, payload,
							    len);
		break;
	case INTEL_GUC_ACTION_SCHED_CONTEXT_MODE_DONE:
		ret = intel_guc_sched_done_process_msg(guc, payload, len);
		break;
	case INTEL_GUC_ACTION_CONTEXT_RESET_NOTIFICATION:
		ret = intel_guc_context_reset_process_msg(guc, payload, len);
		break;
	case INTEL_GUC_ACTION_ENGINE_FAILURE_NOTIFICATION:
		ret = intel_guc_engine_failure_process_msg(guc, payload, len);
		break;
	default:
		ret = -EOPNOTSUPP;
		break;
	}

	if (unlikely(ret)) {
		CT_ERROR(ct, "Failed to process request %04x (%pe)\n",
			 action, ERR_PTR(ret));
		return ret;
	}

	ct_free_msg(request);
	return 0;
}

static bool ct_process_incoming_requests(struct intel_guc_ct *ct)
{
	unsigned long flags;
	struct ct_incoming_msg *request;
	bool done;
	int err;

	spin_lock_irqsave(&ct->requests.lock, flags);
	request = list_first_entry_or_null(&ct->requests.incoming,
					   struct ct_incoming_msg, link);
	if (request)
		list_del(&request->link);
	done = !!list_empty(&ct->requests.incoming);
	spin_unlock_irqrestore(&ct->requests.lock, flags);

	if (!request)
		return true;

	err = ct_process_request(ct, request);
	if (unlikely(err)) {
		CT_ERROR(ct, "Failed to process CT message (%pe) %*ph\n",
			 ERR_PTR(err), 4 * request->size, request->msg);
		ct_free_msg(request);
	}

	return done;
}

static void ct_incoming_request_worker_func(struct work_struct *w)
{
	struct intel_guc_ct *ct =
		container_of(w, struct intel_guc_ct, requests.worker);
	bool done;

	done = ct_process_incoming_requests(ct);
	if (!done)
		queue_work(system_unbound_wq, &ct->requests.worker);
}

static int ct_handle_event(struct intel_guc_ct *ct, struct ct_incoming_msg *request)
{
	const u32 *hxg = &request->msg[GUC_CTB_MSG_MIN_LEN];
	u32 action = FIELD_GET(GUC_HXG_EVENT_MSG_0_ACTION, hxg[0]);
	unsigned long flags;

	GEM_BUG_ON(FIELD_GET(GUC_HXG_MSG_0_TYPE, hxg[0]) != GUC_HXG_TYPE_EVENT);

	/*
	 * Adjusting the space must be done in IRQ or deadlock can occur as the
	 * CTB processing in the below workqueue can send CTBs which creates a
	 * circular dependency if the space was returned there.
	 */
	switch (action) {
	case INTEL_GUC_ACTION_SCHED_CONTEXT_MODE_DONE:
	case INTEL_GUC_ACTION_DEREGISTER_CONTEXT_DONE:
		g2h_release_space(ct, request->size);
	}

	spin_lock_irqsave(&ct->requests.lock, flags);
	list_add_tail(&request->link, &ct->requests.incoming);
	spin_unlock_irqrestore(&ct->requests.lock, flags);

	queue_work(system_unbound_wq, &ct->requests.worker);
	return 0;
}

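/*
 * The two actions singled out above are the ones whose G2H credits were
 * pre-reserved by ct_send_nb(); releasing them here, directly in the
 * tasklet rather than in the worker, keeps the credit return independent
 * of work that may itself need to send H2G messages.
 */
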
static int ct_handle_hxg(struct intel_guc_ct *ct, struct ct_incoming_msg *msg)
{
	u32 origin, type;
	u32 *hxg;
	int err;

	if (unlikely(msg->size < GUC_CTB_HXG_MSG_MIN_LEN))
		return -EBADMSG;

	hxg = &msg->msg[GUC_CTB_MSG_MIN_LEN];

	origin = FIELD_GET(GUC_HXG_MSG_0_ORIGIN, hxg[0]);
	if (unlikely(origin != GUC_HXG_ORIGIN_GUC)) {
		err = -EPROTO;
		goto failed;
	}

	type = FIELD_GET(GUC_HXG_MSG_0_TYPE, hxg[0]);
	switch (type) {
	case GUC_HXG_TYPE_EVENT:
		err = ct_handle_event(ct, msg);
		break;
	case GUC_HXG_TYPE_RESPONSE_SUCCESS:
	case GUC_HXG_TYPE_RESPONSE_FAILURE:
		err = ct_handle_response(ct, msg);
		break;
	default:
		err = -EOPNOTSUPP;
	}

	if (unlikely(err)) {
failed:
		CT_ERROR(ct, "Failed to handle HXG message (%pe) %*ph\n",
			 ERR_PTR(err), 4 * GUC_HXG_MSG_MIN_LEN, hxg);
	}
	return err;
}

static void ct_handle_msg(struct intel_guc_ct *ct, struct ct_incoming_msg *msg)
{
	u32 format = FIELD_GET(GUC_CTB_MSG_0_FORMAT, msg->msg[0]);
	int err;

	if (format == GUC_CTB_FORMAT_HXG)
		err = ct_handle_hxg(ct, msg);
	else
		err = -EOPNOTSUPP;

	if (unlikely(err)) {
		CT_ERROR(ct, "Failed to process CT message (%pe) %*ph\n",
			 ERR_PTR(err), 4 * msg->size, msg->msg);
		ct_free_msg(msg);
	}
}

/*
 * Return: number available remaining dwords to read (0 if empty)
 * or a negative error code on failure
 */
static int ct_receive(struct intel_guc_ct *ct)
{
	struct ct_incoming_msg *msg = NULL;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&ct->ctbs.recv.lock, flags);
	ret = ct_read(ct, &msg);
	spin_unlock_irqrestore(&ct->ctbs.recv.lock, flags);
	if (ret < 0)
		return ret;

	if (msg)
		ct_handle_msg(ct, msg);

	return ret;
}

static void ct_try_receive_message(struct intel_guc_ct *ct)
{
	int ret;

	if (GEM_WARN_ON(!ct->enabled))
		return;

	ret = ct_receive(ct);
	if (ret > 0)
		tasklet_hi_schedule(&ct->receive_tasklet);
}

static void ct_receive_tasklet_func(struct tasklet_struct *t)
{
	struct intel_guc_ct *ct = from_tasklet(ct, t, receive_tasklet);

	ct_try_receive_message(ct);
}

/*
 * When we're communicating with the GuC over CT, GuC uses events
 * to notify us about new messages being posted on the RECV buffer.
 */
void intel_guc_ct_event_handler(struct intel_guc_ct *ct)
{
	if (unlikely(!ct->enabled)) {
		WARN(1, "Unexpected GuC event received while CT disabled!\n");
		return;
	}

	ct_try_receive_message(ct);
}

void intel_guc_ct_print_info(struct intel_guc_ct *ct,
			     struct drm_printer *p)
{
	drm_printf(p, "CT %s\n", enableddisabled(ct->enabled));

	if (!ct->enabled)
		return;

	drm_printf(p, "H2G Space: %u\n",
		   atomic_read(&ct->ctbs.send.space) * 4);
	drm_printf(p, "Head: %u\n",
		   ct->ctbs.send.desc->head);
	drm_printf(p, "Tail: %u\n",
		   ct->ctbs.send.desc->tail);
	drm_printf(p, "G2H Space: %u\n",
		   atomic_read(&ct->ctbs.recv.space) * 4);
	drm_printf(p, "Head: %u\n",
		   ct->ctbs.recv.desc->head);
	drm_printf(p, "Tail: %u\n",
		   ct->ctbs.recv.desc->tail);
}