// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2017-2018 The Linux Foundation. All rights reserved. */

#include <linux/completion.h>
#include <linux/circ_buf.h>
#include <linux/list.h>

#include "a6xx_gmu.h"
#include "a6xx_gmu.xml.h"
#include "a6xx_gpu.h"

#define HFI_MSG_ID(val) [val] = #val

static const char * const a6xx_hfi_msg_id[] = {
	HFI_MSG_ID(HFI_H2F_MSG_INIT),
	HFI_MSG_ID(HFI_H2F_MSG_FW_VERSION),
	HFI_MSG_ID(HFI_H2F_MSG_BW_TABLE),
	HFI_MSG_ID(HFI_H2F_MSG_PERF_TABLE),
	HFI_MSG_ID(HFI_H2F_MSG_TEST),
	HFI_MSG_ID(HFI_H2F_MSG_START),
	HFI_MSG_ID(HFI_H2F_MSG_CORE_FW_START),
	HFI_MSG_ID(HFI_H2F_MSG_GX_BW_PERF_VOTE),
	HFI_MSG_ID(HFI_H2F_MSG_PREPARE_SLUMBER),
};

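/*
 * Read the next packet from an HFI queue. The queues are simple dword ring
 * buffers in memory shared with the GMU; the first dword of each packet is a
 * header that encodes the packet size. Returns the number of dwords in the
 * packet, or 0 if the queue is empty.
 */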
static int a6xx_hfi_queue_read(struct a6xx_gmu *gmu,
	struct a6xx_hfi_queue *queue, u32 *data, u32 dwords)
{
	struct a6xx_hfi_queue_header *header = queue->header;
	u32 i, hdr, index = header->read_index;

	if (header->read_index == header->write_index) {
		header->rx_request = 1;
		return 0;
	}

	hdr = queue->data[index];

	queue->history[(queue->history_idx++) % HFI_HISTORY_SZ] = index;

	/*
	 * If we are to assume that the GMU firmware is in fact a rational actor
	 * and is programmed to not send us a larger response than we expect
	 * then we can also assume that if the header size is unexpectedly large
	 * that it is due to memory corruption and/or hardware failure. In this
	 * case the only reasonable course of action is to BUG() to help harden
	 * the failure.
	 */

	BUG_ON(HFI_HEADER_SIZE(hdr) > dwords);

	for (i = 0; i < HFI_HEADER_SIZE(hdr); i++) {
		data[i] = queue->data[index];
		index = (index + 1) % header->size;
	}

	if (!gmu->legacy)
		index = ALIGN(index, 4) % header->size;

	header->read_index = index;
	return HFI_HEADER_SIZE(hdr);
}

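/*
 * Write a packet into an HFI queue under the queue lock and then ring the
 * host-to-GMU interrupt so the firmware knows there is work pending.
 * Returns -ENOSPC if the packet does not fit in the ring.
 */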
static int a6xx_hfi_queue_write(struct a6xx_gmu *gmu,
	struct a6xx_hfi_queue *queue, u32 *data, u32 dwords)
{
	struct a6xx_hfi_queue_header *header = queue->header;
	u32 i, space, index = header->write_index;

	spin_lock(&queue->lock);

	space = CIRC_SPACE(header->write_index, header->read_index,
		header->size);
	if (space < dwords) {
		spin_unlock(&queue->lock);
		return -ENOSPC;
	}

	queue->history[(queue->history_idx++) % HFI_HISTORY_SZ] = index;

	for (i = 0; i < dwords; i++) {
		queue->data[index] = data[i];
		index = (index + 1) % header->size;
	}

	/* Fill any unused space at the end of the write buffer with a marker */
	if (!gmu->legacy) {
		for (; index % 4; index = (index + 1) % header->size)
			queue->data[index] = 0xfafafafa;
	}

	header->write_index = index;
	spin_unlock(&queue->lock);

	gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET, 0x01);
	return 0;
}

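/*
 * Poll for the GMU-to-host message queue interrupt, then drain the response
 * queue until the ack matching the sequence number we sent shows up.
 * Firmware error packets and responses with an unexpected sequence number
 * are logged and skipped.
 */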
static int a6xx_hfi_wait_for_ack(struct a6xx_gmu *gmu, u32 id, u32 seqnum,
		u32 *payload, u32 payload_size)
{
	struct a6xx_hfi_queue *queue = &gmu->queues[HFI_RESPONSE_QUEUE];
	u32 val;
	int ret;

	/* Wait for a response */
	ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_GMU2HOST_INTR_INFO, val,
		val & A6XX_GMU_GMU2HOST_INTR_INFO_MSGQ, 100, 5000);

	if (ret) {
		DRM_DEV_ERROR(gmu->dev,
			"Message %s id %d timed out waiting for response\n",
			a6xx_hfi_msg_id[id], seqnum);
		return -ETIMEDOUT;
	}

	/* Clear the interrupt */
	gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_CLR,
		A6XX_GMU_GMU2HOST_INTR_INFO_MSGQ);

	for (;;) {
		struct a6xx_hfi_msg_response resp;

		/* Get the next packet */
		ret = a6xx_hfi_queue_read(gmu, queue, (u32 *) &resp,
			sizeof(resp) >> 2);

		/* If the queue is empty our response never made it */
		if (!ret) {
			DRM_DEV_ERROR(gmu->dev,
				"The HFI response queue is unexpectedly empty\n");
			return -ENOENT;
		}

		if (HFI_HEADER_ID(resp.header) == HFI_F2H_MSG_ERROR) {
			struct a6xx_hfi_msg_error *error =
				(struct a6xx_hfi_msg_error *) &resp;

			DRM_DEV_ERROR(gmu->dev, "GMU firmware error %d\n",
				error->code);
			continue;
		}

		if (seqnum != HFI_HEADER_SEQNUM(resp.ret_header)) {
			DRM_DEV_ERROR(gmu->dev,
				"Unexpected message id %d on the response queue\n",
				HFI_HEADER_SEQNUM(resp.ret_header));
			continue;
		}

		if (resp.error) {
			DRM_DEV_ERROR(gmu->dev,
				"Message %s id %d returned error %d\n",
				a6xx_hfi_msg_id[id], seqnum, resp.error);
			return -EINVAL;
		}

		/* All is well, copy over the buffer */
		if (payload && payload_size)
			memcpy(payload, resp.payload,
				min_t(u32, payload_size, sizeof(resp.payload)));

		return 0;
	}
}

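/*
 * Queue a command for the GMU and wait for its ack. The first dword of the
 * packet is the header: the sequence number is packed in at bit 20, the
 * message type (HFI_MSG_CMD) at bit 16, the payload size in dwords at bit 8
 * and the message id in the low bits.
 */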
static int a6xx_hfi_send_msg(struct a6xx_gmu *gmu, int id,
		void *data, u32 size, u32 *payload, u32 payload_size)
{
	struct a6xx_hfi_queue *queue = &gmu->queues[HFI_COMMAND_QUEUE];
	int ret, dwords = size >> 2;
	u32 seqnum;

	seqnum = atomic_inc_return(&queue->seqnum) % 0xfff;

	/* First dword of the message is the message header - fill it in */
	*((u32 *) data) = (seqnum << 20) | (HFI_MSG_CMD << 16) |
		(dwords << 8) | id;

	ret = a6xx_hfi_queue_write(gmu, queue, data, dwords);
	if (ret) {
		DRM_DEV_ERROR(gmu->dev, "Unable to send message %s id %d\n",
			a6xx_hfi_msg_id[id], seqnum);
		return ret;
	}

	return a6xx_hfi_wait_for_ack(gmu, id, seqnum, payload, payload_size);
}

static int a6xx_hfi_send_gmu_init(struct a6xx_gmu *gmu, int boot_state)
{
	struct a6xx_hfi_msg_gmu_init_cmd msg = { 0 };

	msg.dbg_buffer_addr = (u32) gmu->debug.iova;
	msg.dbg_buffer_size = (u32) gmu->debug.size;
	msg.boot_state = boot_state;

	return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_INIT, &msg, sizeof(msg),
		NULL, 0);
}

static int a6xx_hfi_get_fw_version(struct a6xx_gmu *gmu, u32 *version)
{
	struct a6xx_hfi_msg_fw_version msg = { 0 };

	/* Currently supporting version 1.10 */
	msg.supported_version = (1 << 28) | (1 << 19) | (1 << 17);

	return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_FW_VERSION, &msg, sizeof(msg),
		version, sizeof(*version));
}

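/*
 * The perf tables give the GMU one entry per DCVS level: the ARC vote for
 * the GX (GPU) or CX (GMU) rail plus the matching clock rate, divided by
 * 1000 (Hz to kHz).
 */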
static int a6xx_hfi_send_perf_table_v1(struct a6xx_gmu *gmu)
{
	struct a6xx_hfi_msg_perf_table_v1 msg = { 0 };
	int i;

	msg.num_gpu_levels = gmu->nr_gpu_freqs;
	msg.num_gmu_levels = gmu->nr_gmu_freqs;

	for (i = 0; i < gmu->nr_gpu_freqs; i++) {
		msg.gx_votes[i].vote = gmu->gx_arc_votes[i];
		msg.gx_votes[i].freq = gmu->gpu_freqs[i] / 1000;
	}

	for (i = 0; i < gmu->nr_gmu_freqs; i++) {
		msg.cx_votes[i].vote = gmu->cx_arc_votes[i];
		msg.cx_votes[i].freq = gmu->gmu_freqs[i] / 1000;
	}

	return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_PERF_TABLE, &msg, sizeof(msg),
		NULL, 0);
}

static int a6xx_hfi_send_perf_table(struct a6xx_gmu *gmu)
{
	struct a6xx_hfi_msg_perf_table msg = { 0 };
	int i;

	msg.num_gpu_levels = gmu->nr_gpu_freqs;
	msg.num_gmu_levels = gmu->nr_gmu_freqs;

	for (i = 0; i < gmu->nr_gpu_freqs; i++) {
		msg.gx_votes[i].vote = gmu->gx_arc_votes[i];
		msg.gx_votes[i].acd = 0xffffffff;
		msg.gx_votes[i].freq = gmu->gpu_freqs[i] / 1000;
	}

	for (i = 0; i < gmu->nr_gmu_freqs; i++) {
		msg.cx_votes[i].vote = gmu->cx_arc_votes[i];
		msg.cx_votes[i].freq = gmu->gmu_freqs[i] / 1000;
	}

	return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_PERF_TABLE, &msg, sizeof(msg),
		NULL, 0);
}

static void a618_build_bw_table(struct a6xx_hfi_msg_bw_table *msg)
{
	/* Send a single "off" entry since the 618 GMU doesn't do bus scaling */
	msg->bw_level_num = 1;

	msg->ddr_cmds_num = 3;
	msg->ddr_wait_bitmask = 0x01;

	msg->ddr_cmds_addrs[0] = 0x50000;
	msg->ddr_cmds_addrs[1] = 0x5003c;
	msg->ddr_cmds_addrs[2] = 0x5000c;

	msg->ddr_cmds_data[0][0] = 0x40000000;
	msg->ddr_cmds_data[0][1] = 0x40000000;
	msg->ddr_cmds_data[0][2] = 0x40000000;

	/*
	 * These are the CX (CNOC) votes - these are used by the GMU but the
	 * votes are known and fixed for the target
	 */
	msg->cnoc_cmds_num = 1;
	msg->cnoc_wait_bitmask = 0x01;

	msg->cnoc_cmds_addrs[0] = 0x5007c;
	msg->cnoc_cmds_data[0][0] = 0x40000000;
	msg->cnoc_cmds_data[1][0] = 0x60000001;
}

static void a619_build_bw_table(struct a6xx_hfi_msg_bw_table *msg)
{
	msg->bw_level_num = 13;

	msg->ddr_cmds_num = 3;
	msg->ddr_wait_bitmask = 0x0;

	msg->ddr_cmds_addrs[0] = 0x50000;
	msg->ddr_cmds_addrs[1] = 0x50004;
	msg->ddr_cmds_addrs[2] = 0x50080;

	msg->ddr_cmds_data[0][0] = 0x40000000;
	msg->ddr_cmds_data[0][1] = 0x40000000;
	msg->ddr_cmds_data[0][2] = 0x40000000;
	msg->ddr_cmds_data[1][0] = 0x6000030c;
	msg->ddr_cmds_data[1][1] = 0x600000db;
	msg->ddr_cmds_data[1][2] = 0x60000008;
	msg->ddr_cmds_data[2][0] = 0x60000618;
	msg->ddr_cmds_data[2][1] = 0x600001b6;
	msg->ddr_cmds_data[2][2] = 0x60000008;
	msg->ddr_cmds_data[3][0] = 0x60000925;
	msg->ddr_cmds_data[3][1] = 0x60000291;
	msg->ddr_cmds_data[3][2] = 0x60000008;
	msg->ddr_cmds_data[4][0] = 0x60000dc1;
	msg->ddr_cmds_data[4][1] = 0x600003dc;
	msg->ddr_cmds_data[4][2] = 0x60000008;
	msg->ddr_cmds_data[5][0] = 0x600010ad;
	msg->ddr_cmds_data[5][1] = 0x600004ae;
	msg->ddr_cmds_data[5][2] = 0x60000008;
	msg->ddr_cmds_data[6][0] = 0x600014c3;
	msg->ddr_cmds_data[6][1] = 0x600005d4;
	msg->ddr_cmds_data[6][2] = 0x60000008;
	msg->ddr_cmds_data[7][0] = 0x6000176a;
	msg->ddr_cmds_data[7][1] = 0x60000693;
	msg->ddr_cmds_data[7][2] = 0x60000008;
	msg->ddr_cmds_data[8][0] = 0x60001f01;
	msg->ddr_cmds_data[8][1] = 0x600008b5;
	msg->ddr_cmds_data[8][2] = 0x60000008;
	msg->ddr_cmds_data[9][0] = 0x60002940;
	msg->ddr_cmds_data[9][1] = 0x60000b95;
	msg->ddr_cmds_data[9][2] = 0x60000008;
	msg->ddr_cmds_data[10][0] = 0x60002f68;
	msg->ddr_cmds_data[10][1] = 0x60000d50;
	msg->ddr_cmds_data[10][2] = 0x60000008;
	msg->ddr_cmds_data[11][0] = 0x60003700;
	msg->ddr_cmds_data[11][1] = 0x60000f71;
	msg->ddr_cmds_data[11][2] = 0x60000008;
	msg->ddr_cmds_data[12][0] = 0x60003fce;
	msg->ddr_cmds_data[12][1] = 0x600011ea;
	msg->ddr_cmds_data[12][2] = 0x60000008;

	msg->cnoc_cmds_num = 1;
	msg->cnoc_wait_bitmask = 0x0;

	msg->cnoc_cmds_addrs[0] = 0x50054;

	msg->cnoc_cmds_data[0][0] = 0x40000000;
}

static void a640_build_bw_table(struct a6xx_hfi_msg_bw_table *msg)
{
	/* Send a single "off" entry just to get things running */
	msg->bw_level_num = 1;

	msg->ddr_cmds_num = 3;
	msg->ddr_wait_bitmask = 0x01;

	msg->ddr_cmds_addrs[0] = 0x50000;
	msg->ddr_cmds_addrs[1] = 0x5003c;
	msg->ddr_cmds_addrs[2] = 0x5000c;

	msg->ddr_cmds_data[0][0] = 0x40000000;
	msg->ddr_cmds_data[0][1] = 0x40000000;
	msg->ddr_cmds_data[0][2] = 0x40000000;

	/*
	 * These are the CX (CNOC) votes - these are used by the GMU but the
	 * votes are known and fixed for the target
	 */
	msg->cnoc_cmds_num = 3;
	msg->cnoc_wait_bitmask = 0x01;

	msg->cnoc_cmds_addrs[0] = 0x50034;
	msg->cnoc_cmds_addrs[1] = 0x5007c;
	msg->cnoc_cmds_addrs[2] = 0x5004c;

	msg->cnoc_cmds_data[0][0] = 0x40000000;
	msg->cnoc_cmds_data[0][1] = 0x00000000;
	msg->cnoc_cmds_data[0][2] = 0x40000000;

	msg->cnoc_cmds_data[1][0] = 0x60000001;
	msg->cnoc_cmds_data[1][1] = 0x20000001;
	msg->cnoc_cmds_data[1][2] = 0x60000001;
}

static void a650_build_bw_table(struct a6xx_hfi_msg_bw_table *msg)
{
	/* Send a single "off" entry just to get things running */
	msg->bw_level_num = 1;

	msg->ddr_cmds_num = 3;
	msg->ddr_wait_bitmask = 0x01;

	msg->ddr_cmds_addrs[0] = 0x50000;
	msg->ddr_cmds_addrs[1] = 0x50004;
	msg->ddr_cmds_addrs[2] = 0x5007c;

	msg->ddr_cmds_data[0][0] = 0x40000000;
	msg->ddr_cmds_data[0][1] = 0x40000000;
	msg->ddr_cmds_data[0][2] = 0x40000000;

	/*
	 * These are the CX (CNOC) votes - these are used by the GMU but the
	 * votes are known and fixed for the target
	 */
	msg->cnoc_cmds_num = 1;
	msg->cnoc_wait_bitmask = 0x01;

	msg->cnoc_cmds_addrs[0] = 0x500a4;
	msg->cnoc_cmds_data[0][0] = 0x40000000;
	msg->cnoc_cmds_data[1][0] = 0x60000001;
}

static void a690_build_bw_table(struct a6xx_hfi_msg_bw_table *msg)
{
	/* Send a single "off" entry just to get things running */
	msg->bw_level_num = 1;

	msg->ddr_cmds_num = 3;
	msg->ddr_wait_bitmask = 0x01;

	msg->ddr_cmds_addrs[0] = 0x50004;
	msg->ddr_cmds_addrs[1] = 0x50000;
	msg->ddr_cmds_addrs[2] = 0x500ac;

	msg->ddr_cmds_data[0][0] = 0x40000000;
	msg->ddr_cmds_data[0][1] = 0x40000000;
	msg->ddr_cmds_data[0][2] = 0x40000000;

	/*
	 * These are the CX (CNOC) votes - these are used by the GMU but the
	 * votes are known and fixed for the target
	 */
	msg->cnoc_cmds_num = 1;
	msg->cnoc_wait_bitmask = 0x01;

	msg->cnoc_cmds_addrs[0] = 0x5003c;
	msg->cnoc_cmds_data[0][0] = 0x40000000;
	msg->cnoc_cmds_data[1][0] = 0x60000001;
}

static void a660_build_bw_table(struct a6xx_hfi_msg_bw_table *msg)
{
	/* Send a single "off" entry just to get things running */
	msg->bw_level_num = 1;

	msg->ddr_cmds_num = 3;
	msg->ddr_wait_bitmask = 0x01;

	msg->ddr_cmds_addrs[0] = 0x50004;
	msg->ddr_cmds_addrs[1] = 0x500a0;
	msg->ddr_cmds_addrs[2] = 0x50000;

	msg->ddr_cmds_data[0][0] = 0x40000000;
	msg->ddr_cmds_data[0][1] = 0x40000000;
	msg->ddr_cmds_data[0][2] = 0x40000000;

	/*
	 * These are the CX (CNOC) votes - these are used by the GMU but the
	 * votes are known and fixed for the target
	 */
	msg->cnoc_cmds_num = 1;
	msg->cnoc_wait_bitmask = 0x01;

	msg->cnoc_cmds_addrs[0] = 0x50070;
	msg->cnoc_cmds_data[0][0] = 0x40000000;
	msg->cnoc_cmds_data[1][0] = 0x60000001;
}

static void adreno_7c3_build_bw_table(struct a6xx_hfi_msg_bw_table *msg)
{
	/* Send a single "off" entry just to get things running */
	msg->bw_level_num = 1;

	msg->ddr_cmds_num = 3;
	msg->ddr_wait_bitmask = 0x07;

	msg->ddr_cmds_addrs[0] = 0x50004;
	msg->ddr_cmds_addrs[1] = 0x50000;
	msg->ddr_cmds_addrs[2] = 0x50088;

	msg->ddr_cmds_data[0][0] = 0x40000000;
	msg->ddr_cmds_data[0][1] = 0x40000000;
	msg->ddr_cmds_data[0][2] = 0x40000000;

	/*
	 * These are the CX (CNOC) votes - these are used by the GMU but the
	 * votes are known and fixed for the target
	 */
	msg->cnoc_cmds_num = 1;
	msg->cnoc_wait_bitmask = 0x01;

	msg->cnoc_cmds_addrs[0] = 0x5006c;
	msg->cnoc_cmds_data[0][0] = 0x40000000;
	msg->cnoc_cmds_data[1][0] = 0x60000001;
}

static void a6xx_build_bw_table(struct a6xx_hfi_msg_bw_table *msg)
{
	/* Send a single "off" entry since the 630 GMU doesn't do bus scaling */
	msg->bw_level_num = 1;

	msg->ddr_cmds_num = 3;
	msg->ddr_wait_bitmask = 0x07;

	msg->ddr_cmds_addrs[0] = 0x50000;
	msg->ddr_cmds_addrs[1] = 0x5005c;
	msg->ddr_cmds_addrs[2] = 0x5000c;

	msg->ddr_cmds_data[0][0] = 0x40000000;
	msg->ddr_cmds_data[0][1] = 0x40000000;
	msg->ddr_cmds_data[0][2] = 0x40000000;

	/*
	 * These are the CX (CNOC) votes. These are used by the GMU, but the
	 * values for the sdm845 GMU are known and fixed so we can hard code
	 * them.
	 */
	msg->cnoc_cmds_num = 3;
	msg->cnoc_wait_bitmask = 0x05;

	msg->cnoc_cmds_addrs[0] = 0x50034;
	msg->cnoc_cmds_addrs[1] = 0x5007c;
	msg->cnoc_cmds_addrs[2] = 0x5004c;

	msg->cnoc_cmds_data[0][0] = 0x40000000;
	msg->cnoc_cmds_data[0][1] = 0x00000000;
	msg->cnoc_cmds_data[0][2] = 0x40000000;

	msg->cnoc_cmds_data[1][0] = 0x60000001;
	msg->cnoc_cmds_data[1][1] = 0x20000001;
	msg->cnoc_cmds_data[1][2] = 0x60000001;
}

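/*
 * The DDR and CNOC bus vote tables are fixed per SoC, so pick the builder
 * that matches this GPU and fall back to the sdm845 (a630) values for
 * anything we don't recognize.
 */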
static int a6xx_hfi_send_bw_table(struct a6xx_gmu *gmu)
{
	struct a6xx_hfi_msg_bw_table msg = { 0 };
	struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
	struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;

	if (adreno_is_a618(adreno_gpu))
		a618_build_bw_table(&msg);
	else if (adreno_is_a619(adreno_gpu))
		a619_build_bw_table(&msg);
	else if (adreno_is_a640_family(adreno_gpu))
		a640_build_bw_table(&msg);
	else if (adreno_is_a650(adreno_gpu))
		a650_build_bw_table(&msg);
	else if (adreno_is_7c3(adreno_gpu))
		adreno_7c3_build_bw_table(&msg);
	else if (adreno_is_a660(adreno_gpu))
		a660_build_bw_table(&msg);
	else if (adreno_is_a690(adreno_gpu))
		a690_build_bw_table(&msg);
	else
		a6xx_build_bw_table(&msg);

	return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_BW_TABLE, &msg, sizeof(msg),
		NULL, 0);
}

static int a6xx_hfi_send_test(struct a6xx_gmu *gmu)
{
	struct a6xx_hfi_msg_test msg = { 0 };

	return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_TEST, &msg, sizeof(msg),
		NULL, 0);
}

static int a6xx_hfi_send_start(struct a6xx_gmu *gmu)
{
	struct a6xx_hfi_msg_start msg = { 0 };

	return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_START, &msg, sizeof(msg),
		NULL, 0);
}

static int a6xx_hfi_send_core_fw_start(struct a6xx_gmu *gmu)
{
	struct a6xx_hfi_msg_core_fw_start msg = { 0 };

	return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_CORE_FW_START, &msg,
		sizeof(msg), NULL, 0);
}

int a6xx_hfi_set_freq(struct a6xx_gmu *gmu, int index)
{
	struct a6xx_hfi_gx_bw_perf_vote_cmd msg = { 0 };

	msg.ack_type = 1; /* blocking */
	msg.freq = index;
	msg.bw = 0; /* TODO: bus scaling */

	return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_GX_BW_PERF_VOTE, &msg,
		sizeof(msg), NULL, 0);
}

int a6xx_hfi_send_prep_slumber(struct a6xx_gmu *gmu)
{
	struct a6xx_hfi_prep_slumber_cmd msg = { 0 };

	/* TODO: should the freq and bw fields be non-zero? */

	return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_PREPARE_SLUMBER, &msg,
		sizeof(msg), NULL, 0);
}

static int a6xx_hfi_start_v1(struct a6xx_gmu *gmu, int boot_state)
{
	int ret;

	ret = a6xx_hfi_send_gmu_init(gmu, boot_state);
	if (ret)
		return ret;

	ret = a6xx_hfi_get_fw_version(gmu, NULL);
	if (ret)
		return ret;

	/*
	 * We have to exchange version numbers per the sequence, but at this
	 * point the kernel driver doesn't need to know the exact version of
	 * the GMU firmware
	 */

	ret = a6xx_hfi_send_perf_table_v1(gmu);
	if (ret)
		return ret;

	ret = a6xx_hfi_send_bw_table(gmu);
	if (ret)
		return ret;

	/*
	 * Let the GMU know that there won't be any more HFI messages until
	 * next boot
	 */
	a6xx_hfi_send_test(gmu);

	return 0;
}

int a6xx_hfi_start(struct a6xx_gmu *gmu, int boot_state)
{
	int ret;

	if (gmu->legacy)
		return a6xx_hfi_start_v1(gmu, boot_state);

	ret = a6xx_hfi_send_perf_table(gmu);
	if (ret)
		return ret;

	ret = a6xx_hfi_send_bw_table(gmu);
	if (ret)
		return ret;

	ret = a6xx_hfi_send_core_fw_start(gmu);
	if (ret)
		return ret;

	/*
	 * Downstream driver sends this in its "a6xx_hw_init" equivalent,
	 * but there seems to be no harm in sending it here
	 */
	ret = a6xx_hfi_send_start(gmu);
	if (ret)
		return ret;

	return 0;
}

void a6xx_hfi_stop(struct a6xx_gmu *gmu)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(gmu->queues); i++) {
		struct a6xx_hfi_queue *queue = &gmu->queues[i];

		if (!queue->header)
			continue;

		if (queue->header->read_index != queue->header->write_index)
			DRM_DEV_ERROR(gmu->dev, "HFI queue %d is not empty\n", i);

		queue->header->read_index = 0;
		queue->header->write_index = 0;

		memset(&queue->history, 0xff, sizeof(queue->history));
		queue->history_idx = 0;
	}
}

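/*
 * Set up the host-side state for one queue and fill in its header in the
 * memory shared with the GMU firmware.
 */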
static void a6xx_hfi_queue_init(struct a6xx_hfi_queue *queue,
	struct a6xx_hfi_queue_header *header, void *virt, u64 iova,
	u32 id)
{
	spin_lock_init(&queue->lock);
	queue->header = header;
	queue->data = virt;
	atomic_set(&queue->seqnum, 0);

	memset(&queue->history, 0xff, sizeof(queue->history));
	queue->history_idx = 0;

	/* Set up the shared memory header */
	header->iova = iova;
	header->type = 10 << 8 | id;
	header->status = 1;
	header->size = SZ_4K >> 2;
	header->msg_size = 0;
	header->dropped = 0;
	header->rx_watermark = 1;
	header->tx_watermark = 1;
	header->rx_request = 1;
	header->tx_request = 0;
	header->read_index = 0;
	header->write_index = 0;
}

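/*
 * Lay out the HFI region: the queue table header sits at the start of the
 * buffer, the per-queue headers follow immediately after it, and each
 * queue's data ring lives at its own 4K offset.
 */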
void a6xx_hfi_init(struct a6xx_gmu *gmu)
{
	struct a6xx_gmu_bo *hfi = &gmu->hfi;
	struct a6xx_hfi_queue_table_header *table = hfi->virt;
	struct a6xx_hfi_queue_header *headers = hfi->virt + sizeof(*table);
	u64 offset;
	int table_size;

	/*
	 * The table size is the size of the table header plus all of the queue
	 * headers
	 */
	table_size = sizeof(*table);
	table_size += (ARRAY_SIZE(gmu->queues) *
		sizeof(struct a6xx_hfi_queue_header));

	table->version = 0;
	table->size = table_size;
	/* First queue header is located immediately after the table header */
	table->qhdr0_offset = sizeof(*table) >> 2;
	table->qhdr_size = sizeof(struct a6xx_hfi_queue_header) >> 2;
	table->num_queues = ARRAY_SIZE(gmu->queues);
	table->active_queues = ARRAY_SIZE(gmu->queues);

	/* Command queue */
	offset = SZ_4K;
	a6xx_hfi_queue_init(&gmu->queues[0], &headers[0], hfi->virt + offset,
		hfi->iova + offset, 0);

	/* GMU response queue */
	offset += SZ_4K;
	a6xx_hfi_queue_init(&gmu->queues[1], &headers[1], hfi->virt + offset,
		hfi->iova + offset, gmu->legacy ? 4 : 1);
}