/*
 * Copyright 2017 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 */

/*
 *  Created on: Aug 30, 2016
 */

#include <linux/delay.h>

#include "dm_services.h"
#include <stdarg.h>

#include "dc.h"
#include "dc_dmub_srv.h"
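
/*
 * DMUB register offload:
 *
 * While a register sequence gather is in progress, the REG_SET/REG_UPDATE/
 * REG_WAIT helpers pack register operations into a DMUB command buffer
 * instead of touching MMIO directly.  The submit_dmub_*() helpers below
 * queue one packed command to the DMUB service; gather_in_progress is
 * temporarily cleared around the queue call so that register accesses made
 * while queuing are not themselves captured into the sequence being
 * submitted.
 */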

static inline void submit_dmub_read_modify_write(
		struct dc_reg_helper_state *offload,
		const struct dc_context *ctx)
{
	struct dmub_rb_cmd_read_modify_write *cmd_buf = &offload->cmd_data.read_modify_write;
	bool gather = false;

	offload->should_burst_write =
			(offload->same_addr_count == (DMUB_READ_MODIFY_WRITE_SEQ__MAX - 1));
	cmd_buf->header.payload_bytes =
			sizeof(struct dmub_cmd_read_modify_write_sequence) * offload->reg_seq_count;

	gather = ctx->dmub_srv->reg_helper_offload.gather_in_progress;
	ctx->dmub_srv->reg_helper_offload.gather_in_progress = false;

	dc_dmub_srv_cmd_queue(ctx->dmub_srv, &cmd_buf->header);

	ctx->dmub_srv->reg_helper_offload.gather_in_progress = gather;

	memset(cmd_buf, 0, sizeof(*cmd_buf));

	offload->reg_seq_count = 0;
	offload->same_addr_count = 0;
}
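
/* a burst write is a single DMUB command carrying up to
 * DMUB_BURST_WRITE_VALUES__MAX values for one register address; its payload
 * is just reg_seq_count 32-bit words
 */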

static inline void submit_dmub_burst_write(
		struct dc_reg_helper_state *offload,
		const struct dc_context *ctx)
{
	struct dmub_rb_cmd_burst_write *cmd_buf = &offload->cmd_data.burst_write;
	bool gather = false;

	cmd_buf->header.payload_bytes =
			sizeof(uint32_t) * offload->reg_seq_count;

	gather = ctx->dmub_srv->reg_helper_offload.gather_in_progress;
	ctx->dmub_srv->reg_helper_offload.gather_in_progress = false;

	dc_dmub_srv_cmd_queue(ctx->dmub_srv, &cmd_buf->header);

	ctx->dmub_srv->reg_helper_offload.gather_in_progress = gather;

	memset(cmd_buf, 0, sizeof(*cmd_buf));

	offload->reg_seq_count = 0;
}

static inline void submit_dmub_reg_wait(
		struct dc_reg_helper_state *offload,
		const struct dc_context *ctx)
{
	struct dmub_rb_cmd_reg_wait *cmd_buf = &offload->cmd_data.reg_wait;
	bool gather = false;

	gather = ctx->dmub_srv->reg_helper_offload.gather_in_progress;
	ctx->dmub_srv->reg_helper_offload.gather_in_progress = false;

	dc_dmub_srv_cmd_queue(ctx->dmub_srv, &cmd_buf->header);

	memset(cmd_buf, 0, sizeof(*cmd_buf));
	offload->reg_seq_count = 0;

	ctx->dmub_srv->reg_helper_offload.gather_in_progress = gather;
}
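
/* accumulated value/mask for all fields updated in one register access:
 * value holds the already-shifted field values, mask the union of the
 * individual field masks
 */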

struct dc_reg_value_masks {
	uint32_t value;
	uint32_t mask;
};

struct dc_reg_sequence {
	uint32_t addr;
	struct dc_reg_value_masks value_masks;
};

static inline void set_reg_field_value_masks(
	struct dc_reg_value_masks *field_value_mask,
	uint32_t value,
	uint32_t mask,
	uint8_t shift)
{
	ASSERT(mask != 0);

	field_value_mask->value = (field_value_mask->value & ~mask) | (mask & (value << shift));
	field_value_mask->mask = field_value_mask->mask | mask;
}

static void set_reg_field_values(struct dc_reg_value_masks *field_value_mask,
		uint32_t addr, int n,
		uint8_t shift1, uint32_t mask1, uint32_t field_value1,
		va_list ap)
{
	uint32_t shift, mask, field_value;
	int i = 1;

	/* gather all bits value/mask getting updated in this register */
	set_reg_field_value_masks(field_value_mask,
			field_value1, mask1, shift1);

	while (i < n) {
		shift = va_arg(ap, uint32_t);
		mask = va_arg(ap, uint32_t);
		field_value = va_arg(ap, uint32_t);

		set_reg_field_value_masks(field_value_mask,
				field_value, mask, shift);

		i++;
	}
}
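
/* flush-and-execute helpers: submit the partially filled command and kick
 * DMUB right away; used by the pack routines below when a command buffer
 * fills up in the middle of a gather
 */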

static void dmub_flush_buffer_execute(
		struct dc_reg_helper_state *offload,
		const struct dc_context *ctx)
{
	submit_dmub_read_modify_write(offload, ctx);
	dc_dmub_srv_cmd_execute(ctx->dmub_srv);
}

static void dmub_flush_burst_write_buffer_execute(
		struct dc_reg_helper_state *offload,
		const struct dc_context *ctx)
{
	submit_dmub_burst_write(offload, ctx);
	dc_dmub_srv_cmd_execute(ctx->dmub_srv);
}
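
/* returns true when the value was packed into the current burst write
 * command; returns false (after flushing) when the target address changed,
 * so the caller falls back to read-modify-write packing
 */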

static bool dmub_reg_value_burst_set_pack(const struct dc_context *ctx, uint32_t addr,
		uint32_t reg_val)
{
	struct dc_reg_helper_state *offload = &ctx->dmub_srv->reg_helper_offload;
	struct dmub_rb_cmd_burst_write *cmd_buf = &offload->cmd_data.burst_write;

	/* flush command if buffer is full */
	if (offload->reg_seq_count == DMUB_BURST_WRITE_VALUES__MAX)
		dmub_flush_burst_write_buffer_execute(offload, ctx);

	if (offload->cmd_data.cmd_common.header.type == DMUB_CMD__REG_SEQ_BURST_WRITE &&
			addr != cmd_buf->addr) {
		dmub_flush_burst_write_buffer_execute(offload, ctx);
		return false;
	}

	cmd_buf->header.type = DMUB_CMD__REG_SEQ_BURST_WRITE;
	cmd_buf->addr = addr;
	cmd_buf->write_values[offload->reg_seq_count] = reg_val;
	offload->reg_seq_count++;

	return true;
}
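
/* pack one read-modify-write into the gathered sequence.  Consecutive
 * updates to the same address bump same_addr_count; once the sequence
 * saturates with one address, submit_dmub_read_modify_write() sets
 * should_burst_write so subsequent writes try the cheaper burst path first,
 * falling back here when the address changes.
 */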

static uint32_t dmub_reg_value_pack(const struct dc_context *ctx, uint32_t addr,
		struct dc_reg_value_masks *field_value_mask)
{
	struct dc_reg_helper_state *offload = &ctx->dmub_srv->reg_helper_offload;
	struct dmub_rb_cmd_read_modify_write *cmd_buf = &offload->cmd_data.read_modify_write;
	struct dmub_cmd_read_modify_write_sequence *seq;

	/* flush command if buffer is full */
	if (offload->cmd_data.cmd_common.header.type != DMUB_CMD__REG_SEQ_BURST_WRITE &&
			offload->reg_seq_count == DMUB_READ_MODIFY_WRITE_SEQ__MAX)
		dmub_flush_buffer_execute(offload, ctx);

	if (offload->should_burst_write) {
		if (dmub_reg_value_burst_set_pack(ctx, addr, field_value_mask->value))
			return field_value_mask->value;
		else
			offload->should_burst_write = false;
	}

	/* pack commands */
	cmd_buf->header.type = DMUB_CMD__REG_SEQ_READ_MODIFY_WRITE;
	seq = &cmd_buf->seq[offload->reg_seq_count];

	if (offload->reg_seq_count) {
		if (cmd_buf->seq[offload->reg_seq_count - 1].addr == addr)
			offload->same_addr_count++;
		else
			offload->same_addr_count = 0;
	}

	seq->addr = addr;
	seq->modify_mask = field_value_mask->mask;
	seq->modify_value = field_value_mask->value;
	offload->reg_seq_count++;

	return field_value_mask->value;
}
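
/* pack a register poll as a DMUB reg-wait command: the firmware does the
 * polling, with the driver-side worst-case wait folded into time_out_us
 */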

static void dmub_reg_wait_done_pack(const struct dc_context *ctx, uint32_t addr,
		uint32_t mask, uint32_t shift, uint32_t condition_value, uint32_t time_out_us)
{
	struct dc_reg_helper_state *offload = &ctx->dmub_srv->reg_helper_offload;
	struct dmub_rb_cmd_reg_wait *cmd_buf = &offload->cmd_data.reg_wait;

	cmd_buf->header.type = DMUB_CMD__REG_REG_WAIT;
	cmd_buf->reg_wait.addr = addr;
	cmd_buf->reg_wait.condition_field_value = mask & (condition_value << shift);
	cmd_buf->reg_wait.mask = mask;
	cmd_buf->reg_wait.time_out_us = time_out_us;
}
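
/*
 * Update n fields of one register with a single read-modify-write (or pack
 * it for DMUB when a gather is in progress).  A sketch of a direct call,
 * with hypothetical shift/mask values:
 *
 *	generic_reg_update_ex(ctx, addr, 2,
 *		0, 0x0000000f, value_a,		// field A: 4 bits at shift 0
 *		8, 0x0000ff00, value_b);	// field B: 8 bits at shift 8
 *
 * i.e. one shift, mask, field_value triplet per field, n triplets total;
 * the REG_UPDATE-style macros expand to calls of this shape.
 */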

uint32_t generic_reg_update_ex(const struct dc_context *ctx,
		uint32_t addr, int n,
		uint8_t shift1, uint32_t mask1, uint32_t field_value1,
		...)
{
	struct dc_reg_value_masks field_value_mask = {0};
	uint32_t reg_val;
	va_list ap;

	va_start(ap, field_value1);

	set_reg_field_values(&field_value_mask, addr, n, shift1, mask1,
			field_value1, ap);

	va_end(ap);

	if (ctx->dmub_srv &&
	    ctx->dmub_srv->reg_helper_offload.gather_in_progress)
		return dmub_reg_value_pack(ctx, addr, &field_value_mask);
		/* todo: return void so we can decouple code running in driver from register states */

	/* mmio write directly */
	reg_val = dm_read_reg(ctx, addr);
	reg_val = (reg_val & ~field_value_mask.mask) | field_value_mask.value;
	dm_write_reg(ctx, addr, reg_val);

	return reg_val;
}
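
/* like generic_reg_update_ex(), but applies the fields to a caller-supplied
 * reg_val instead of reading the register first; suited to write-only or
 * fully-specified registers
 */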

uint32_t generic_reg_set_ex(const struct dc_context *ctx,
		uint32_t addr, uint32_t reg_val, int n,
		uint8_t shift1, uint32_t mask1, uint32_t field_value1,
		...)
{
	struct dc_reg_value_masks field_value_mask = {0};
	va_list ap;

	va_start(ap, field_value1);

	set_reg_field_values(&field_value_mask, addr, n, shift1, mask1,
			field_value1, ap);

	va_end(ap);

	/* mmio write directly */
	reg_val = (reg_val & ~field_value_mask.mask) | field_value_mask.value;

	if (ctx->dmub_srv &&
	    ctx->dmub_srv->reg_helper_offload.gather_in_progress) {
		return dmub_reg_value_burst_set_pack(ctx, addr, reg_val);
		/* todo: return void so we can decouple code running in driver from register states */
	}

	dm_write_reg(ctx, addr, reg_val);

	return reg_val;
}
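
/* register reads cannot be represented in a gathered sequence, so a read
 * while gathering (outside of a burst-write window) is a caller bug; see
 * the ASSERT below
 */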

uint32_t dm_read_reg_func(
		const struct dc_context *ctx,
		uint32_t address,
		const char *func_name)
{
	uint32_t value;
#ifdef DM_CHECK_ADDR_0
	if (address == 0) {
		DC_ERR("invalid register read; address = 0\n");
		return 0;
	}
#endif

	if (ctx->dmub_srv &&
	    ctx->dmub_srv->reg_helper_offload.gather_in_progress &&
	    !ctx->dmub_srv->reg_helper_offload.should_burst_write) {
		ASSERT(false);
		return 0;
	}

	value = cgs_read_register(ctx->cgs_device, address);
	trace_amdgpu_dc_rreg(&ctx->perf_trace->read_count, address, value);

	return value;
}

uint32_t generic_reg_get(const struct dc_context *ctx, uint32_t addr,
		uint8_t shift, uint32_t mask, uint32_t *field_value)
{
	uint32_t reg_val = dm_read_reg(ctx, addr);
	*field_value = get_reg_field_value_ex(reg_val, mask, shift);
	return reg_val;
}
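
/* the generic_reg_getN() variants read the register once and extract N
 * fields from the cached value, avoiding repeated MMIO reads
 */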

uint32_t generic_reg_get2(const struct dc_context *ctx, uint32_t addr,
		uint8_t shift1, uint32_t mask1, uint32_t *field_value1,
		uint8_t shift2, uint32_t mask2, uint32_t *field_value2)
{
	uint32_t reg_val = dm_read_reg(ctx, addr);
	*field_value1 = get_reg_field_value_ex(reg_val, mask1, shift1);
	*field_value2 = get_reg_field_value_ex(reg_val, mask2, shift2);
	return reg_val;
}

uint32_t generic_reg_get3(const struct dc_context *ctx, uint32_t addr,
		uint8_t shift1, uint32_t mask1, uint32_t *field_value1,
		uint8_t shift2, uint32_t mask2, uint32_t *field_value2,
		uint8_t shift3, uint32_t mask3, uint32_t *field_value3)
{
	uint32_t reg_val = dm_read_reg(ctx, addr);
	*field_value1 = get_reg_field_value_ex(reg_val, mask1, shift1);
	*field_value2 = get_reg_field_value_ex(reg_val, mask2, shift2);
	*field_value3 = get_reg_field_value_ex(reg_val, mask3, shift3);
	return reg_val;
}

uint32_t generic_reg_get4(const struct dc_context *ctx, uint32_t addr,
		uint8_t shift1, uint32_t mask1, uint32_t *field_value1,
		uint8_t shift2, uint32_t mask2, uint32_t *field_value2,
		uint8_t shift3, uint32_t mask3, uint32_t *field_value3,
		uint8_t shift4, uint32_t mask4, uint32_t *field_value4)
{
	uint32_t reg_val = dm_read_reg(ctx, addr);
	*field_value1 = get_reg_field_value_ex(reg_val, mask1, shift1);
	*field_value2 = get_reg_field_value_ex(reg_val, mask2, shift2);
	*field_value3 = get_reg_field_value_ex(reg_val, mask3, shift3);
	*field_value4 = get_reg_field_value_ex(reg_val, mask4, shift4);
	return reg_val;
}

uint32_t generic_reg_get5(const struct dc_context *ctx, uint32_t addr,
		uint8_t shift1, uint32_t mask1, uint32_t *field_value1,
		uint8_t shift2, uint32_t mask2, uint32_t *field_value2,
		uint8_t shift3, uint32_t mask3, uint32_t *field_value3,
		uint8_t shift4, uint32_t mask4, uint32_t *field_value4,
		uint8_t shift5, uint32_t mask5, uint32_t *field_value5)
{
	uint32_t reg_val = dm_read_reg(ctx, addr);
	*field_value1 = get_reg_field_value_ex(reg_val, mask1, shift1);
	*field_value2 = get_reg_field_value_ex(reg_val, mask2, shift2);
	*field_value3 = get_reg_field_value_ex(reg_val, mask3, shift3);
	*field_value4 = get_reg_field_value_ex(reg_val, mask4, shift4);
	*field_value5 = get_reg_field_value_ex(reg_val, mask5, shift5);
	return reg_val;
}

uint32_t generic_reg_get6(const struct dc_context *ctx, uint32_t addr,
		uint8_t shift1, uint32_t mask1, uint32_t *field_value1,
		uint8_t shift2, uint32_t mask2, uint32_t *field_value2,
		uint8_t shift3, uint32_t mask3, uint32_t *field_value3,
		uint8_t shift4, uint32_t mask4, uint32_t *field_value4,
		uint8_t shift5, uint32_t mask5, uint32_t *field_value5,
		uint8_t shift6, uint32_t mask6, uint32_t *field_value6)
{
	uint32_t reg_val = dm_read_reg(ctx, addr);
	*field_value1 = get_reg_field_value_ex(reg_val, mask1, shift1);
	*field_value2 = get_reg_field_value_ex(reg_val, mask2, shift2);
	*field_value3 = get_reg_field_value_ex(reg_val, mask3, shift3);
	*field_value4 = get_reg_field_value_ex(reg_val, mask4, shift4);
	*field_value5 = get_reg_field_value_ex(reg_val, mask5, shift5);
	*field_value6 = get_reg_field_value_ex(reg_val, mask6, shift6);
	return reg_val;
}

uint32_t generic_reg_get7(const struct dc_context *ctx, uint32_t addr,
		uint8_t shift1, uint32_t mask1, uint32_t *field_value1,
		uint8_t shift2, uint32_t mask2, uint32_t *field_value2,
		uint8_t shift3, uint32_t mask3, uint32_t *field_value3,
		uint8_t shift4, uint32_t mask4, uint32_t *field_value4,
		uint8_t shift5, uint32_t mask5, uint32_t *field_value5,
		uint8_t shift6, uint32_t mask6, uint32_t *field_value6,
		uint8_t shift7, uint32_t mask7, uint32_t *field_value7)
{
	uint32_t reg_val = dm_read_reg(ctx, addr);
	*field_value1 = get_reg_field_value_ex(reg_val, mask1, shift1);
	*field_value2 = get_reg_field_value_ex(reg_val, mask2, shift2);
	*field_value3 = get_reg_field_value_ex(reg_val, mask3, shift3);
	*field_value4 = get_reg_field_value_ex(reg_val, mask4, shift4);
	*field_value5 = get_reg_field_value_ex(reg_val, mask5, shift5);
	*field_value6 = get_reg_field_value_ex(reg_val, mask6, shift6);
	*field_value7 = get_reg_field_value_ex(reg_val, mask7, shift7);
	return reg_val;
}

uint32_t generic_reg_get8(const struct dc_context *ctx, uint32_t addr,
		uint8_t shift1, uint32_t mask1, uint32_t *field_value1,
		uint8_t shift2, uint32_t mask2, uint32_t *field_value2,
		uint8_t shift3, uint32_t mask3, uint32_t *field_value3,
		uint8_t shift4, uint32_t mask4, uint32_t *field_value4,
		uint8_t shift5, uint32_t mask5, uint32_t *field_value5,
		uint8_t shift6, uint32_t mask6, uint32_t *field_value6,
		uint8_t shift7, uint32_t mask7, uint32_t *field_value7,
		uint8_t shift8, uint32_t mask8, uint32_t *field_value8)
{
	uint32_t reg_val = dm_read_reg(ctx, addr);
	*field_value1 = get_reg_field_value_ex(reg_val, mask1, shift1);
	*field_value2 = get_reg_field_value_ex(reg_val, mask2, shift2);
	*field_value3 = get_reg_field_value_ex(reg_val, mask3, shift3);
	*field_value4 = get_reg_field_value_ex(reg_val, mask4, shift4);
	*field_value5 = get_reg_field_value_ex(reg_val, mask5, shift5);
	*field_value6 = get_reg_field_value_ex(reg_val, mask6, shift6);
	*field_value7 = get_reg_field_value_ex(reg_val, mask7, shift7);
	*field_value8 = get_reg_field_value_ex(reg_val, mask8, shift8);
	return reg_val;
}

/* note: the va version of this is a pretty bad idea, since there is an
 * output parameter passed by pointer; the compiler won't be able to check
 * for size match and it is prone to stack-corruption type of bugs

uint32_t generic_reg_get(const struct dc_context *ctx,
		uint32_t addr, int n, ...)
{
	uint32_t shift, mask;
	uint32_t *field_value;
	uint32_t reg_val;
	int i = 0;

	reg_val = dm_read_reg(ctx, addr);

	va_list ap;
	va_start(ap, n);

	while (i < n) {
		shift = va_arg(ap, uint32_t);
		mask = va_arg(ap, uint32_t);
		field_value = va_arg(ap, uint32_t *);

		*field_value = get_reg_field_value_ex(reg_val, mask, shift);
		i++;
	}

	va_end(ap);

	return reg_val;
}
*/
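
/* poll a register field until it reads condition_value: the first sample is
 * taken immediately, then one delay per retry.  Under DMUB gathering the
 * poll is instead packed as a reg-wait command whose timeout is the
 * driver-side worst case, delay_between_poll_us * time_out_num_tries.
 */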

void generic_reg_wait(const struct dc_context *ctx,
		uint32_t addr, uint32_t shift, uint32_t mask, uint32_t condition_value,
		unsigned int delay_between_poll_us, unsigned int time_out_num_tries,
		const char *func_name, int line)
{
	uint32_t field_value;
	uint32_t reg_val;
	int i;

	if (ctx->dmub_srv &&
	    ctx->dmub_srv->reg_helper_offload.gather_in_progress) {
		dmub_reg_wait_done_pack(ctx, addr, mask, shift, condition_value,
				delay_between_poll_us * time_out_num_tries);
		return;
	}

	/*
	 * Something is terribly wrong if time out is > 3000ms.
	 * 3000ms is the maximum time needed for SMU to pass values back.
	 * This value comes from experiments.
	 */
	ASSERT(delay_between_poll_us * time_out_num_tries <= 3000000);

	for (i = 0; i <= time_out_num_tries; i++) {
		if (i) {
			if (delay_between_poll_us >= 1000)
				msleep(delay_between_poll_us / 1000);
			else if (delay_between_poll_us > 0)
				udelay(delay_between_poll_us);
		}

		reg_val = dm_read_reg(ctx, addr);

		field_value = get_reg_field_value_ex(reg_val, mask, shift);

		if (field_value == condition_value) {
			if (i * delay_between_poll_us > 1000 &&
			    !IS_FPGA_MAXIMUS_DC(ctx->dce_environment))
				DC_LOG_DC("REG_WAIT taking a while: %dms in %s line:%d\n",
						delay_between_poll_us * i / 1000,
						func_name, line);
			return;
		}
	}

	DC_LOG_WARNING("REG_WAIT timeout %dus * %d tries - %s line:%d\n",
			delay_between_poll_us, time_out_num_tries,
			func_name, line);

	if (!IS_FPGA_MAXIMUS_DC(ctx->dce_environment))
		BREAK_TO_DEBUGGER();
}
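
/* indirect register pairs: the index register selects which internal
 * register the data register maps to, so every access is an index write
 * followed by a data access
 */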

void generic_write_indirect_reg(const struct dc_context *ctx,
		uint32_t addr_index, uint32_t addr_data,
		uint32_t index, uint32_t data)
{
	dm_write_reg(ctx, addr_index, index);
	dm_write_reg(ctx, addr_data, data);
}

uint32_t generic_read_indirect_reg(const struct dc_context *ctx,
		uint32_t addr_index, uint32_t addr_data,
		uint32_t index)
{
	uint32_t value = 0;

	// when reg read, there should not be any offload.
	if (ctx->dmub_srv &&
	    ctx->dmub_srv->reg_helper_offload.gather_in_progress) {
		ASSERT(false);
	}

	dm_write_reg(ctx, addr_index, index);
	value = dm_read_reg(ctx, addr_data);

	return value;
}

uint32_t generic_indirect_reg_update_ex(const struct dc_context *ctx,
		uint32_t addr_index, uint32_t addr_data,
		uint32_t index, uint32_t reg_val, int n,
		uint8_t shift1, uint32_t mask1, uint32_t field_value1,
		...)
{
	uint32_t shift, mask, field_value;
	int i = 1;

	va_list ap;

	va_start(ap, field_value1);

	reg_val = set_reg_field_value_ex(reg_val, field_value1, mask1, shift1);

	while (i < n) {
		shift = va_arg(ap, uint32_t);
		mask = va_arg(ap, uint32_t);
		field_value = va_arg(ap, uint32_t);

		reg_val = set_reg_field_value_ex(reg_val, field_value, mask, shift);

		i++;
	}

	generic_write_indirect_reg(ctx, addr_index, addr_data, index, reg_val);
	va_end(ap);

	return reg_val;
}
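
/*
 * Offload entry points.  A sketch of the expected calling sequence, where
 * REG_UPDATE/REG_WAIT are the reg_helper macros that route through the
 * helpers above:
 *
 *	reg_sequence_start_gather(ctx);
 *	REG_UPDATE(...);			// packed, no MMIO
 *	REG_WAIT(...);				// packed as DMUB reg-wait
 *	reg_sequence_start_execute(ctx);	// submit + kick DMUB
 *	reg_sequence_wait_done(ctx);		// poll until DMUB is idle
 */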

void reg_sequence_start_gather(const struct dc_context *ctx)
{
	/* if reg sequence is supported and enabled, set flag to
	 * indicate we want to have REG_SET, REG_UPDATE macro build
	 * reg sequence command buffer rather than MMIO directly.
	 */

	if (ctx->dmub_srv && ctx->dc->debug.dmub_offload_enabled) {
		struct dc_reg_helper_state *offload =
			&ctx->dmub_srv->reg_helper_offload;

		/* caller sequence mismatch.  need to debug caller.  offload will not work!!! */
		ASSERT(!offload->gather_in_progress);

		offload->gather_in_progress = true;
	}
}

void reg_sequence_start_execute(const struct dc_context *ctx)
{
	struct dc_reg_helper_state *offload;

	if (!ctx->dmub_srv)
		return;

	offload = &ctx->dmub_srv->reg_helper_offload;

	if (offload && offload->gather_in_progress) {
		offload->gather_in_progress = false;
		offload->should_burst_write = false;
		switch (offload->cmd_data.cmd_common.header.type) {
		case DMUB_CMD__REG_SEQ_READ_MODIFY_WRITE:
			submit_dmub_read_modify_write(offload, ctx);
			break;
		case DMUB_CMD__REG_REG_WAIT:
			submit_dmub_reg_wait(offload, ctx);
			break;
		case DMUB_CMD__REG_SEQ_BURST_WRITE:
			submit_dmub_burst_write(offload, ctx);
			break;
		default:
			return;
		}

		dc_dmub_srv_cmd_execute(ctx->dmub_srv);
	}
}

void reg_sequence_wait_done(const struct dc_context *ctx)
{
	/* callback to DM to poll for last submission done */
	struct dc_reg_helper_state *offload;

	if (!ctx->dmub_srv)
		return;

	offload = &ctx->dmub_srv->reg_helper_offload;

	if (offload &&
	    ctx->dc->debug.dmub_offload_enabled &&
	    !ctx->dc->debug.dmcub_emulation) {
		dc_dmub_srv_wait_idle(ctx->dmub_srv);
	}
}