/*
 * Copyright 2018 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <linux/pci.h>
#include <linux/reboot.h>

#include "hwmgr.h"
#include "pp_debug.h"
#include "ppatomctrl.h"
#include "ppsmc.h"
#include "atom.h"
#include "ivsrcid/thm/irqsrcs_thm_9_0.h"
#include "ivsrcid/smuio/irqsrcs_smuio_9_0.h"
#include "ivsrcid/ivsrcid_vislands30.h"
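/*
 * SVI2-style VID encoding, assuming VOLTAGE_SCALE == 4 and vddc in mV:
 * VIDs step down in 6.25 mV units from a 1.55 V ceiling, i.e. in fixed
 * point VID = (6200 - vddc * 4) / 25.  E.g. vddc = 1000 mV gives VID 88,
 * and convert_to_vddc(88) recovers 1000 mV.
 */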
uint8_t convert_to_vid(uint16_t vddc)
{
	return (uint8_t) ((6200 - (vddc * VOLTAGE_SCALE)) / 25);
}

uint16_t convert_to_vddc(uint8_t vid)
{
	return (uint16_t) ((6200 - (vid * 25)) / VOLTAGE_SCALE);
}
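/*
 * Copy a little-endian u32 array from the powerplay table into a newly
 * allocated CPU-endian array; on success the caller owns *pptable_info_array.
 */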
int phm_copy_clock_limits_array(
	struct pp_hwmgr *hwmgr,
	uint32_t **pptable_info_array,
	const uint32_t *pptable_array,
	uint32_t power_saving_clock_count)
{
	uint32_t array_size, i;
	uint32_t *table;

	array_size = sizeof(uint32_t) * power_saving_clock_count;
	table = kzalloc(array_size, GFP_KERNEL);
	if (NULL == table)
		return -ENOMEM;

	for (i = 0; i < power_saving_clock_count; i++)
		table[i] = le32_to_cpu(pptable_array[i]);

	*pptable_info_array = table;

	return 0;
}
int phm_copy_overdrive_settings_limits_array(
	struct pp_hwmgr *hwmgr,
	uint32_t **pptable_info_array,
	const uint32_t *pptable_array,
	uint32_t od_setting_count)
{
	uint32_t array_size, i;
	uint32_t *table;

	array_size = sizeof(uint32_t) * od_setting_count;
	table = kzalloc(array_size, GFP_KERNEL);
	if (NULL == table)
		return -ENOMEM;

	for (i = 0; i < od_setting_count; i++)
		table[i] = le32_to_cpu(pptable_array[i]);

	*pptable_info_array = table;

	return 0;
}
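/*
 * Patch one byte or halfword lane of a 32-bit register value: offset % 4
 * selects the byte lane, so e.g. offset % 4 == 2 with size == sizeof(uint16_t)
 * replaces bits 16..31 with 'field'.
 */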
uint32_t phm_set_field_to_u32(u32 offset, u32 original_data, u32 field, u32 size)
{
	u32 mask = 0;
	u32 shift = 0;

	shift = (offset % 4) << 3;
	if (size == sizeof(uint8_t))
		mask = 0xFF << shift;
	else if (size == sizeof(uint16_t))
		mask = 0xFFFF << shift;

	original_data &= ~mask;
	original_data |= (field << shift);
	return original_data;
}
/**
 * Returns once the part of the register indicated by the mask has
 * reached the given value, or -ETIME after hwmgr->usec_timeout
 * microseconds.
 */
int phm_wait_on_register(struct pp_hwmgr *hwmgr, uint32_t index,
			 uint32_t value, uint32_t mask)
{
	uint32_t i;
	uint32_t cur_value;

	if (hwmgr == NULL || hwmgr->device == NULL) {
		pr_err("Invalid Hardware Manager!");
		return -EINVAL;
	}

	for (i = 0; i < hwmgr->usec_timeout; i++) {
		cur_value = cgs_read_register(hwmgr->device, index);
		if ((cur_value & mask) == (value & mask))
			break;
		udelay(1);
	}

	/* timeout means wrong logic */
	if (i == hwmgr->usec_timeout)
		return -ETIME;
	return 0;
}
/**
 * Returns once the part of the register indicated by the mask has
 * reached the given value. The indirect space is described by giving
 * the memory-mapped index of the indirect index register.
 */
int phm_wait_on_indirect_register(struct pp_hwmgr *hwmgr,
				uint32_t indirect_port,
				uint32_t index,
				uint32_t value,
				uint32_t mask)
{
	if (hwmgr == NULL || hwmgr->device == NULL) {
		pr_err("Invalid Hardware Manager!");
		return -EINVAL;
	}

	cgs_write_register(hwmgr->device, indirect_port, index);
	return phm_wait_on_register(hwmgr, indirect_port + 1, value, mask);
}
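/*
 * Counterpart of phm_wait_on_register: returns once the part of the
 * register indicated by the mask differs from the given value.
 */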
int phm_wait_for_register_unequal(struct pp_hwmgr *hwmgr,
				uint32_t index,
				uint32_t value, uint32_t mask)
{
	uint32_t i;
	uint32_t cur_value;

	if (hwmgr == NULL || hwmgr->device == NULL)
		return -EINVAL;

	for (i = 0; i < hwmgr->usec_timeout; i++) {
		cur_value = cgs_read_register(hwmgr->device, index);
		if ((cur_value & mask) != (value & mask))
			break;
		udelay(1);
	}

	/* timeout means wrong logic */
	if (i == hwmgr->usec_timeout)
		return -ETIME;
	return 0;
}
int phm_wait_for_indirect_register_unequal(struct pp_hwmgr *hwmgr,
				uint32_t indirect_port,
				uint32_t index,
				uint32_t value,
				uint32_t mask)
{
	if (hwmgr == NULL || hwmgr->device == NULL)
		return -EINVAL;

	cgs_write_register(hwmgr->device, indirect_port, index);
	return phm_wait_for_register_unequal(hwmgr, indirect_port + 1,
				value, mask);
}
bool phm_cf_want_uvd_power_gating(struct pp_hwmgr *hwmgr)
{
	return phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			       PHM_PlatformCaps_UVDPowerGating);
}

bool phm_cf_want_vce_power_gating(struct pp_hwmgr *hwmgr)
{
	return phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			       PHM_PlatformCaps_VCEPowerGating);
}
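/*
 * Remove duplicate voltage entries in place, preserving first-seen order;
 * a scratch copy is used so vol_table is only rewritten on success.
 */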
int phm_trim_voltage_table(struct pp_atomctrl_voltage_table *vol_table)
{
	uint32_t i, j;
	uint16_t vvalue;
	bool found = false;
	struct pp_atomctrl_voltage_table *table;

	PP_ASSERT_WITH_CODE((NULL != vol_table),
			"Voltage Table empty.", return -EINVAL);

	table = kzalloc(sizeof(struct pp_atomctrl_voltage_table),
			GFP_KERNEL);
	if (NULL == table)
		return -ENOMEM;

	table->mask_low = vol_table->mask_low;
	table->phase_delay = vol_table->phase_delay;

	for (i = 0; i < vol_table->count; i++) {
		vvalue = vol_table->entries[i].value;
		found = false;

		for (j = 0; j < table->count; j++) {
			if (vvalue == table->entries[j].value) {
				found = true;
				break;
			}
		}

		if (!found) {
			table->entries[table->count].value = vvalue;
			table->entries[table->count].smio_low =
					vol_table->entries[i].smio_low;
			table->count++;
		}
	}

	memcpy(vol_table, table, sizeof(struct pp_atomctrl_voltage_table));
	kfree(table);

	return 0;
}
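/*
 * The SVI2 helpers below each project a single field (mvdd, vddci or
 * us_vdd) out of a dependency/lookup table into a voltage table; the
 * dependency-based variants then trim duplicate entries.
 */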
int phm_get_svi2_mvdd_voltage_table(struct pp_atomctrl_voltage_table *vol_table,
		phm_ppt_v1_clock_voltage_dependency_table *dep_table)
{
	uint32_t i;
	int result;

	PP_ASSERT_WITH_CODE((0 != dep_table->count),
			"Voltage Dependency Table empty.", return -EINVAL);

	PP_ASSERT_WITH_CODE((NULL != vol_table),
			"vol_table empty.", return -EINVAL);

	vol_table->mask_low = 0;
	vol_table->phase_delay = 0;
	vol_table->count = dep_table->count;

	for (i = 0; i < dep_table->count; i++) {
		vol_table->entries[i].value = dep_table->entries[i].mvdd;
		vol_table->entries[i].smio_low = 0;
	}

	result = phm_trim_voltage_table(vol_table);
	PP_ASSERT_WITH_CODE((0 == result),
			"Failed to trim MVDD table.", return result);

	return 0;
}
int phm_get_svi2_vddci_voltage_table(struct pp_atomctrl_voltage_table *vol_table,
		phm_ppt_v1_clock_voltage_dependency_table *dep_table)
{
	uint32_t i;
	int result;

	PP_ASSERT_WITH_CODE((0 != dep_table->count),
			"Voltage Dependency Table empty.", return -EINVAL);

	PP_ASSERT_WITH_CODE((NULL != vol_table),
			"vol_table empty.", return -EINVAL);

	vol_table->mask_low = 0;
	vol_table->phase_delay = 0;
	vol_table->count = dep_table->count;

	for (i = 0; i < dep_table->count; i++) {
		vol_table->entries[i].value = dep_table->entries[i].vddci;
		vol_table->entries[i].smio_low = 0;
	}

	result = phm_trim_voltage_table(vol_table);
	PP_ASSERT_WITH_CODE((0 == result),
			"Failed to trim VDDCI table.", return result);

	return 0;
}
int phm_get_svi2_vdd_voltage_table(struct pp_atomctrl_voltage_table *vol_table,
		phm_ppt_v1_voltage_lookup_table *lookup_table)
{
	uint32_t i;

	PP_ASSERT_WITH_CODE((0 != lookup_table->count),
			"Voltage Lookup Table empty.", return -EINVAL);

	PP_ASSERT_WITH_CODE((NULL != vol_table),
			"vol_table empty.", return -EINVAL);

	vol_table->mask_low = 0;
	vol_table->phase_delay = 0;

	vol_table->count = lookup_table->count;

	for (i = 0; i < vol_table->count; i++) {
		vol_table->entries[i].value = lookup_table->entries[i].us_vdd;
		vol_table->entries[i].smio_low = 0;
	}

	return 0;
}
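/*
 * If the table exceeds what the state table can hold, drop the lowest
 * voltages and keep only the top max_vol_steps entries.
 */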
void phm_trim_voltage_table_to_fit_state_table(uint32_t max_vol_steps,
		struct pp_atomctrl_voltage_table *vol_table)
{
	unsigned int i, diff;

	if (vol_table->count <= max_vol_steps)
		return;

	diff = vol_table->count - max_vol_steps;

	for (i = 0; i < max_vol_steps; i++)
		vol_table->entries[i] = vol_table->entries[i + diff];

	vol_table->count = max_vol_steps;
}
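/*
 * The void * based DPM helpers below assume the vi_dpm_table layout:
 * clamp the level count to 'max' and start with every level disabled.
 */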
int phm_reset_single_dpm_table(void *table,
		uint32_t count, int max)
{
	uint32_t i;
	struct vi_dpm_table *dpm_table = (struct vi_dpm_table *)table;

	dpm_table->count = count > max ? max : count;

	for (i = 0; i < dpm_table->count; i++)
		dpm_table->dpm_level[i].enabled = false;

	return 0;
}
void phm_setup_pcie_table_entry(
	void *table,
	uint32_t index, uint32_t pcie_gen,
	uint32_t pcie_lanes)
{
	struct vi_dpm_table *dpm_table = (struct vi_dpm_table *)table;

	dpm_table->dpm_level[index].value = pcie_gen;
	dpm_table->dpm_level[index].param1 = pcie_lanes;
	dpm_table->dpm_level[index].enabled = true;
}
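/*
 * Build an enable bitmask with bit i set for each enabled DPM level i;
 * e.g. levels {0, 2} enabled yields 0x5.
 */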
int32_t phm_get_dpm_level_enable_mask_value(void *table)
{
	int32_t i;
	int32_t mask = 0;
	struct vi_dpm_table *dpm_table = (struct vi_dpm_table *)table;

	for (i = dpm_table->count; i > 0; i--) {
		mask = mask << 1;
		if (dpm_table->dpm_level[i - 1].enabled)
			mask |= 0x1;
	}

	return mask;
}
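/*
 * Return the index of the first lookup entry at or above 'voltage',
 * falling back to the last (highest) entry when the request exceeds the
 * table maximum; phm_get_voltage_id below does the same for an atomctrl
 * voltage table.
 */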
uint8_t phm_get_voltage_index(
		struct phm_ppt_v1_voltage_lookup_table *lookup_table, uint16_t voltage)
{
	uint8_t count;
	uint8_t i;

	PP_ASSERT_WITH_CODE((NULL != lookup_table),
			"Lookup Table empty.", return 0);

	count = (uint8_t) (lookup_table->count);
	PP_ASSERT_WITH_CODE((0 != count),
			"Lookup Table empty.", return 0);

	for (i = 0; i < count; i++) {
		/* find first voltage equal or bigger than requested */
		if (lookup_table->entries[i].us_vdd >= voltage)
			return i;
	}

	/* voltage is bigger than max voltage in the table */
	return i - 1;
}
uint8_t phm_get_voltage_id(pp_atomctrl_voltage_table *voltage_table,
		uint32_t voltage)
{
	uint8_t count;
	uint8_t i;

	PP_ASSERT_WITH_CODE((NULL != voltage_table),
			"Voltage Table empty.", return 0);

	count = (uint8_t) (voltage_table->count);
	PP_ASSERT_WITH_CODE((0 != count),
			"Voltage Table empty.", return 0);

	for (i = 0; i < count; i++) {
		/* find first voltage bigger than requested */
		if (voltage_table->entries[i].value >= voltage)
			return i;
	}

	/* voltage is bigger than max voltage in the table */
	return i - 1;
}
uint16_t phm_find_closest_vddci(struct pp_atomctrl_voltage_table *vddci_table, uint16_t vddci)
{
	uint32_t i;

	for (i = 0; i < vddci_table->count; i++) {
		if (vddci_table->entries[i].value >= vddci)
			return vddci_table->entries[i].value;
	}

	pr_debug("vddci is larger than max value in vddci_table\n");
	return vddci_table->entries[i - 1].value;
}
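/*
 * Find the DPM level whose value matches 'value'; *boot_level is only
 * written when a match exists.
 */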
int phm_find_boot_level(void *table,
		uint32_t value, uint32_t *boot_level)
{
	int result = -EINVAL;
	uint32_t i;
	struct vi_dpm_table *dpm_table = (struct vi_dpm_table *)table;

	for (i = 0; i < dpm_table->count; i++) {
		if (value == dpm_table->dpm_level[i].value) {
			*boot_level = i;
			result = 0;
			break;
		}
	}

	return result;
}
int phm_get_sclk_for_voltage_evv(struct pp_hwmgr *hwmgr,
	phm_ppt_v1_voltage_lookup_table *lookup_table,
	uint16_t virtual_voltage_id, int32_t *sclk)
{
	uint8_t entry_id;
	uint8_t voltage_id;
	struct phm_ppt_v1_information *table_info =
			(struct phm_ppt_v1_information *)(hwmgr->pptable);

	PP_ASSERT_WITH_CODE(lookup_table->count != 0,
			"Lookup table is empty", return -EINVAL);

	/* search for leakage voltage ID 0xff01 ~ 0xff08 and the matching sclk */
	for (entry_id = 0; entry_id < table_info->vdd_dep_on_sclk->count; entry_id++) {
		voltage_id = table_info->vdd_dep_on_sclk->entries[entry_id].vddInd;
		if (lookup_table->entries[voltage_id].us_vdd == virtual_voltage_id)
			break;
	}

	if (entry_id >= table_info->vdd_dep_on_sclk->count) {
		pr_debug("Can't find requested voltage id in vdd_dep_on_sclk table\n");
		return -EINVAL;
	}

	*sclk = table_info->vdd_dep_on_sclk->entries[entry_id].clk;

	return 0;
}
/**
 * Initialize Dynamic State Adjustment Rule Settings
 *
 * @param hwmgr  the address of the powerplay hardware manager.
 */
int phm_initializa_dynamic_state_adjustment_rule_settings(struct pp_hwmgr *hwmgr)
{
	struct phm_clock_voltage_dependency_table *table_clk_vlt;
	struct phm_ppt_v1_information *pptable_info =
			(struct phm_ppt_v1_information *)(hwmgr->pptable);

	/* initialize vddc_dep_on_dal_pwrl table */
	table_clk_vlt = kzalloc(struct_size(table_clk_vlt, entries, 4),
				GFP_KERNEL);
	if (NULL == table_clk_vlt) {
		pr_err("Cannot allocate space for vddc_dep_on_dal_pwrl!\n");
		return -ENOMEM;
	}

	table_clk_vlt->count = 4;
	table_clk_vlt->entries[0].clk = PP_DAL_POWERLEVEL_ULTRALOW;
	table_clk_vlt->entries[0].v = 0;
	table_clk_vlt->entries[1].clk = PP_DAL_POWERLEVEL_LOW;
	table_clk_vlt->entries[1].v = 720;
	table_clk_vlt->entries[2].clk = PP_DAL_POWERLEVEL_NOMINAL;
	table_clk_vlt->entries[2].v = 810;
	table_clk_vlt->entries[3].clk = PP_DAL_POWERLEVEL_PERFORMANCE;
	table_clk_vlt->entries[3].v = 900;

	if (pptable_info != NULL)
		pptable_info->vddc_dep_on_dal_pwrl = table_clk_vlt;
	hwmgr->dyn_state.vddc_dep_on_dal_pwrl = table_clk_vlt;

	return 0;
}
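/*
 * Index of the least-significant set bit; callers must pass a non-zero
 * mask, otherwise this walks past bit 31.
 */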
uint32_t phm_get_lowest_enabled_level(struct pp_hwmgr *hwmgr, uint32_t mask)
{
	uint32_t level = 0;

	while (0 == (mask & (1 << level)))
		level++;

	return level;
}
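/*
 * Translate the current display (DAL) power level into a minimum VDDC
 * request: look up the voltage for that level, round it up to the next
 * entry of the sclk/voltage dependency table and forward it to the SMC.
 */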
void phm_apply_dal_min_voltage_request(struct pp_hwmgr *hwmgr)
{
	struct phm_ppt_v1_information *table_info =
			(struct phm_ppt_v1_information *)hwmgr->pptable;
	struct phm_clock_voltage_dependency_table *table =
			table_info->vddc_dep_on_dal_pwrl;
	struct phm_ppt_v1_clock_voltage_dependency_table *vddc_table;
	enum PP_DAL_POWERLEVEL dal_power_level = hwmgr->dal_power_level;
	uint32_t req_vddc = 0, req_volt, i;

	if (!table || table->count <= 0
		|| dal_power_level < PP_DAL_POWERLEVEL_ULTRALOW
		|| dal_power_level > PP_DAL_POWERLEVEL_PERFORMANCE)
		return;

	for (i = 0; i < table->count; i++) {
		if (dal_power_level == table->entries[i].clk) {
			req_vddc = table->entries[i].v;
			break;
		}
	}

	vddc_table = table_info->vdd_dep_on_sclk;
	for (i = 0; i < vddc_table->count; i++) {
		if (req_vddc <= vddc_table->entries[i].vddc) {
			req_volt = (((uint32_t)vddc_table->entries[i].vddc) * VOLTAGE_SCALE);
			smum_send_msg_to_smc_with_parameter(hwmgr,
					PPSMC_MSG_VddC_Request,
					req_volt,
					NULL);
			return;
		}
	}
	pr_err("DAL requested level cannot be matched to an available voltage in the VDDC DPM table!\n");
}
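/*
 * Chip-generation dispatch for EVV voltage lookup: pre-Tonga parts use
 * the plain EVV query, pre-Polaris10 parts the sclk-based one (with a
 * sanity clamp on the result), and newer parts the "ai" variant, whose
 * finer-grained result is scaled back to the same units here.
 */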
int phm_get_voltage_evv_on_sclk(struct pp_hwmgr *hwmgr, uint8_t voltage_type,
				uint32_t sclk, uint16_t id, uint16_t *voltage)
{
	uint32_t vol;
	int ret = 0;

	if (hwmgr->chip_id < CHIP_TONGA) {
		ret = atomctrl_get_voltage_evv(hwmgr, id, voltage);
	} else if (hwmgr->chip_id < CHIP_POLARIS10) {
		ret = atomctrl_get_voltage_evv_on_sclk(hwmgr, voltage_type, sclk, id, voltage);
		if (*voltage >= 2000 || *voltage == 0)
			*voltage = 1150;
	} else {
		ret = atomctrl_get_voltage_evv_on_sclk_ai(hwmgr, voltage_type, sclk, id, &vol);
		*voltage = (uint16_t)(vol / 100);
	}
	return ret;
}
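/*
 * Thermal interrupt handler shared by the legacy (VI) and SOC15 clients:
 * a software or hardware CTF (Critical Temperature Fault) triggers an
 * orderly shutdown to protect the hardware.
 */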
int phm_irq_process(struct amdgpu_device *adev,
			struct amdgpu_irq_src *source,
			struct amdgpu_iv_entry *entry)
{
	uint32_t client_id = entry->client_id;
	uint32_t src_id = entry->src_id;

	if (client_id == AMDGPU_IRQ_CLIENTID_LEGACY) {
		if (src_id == VISLANDS30_IV_SRCID_CG_TSS_THERMAL_LOW_TO_HIGH) {
			dev_emerg(adev->dev, "ERROR: GPU over temperature range (SW CTF) detected!\n");
			/*
			 * SW CTF just occurred.
			 * Try to do a graceful shutdown to prevent further damage.
			 */
			dev_emerg(adev->dev, "ERROR: System is going to shut down due to GPU SW CTF!\n");
			orderly_poweroff(true);
		} else if (src_id == VISLANDS30_IV_SRCID_CG_TSS_THERMAL_HIGH_TO_LOW) {
			dev_emerg(adev->dev, "ERROR: GPU under temperature range detected!\n");
		} else if (src_id == VISLANDS30_IV_SRCID_GPIO_19) {
			dev_emerg(adev->dev, "ERROR: GPU HW Critical Temperature Fault (aka CTF) detected!\n");
			/*
			 * HW CTF just occurred. Shutdown to prevent further damage.
			 */
			dev_emerg(adev->dev, "ERROR: System is going to shut down due to GPU HW CTF!\n");
			orderly_poweroff(true);
		}
	} else if (client_id == SOC15_IH_CLIENTID_THM) {
		if (src_id == THM_9_0__SRCID__THM_DIG_THERM_L2H) {
			dev_emerg(adev->dev, "ERROR: GPU over temperature range (SW CTF) detected!\n");
			/*
			 * SW CTF just occurred.
			 * Try to do a graceful shutdown to prevent further damage.
			 */
			dev_emerg(adev->dev, "ERROR: System is going to shut down due to GPU SW CTF!\n");
			orderly_poweroff(true);
		} else {
			dev_emerg(adev->dev, "ERROR: GPU under temperature range detected!\n");
		}
	} else if (client_id == SOC15_IH_CLIENTID_ROM_SMUIO) {
		dev_emerg(adev->dev, "ERROR: GPU HW Critical Temperature Fault (aka CTF) detected!\n");
		/*
		 * HW CTF just occurred. Shutdown to prevent further damage.
		 */
		dev_emerg(adev->dev, "ERROR: System is going to shut down due to GPU HW CTF!\n");
		orderly_poweroff(true);
	}

	return 0;
}
static const struct amdgpu_irq_src_funcs smu9_irq_funcs = {
	.process = phm_irq_process,
};
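/*
 * Hook the SMU9 thermal sources (low-to-high, high-to-low and the
 * GPIO_19 hardware CTF pin) up to phm_irq_process above.
 */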
int smu9_register_irq_handlers(struct pp_hwmgr *hwmgr)
{
	struct amdgpu_irq_src *source =
		kzalloc(sizeof(struct amdgpu_irq_src), GFP_KERNEL);

	if (!source)
		return -ENOMEM;

	source->funcs = &smu9_irq_funcs;

	amdgpu_irq_add_id((struct amdgpu_device *)(hwmgr->adev),
			SOC15_IH_CLIENTID_THM,
			THM_9_0__SRCID__THM_DIG_THERM_L2H,
			source);
	amdgpu_irq_add_id((struct amdgpu_device *)(hwmgr->adev),
			SOC15_IH_CLIENTID_THM,
			THM_9_0__SRCID__THM_DIG_THERM_H2L,
			source);

	/* Register CTF (GPIO_19) interrupt */
	amdgpu_irq_add_id((struct amdgpu_device *)(hwmgr->adev),
			SOC15_IH_CLIENTID_ROM_SMUIO,
			SMUIO_9_0__SRCID__SMUIO_GPIO19,
			source);

	return 0;
}
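/*
 * Return a pointer into the BIOS image for the given ATOM data table,
 * reporting its size and format/content revisions, or NULL when the
 * header cannot be parsed.
 */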
void *smu_atom_get_data_table(void *dev, uint32_t table, uint16_t *size,
			uint8_t *frev, uint8_t *crev)
{
	struct amdgpu_device *adev = dev;
	uint16_t data_start;

	if (amdgpu_atom_parse_data_header(
			adev->mode_info.atom_context, table, size,
			frev, crev, &data_start))
		return (uint8_t *)adev->mode_info.atom_context->bios +
			data_start;

	return NULL;
}
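/*
 * Field-by-field copy of a clock/voltage dependency table; the caller
 * must supply a destination sized for allowed_dep_table->count entries.
 */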
int smu_get_voltage_dependency_table_ppt_v1(
		const struct phm_ppt_v1_clock_voltage_dependency_table *allowed_dep_table,
		struct phm_ppt_v1_clock_voltage_dependency_table *dep_table)
{
	uint8_t i = 0;

	PP_ASSERT_WITH_CODE((0 != allowed_dep_table->count),
			"Voltage Lookup Table empty",
			return -EINVAL);

	dep_table->count = allowed_dep_table->count;
	for (i = 0; i < dep_table->count; i++) {
		dep_table->entries[i].clk = allowed_dep_table->entries[i].clk;
		dep_table->entries[i].vddInd = allowed_dep_table->entries[i].vddInd;
		dep_table->entries[i].vdd_offset = allowed_dep_table->entries[i].vdd_offset;
		dep_table->entries[i].vddc = allowed_dep_table->entries[i].vddc;
		dep_table->entries[i].vddgfx = allowed_dep_table->entries[i].vddgfx;
		dep_table->entries[i].vddci = allowed_dep_table->entries[i].vddci;
		dep_table->entries[i].mvdd = allowed_dep_table->entries[i].mvdd;
		dep_table->entries[i].phases = allowed_dep_table->entries[i].phases;
		dep_table->entries[i].cks_enable = allowed_dep_table->entries[i].cks_enable;
		dep_table->entries[i].cks_voffset = allowed_dep_table->entries[i].cks_voffset;
	}

	return 0;
}
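/*
 * Fill the SMU watermarks table from DM clock ranges: row 1 takes the
 * DMIF (DCEFCLK-based) sets and row 0 the MCIF (SOCCLK-based) sets,
 * with clocks converted from kHz to MHz and stored as le16.
 */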
int smu_set_watermarks_for_clocks_ranges(void *wt_table,
		struct dm_pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges)
{
	uint32_t i;
	struct watermarks *table = wt_table;

	if (!table || !wm_with_clock_ranges)
		return -EINVAL;

	if (wm_with_clock_ranges->num_wm_dmif_sets > 4 ||
	    wm_with_clock_ranges->num_wm_mcif_sets > 4)
		return -EINVAL;

	for (i = 0; i < wm_with_clock_ranges->num_wm_dmif_sets; i++) {
		table->WatermarkRow[1][i].MinClock =
			cpu_to_le16((uint16_t)
			(wm_with_clock_ranges->wm_dmif_clocks_ranges[i].wm_min_dcfclk_clk_in_khz /
			1000));
		table->WatermarkRow[1][i].MaxClock =
			cpu_to_le16((uint16_t)
			(wm_with_clock_ranges->wm_dmif_clocks_ranges[i].wm_max_dcfclk_clk_in_khz /
			1000));
		table->WatermarkRow[1][i].MinUclk =
			cpu_to_le16((uint16_t)
			(wm_with_clock_ranges->wm_dmif_clocks_ranges[i].wm_min_mem_clk_in_khz /
			1000));
		table->WatermarkRow[1][i].MaxUclk =
			cpu_to_le16((uint16_t)
			(wm_with_clock_ranges->wm_dmif_clocks_ranges[i].wm_max_mem_clk_in_khz /
			1000));
		table->WatermarkRow[1][i].WmSetting = (uint8_t)
			wm_with_clock_ranges->wm_dmif_clocks_ranges[i].wm_set_id;
	}

	for (i = 0; i < wm_with_clock_ranges->num_wm_mcif_sets; i++) {
		table->WatermarkRow[0][i].MinClock =
			cpu_to_le16((uint16_t)
			(wm_with_clock_ranges->wm_mcif_clocks_ranges[i].wm_min_socclk_clk_in_khz /
			1000));
		table->WatermarkRow[0][i].MaxClock =
			cpu_to_le16((uint16_t)
			(wm_with_clock_ranges->wm_mcif_clocks_ranges[i].wm_max_socclk_clk_in_khz /
			1000));
		table->WatermarkRow[0][i].MinUclk =
			cpu_to_le16((uint16_t)
			(wm_with_clock_ranges->wm_mcif_clocks_ranges[i].wm_min_mem_clk_in_khz /
			1000));
		table->WatermarkRow[0][i].MaxUclk =
			cpu_to_le16((uint16_t)
			(wm_with_clock_ranges->wm_mcif_clocks_ranges[i].wm_max_mem_clk_in_khz /
			1000));
		table->WatermarkRow[0][i].WmSetting = (uint8_t)
			wm_with_clock_ranges->wm_mcif_clocks_ranges[i].wm_set_id;
	}

	return 0;
}