// SPDX-License-Identifier: GPL-2.0

/*
 * Copyright 2016-2019 HabanaLabs, Ltd.
 * All Rights Reserved.
 */
8 #define pr_fmt(fmt) "habanalabs: " fmt
10 #include <uapi/misc/habanalabs.h>
11 #include "habanalabs.h"
13 #include <linux/kernel.h>
15 #include <linux/uaccess.h>
16 #include <linux/slab.h>
18 static u32 hl_debug_struct_size[HL_DEBUG_OP_TIMESTAMP + 1] = {
19 [HL_DEBUG_OP_ETR] = sizeof(struct hl_debug_params_etr),
20 [HL_DEBUG_OP_ETF] = sizeof(struct hl_debug_params_etf),
21 [HL_DEBUG_OP_STM] = sizeof(struct hl_debug_params_stm),
22 [HL_DEBUG_OP_FUNNEL] = 0,
23 [HL_DEBUG_OP_BMON] = sizeof(struct hl_debug_params_bmon),
24 [HL_DEBUG_OP_SPMU] = sizeof(struct hl_debug_params_spmu),
25 [HL_DEBUG_OP_TIMESTAMP] = 0
29 static int device_status_info(struct hl_device *hdev, struct hl_info_args *args)
31 struct hl_info_device_status dev_stat = {0};
32 u32 size = args->return_size;
33 void __user *out = (void __user *) (uintptr_t) args->return_pointer;
35 if ((!size) || (!out))
38 dev_stat.status = hl_device_status(hdev);
40 return copy_to_user(out, &dev_stat,
41 min((size_t)size, sizeof(dev_stat))) ? -EFAULT : 0;
44 static int hw_ip_info(struct hl_device *hdev, struct hl_info_args *args)
46 struct hl_info_hw_ip_info hw_ip = {0};
47 u32 size = args->return_size;
48 void __user *out = (void __user *) (uintptr_t) args->return_pointer;
49 struct asic_fixed_properties *prop = &hdev->asic_prop;
50 u64 sram_kmd_size, dram_kmd_size;
52 if ((!size) || (!out))
55 sram_kmd_size = (prop->sram_user_base_address -
56 prop->sram_base_address);
57 dram_kmd_size = (prop->dram_user_base_address -
58 prop->dram_base_address);
60 hw_ip.device_id = hdev->asic_funcs->get_pci_id(hdev);
61 hw_ip.sram_base_address = prop->sram_user_base_address;
62 hw_ip.dram_base_address =
63 hdev->mmu_enable && prop->dram_supports_virtual_memory ?
64 prop->dmmu.start_addr : prop->dram_user_base_address;
65 hw_ip.tpc_enabled_mask = prop->tpc_enabled_mask;
66 hw_ip.sram_size = prop->sram_size - sram_kmd_size;
70 DIV_ROUND_DOWN_ULL(prop->dram_size - dram_kmd_size,
71 prop->dram_page_size) *
74 hw_ip.dram_size = prop->dram_size - dram_kmd_size;
76 if (hw_ip.dram_size > PAGE_SIZE)
77 hw_ip.dram_enabled = 1;
78 hw_ip.dram_page_size = prop->dram_page_size;
79 hw_ip.num_of_events = prop->num_of_events;
81 memcpy(hw_ip.cpucp_version, prop->cpucp_info.cpucp_version,
82 min(VERSION_MAX_LEN, HL_INFO_VERSION_MAX_LEN));
84 memcpy(hw_ip.card_name, prop->cpucp_info.card_name,
85 min(CARD_NAME_MAX_LEN, HL_INFO_CARD_NAME_MAX_LEN));
87 hw_ip.cpld_version = le32_to_cpu(prop->cpucp_info.cpld_version);
88 hw_ip.module_id = le32_to_cpu(prop->cpucp_info.card_location);
90 hw_ip.psoc_pci_pll_nr = prop->psoc_pci_pll_nr;
91 hw_ip.psoc_pci_pll_nf = prop->psoc_pci_pll_nf;
92 hw_ip.psoc_pci_pll_od = prop->psoc_pci_pll_od;
93 hw_ip.psoc_pci_pll_div_factor = prop->psoc_pci_pll_div_factor;
95 hw_ip.first_available_interrupt_id =
96 prop->first_available_user_msix_interrupt;
97 return copy_to_user(out, &hw_ip,
98 min((size_t)size, sizeof(hw_ip))) ? -EFAULT : 0;
101 static int hw_events_info(struct hl_device *hdev, bool aggregate,
102 struct hl_info_args *args)
104 u32 size, max_size = args->return_size;
105 void __user *out = (void __user *) (uintptr_t) args->return_pointer;
108 if ((!max_size) || (!out))
111 arr = hdev->asic_funcs->get_events_stat(hdev, aggregate, &size);
113 return copy_to_user(out, arr, min(max_size, size)) ? -EFAULT : 0;
116 static int dram_usage_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
118 struct hl_device *hdev = hpriv->hdev;
119 struct hl_info_dram_usage dram_usage = {0};
120 u32 max_size = args->return_size;
121 void __user *out = (void __user *) (uintptr_t) args->return_pointer;
122 struct asic_fixed_properties *prop = &hdev->asic_prop;
125 if ((!max_size) || (!out))
128 dram_kmd_size = (prop->dram_user_base_address -
129 prop->dram_base_address);
130 dram_usage.dram_free_mem = (prop->dram_size - dram_kmd_size) -
131 atomic64_read(&hdev->dram_used_mem);
133 dram_usage.ctx_dram_mem =
134 atomic64_read(&hpriv->ctx->dram_phys_mem);
136 return copy_to_user(out, &dram_usage,
137 min((size_t) max_size, sizeof(dram_usage))) ? -EFAULT : 0;
140 static int hw_idle(struct hl_device *hdev, struct hl_info_args *args)
142 struct hl_info_hw_idle hw_idle = {0};
143 u32 max_size = args->return_size;
144 void __user *out = (void __user *) (uintptr_t) args->return_pointer;
146 if ((!max_size) || (!out))
149 hw_idle.is_idle = hdev->asic_funcs->is_device_idle(hdev,
150 hw_idle.busy_engines_mask_ext,
151 HL_BUSY_ENGINES_MASK_EXT_SIZE, NULL);
152 hw_idle.busy_engines_mask =
153 lower_32_bits(hw_idle.busy_engines_mask_ext[0]);
155 return copy_to_user(out, &hw_idle,
156 min((size_t) max_size, sizeof(hw_idle))) ? -EFAULT : 0;
159 static int debug_coresight(struct hl_device *hdev, struct hl_debug_args *args)
161 struct hl_debug_params *params;
162 void *input = NULL, *output = NULL;
165 params = kzalloc(sizeof(*params), GFP_KERNEL);
169 params->reg_idx = args->reg_idx;
170 params->enable = args->enable;
171 params->op = args->op;
173 if (args->input_ptr && args->input_size) {
174 input = kzalloc(hl_debug_struct_size[args->op], GFP_KERNEL);
180 if (copy_from_user(input, u64_to_user_ptr(args->input_ptr),
183 dev_err(hdev->dev, "failed to copy input debug data\n");
187 params->input = input;
190 if (args->output_ptr && args->output_size) {
191 output = kzalloc(args->output_size, GFP_KERNEL);
197 params->output = output;
198 params->output_size = args->output_size;
201 rc = hdev->asic_funcs->debug_coresight(hdev, params);
204 "debug coresight operation failed %d\n", rc);
208 if (output && copy_to_user((void __user *) (uintptr_t) args->output_ptr,
209 output, args->output_size)) {
210 dev_err(hdev->dev, "copy to user failed in debug ioctl\n");
224 static int device_utilization(struct hl_device *hdev, struct hl_info_args *args)
226 struct hl_info_device_utilization device_util = {0};
227 u32 max_size = args->return_size;
228 void __user *out = (void __user *) (uintptr_t) args->return_pointer;
230 if ((!max_size) || (!out))
233 if ((args->period_ms < 100) || (args->period_ms > 1000) ||
234 (args->period_ms % 100)) {
236 "period %u must be between 100 - 1000 and must be divisible by 100\n",
241 device_util.utilization = hl_device_utilization(hdev, args->period_ms);
243 return copy_to_user(out, &device_util,
244 min((size_t) max_size, sizeof(device_util))) ? -EFAULT : 0;
247 static int get_clk_rate(struct hl_device *hdev, struct hl_info_args *args)
249 struct hl_info_clk_rate clk_rate = {0};
250 u32 max_size = args->return_size;
251 void __user *out = (void __user *) (uintptr_t) args->return_pointer;
254 if ((!max_size) || (!out))
257 rc = hdev->asic_funcs->get_clk_rate(hdev, &clk_rate.cur_clk_rate_mhz,
258 &clk_rate.max_clk_rate_mhz);
262 return copy_to_user(out, &clk_rate,
263 min((size_t) max_size, sizeof(clk_rate))) ? -EFAULT : 0;
266 static int get_reset_count(struct hl_device *hdev, struct hl_info_args *args)
268 struct hl_info_reset_count reset_count = {0};
269 u32 max_size = args->return_size;
270 void __user *out = (void __user *) (uintptr_t) args->return_pointer;
272 if ((!max_size) || (!out))
275 reset_count.hard_reset_cnt = hdev->hard_reset_cnt;
276 reset_count.soft_reset_cnt = hdev->soft_reset_cnt;
278 return copy_to_user(out, &reset_count,
279 min((size_t) max_size, sizeof(reset_count))) ? -EFAULT : 0;
282 static int time_sync_info(struct hl_device *hdev, struct hl_info_args *args)
284 struct hl_info_time_sync time_sync = {0};
285 u32 max_size = args->return_size;
286 void __user *out = (void __user *) (uintptr_t) args->return_pointer;
288 if ((!max_size) || (!out))
291 time_sync.device_time = hdev->asic_funcs->get_device_time(hdev);
292 time_sync.host_time = ktime_get_raw_ns();
294 return copy_to_user(out, &time_sync,
295 min((size_t) max_size, sizeof(time_sync))) ? -EFAULT : 0;
298 static int pci_counters_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
300 struct hl_device *hdev = hpriv->hdev;
301 struct hl_info_pci_counters pci_counters = {0};
302 u32 max_size = args->return_size;
303 void __user *out = (void __user *) (uintptr_t) args->return_pointer;
306 if ((!max_size) || (!out))
309 rc = hl_fw_cpucp_pci_counters_get(hdev, &pci_counters);
313 return copy_to_user(out, &pci_counters,
314 min((size_t) max_size, sizeof(pci_counters))) ? -EFAULT : 0;
317 static int clk_throttle_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
319 struct hl_device *hdev = hpriv->hdev;
320 struct hl_info_clk_throttle clk_throttle = {0};
321 u32 max_size = args->return_size;
322 void __user *out = (void __user *) (uintptr_t) args->return_pointer;
324 if ((!max_size) || (!out))
327 clk_throttle.clk_throttling_reason = hdev->clk_throttling_reason;
329 return copy_to_user(out, &clk_throttle,
330 min((size_t) max_size, sizeof(clk_throttle))) ? -EFAULT : 0;
333 static int cs_counters_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
335 void __user *out = (void __user *) (uintptr_t) args->return_pointer;
336 struct hl_info_cs_counters cs_counters = {0};
337 struct hl_device *hdev = hpriv->hdev;
338 struct hl_cs_counters_atomic *cntr;
339 u32 max_size = args->return_size;
341 cntr = &hdev->aggregated_cs_counters;
343 if ((!max_size) || (!out))
346 cs_counters.total_out_of_mem_drop_cnt =
347 atomic64_read(&cntr->out_of_mem_drop_cnt);
348 cs_counters.total_parsing_drop_cnt =
349 atomic64_read(&cntr->parsing_drop_cnt);
350 cs_counters.total_queue_full_drop_cnt =
351 atomic64_read(&cntr->queue_full_drop_cnt);
352 cs_counters.total_device_in_reset_drop_cnt =
353 atomic64_read(&cntr->device_in_reset_drop_cnt);
354 cs_counters.total_max_cs_in_flight_drop_cnt =
355 atomic64_read(&cntr->max_cs_in_flight_drop_cnt);
356 cs_counters.total_validation_drop_cnt =
357 atomic64_read(&cntr->validation_drop_cnt);
360 cs_counters.ctx_out_of_mem_drop_cnt =
362 &hpriv->ctx->cs_counters.out_of_mem_drop_cnt);
363 cs_counters.ctx_parsing_drop_cnt =
365 &hpriv->ctx->cs_counters.parsing_drop_cnt);
366 cs_counters.ctx_queue_full_drop_cnt =
368 &hpriv->ctx->cs_counters.queue_full_drop_cnt);
369 cs_counters.ctx_device_in_reset_drop_cnt =
371 &hpriv->ctx->cs_counters.device_in_reset_drop_cnt);
372 cs_counters.ctx_max_cs_in_flight_drop_cnt =
374 &hpriv->ctx->cs_counters.max_cs_in_flight_drop_cnt);
375 cs_counters.ctx_validation_drop_cnt =
377 &hpriv->ctx->cs_counters.validation_drop_cnt);
380 return copy_to_user(out, &cs_counters,
381 min((size_t) max_size, sizeof(cs_counters))) ? -EFAULT : 0;
384 static int sync_manager_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
386 struct hl_device *hdev = hpriv->hdev;
387 struct asic_fixed_properties *prop = &hdev->asic_prop;
388 struct hl_info_sync_manager sm_info = {0};
389 u32 max_size = args->return_size;
390 void __user *out = (void __user *) (uintptr_t) args->return_pointer;
392 if ((!max_size) || (!out))
395 if (args->dcore_id >= HL_MAX_DCORES)
398 sm_info.first_available_sync_object =
399 prop->first_available_user_sob[args->dcore_id];
400 sm_info.first_available_monitor =
401 prop->first_available_user_mon[args->dcore_id];
402 sm_info.first_available_cq =
403 prop->first_available_cq[args->dcore_id];
405 return copy_to_user(out, &sm_info, min_t(size_t, (size_t) max_size,
406 sizeof(sm_info))) ? -EFAULT : 0;
409 static int total_energy_consumption_info(struct hl_fpriv *hpriv,
410 struct hl_info_args *args)
412 struct hl_device *hdev = hpriv->hdev;
413 struct hl_info_energy total_energy = {0};
414 u32 max_size = args->return_size;
415 void __user *out = (void __user *) (uintptr_t) args->return_pointer;
418 if ((!max_size) || (!out))
421 rc = hl_fw_cpucp_total_energy_get(hdev,
422 &total_energy.total_energy_consumption);
426 return copy_to_user(out, &total_energy,
427 min((size_t) max_size, sizeof(total_energy))) ? -EFAULT : 0;
430 static int pll_frequency_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
432 struct hl_device *hdev = hpriv->hdev;
433 struct hl_pll_frequency_info freq_info = { {0} };
434 u32 max_size = args->return_size;
435 void __user *out = (void __user *) (uintptr_t) args->return_pointer;
438 if ((!max_size) || (!out))
441 rc = hl_fw_cpucp_pll_info_get(hdev, args->pll_index, freq_info.output);
445 return copy_to_user(out, &freq_info,
446 min((size_t) max_size, sizeof(freq_info))) ? -EFAULT : 0;
449 static int _hl_info_ioctl(struct hl_fpriv *hpriv, void *data,
452 enum hl_device_status status;
453 struct hl_info_args *args = data;
454 struct hl_device *hdev = hpriv->hdev;
459 * Information is returned for the following opcodes even if the device
460 * is disabled or in reset.
463 case HL_INFO_HW_IP_INFO:
464 return hw_ip_info(hdev, args);
466 case HL_INFO_DEVICE_STATUS:
467 return device_status_info(hdev, args);
469 case HL_INFO_RESET_COUNT:
470 return get_reset_count(hdev, args);
476 if (!hl_device_operational(hdev, &status)) {
477 dev_warn_ratelimited(dev,
478 "Device is %s. Can't execute INFO IOCTL\n",
479 hdev->status[status]);
484 case HL_INFO_HW_EVENTS:
485 rc = hw_events_info(hdev, false, args);
488 case HL_INFO_DRAM_USAGE:
489 rc = dram_usage_info(hpriv, args);
492 case HL_INFO_HW_IDLE:
493 rc = hw_idle(hdev, args);
496 case HL_INFO_DEVICE_UTILIZATION:
497 rc = device_utilization(hdev, args);
500 case HL_INFO_HW_EVENTS_AGGREGATE:
501 rc = hw_events_info(hdev, true, args);
504 case HL_INFO_CLK_RATE:
505 rc = get_clk_rate(hdev, args);
508 case HL_INFO_TIME_SYNC:
509 return time_sync_info(hdev, args);
511 case HL_INFO_CS_COUNTERS:
512 return cs_counters_info(hpriv, args);
514 case HL_INFO_PCI_COUNTERS:
515 return pci_counters_info(hpriv, args);
517 case HL_INFO_CLK_THROTTLE_REASON:
518 return clk_throttle_info(hpriv, args);
520 case HL_INFO_SYNC_MANAGER:
521 return sync_manager_info(hpriv, args);
523 case HL_INFO_TOTAL_ENERGY:
524 return total_energy_consumption_info(hpriv, args);
526 case HL_INFO_PLL_FREQUENCY:
527 return pll_frequency_info(hpriv, args);
530 dev_err(dev, "Invalid request %d\n", args->op);
538 static int hl_info_ioctl(struct hl_fpriv *hpriv, void *data)
540 return _hl_info_ioctl(hpriv, data, hpriv->hdev->dev);
543 static int hl_info_ioctl_control(struct hl_fpriv *hpriv, void *data)
545 return _hl_info_ioctl(hpriv, data, hpriv->hdev->dev_ctrl);
548 static int hl_debug_ioctl(struct hl_fpriv *hpriv, void *data)
550 struct hl_debug_args *args = data;
551 struct hl_device *hdev = hpriv->hdev;
552 enum hl_device_status status;
556 if (!hl_device_operational(hdev, &status)) {
557 dev_warn_ratelimited(hdev->dev,
558 "Device is %s. Can't execute DEBUG IOCTL\n",
559 hdev->status[status]);
564 case HL_DEBUG_OP_ETR:
565 case HL_DEBUG_OP_ETF:
566 case HL_DEBUG_OP_STM:
567 case HL_DEBUG_OP_FUNNEL:
568 case HL_DEBUG_OP_BMON:
569 case HL_DEBUG_OP_SPMU:
570 case HL_DEBUG_OP_TIMESTAMP:
571 if (!hdev->in_debug) {
572 dev_err_ratelimited(hdev->dev,
573 "Rejecting debug configuration request because device not in debug mode\n");
577 min(args->input_size, hl_debug_struct_size[args->op]);
578 rc = debug_coresight(hdev, args);
580 case HL_DEBUG_OP_SET_MODE:
581 rc = hl_device_set_debug_mode(hdev, (bool) args->enable);
584 dev_err(hdev->dev, "Invalid request %d\n", args->op);
/* Build an hl_ioctl_desc table entry, indexed by the ioctl's NR field */
#define HL_IOCTL_DEF(ioctl, _func) \
	[_IOC_NR(ioctl)] = {.cmd = ioctl, .func = _func}
595 static const struct hl_ioctl_desc hl_ioctls[] = {
596 HL_IOCTL_DEF(HL_IOCTL_INFO, hl_info_ioctl),
597 HL_IOCTL_DEF(HL_IOCTL_CB, hl_cb_ioctl),
598 HL_IOCTL_DEF(HL_IOCTL_CS, hl_cs_ioctl),
599 HL_IOCTL_DEF(HL_IOCTL_WAIT_CS, hl_cs_wait_ioctl),
600 HL_IOCTL_DEF(HL_IOCTL_MEMORY, hl_mem_ioctl),
601 HL_IOCTL_DEF(HL_IOCTL_DEBUG, hl_debug_ioctl)
604 static const struct hl_ioctl_desc hl_ioctls_control[] = {
605 HL_IOCTL_DEF(HL_IOCTL_INFO, hl_info_ioctl_control)
608 static long _hl_ioctl(struct file *filep, unsigned int cmd, unsigned long arg,
609 const struct hl_ioctl_desc *ioctl, struct device *dev)
611 struct hl_fpriv *hpriv = filep->private_data;
612 struct hl_device *hdev = hpriv->hdev;
613 unsigned int nr = _IOC_NR(cmd);
614 char stack_kdata[128] = {0};
616 unsigned int usize, asize;
621 if (hdev->hard_reset_pending) {
622 dev_crit_ratelimited(dev,
623 "Device HARD reset pending! Please close FD\n");
627 /* Do not trust userspace, use our own definition */
630 if (unlikely(!func)) {
631 dev_dbg(dev, "no function\n");
636 hl_size = _IOC_SIZE(ioctl->cmd);
637 usize = asize = _IOC_SIZE(cmd);
643 if (cmd & (IOC_IN | IOC_OUT)) {
644 if (asize <= sizeof(stack_kdata)) {
647 kdata = kzalloc(asize, GFP_KERNEL);
656 if (copy_from_user(kdata, (void __user *)arg, usize)) {
660 } else if (cmd & IOC_OUT) {
661 memset(kdata, 0, usize);
664 retcode = func(hpriv, kdata);
666 if ((cmd & IOC_OUT) && copy_to_user((void __user *)arg, kdata, usize))
671 dev_dbg(dev, "error in ioctl: pid=%d, cmd=0x%02x, nr=0x%02x\n",
672 task_pid_nr(current), cmd, nr);
674 if (kdata != stack_kdata)
680 long hl_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
682 struct hl_fpriv *hpriv = filep->private_data;
683 struct hl_device *hdev = hpriv->hdev;
684 const struct hl_ioctl_desc *ioctl = NULL;
685 unsigned int nr = _IOC_NR(cmd);
688 pr_err_ratelimited("Sending ioctl after device was removed! Please close FD\n");
692 if ((nr >= HL_COMMAND_START) && (nr < HL_COMMAND_END)) {
693 ioctl = &hl_ioctls[nr];
695 dev_err(hdev->dev, "invalid ioctl: pid=%d, nr=0x%02x\n",
696 task_pid_nr(current), nr);
700 return _hl_ioctl(filep, cmd, arg, ioctl, hdev->dev);
703 long hl_ioctl_control(struct file *filep, unsigned int cmd, unsigned long arg)
705 struct hl_fpriv *hpriv = filep->private_data;
706 struct hl_device *hdev = hpriv->hdev;
707 const struct hl_ioctl_desc *ioctl = NULL;
708 unsigned int nr = _IOC_NR(cmd);
711 pr_err_ratelimited("Sending ioctl after device was removed! Please close FD\n");
715 if (nr == _IOC_NR(HL_IOCTL_INFO)) {
716 ioctl = &hl_ioctls_control[nr];
718 dev_err(hdev->dev_ctrl, "invalid ioctl: pid=%d, nr=0x%02x\n",
719 task_pid_nr(current), nr);
723 return _hl_ioctl(filep, cmd, arg, ioctl, hdev->dev_ctrl);