/*
 * Copyright (c) 2012 Intel Corporation. All rights reserved.
 * Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved.
 * Copyright (c) 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
34 #include <linux/ctype.h>
35 #include <rdma/ib_sysfs.h>
40 static struct qib_pportdata *qib_get_pportdata_kobj(struct kobject *kobj)
43 struct ib_device *ibdev = ib_port_sysfs_get_ibdev_kobj(kobj, &port_num);
44 struct qib_devdata *dd = dd_from_ibdev(ibdev);
46 return &dd->pport[port_num - 1];
50 * Get/Set heartbeat enable. OR of 1=enabled, 2=auto
52 static ssize_t hrtbt_enable_show(struct ib_device *ibdev, u32 port_num,
53 struct ib_port_attribute *attr, char *buf)
55 struct qib_devdata *dd = dd_from_ibdev(ibdev);
56 struct qib_pportdata *ppd = &dd->pport[port_num - 1];
58 return sysfs_emit(buf, "%d\n", dd->f_get_ib_cfg(ppd, QIB_IB_CFG_HRTBT));
61 static ssize_t hrtbt_enable_store(struct ib_device *ibdev, u32 port_num,
62 struct ib_port_attribute *attr,
63 const char *buf, size_t count)
65 struct qib_devdata *dd = dd_from_ibdev(ibdev);
66 struct qib_pportdata *ppd = &dd->pport[port_num - 1];
70 ret = kstrtou16(buf, 0, &val);
72 qib_dev_err(dd, "attempt to set invalid Heartbeat enable\n");
77 * Set the "intentional" heartbeat enable per either of
78 * "Enable" and "Auto", as these are normally set together.
79 * This bit is consulted when leaving loopback mode,
80 * because entering loopback mode overrides it and automatically
83 ret = dd->f_set_ib_cfg(ppd, QIB_IB_CFG_HRTBT, val);
84 return ret < 0 ? ret : count;
86 static IB_PORT_ATTR_RW(hrtbt_enable);
88 static ssize_t loopback_store(struct ib_device *ibdev, u32 port_num,
89 struct ib_port_attribute *attr, const char *buf,
92 struct qib_devdata *dd = dd_from_ibdev(ibdev);
93 struct qib_pportdata *ppd = &dd->pport[port_num - 1];
96 r = dd->f_set_ib_loopback(ppd, buf);
102 static IB_PORT_ATTR_WO(loopback);
104 static ssize_t led_override_store(struct ib_device *ibdev, u32 port_num,
105 struct ib_port_attribute *attr,
106 const char *buf, size_t count)
108 struct qib_devdata *dd = dd_from_ibdev(ibdev);
109 struct qib_pportdata *ppd = &dd->pport[port_num - 1];
113 ret = kstrtou16(buf, 0, &val);
115 qib_dev_err(dd, "attempt to set invalid LED override\n");
119 qib_set_led_override(ppd, val);
122 static IB_PORT_ATTR_WO(led_override);
124 static ssize_t status_show(struct ib_device *ibdev, u32 port_num,
125 struct ib_port_attribute *attr, char *buf)
127 struct qib_devdata *dd = dd_from_ibdev(ibdev);
128 struct qib_pportdata *ppd = &dd->pport[port_num - 1];
133 return sysfs_emit(buf, "0x%llx\n", (unsigned long long)*(ppd->statusp));
135 static IB_PORT_ATTR_RO(status);
138 * For userland compatibility, these offsets must remain fixed.
139 * They are strings for QIB_STATUS_*
141 static const char * const qib_status_str[] = {
151 "Fatal_Hardware_Error",
155 static ssize_t status_str_show(struct ib_device *ibdev, u32 port_num,
156 struct ib_port_attribute *attr, char *buf)
158 struct qib_devdata *dd = dd_from_ibdev(ibdev);
159 struct qib_pportdata *ppd = &dd->pport[port_num - 1];
171 for (any = i = 0; s && qib_status_str[i]; i++) {
174 if (any && strlcat(buf, " ", PAGE_SIZE) >= PAGE_SIZE)
176 if (strlcat(buf, qib_status_str[i], PAGE_SIZE) >=
184 strlcat(buf, "\n", PAGE_SIZE);
191 static IB_PORT_ATTR_RO(status_str);
193 /* end of per-port functions */
195 static struct attribute *port_linkcontrol_attributes[] = {
196 &ib_port_attr_loopback.attr,
197 &ib_port_attr_led_override.attr,
198 &ib_port_attr_hrtbt_enable.attr,
199 &ib_port_attr_status.attr,
200 &ib_port_attr_status_str.attr,
204 static const struct attribute_group port_linkcontrol_group = {
205 .name = "linkcontrol",
206 .attrs = port_linkcontrol_attributes,
210 * Start of per-port congestion control structures and support code
214 * Congestion control table size followed by table entries
216 static ssize_t cc_table_bin_read(struct file *filp, struct kobject *kobj,
217 struct bin_attribute *bin_attr, char *buf,
218 loff_t pos, size_t count)
220 struct qib_pportdata *ppd = qib_get_pportdata_kobj(kobj);
223 if (!qib_cc_table_size || !ppd->ccti_entries_shadow)
226 ret = ppd->total_cct_entry * sizeof(struct ib_cc_table_entry_shadow)
232 if (count > ret - pos)
238 spin_lock(&ppd->cc_shadow_lock);
239 memcpy(buf, ppd->ccti_entries_shadow, count);
240 spin_unlock(&ppd->cc_shadow_lock);
244 static BIN_ATTR_RO(cc_table_bin, PAGE_SIZE);
247 * Congestion settings: port control, control map and an array of 16
248 * entries for the congestion entries - increase, timer, event log
249 * trigger threshold and the minimum injection rate delay.
251 static ssize_t cc_setting_bin_read(struct file *filp, struct kobject *kobj,
252 struct bin_attribute *bin_attr, char *buf,
253 loff_t pos, size_t count)
255 struct qib_pportdata *ppd = qib_get_pportdata_kobj(kobj);
258 if (!qib_cc_table_size || !ppd->congestion_entries_shadow)
261 ret = sizeof(struct ib_cc_congestion_setting_attr_shadow);
265 if (count > ret - pos)
271 spin_lock(&ppd->cc_shadow_lock);
272 memcpy(buf, ppd->congestion_entries_shadow, count);
273 spin_unlock(&ppd->cc_shadow_lock);
277 static BIN_ATTR_RO(cc_setting_bin, PAGE_SIZE);
279 static struct bin_attribute *port_ccmgta_attributes[] = {
280 &bin_attr_cc_setting_bin,
281 &bin_attr_cc_table_bin,
285 static umode_t qib_ccmgta_is_bin_visible(struct kobject *kobj,
286 struct bin_attribute *attr, int n)
288 struct qib_pportdata *ppd = qib_get_pportdata_kobj(kobj);
290 if (!qib_cc_table_size || !ppd->congestion_entries_shadow)
292 return attr->attr.mode;
295 static const struct attribute_group port_ccmgta_attribute_group = {
297 .is_bin_visible = qib_ccmgta_is_bin_visible,
298 .bin_attrs = port_ccmgta_attributes,
303 struct qib_sl2vl_attr {
304 struct ib_port_attribute attr;
308 static ssize_t sl2vl_attr_show(struct ib_device *ibdev, u32 port_num,
309 struct ib_port_attribute *attr, char *buf)
311 struct qib_sl2vl_attr *sattr =
312 container_of(attr, struct qib_sl2vl_attr, attr);
313 struct qib_devdata *dd = dd_from_ibdev(ibdev);
314 struct qib_ibport *qibp = &dd->pport[port_num - 1].ibport_data;
316 return sysfs_emit(buf, "%u\n", qibp->sl_to_vl[sattr->sl]);
319 #define QIB_SL2VL_ATTR(N) \
320 static struct qib_sl2vl_attr qib_sl2vl_attr_##N = { \
321 .attr = __ATTR(N, 0444, sl2vl_attr_show, NULL), \
342 static struct attribute *port_sl2vl_attributes[] = {
343 &qib_sl2vl_attr_0.attr.attr,
344 &qib_sl2vl_attr_1.attr.attr,
345 &qib_sl2vl_attr_2.attr.attr,
346 &qib_sl2vl_attr_3.attr.attr,
347 &qib_sl2vl_attr_4.attr.attr,
348 &qib_sl2vl_attr_5.attr.attr,
349 &qib_sl2vl_attr_6.attr.attr,
350 &qib_sl2vl_attr_7.attr.attr,
351 &qib_sl2vl_attr_8.attr.attr,
352 &qib_sl2vl_attr_9.attr.attr,
353 &qib_sl2vl_attr_10.attr.attr,
354 &qib_sl2vl_attr_11.attr.attr,
355 &qib_sl2vl_attr_12.attr.attr,
356 &qib_sl2vl_attr_13.attr.attr,
357 &qib_sl2vl_attr_14.attr.attr,
358 &qib_sl2vl_attr_15.attr.attr,
362 static const struct attribute_group port_sl2vl_group = {
364 .attrs = port_sl2vl_attributes,
369 /* Start diag_counters */
371 struct qib_diagc_attr {
372 struct ib_port_attribute attr;
376 static ssize_t diagc_attr_show(struct ib_device *ibdev, u32 port_num,
377 struct ib_port_attribute *attr, char *buf)
379 struct qib_diagc_attr *dattr =
380 container_of(attr, struct qib_diagc_attr, attr);
381 struct qib_devdata *dd = dd_from_ibdev(ibdev);
382 struct qib_ibport *qibp = &dd->pport[port_num - 1].ibport_data;
384 return sysfs_emit(buf, "%llu\n", *((u64 *)qibp + dattr->counter));
387 static ssize_t diagc_attr_store(struct ib_device *ibdev, u32 port_num,
388 struct ib_port_attribute *attr, const char *buf,
391 struct qib_diagc_attr *dattr =
392 container_of(attr, struct qib_diagc_attr, attr);
393 struct qib_devdata *dd = dd_from_ibdev(ibdev);
394 struct qib_ibport *qibp = &dd->pport[port_num - 1].ibport_data;
398 ret = kstrtou64(buf, 0, &val);
401 *((u64 *)qibp + dattr->counter) = val;
405 #define QIB_DIAGC_ATTR(N) \
406 static struct qib_diagc_attr qib_diagc_attr_##N = { \
407 .attr = __ATTR(N, 0664, diagc_attr_show, diagc_attr_store), \
408 .counter = &((struct qib_ibport *)0)->rvp.n_##N - (u64 *)0, \
411 QIB_DIAGC_ATTR(rc_resends);
412 QIB_DIAGC_ATTR(seq_naks);
413 QIB_DIAGC_ATTR(rdma_seq);
414 QIB_DIAGC_ATTR(rnr_naks);
415 QIB_DIAGC_ATTR(other_naks);
416 QIB_DIAGC_ATTR(rc_timeouts);
417 QIB_DIAGC_ATTR(loop_pkts);
418 QIB_DIAGC_ATTR(pkt_drops);
419 QIB_DIAGC_ATTR(dmawait);
420 QIB_DIAGC_ATTR(unaligned);
421 QIB_DIAGC_ATTR(rc_dupreq);
422 QIB_DIAGC_ATTR(rc_seqnak);
423 QIB_DIAGC_ATTR(rc_crwaits);
425 static u64 get_all_cpu_total(u64 __percpu *cntr)
430 for_each_possible_cpu(cpu)
431 counter += *per_cpu_ptr(cntr, cpu);
435 static ssize_t qib_store_per_cpu(struct qib_devdata *dd, const char *buf,
436 size_t count, u64 *zero, u64 cur)
441 ret = kstrtou32(buf, 0, &val);
445 qib_dev_err(dd, "Per CPU cntrs can only be zeroed");
452 static ssize_t rc_acks_show(struct ib_device *ibdev, u32 port_num,
453 struct ib_port_attribute *attr, char *buf)
455 struct qib_devdata *dd = dd_from_ibdev(ibdev);
456 struct qib_ibport *qibp = &dd->pport[port_num - 1].ibport_data;
458 return sysfs_emit(buf, "%llu\n",
459 get_all_cpu_total(qibp->rvp.rc_acks) -
460 qibp->rvp.z_rc_acks);
463 static ssize_t rc_acks_store(struct ib_device *ibdev, u32 port_num,
464 struct ib_port_attribute *attr, const char *buf,
467 struct qib_devdata *dd = dd_from_ibdev(ibdev);
468 struct qib_ibport *qibp = &dd->pport[port_num - 1].ibport_data;
470 return qib_store_per_cpu(dd, buf, count, &qibp->rvp.z_rc_acks,
471 get_all_cpu_total(qibp->rvp.rc_acks));
473 static IB_PORT_ATTR_RW(rc_acks);
475 static ssize_t rc_qacks_show(struct ib_device *ibdev, u32 port_num,
476 struct ib_port_attribute *attr, char *buf)
478 struct qib_devdata *dd = dd_from_ibdev(ibdev);
479 struct qib_ibport *qibp = &dd->pport[port_num - 1].ibport_data;
481 return sysfs_emit(buf, "%llu\n",
482 get_all_cpu_total(qibp->rvp.rc_qacks) -
483 qibp->rvp.z_rc_qacks);
486 static ssize_t rc_qacks_store(struct ib_device *ibdev, u32 port_num,
487 struct ib_port_attribute *attr, const char *buf,
490 struct qib_devdata *dd = dd_from_ibdev(ibdev);
491 struct qib_ibport *qibp = &dd->pport[port_num - 1].ibport_data;
493 return qib_store_per_cpu(dd, buf, count, &qibp->rvp.z_rc_qacks,
494 get_all_cpu_total(qibp->rvp.rc_qacks));
496 static IB_PORT_ATTR_RW(rc_qacks);
498 static ssize_t rc_delayed_comp_show(struct ib_device *ibdev, u32 port_num,
499 struct ib_port_attribute *attr, char *buf)
501 struct qib_devdata *dd = dd_from_ibdev(ibdev);
502 struct qib_ibport *qibp = &dd->pport[port_num - 1].ibport_data;
504 return sysfs_emit(buf, "%llu\n",
505 get_all_cpu_total(qibp->rvp.rc_delayed_comp) -
506 qibp->rvp.z_rc_delayed_comp);
509 static ssize_t rc_delayed_comp_store(struct ib_device *ibdev, u32 port_num,
510 struct ib_port_attribute *attr,
511 const char *buf, size_t count)
513 struct qib_devdata *dd = dd_from_ibdev(ibdev);
514 struct qib_ibport *qibp = &dd->pport[port_num - 1].ibport_data;
516 return qib_store_per_cpu(dd, buf, count, &qibp->rvp.z_rc_delayed_comp,
517 get_all_cpu_total(qibp->rvp.rc_delayed_comp));
519 static IB_PORT_ATTR_RW(rc_delayed_comp);
521 static struct attribute *port_diagc_attributes[] = {
522 &qib_diagc_attr_rc_resends.attr.attr,
523 &qib_diagc_attr_seq_naks.attr.attr,
524 &qib_diagc_attr_rdma_seq.attr.attr,
525 &qib_diagc_attr_rnr_naks.attr.attr,
526 &qib_diagc_attr_other_naks.attr.attr,
527 &qib_diagc_attr_rc_timeouts.attr.attr,
528 &qib_diagc_attr_loop_pkts.attr.attr,
529 &qib_diagc_attr_pkt_drops.attr.attr,
530 &qib_diagc_attr_dmawait.attr.attr,
531 &qib_diagc_attr_unaligned.attr.attr,
532 &qib_diagc_attr_rc_dupreq.attr.attr,
533 &qib_diagc_attr_rc_seqnak.attr.attr,
534 &qib_diagc_attr_rc_crwaits.attr.attr,
535 &ib_port_attr_rc_acks.attr,
536 &ib_port_attr_rc_qacks.attr,
537 &ib_port_attr_rc_delayed_comp.attr,
541 static const struct attribute_group port_diagc_group = {
542 .name = "linkcontrol",
543 .attrs = port_diagc_attributes,
546 /* End diag_counters */
548 const struct attribute_group *qib_attr_port_groups[] = {
549 &port_linkcontrol_group,
550 &port_ccmgta_attribute_group,
556 /* end of per-port file structures and support code */
559 * Start of per-unit (or driver, in some cases, but replicated
560 * per unit) functions (these get a device *)
562 static ssize_t hw_rev_show(struct device *device, struct device_attribute *attr,
565 struct qib_ibdev *dev =
566 rdma_device_to_drv_device(device, struct qib_ibdev, rdi.ibdev);
568 return sysfs_emit(buf, "%x\n", dd_from_dev(dev)->minrev);
570 static DEVICE_ATTR_RO(hw_rev);
572 static ssize_t hca_type_show(struct device *device,
573 struct device_attribute *attr, char *buf)
575 struct qib_ibdev *dev =
576 rdma_device_to_drv_device(device, struct qib_ibdev, rdi.ibdev);
577 struct qib_devdata *dd = dd_from_dev(dev);
581 return sysfs_emit(buf, "%s\n", dd->boardname);
583 static DEVICE_ATTR_RO(hca_type);
584 static DEVICE_ATTR(board_id, 0444, hca_type_show, NULL);
586 static ssize_t version_show(struct device *device,
587 struct device_attribute *attr, char *buf)
589 /* The string printed here is already newline-terminated. */
590 return sysfs_emit(buf, "%s", (char *)ib_qib_version);
592 static DEVICE_ATTR_RO(version);
594 static ssize_t boardversion_show(struct device *device,
595 struct device_attribute *attr, char *buf)
597 struct qib_ibdev *dev =
598 rdma_device_to_drv_device(device, struct qib_ibdev, rdi.ibdev);
599 struct qib_devdata *dd = dd_from_dev(dev);
601 /* The string printed here is already newline-terminated. */
602 return sysfs_emit(buf, "%s", dd->boardversion);
604 static DEVICE_ATTR_RO(boardversion);
606 static ssize_t localbus_info_show(struct device *device,
607 struct device_attribute *attr, char *buf)
609 struct qib_ibdev *dev =
610 rdma_device_to_drv_device(device, struct qib_ibdev, rdi.ibdev);
611 struct qib_devdata *dd = dd_from_dev(dev);
613 /* The string printed here is already newline-terminated. */
614 return sysfs_emit(buf, "%s", dd->lbus_info);
616 static DEVICE_ATTR_RO(localbus_info);
618 static ssize_t nctxts_show(struct device *device,
619 struct device_attribute *attr, char *buf)
621 struct qib_ibdev *dev =
622 rdma_device_to_drv_device(device, struct qib_ibdev, rdi.ibdev);
623 struct qib_devdata *dd = dd_from_dev(dev);
625 /* Return the number of user ports (contexts) available. */
626 /* The calculation below deals with a special case where
627 * cfgctxts is set to 1 on a single-port board. */
628 return sysfs_emit(buf, "%u\n",
629 (dd->first_user_ctxt > dd->cfgctxts) ?
631 (dd->cfgctxts - dd->first_user_ctxt));
633 static DEVICE_ATTR_RO(nctxts);
635 static ssize_t nfreectxts_show(struct device *device,
636 struct device_attribute *attr, char *buf)
638 struct qib_ibdev *dev =
639 rdma_device_to_drv_device(device, struct qib_ibdev, rdi.ibdev);
640 struct qib_devdata *dd = dd_from_dev(dev);
642 /* Return the number of free user ports (contexts) available. */
643 return sysfs_emit(buf, "%u\n", dd->freectxts);
645 static DEVICE_ATTR_RO(nfreectxts);
647 static ssize_t serial_show(struct device *device, struct device_attribute *attr,
650 struct qib_ibdev *dev =
651 rdma_device_to_drv_device(device, struct qib_ibdev, rdi.ibdev);
652 struct qib_devdata *dd = dd_from_dev(dev);
653 const u8 *end = memchr(dd->serial, 0, ARRAY_SIZE(dd->serial));
654 int size = end ? end - dd->serial : ARRAY_SIZE(dd->serial);
656 return sysfs_emit(buf, ".%*s\n", size, dd->serial);
658 static DEVICE_ATTR_RO(serial);
660 static ssize_t chip_reset_store(struct device *device,
661 struct device_attribute *attr, const char *buf,
664 struct qib_ibdev *dev =
665 rdma_device_to_drv_device(device, struct qib_ibdev, rdi.ibdev);
666 struct qib_devdata *dd = dd_from_dev(dev);
669 if (count < 5 || memcmp(buf, "reset", 5) || !dd->diag_client) {
674 ret = qib_reset_device(dd->unit);
676 return ret < 0 ? ret : count;
678 static DEVICE_ATTR_WO(chip_reset);
681 * Dump tempsense regs. in decimal, to ease shell-scripts.
683 static ssize_t tempsense_show(struct device *device,
684 struct device_attribute *attr, char *buf)
686 struct qib_ibdev *dev =
687 rdma_device_to_drv_device(device, struct qib_ibdev, rdi.ibdev);
688 struct qib_devdata *dd = dd_from_dev(dev);
692 for (i = 0; i < 8; i++) {
697 ret = dd->f_tempsense_rd(dd, i);
699 return ret; /* return error on bad read */
702 return sysfs_emit(buf, "%d %d %02X %02X %d %d\n",
703 (signed char)regvals[0],
704 (signed char)regvals[1],
707 (signed char)regvals[5],
708 (signed char)regvals[7]);
710 static DEVICE_ATTR_RO(tempsense);
713 * end of per-unit (or driver, in some cases, but replicated
714 * per unit) functions
717 /* start of per-unit file structures and support code */
718 static struct attribute *qib_attributes[] = {
719 &dev_attr_hw_rev.attr,
720 &dev_attr_hca_type.attr,
721 &dev_attr_board_id.attr,
722 &dev_attr_version.attr,
723 &dev_attr_nctxts.attr,
724 &dev_attr_nfreectxts.attr,
725 &dev_attr_serial.attr,
726 &dev_attr_boardversion.attr,
727 &dev_attr_tempsense.attr,
728 &dev_attr_localbus_info.attr,
729 &dev_attr_chip_reset.attr,
733 const struct attribute_group qib_attr_group = {
734 .attrs = qib_attributes,