2 * Copyright (c) 2012 Intel Corporation. All rights reserved.
3 * Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved.
4 * Copyright (c) 2006 PathScale, Inc. All rights reserved.
6 * This software is available to you under a choice of one of two
7 * licenses. You may choose to be licensed under the terms of the GNU
8 * General Public License (GPL) Version 2, available from the file
9 * COPYING in the main directory of this source tree, or the
10 * OpenIB.org BSD license below:
12 * Redistribution and use in source and binary forms, with or
13 * without modification, are permitted provided that the following
16 * - Redistributions of source code must retain the above
17 * copyright notice, this list of conditions and the following
20 * - Redistributions in binary form must reproduce the above
21 * copyright notice, this list of conditions and the following
22 * disclaimer in the documentation and/or other materials
23 * provided with the distribution.
25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
34 #include <linux/ctype.h>
39 /* start of per-port functions */
41 * Get/Set heartbeat enable. OR of 1=enabled, 2=auto
43 static ssize_t show_hrtbt_enb(struct qib_pportdata *ppd, char *buf)
45 struct qib_devdata *dd = ppd->dd;
47 return sysfs_emit(buf, "%d\n", dd->f_get_ib_cfg(ppd, QIB_IB_CFG_HRTBT));
50 static ssize_t store_hrtbt_enb(struct qib_pportdata *ppd, const char *buf,
53 struct qib_devdata *dd = ppd->dd;
57 ret = kstrtou16(buf, 0, &val);
59 qib_dev_err(dd, "attempt to set invalid Heartbeat enable\n");
64 * Set the "intentional" heartbeat enable per either of
65 * "Enable" and "Auto", as these are normally set together.
66 * This bit is consulted when leaving loopback mode,
67 * because entering loopback mode overrides it and automatically
70 ret = dd->f_set_ib_cfg(ppd, QIB_IB_CFG_HRTBT, val);
71 return ret < 0 ? ret : count;
74 static ssize_t store_loopback(struct qib_pportdata *ppd, const char *buf,
77 struct qib_devdata *dd = ppd->dd;
80 r = dd->f_set_ib_loopback(ppd, buf);
87 static ssize_t store_led_override(struct qib_pportdata *ppd, const char *buf,
90 struct qib_devdata *dd = ppd->dd;
94 ret = kstrtou16(buf, 0, &val);
96 qib_dev_err(dd, "attempt to set invalid LED override\n");
100 qib_set_led_override(ppd, val);
104 static ssize_t show_status(struct qib_pportdata *ppd, char *buf)
109 return sysfs_emit(buf, "0x%llx\n", (unsigned long long)*(ppd->statusp));
113 * For userland compatibility, these offsets must remain fixed.
114 * They are strings for QIB_STATUS_*
116 static const char * const qib_status_str[] = {
126 "Fatal_Hardware_Error",
130 static ssize_t show_status_str(struct qib_pportdata *ppd, char *buf)
143 for (any = i = 0; s && qib_status_str[i]; i++) {
146 if (any && strlcat(buf, " ", PAGE_SIZE) >= PAGE_SIZE)
148 if (strlcat(buf, qib_status_str[i], PAGE_SIZE) >=
156 strlcat(buf, "\n", PAGE_SIZE);
164 /* end of per-port functions */
167 * Start of per-port file structures and support code
168 * Because we are fitting into other infrastructure, we have to supply the
169 * full set of kobject/sysfs_ops structures and routines.
171 #define QIB_PORT_ATTR(name, mode, show, store) \
172 static struct qib_port_attr qib_port_attr_##name = \
173 __ATTR(name, mode, show, store)
175 struct qib_port_attr {
176 struct attribute attr;
177 ssize_t (*show)(struct qib_pportdata *, char *);
178 ssize_t (*store)(struct qib_pportdata *, const char *, size_t);
181 QIB_PORT_ATTR(loopback, S_IWUSR, NULL, store_loopback);
182 QIB_PORT_ATTR(led_override, S_IWUSR, NULL, store_led_override);
183 QIB_PORT_ATTR(hrtbt_enable, S_IWUSR | S_IRUGO, show_hrtbt_enb,
185 QIB_PORT_ATTR(status, S_IRUGO, show_status, NULL);
186 QIB_PORT_ATTR(status_str, S_IRUGO, show_status_str, NULL);
188 static struct attribute *port_default_attributes[] = {
189 &qib_port_attr_loopback.attr,
190 &qib_port_attr_led_override.attr,
191 &qib_port_attr_hrtbt_enable.attr,
192 &qib_port_attr_status.attr,
193 &qib_port_attr_status_str.attr,
198 * Start of per-port congestion control structures and support code
202 * Congestion control table size followed by table entries
204 static ssize_t read_cc_table_bin(struct file *filp, struct kobject *kobj,
205 struct bin_attribute *bin_attr,
206 char *buf, loff_t pos, size_t count)
209 struct qib_pportdata *ppd =
210 container_of(kobj, struct qib_pportdata, pport_cc_kobj);
212 if (!qib_cc_table_size || !ppd->ccti_entries_shadow)
215 ret = ppd->total_cct_entry * sizeof(struct ib_cc_table_entry_shadow)
221 if (count > ret - pos)
227 spin_lock(&ppd->cc_shadow_lock);
228 memcpy(buf, ppd->ccti_entries_shadow, count);
229 spin_unlock(&ppd->cc_shadow_lock);
234 static void qib_port_release(struct kobject *kobj)
236 /* nothing to do since memory is freed by qib_free_devdata() */
239 static struct kobj_type qib_port_cc_ktype = {
240 .release = qib_port_release,
243 static const struct bin_attribute cc_table_bin_attr = {
244 .attr = {.name = "cc_table_bin", .mode = 0444},
245 .read = read_cc_table_bin,
250 * Congestion settings: port control, control map and an array of 16
251 * entries for the congestion entries - increase, timer, event log
252 * trigger threshold and the minimum injection rate delay.
254 static ssize_t read_cc_setting_bin(struct file *filp, struct kobject *kobj,
255 struct bin_attribute *bin_attr,
256 char *buf, loff_t pos, size_t count)
259 struct qib_pportdata *ppd =
260 container_of(kobj, struct qib_pportdata, pport_cc_kobj);
262 if (!qib_cc_table_size || !ppd->congestion_entries_shadow)
265 ret = sizeof(struct ib_cc_congestion_setting_attr_shadow);
269 if (count > ret - pos)
275 spin_lock(&ppd->cc_shadow_lock);
276 memcpy(buf, ppd->congestion_entries_shadow, count);
277 spin_unlock(&ppd->cc_shadow_lock);
282 static const struct bin_attribute cc_setting_bin_attr = {
283 .attr = {.name = "cc_settings_bin", .mode = 0444},
284 .read = read_cc_setting_bin,
289 static ssize_t qib_portattr_show(struct kobject *kobj,
290 struct attribute *attr, char *buf)
292 struct qib_port_attr *pattr =
293 container_of(attr, struct qib_port_attr, attr);
294 struct qib_pportdata *ppd =
295 container_of(kobj, struct qib_pportdata, pport_kobj);
300 return pattr->show(ppd, buf);
303 static ssize_t qib_portattr_store(struct kobject *kobj,
304 struct attribute *attr, const char *buf, size_t len)
306 struct qib_port_attr *pattr =
307 container_of(attr, struct qib_port_attr, attr);
308 struct qib_pportdata *ppd =
309 container_of(kobj, struct qib_pportdata, pport_kobj);
314 return pattr->store(ppd, buf, len);
318 static const struct sysfs_ops qib_port_ops = {
319 .show = qib_portattr_show,
320 .store = qib_portattr_store,
323 static struct kobj_type qib_port_ktype = {
324 .release = qib_port_release,
325 .sysfs_ops = &qib_port_ops,
326 .default_attrs = port_default_attributes
331 #define QIB_SL2VL_ATTR(N) \
332 static struct qib_sl2vl_attr qib_sl2vl_attr_##N = { \
333 .attr = { .name = __stringify(N), .mode = 0444 }, \
337 struct qib_sl2vl_attr {
338 struct attribute attr;
359 static struct attribute *sl2vl_default_attributes[] = {
360 &qib_sl2vl_attr_0.attr,
361 &qib_sl2vl_attr_1.attr,
362 &qib_sl2vl_attr_2.attr,
363 &qib_sl2vl_attr_3.attr,
364 &qib_sl2vl_attr_4.attr,
365 &qib_sl2vl_attr_5.attr,
366 &qib_sl2vl_attr_6.attr,
367 &qib_sl2vl_attr_7.attr,
368 &qib_sl2vl_attr_8.attr,
369 &qib_sl2vl_attr_9.attr,
370 &qib_sl2vl_attr_10.attr,
371 &qib_sl2vl_attr_11.attr,
372 &qib_sl2vl_attr_12.attr,
373 &qib_sl2vl_attr_13.attr,
374 &qib_sl2vl_attr_14.attr,
375 &qib_sl2vl_attr_15.attr,
379 static ssize_t sl2vl_attr_show(struct kobject *kobj, struct attribute *attr,
382 struct qib_sl2vl_attr *sattr =
383 container_of(attr, struct qib_sl2vl_attr, attr);
384 struct qib_pportdata *ppd =
385 container_of(kobj, struct qib_pportdata, sl2vl_kobj);
386 struct qib_ibport *qibp = &ppd->ibport_data;
388 return sysfs_emit(buf, "%u\n", qibp->sl_to_vl[sattr->sl]);
391 static const struct sysfs_ops qib_sl2vl_ops = {
392 .show = sl2vl_attr_show,
395 static struct kobj_type qib_sl2vl_ktype = {
396 .release = qib_port_release,
397 .sysfs_ops = &qib_sl2vl_ops,
398 .default_attrs = sl2vl_default_attributes
403 /* Start diag_counters */
405 #define QIB_DIAGC_ATTR(N) \
406 static struct qib_diagc_attr qib_diagc_attr_##N = { \
407 .attr = { .name = __stringify(N), .mode = 0664 }, \
408 .counter = offsetof(struct qib_ibport, rvp.n_##N) \
411 #define QIB_DIAGC_ATTR_PER_CPU(N) \
412 static struct qib_diagc_attr qib_diagc_attr_##N = { \
413 .attr = { .name = __stringify(N), .mode = 0664 }, \
414 .counter = offsetof(struct qib_ibport, rvp.z_##N) \
417 struct qib_diagc_attr {
418 struct attribute attr;
QIB_DIAGC_ATTR_PER_CPU(rc_acks);
QIB_DIAGC_ATTR_PER_CPU(rc_qacks);
QIB_DIAGC_ATTR_PER_CPU(rc_delayed_comp);

QIB_DIAGC_ATTR(rc_resends);
QIB_DIAGC_ATTR(seq_naks);
QIB_DIAGC_ATTR(rdma_seq);
QIB_DIAGC_ATTR(rnr_naks);
QIB_DIAGC_ATTR(other_naks);
QIB_DIAGC_ATTR(rc_timeouts);
QIB_DIAGC_ATTR(loop_pkts);
QIB_DIAGC_ATTR(pkt_drops);
QIB_DIAGC_ATTR(dmawait);
QIB_DIAGC_ATTR(unaligned);
QIB_DIAGC_ATTR(rc_dupreq);
QIB_DIAGC_ATTR(rc_seqnak);
QIB_DIAGC_ATTR(rc_crwaits);
440 static struct attribute *diagc_default_attributes[] = {
441 &qib_diagc_attr_rc_resends.attr,
442 &qib_diagc_attr_rc_acks.attr,
443 &qib_diagc_attr_rc_qacks.attr,
444 &qib_diagc_attr_rc_delayed_comp.attr,
445 &qib_diagc_attr_seq_naks.attr,
446 &qib_diagc_attr_rdma_seq.attr,
447 &qib_diagc_attr_rnr_naks.attr,
448 &qib_diagc_attr_other_naks.attr,
449 &qib_diagc_attr_rc_timeouts.attr,
450 &qib_diagc_attr_loop_pkts.attr,
451 &qib_diagc_attr_pkt_drops.attr,
452 &qib_diagc_attr_dmawait.attr,
453 &qib_diagc_attr_unaligned.attr,
454 &qib_diagc_attr_rc_dupreq.attr,
455 &qib_diagc_attr_rc_seqnak.attr,
456 &qib_diagc_attr_rc_crwaits.attr,
460 static u64 get_all_cpu_total(u64 __percpu *cntr)
465 for_each_possible_cpu(cpu)
466 counter += *per_cpu_ptr(cntr, cpu);
470 #define def_write_per_cpu(cntr) \
471 static void write_per_cpu_##cntr(struct qib_pportdata *ppd, u32 data) \
473 struct qib_devdata *dd = ppd->dd; \
474 struct qib_ibport *qibp = &ppd->ibport_data; \
475 /* A write can only zero the counter */ \
477 qibp->rvp.z_##cntr = get_all_cpu_total(qibp->rvp.cntr); \
479 qib_dev_err(dd, "Per CPU cntrs can only be zeroed"); \
482 def_write_per_cpu(rc_acks)
483 def_write_per_cpu(rc_qacks)
484 def_write_per_cpu(rc_delayed_comp)
486 #define READ_PER_CPU_CNTR(cntr) (get_all_cpu_total(qibp->rvp.cntr) - \
489 static ssize_t diagc_attr_show(struct kobject *kobj, struct attribute *attr,
492 struct qib_diagc_attr *dattr =
493 container_of(attr, struct qib_diagc_attr, attr);
494 struct qib_pportdata *ppd =
495 container_of(kobj, struct qib_pportdata, diagc_kobj);
496 struct qib_ibport *qibp = &ppd->ibport_data;
499 if (!strncmp(dattr->attr.name, "rc_acks", 7))
500 val = READ_PER_CPU_CNTR(rc_acks);
501 else if (!strncmp(dattr->attr.name, "rc_qacks", 8))
502 val = READ_PER_CPU_CNTR(rc_qacks);
503 else if (!strncmp(dattr->attr.name, "rc_delayed_comp", 15))
504 val = READ_PER_CPU_CNTR(rc_delayed_comp);
506 val = *(u32 *)((char *)qibp + dattr->counter);
508 return sysfs_emit(buf, "%llu\n", val);
511 static ssize_t diagc_attr_store(struct kobject *kobj, struct attribute *attr,
512 const char *buf, size_t size)
514 struct qib_diagc_attr *dattr =
515 container_of(attr, struct qib_diagc_attr, attr);
516 struct qib_pportdata *ppd =
517 container_of(kobj, struct qib_pportdata, diagc_kobj);
518 struct qib_ibport *qibp = &ppd->ibport_data;
522 ret = kstrtou32(buf, 0, &val);
526 if (!strncmp(dattr->attr.name, "rc_acks", 7))
527 write_per_cpu_rc_acks(ppd, val);
528 else if (!strncmp(dattr->attr.name, "rc_qacks", 8))
529 write_per_cpu_rc_qacks(ppd, val);
530 else if (!strncmp(dattr->attr.name, "rc_delayed_comp", 15))
531 write_per_cpu_rc_delayed_comp(ppd, val);
533 *(u32 *)((char *)qibp + dattr->counter) = val;
537 static const struct sysfs_ops qib_diagc_ops = {
538 .show = diagc_attr_show,
539 .store = diagc_attr_store,
542 static struct kobj_type qib_diagc_ktype = {
543 .release = qib_port_release,
544 .sysfs_ops = &qib_diagc_ops,
545 .default_attrs = diagc_default_attributes
548 /* End diag_counters */
550 /* end of per-port file structures and support code */
553 * Start of per-unit (or driver, in some cases, but replicated
554 * per unit) functions (these get a device *)
556 static ssize_t hw_rev_show(struct device *device, struct device_attribute *attr,
559 struct qib_ibdev *dev =
560 rdma_device_to_drv_device(device, struct qib_ibdev, rdi.ibdev);
562 return sysfs_emit(buf, "%x\n", dd_from_dev(dev)->minrev);
564 static DEVICE_ATTR_RO(hw_rev);
566 static ssize_t hca_type_show(struct device *device,
567 struct device_attribute *attr, char *buf)
569 struct qib_ibdev *dev =
570 rdma_device_to_drv_device(device, struct qib_ibdev, rdi.ibdev);
571 struct qib_devdata *dd = dd_from_dev(dev);
575 return sysfs_emit(buf, "%s\n", dd->boardname);
577 static DEVICE_ATTR_RO(hca_type);
578 static DEVICE_ATTR(board_id, 0444, hca_type_show, NULL);
580 static ssize_t version_show(struct device *device,
581 struct device_attribute *attr, char *buf)
583 /* The string printed here is already newline-terminated. */
584 return sysfs_emit(buf, "%s", (char *)ib_qib_version);
586 static DEVICE_ATTR_RO(version);
588 static ssize_t boardversion_show(struct device *device,
589 struct device_attribute *attr, char *buf)
591 struct qib_ibdev *dev =
592 rdma_device_to_drv_device(device, struct qib_ibdev, rdi.ibdev);
593 struct qib_devdata *dd = dd_from_dev(dev);
595 /* The string printed here is already newline-terminated. */
596 return sysfs_emit(buf, "%s", dd->boardversion);
598 static DEVICE_ATTR_RO(boardversion);
600 static ssize_t localbus_info_show(struct device *device,
601 struct device_attribute *attr, char *buf)
603 struct qib_ibdev *dev =
604 rdma_device_to_drv_device(device, struct qib_ibdev, rdi.ibdev);
605 struct qib_devdata *dd = dd_from_dev(dev);
607 /* The string printed here is already newline-terminated. */
608 return sysfs_emit(buf, "%s", dd->lbus_info);
610 static DEVICE_ATTR_RO(localbus_info);
612 static ssize_t nctxts_show(struct device *device,
613 struct device_attribute *attr, char *buf)
615 struct qib_ibdev *dev =
616 rdma_device_to_drv_device(device, struct qib_ibdev, rdi.ibdev);
617 struct qib_devdata *dd = dd_from_dev(dev);
619 /* Return the number of user ports (contexts) available. */
620 /* The calculation below deals with a special case where
621 * cfgctxts is set to 1 on a single-port board. */
622 return sysfs_emit(buf, "%u\n",
623 (dd->first_user_ctxt > dd->cfgctxts) ?
625 (dd->cfgctxts - dd->first_user_ctxt));
627 static DEVICE_ATTR_RO(nctxts);
629 static ssize_t nfreectxts_show(struct device *device,
630 struct device_attribute *attr, char *buf)
632 struct qib_ibdev *dev =
633 rdma_device_to_drv_device(device, struct qib_ibdev, rdi.ibdev);
634 struct qib_devdata *dd = dd_from_dev(dev);
636 /* Return the number of free user ports (contexts) available. */
637 return sysfs_emit(buf, "%u\n", dd->freectxts);
639 static DEVICE_ATTR_RO(nfreectxts);
641 static ssize_t serial_show(struct device *device, struct device_attribute *attr,
644 struct qib_ibdev *dev =
645 rdma_device_to_drv_device(device, struct qib_ibdev, rdi.ibdev);
646 struct qib_devdata *dd = dd_from_dev(dev);
647 const u8 *end = memchr(dd->serial, 0, ARRAY_SIZE(dd->serial));
648 int size = end ? end - dd->serial : ARRAY_SIZE(dd->serial);
650 return sysfs_emit(buf, ".%*s\n", size, dd->serial);
652 static DEVICE_ATTR_RO(serial);
654 static ssize_t chip_reset_store(struct device *device,
655 struct device_attribute *attr, const char *buf,
658 struct qib_ibdev *dev =
659 rdma_device_to_drv_device(device, struct qib_ibdev, rdi.ibdev);
660 struct qib_devdata *dd = dd_from_dev(dev);
663 if (count < 5 || memcmp(buf, "reset", 5) || !dd->diag_client) {
668 ret = qib_reset_device(dd->unit);
670 return ret < 0 ? ret : count;
672 static DEVICE_ATTR_WO(chip_reset);
675 * Dump tempsense regs. in decimal, to ease shell-scripts.
677 static ssize_t tempsense_show(struct device *device,
678 struct device_attribute *attr, char *buf)
680 struct qib_ibdev *dev =
681 rdma_device_to_drv_device(device, struct qib_ibdev, rdi.ibdev);
682 struct qib_devdata *dd = dd_from_dev(dev);
686 for (i = 0; i < 8; i++) {
691 ret = dd->f_tempsense_rd(dd, i);
693 return ret; /* return error on bad read */
696 return sysfs_emit(buf, "%d %d %02X %02X %d %d\n",
697 (signed char)regvals[0],
698 (signed char)regvals[1],
701 (signed char)regvals[5],
702 (signed char)regvals[7]);
704 static DEVICE_ATTR_RO(tempsense);
707 * end of per-unit (or driver, in some cases, but replicated
708 * per unit) functions
711 /* start of per-unit file structures and support code */
712 static struct attribute *qib_attributes[] = {
713 &dev_attr_hw_rev.attr,
714 &dev_attr_hca_type.attr,
715 &dev_attr_board_id.attr,
716 &dev_attr_version.attr,
717 &dev_attr_nctxts.attr,
718 &dev_attr_nfreectxts.attr,
719 &dev_attr_serial.attr,
720 &dev_attr_boardversion.attr,
721 &dev_attr_tempsense.attr,
722 &dev_attr_localbus_info.attr,
723 &dev_attr_chip_reset.attr,
727 const struct attribute_group qib_attr_group = {
728 .attrs = qib_attributes,
731 int qib_create_port_files(struct ib_device *ibdev, u32 port_num,
732 struct kobject *kobj)
734 struct qib_pportdata *ppd;
735 struct qib_devdata *dd = dd_from_ibdev(ibdev);
738 if (!port_num || port_num > dd->num_pports) {
740 "Skipping infiniband class with invalid port %u\n",
745 ppd = &dd->pport[port_num - 1];
747 ret = kobject_init_and_add(&ppd->pport_kobj, &qib_port_ktype, kobj,
751 "Skipping linkcontrol sysfs info, (err %d) port %u\n",
755 kobject_uevent(&ppd->pport_kobj, KOBJ_ADD);
757 ret = kobject_init_and_add(&ppd->sl2vl_kobj, &qib_sl2vl_ktype, kobj,
761 "Skipping sl2vl sysfs info, (err %d) port %u\n",
765 kobject_uevent(&ppd->sl2vl_kobj, KOBJ_ADD);
767 ret = kobject_init_and_add(&ppd->diagc_kobj, &qib_diagc_ktype, kobj,
771 "Skipping diag_counters sysfs info, (err %d) port %u\n",
775 kobject_uevent(&ppd->diagc_kobj, KOBJ_ADD);
777 if (!qib_cc_table_size || !ppd->congestion_entries_shadow)
780 ret = kobject_init_and_add(&ppd->pport_cc_kobj, &qib_port_cc_ktype,
784 "Skipping Congestion Control sysfs info, (err %d) port %u\n",
789 kobject_uevent(&ppd->pport_cc_kobj, KOBJ_ADD);
791 ret = sysfs_create_bin_file(&ppd->pport_cc_kobj,
792 &cc_setting_bin_attr);
795 "Skipping Congestion Control setting sysfs info, (err %d) port %u\n",
800 ret = sysfs_create_bin_file(&ppd->pport_cc_kobj,
804 "Skipping Congestion Control table sysfs info, (err %d) port %u\n",
806 goto bail_cc_entry_bin;
809 qib_devinfo(dd->pcidev,
810 "IB%u: Congestion Control Agent enabled for port %d\n",
816 sysfs_remove_bin_file(&ppd->pport_cc_kobj, &cc_setting_bin_attr);
818 kobject_put(&ppd->pport_cc_kobj);
820 kobject_put(&ppd->diagc_kobj);
822 kobject_put(&ppd->sl2vl_kobj);
824 kobject_put(&ppd->pport_kobj);
830 * Unregister and remove our files in /sys/class/infiniband.
832 void qib_verbs_unregister_sysfs(struct qib_devdata *dd)
834 struct qib_pportdata *ppd;
837 for (i = 0; i < dd->num_pports; i++) {
839 if (qib_cc_table_size &&
840 ppd->congestion_entries_shadow) {
841 sysfs_remove_bin_file(&ppd->pport_cc_kobj,
842 &cc_setting_bin_attr);
843 sysfs_remove_bin_file(&ppd->pport_cc_kobj,
845 kobject_put(&ppd->pport_cc_kobj);
847 kobject_put(&ppd->diagc_kobj);
848 kobject_put(&ppd->sl2vl_kobj);
849 kobject_put(&ppd->pport_kobj);