// SPDX-License-Identifier: GPL-2.0
/*
 * File:	vpd.c
 * Purpose:	Provide PCI VPD support
 *
 * Copyright (C) 2010 Broadcom Corporation.
 */
9 #include <linux/delay.h>
10 #include <linux/export.h>
11 #include <linux/sched/signal.h>
14 /* VPD access through PCI 2.2+ VPD capability */
17 ssize_t (*read)(struct pci_dev *dev, loff_t pos, size_t count, void *buf);
18 ssize_t (*write)(struct pci_dev *dev, loff_t pos, size_t count, const void *buf);
22 const struct pci_vpd_ops *ops;
23 struct bin_attribute *attr; /* Descriptor for sysfs VPD entry */
33 * pci_read_vpd - Read one entry from Vital Product Data
34 * @dev: pci device struct
35 * @pos: offset in vpd space
36 * @count: number of bytes to read
37 * @buf: pointer to where to store result
39 ssize_t pci_read_vpd(struct pci_dev *dev, loff_t pos, size_t count, void *buf)
41 if (!dev->vpd || !dev->vpd->ops)
43 return dev->vpd->ops->read(dev, pos, count, buf);
45 EXPORT_SYMBOL(pci_read_vpd);
48 * pci_write_vpd - Write entry to Vital Product Data
49 * @dev: pci device struct
50 * @pos: offset in vpd space
51 * @count: number of bytes to write
52 * @buf: buffer containing write data
54 ssize_t pci_write_vpd(struct pci_dev *dev, loff_t pos, size_t count, const void *buf)
56 if (!dev->vpd || !dev->vpd->ops)
58 return dev->vpd->ops->write(dev, pos, count, buf);
60 EXPORT_SYMBOL(pci_write_vpd);
62 #define PCI_VPD_MAX_SIZE (PCI_VPD_ADDR_MASK + 1)
65 * pci_vpd_size - determine actual size of Vital Product Data
66 * @dev: pci device struct
67 * @old_size: current assumed size, also maximum allowed size
69 static size_t pci_vpd_size(struct pci_dev *dev, size_t old_size)
72 unsigned char header[1+2]; /* 1 byte tag, 2 bytes length */
74 while (off < old_size && pci_read_vpd(dev, off, 1, header) == 1) {
77 if (!header[0] && !off) {
78 pci_info(dev, "Invalid VPD tag 00, assume missing optional VPD EPROM\n");
82 if (header[0] & PCI_VPD_LRDT) {
83 /* Large Resource Data Type Tag */
84 tag = pci_vpd_lrdt_tag(header);
85 /* Only read length from known tag items */
86 if ((tag == PCI_VPD_LTIN_ID_STRING) ||
87 (tag == PCI_VPD_LTIN_RO_DATA) ||
88 (tag == PCI_VPD_LTIN_RW_DATA)) {
89 if (pci_read_vpd(dev, off+1, 2,
91 pci_warn(dev, "invalid large VPD tag %02x size at offset %zu",
95 off += PCI_VPD_LRDT_TAG_SIZE +
96 pci_vpd_lrdt_size(header);
99 /* Short Resource Data Type Tag */
100 off += PCI_VPD_SRDT_TAG_SIZE +
101 pci_vpd_srdt_size(header);
102 tag = pci_vpd_srdt_tag(header);
105 if (tag == PCI_VPD_STIN_END) /* End tag descriptor */
108 if ((tag != PCI_VPD_LTIN_ID_STRING) &&
109 (tag != PCI_VPD_LTIN_RO_DATA) &&
110 (tag != PCI_VPD_LTIN_RW_DATA)) {
111 pci_warn(dev, "invalid %s VPD tag %02x at offset %zu",
112 (header[0] & PCI_VPD_LRDT) ? "large" : "short",
121 * Wait for last operation to complete.
122 * This code has to spin since there is no other notification from the PCI
123 * hardware. Since the VPD is often implemented by serial attachment to an
124 * EEPROM, it may take many milliseconds to complete.
126 * Returns 0 on success, negative values indicate error.
128 static int pci_vpd_wait(struct pci_dev *dev)
130 struct pci_vpd *vpd = dev->vpd;
131 unsigned long timeout = jiffies + msecs_to_jiffies(125);
132 unsigned long max_sleep = 16;
140 ret = pci_user_read_config_word(dev, vpd->cap + PCI_VPD_ADDR,
145 if ((status & PCI_VPD_ADDR_F) == vpd->flag) {
150 if (fatal_signal_pending(current))
153 if (time_after(jiffies, timeout))
156 usleep_range(10, max_sleep);
157 if (max_sleep < 1024)
161 pci_warn(dev, "VPD access failed. This is likely a firmware bug on this device. Contact the card vendor for a firmware update\n");
165 static ssize_t pci_vpd_read(struct pci_dev *dev, loff_t pos, size_t count,
168 struct pci_vpd *vpd = dev->vpd;
170 loff_t end = pos + count;
178 vpd->len = pci_vpd_size(dev, vpd->len);
187 if (end > vpd->len) {
192 if (mutex_lock_killable(&vpd->lock))
195 ret = pci_vpd_wait(dev);
201 unsigned int i, skip;
203 ret = pci_user_write_config_word(dev, vpd->cap + PCI_VPD_ADDR,
208 vpd->flag = PCI_VPD_ADDR_F;
209 ret = pci_vpd_wait(dev);
213 ret = pci_user_read_config_dword(dev, vpd->cap + PCI_VPD_DATA, &val);
218 for (i = 0; i < sizeof(u32); i++) {
228 mutex_unlock(&vpd->lock);
229 return ret ? ret : count;
232 static ssize_t pci_vpd_write(struct pci_dev *dev, loff_t pos, size_t count,
235 struct pci_vpd *vpd = dev->vpd;
237 loff_t end = pos + count;
240 if (pos < 0 || (pos & 3) || (count & 3))
245 vpd->len = pci_vpd_size(dev, vpd->len);
254 if (mutex_lock_killable(&vpd->lock))
257 ret = pci_vpd_wait(dev);
269 ret = pci_user_write_config_dword(dev, vpd->cap + PCI_VPD_DATA, val);
272 ret = pci_user_write_config_word(dev, vpd->cap + PCI_VPD_ADDR,
273 pos | PCI_VPD_ADDR_F);
279 ret = pci_vpd_wait(dev);
286 mutex_unlock(&vpd->lock);
287 return ret ? ret : count;
290 static const struct pci_vpd_ops pci_vpd_ops = {
291 .read = pci_vpd_read,
292 .write = pci_vpd_write,
295 static ssize_t pci_vpd_f0_read(struct pci_dev *dev, loff_t pos, size_t count,
298 struct pci_dev *tdev = pci_get_slot(dev->bus,
299 PCI_DEVFN(PCI_SLOT(dev->devfn), 0));
305 ret = pci_read_vpd(tdev, pos, count, arg);
310 static ssize_t pci_vpd_f0_write(struct pci_dev *dev, loff_t pos, size_t count,
313 struct pci_dev *tdev = pci_get_slot(dev->bus,
314 PCI_DEVFN(PCI_SLOT(dev->devfn), 0));
320 ret = pci_write_vpd(tdev, pos, count, arg);
325 static const struct pci_vpd_ops pci_vpd_f0_ops = {
326 .read = pci_vpd_f0_read,
327 .write = pci_vpd_f0_write,
330 int pci_vpd_init(struct pci_dev *dev)
335 cap = pci_find_capability(dev, PCI_CAP_ID_VPD);
339 vpd = kzalloc(sizeof(*vpd), GFP_ATOMIC);
343 vpd->len = PCI_VPD_MAX_SIZE;
344 if (dev->dev_flags & PCI_DEV_FLAGS_VPD_REF_F0)
345 vpd->ops = &pci_vpd_f0_ops;
347 vpd->ops = &pci_vpd_ops;
348 mutex_init(&vpd->lock);
356 void pci_vpd_release(struct pci_dev *dev)
361 static ssize_t read_vpd_attr(struct file *filp, struct kobject *kobj,
362 struct bin_attribute *bin_attr, char *buf,
363 loff_t off, size_t count)
365 struct pci_dev *dev = to_pci_dev(kobj_to_dev(kobj));
367 return pci_read_vpd(dev, off, count, buf);
370 static ssize_t write_vpd_attr(struct file *filp, struct kobject *kobj,
371 struct bin_attribute *bin_attr, char *buf,
372 loff_t off, size_t count)
374 struct pci_dev *dev = to_pci_dev(kobj_to_dev(kobj));
376 return pci_write_vpd(dev, off, count, buf);
379 void pcie_vpd_create_sysfs_dev_files(struct pci_dev *dev)
382 struct bin_attribute *attr;
387 attr = kzalloc(sizeof(*attr), GFP_ATOMIC);
391 sysfs_bin_attr_init(attr);
393 attr->attr.name = "vpd";
394 attr->attr.mode = S_IRUSR | S_IWUSR;
395 attr->read = read_vpd_attr;
396 attr->write = write_vpd_attr;
397 retval = sysfs_create_bin_file(&dev->dev.kobj, attr);
403 dev->vpd->attr = attr;
406 void pcie_vpd_remove_sysfs_dev_files(struct pci_dev *dev)
408 if (dev->vpd && dev->vpd->attr) {
409 sysfs_remove_bin_file(&dev->dev.kobj, dev->vpd->attr);
410 kfree(dev->vpd->attr);
414 int pci_vpd_find_tag(const u8 *buf, unsigned int off, unsigned int len, u8 rdt)
418 for (i = off; i < len; ) {
421 if (val & PCI_VPD_LRDT) {
422 /* Don't return success of the tag isn't complete */
423 if (i + PCI_VPD_LRDT_TAG_SIZE > len)
429 i += PCI_VPD_LRDT_TAG_SIZE +
430 pci_vpd_lrdt_size(&buf[i]);
432 u8 tag = val & ~PCI_VPD_SRDT_LEN_MASK;
437 if (tag == PCI_VPD_SRDT_END)
440 i += PCI_VPD_SRDT_TAG_SIZE +
441 pci_vpd_srdt_size(&buf[i]);
447 EXPORT_SYMBOL_GPL(pci_vpd_find_tag);
449 int pci_vpd_find_info_keyword(const u8 *buf, unsigned int off,
450 unsigned int len, const char *kw)
454 for (i = off; i + PCI_VPD_INFO_FLD_HDR_SIZE <= off + len;) {
455 if (buf[i + 0] == kw[0] &&
459 i += PCI_VPD_INFO_FLD_HDR_SIZE +
460 pci_vpd_info_field_size(&buf[i]);
465 EXPORT_SYMBOL_GPL(pci_vpd_find_info_keyword);
467 #ifdef CONFIG_PCI_QUIRKS
469 * Quirk non-zero PCI functions to route VPD access through function 0 for
470 * devices that share VPD resources between functions. The functions are
471 * expected to be identical devices.
473 static void quirk_f0_vpd_link(struct pci_dev *dev)
477 if (!PCI_FUNC(dev->devfn))
480 f0 = pci_get_slot(dev->bus, PCI_DEVFN(PCI_SLOT(dev->devfn), 0));
484 if (f0->vpd && dev->class == f0->class &&
485 dev->vendor == f0->vendor && dev->device == f0->device)
486 dev->dev_flags |= PCI_DEV_FLAGS_VPD_REF_F0;
490 DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, PCI_ANY_ID,
491 PCI_CLASS_NETWORK_ETHERNET, 8, quirk_f0_vpd_link);
494 * If a device follows the VPD format spec, the PCI core will not read or
495 * write past the VPD End Tag. But some vendors do not follow the VPD
496 * format spec, so we can't tell how much data is safe to access. Devices
497 * may behave unpredictably if we access too much. Blacklist these devices
498 * so we don't touch VPD at all.
500 static void quirk_blacklist_vpd(struct pci_dev *dev)
504 pci_warn(dev, FW_BUG "disabling VPD access (can't determine size of non-standard VPD format)\n");
507 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LSI_LOGIC, 0x0060, quirk_blacklist_vpd);
508 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LSI_LOGIC, 0x007c, quirk_blacklist_vpd);
509 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LSI_LOGIC, 0x0413, quirk_blacklist_vpd);
510 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LSI_LOGIC, 0x0078, quirk_blacklist_vpd);
511 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LSI_LOGIC, 0x0079, quirk_blacklist_vpd);
512 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LSI_LOGIC, 0x0073, quirk_blacklist_vpd);
513 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LSI_LOGIC, 0x0071, quirk_blacklist_vpd);
514 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LSI_LOGIC, 0x005b, quirk_blacklist_vpd);
515 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LSI_LOGIC, 0x002f, quirk_blacklist_vpd);
516 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LSI_LOGIC, 0x005d, quirk_blacklist_vpd);
517 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LSI_LOGIC, 0x005f, quirk_blacklist_vpd);
518 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, PCI_ANY_ID,
519 quirk_blacklist_vpd);
520 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_QLOGIC, 0x2261, quirk_blacklist_vpd);
522 * The Amazon Annapurna Labs 0x0031 device id is reused for other non Root Port
523 * device types, so the quirk is registered for the PCI_CLASS_BRIDGE_PCI class.
525 DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_AMAZON_ANNAPURNA_LABS, 0x0031,
526 PCI_CLASS_BRIDGE_PCI, 8, quirk_blacklist_vpd);
528 static void pci_vpd_set_size(struct pci_dev *dev, size_t len)
530 struct pci_vpd *vpd = dev->vpd;
532 if (!vpd || len == 0 || len > PCI_VPD_MAX_SIZE)
539 static void quirk_chelsio_extend_vpd(struct pci_dev *dev)
541 int chip = (dev->device & 0xf000) >> 12;
542 int func = (dev->device & 0x0f00) >> 8;
543 int prod = (dev->device & 0x00ff) >> 0;
546 * If this is a T3-based adapter, there's a 1KB VPD area at offset
547 * 0xc00 which contains the preferred VPD values. If this is a T4 or
548 * later based adapter, the special VPD is at offset 0x400 for the
549 * Physical Functions (the SR-IOV Virtual Functions have no VPD
550 * Capabilities). The PCI VPD Access core routines will normally
551 * compute the size of the VPD by parsing the VPD Data Structure at
552 * offset 0x000. This will result in silent failures when attempting
553 * to accesses these other VPD areas which are beyond those computed
556 if (chip == 0x0 && prod >= 0x20)
557 pci_vpd_set_size(dev, 8192);
558 else if (chip >= 0x4 && func < 0x8)
559 pci_vpd_set_size(dev, 2048);
562 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, PCI_ANY_ID,
563 quirk_chelsio_extend_vpd);