// SPDX-License-Identifier: GPL-2.0
/*
 * PCI Peer 2 Peer DMA support.
 *
 * Copyright (c) 2016-2018, Logan Gunthorpe
 * Copyright (c) 2016-2017, Microsemi Corporation
 * Copyright (c) 2017, Christoph Hellwig
 * Copyright (c) 2018, Eideticom Inc.
 */

#define pr_fmt(fmt) "pci-p2pdma: " fmt

#include <linux/ctype.h>
#include <linux/pci-p2pdma.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/genalloc.h>
#include <linux/memremap.h>
#include <linux/percpu-refcount.h>
#include <linux/random.h>
#include <linux/seq_buf.h>
#include <linux/iommu.h>

enum pci_p2pdma_map_type {
	PCI_P2PDMA_MAP_UNKNOWN = 0,
	PCI_P2PDMA_MAP_NOT_SUPPORTED,
	PCI_P2PDMA_MAP_BUS_ADDR,
	PCI_P2PDMA_MAP_THRU_HOST_BRIDGE,
};

struct pci_p2pdma {
	struct gen_pool *pool;
	bool p2pmem_published;
};

struct pci_p2pdma_pagemap {
	struct dev_pagemap pgmap;
	struct pci_dev *provider;
	u64 bus_offset;
};

static struct pci_p2pdma_pagemap *to_p2p_pgmap(struct dev_pagemap *pgmap)
{
	return container_of(pgmap, struct pci_p2pdma_pagemap, pgmap);
}

static ssize_t size_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	size_t size = 0;

	if (pdev->p2pdma->pool)
		size = gen_pool_size(pdev->p2pdma->pool);

	return snprintf(buf, PAGE_SIZE, "%zd\n", size);
}
static DEVICE_ATTR_RO(size);

static ssize_t available_show(struct device *dev, struct device_attribute *attr,
			      char *buf)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	size_t avail = 0;

	if (pdev->p2pdma->pool)
		avail = gen_pool_avail(pdev->p2pdma->pool);

	return snprintf(buf, PAGE_SIZE, "%zd\n", avail);
}
static DEVICE_ATTR_RO(available);

static ssize_t published_show(struct device *dev, struct device_attribute *attr,
			      char *buf)
{
	struct pci_dev *pdev = to_pci_dev(dev);

	return snprintf(buf, PAGE_SIZE, "%d\n",
			pdev->p2pdma->p2pmem_published);
}
static DEVICE_ATTR_RO(published);

static struct attribute *p2pmem_attrs[] = {
	&dev_attr_size.attr,
	&dev_attr_available.attr,
	&dev_attr_published.attr,
	NULL,
};

static const struct attribute_group p2pmem_group = {
	.attrs = p2pmem_attrs,
	.name = "p2pmem",
};

static void pci_p2pdma_release(void *data)
{
	struct pci_dev *pdev = data;
	struct pci_p2pdma *p2pdma = pdev->p2pdma;

	if (!p2pdma)
		return;

	/* Flush and disable pci_alloc_p2pmem() */
	pdev->p2pdma = NULL;
	synchronize_rcu();

	gen_pool_destroy(p2pdma->pool);
	sysfs_remove_group(&pdev->dev.kobj, &p2pmem_group);
}

static int pci_p2pdma_setup(struct pci_dev *pdev)
{
	int error = -ENOMEM;
	struct pci_p2pdma *p2p;

	p2p = devm_kzalloc(&pdev->dev, sizeof(*p2p), GFP_KERNEL);
	if (!p2p)
		return -ENOMEM;

	p2p->pool = gen_pool_create(PAGE_SHIFT, dev_to_node(&pdev->dev));
	if (!p2p->pool)
		goto out;

	error = devm_add_action_or_reset(&pdev->dev, pci_p2pdma_release, pdev);
	if (error)
		goto out_pool_destroy;

	pdev->p2pdma = p2p;

	error = sysfs_create_group(&pdev->dev.kobj, &p2pmem_group);
	if (error)
		goto out_pool_destroy;

	return 0;

out_pool_destroy:
	pdev->p2pdma = NULL;
	gen_pool_destroy(p2p->pool);
out:
	devm_kfree(&pdev->dev, p2p);
	return error;
}

/**
 * pci_p2pdma_add_resource - add memory for use as p2p memory
 * @pdev: the device to add the memory to
 * @bar: PCI BAR to add
 * @size: size of the memory to add, may be zero to use the whole BAR
 * @offset: offset into the PCI BAR
 *
 * The memory will be given ZONE_DEVICE struct pages so that it may
 * be used with any DMA request.
 */
int pci_p2pdma_add_resource(struct pci_dev *pdev, int bar, size_t size,
			    u64 offset)
{
	struct pci_p2pdma_pagemap *p2p_pgmap;
	struct dev_pagemap *pgmap;
	void *addr;
	int error;

	if (!(pci_resource_flags(pdev, bar) & IORESOURCE_MEM))
		return -EINVAL;

	if (offset >= pci_resource_len(pdev, bar))
		return -EINVAL;

	if (!size)
		size = pci_resource_len(pdev, bar) - offset;

	if (size + offset > pci_resource_len(pdev, bar))
		return -EINVAL;

	if (!pdev->p2pdma) {
		error = pci_p2pdma_setup(pdev);
		if (error)
			return error;
	}

	p2p_pgmap = devm_kzalloc(&pdev->dev, sizeof(*p2p_pgmap), GFP_KERNEL);
	if (!p2p_pgmap)
		return -ENOMEM;

	pgmap = &p2p_pgmap->pgmap;
	pgmap->res.start = pci_resource_start(pdev, bar) + offset;
	pgmap->res.end = pgmap->res.start + size - 1;
	pgmap->res.flags = pci_resource_flags(pdev, bar);
	pgmap->type = MEMORY_DEVICE_PCI_P2PDMA;

	p2p_pgmap->provider = pdev;
	p2p_pgmap->bus_offset = pci_bus_address(pdev, bar) -
		pci_resource_start(pdev, bar);

	addr = devm_memremap_pages(&pdev->dev, pgmap);
	if (IS_ERR(addr)) {
		error = PTR_ERR(addr);
		goto pgmap_free;
	}

	error = gen_pool_add_owner(pdev->p2pdma->pool, (unsigned long)addr,
			pci_bus_address(pdev, bar) + offset,
			resource_size(&pgmap->res), dev_to_node(&pdev->dev),
			pgmap->ref);
	if (error)
		goto pages_free;

	pci_info(pdev, "added peer-to-peer DMA memory %pR\n",
		 &pgmap->res);

	return 0;

pages_free:
	devm_memunmap_pages(&pdev->dev, pgmap);
pgmap_free:
	devm_kfree(&pdev->dev, pgmap);
	return error;
}
EXPORT_SYMBOL_GPL(pci_p2pdma_add_resource);

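/*
 * Example usage (illustrative sketch only, not part of this file): a
 * provider driver might expose p2p-capable memory from one of its BARs
 * during probe and then publish it. The BAR number (4) and the
 * hypothetical example_probe() are assumptions for illustration:
 *
 *	static int example_probe(struct pci_dev *pdev,
 *				 const struct pci_device_id *id)
 *	{
 *		int rc;
 *
 *		// size = 0 registers the whole BAR, offset 0
 *		rc = pci_p2pdma_add_resource(pdev, 4, 0, 0);
 *		if (rc)
 *			return rc;
 *
 *		pci_p2pmem_publish(pdev, true);
 *		return 0;
 *	}
 */
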
/*
 * Note this function returns the parent PCI device with a
 * reference taken. It is the caller's responsibility to drop
 * the reference.
 */
static struct pci_dev *find_parent_pci_dev(struct device *dev)
{
	struct device *parent;

	dev = get_device(dev);

	while (dev) {
		if (dev_is_pci(dev))
			return to_pci_dev(dev);

		parent = get_device(dev->parent);
		put_device(dev);
		dev = parent;
	}

	return NULL;
}

/*
 * Check if a PCI bridge has its ACS redirection bits set to redirect P2P
 * TLPs upstream via ACS. Returns 1 if the packets will be redirected
 * upstream, 0 otherwise.
 */
static int pci_bridge_has_acs_redir(struct pci_dev *pdev)
{
	int pos;
	u16 ctrl;

	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ACS);
	if (!pos)
		return 0;

	pci_read_config_word(pdev, pos + PCI_ACS_CTRL, &ctrl);

	if (ctrl & (PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_EC))
		return 1;

	return 0;
}

static void seq_buf_print_bus_devfn(struct seq_buf *buf, struct pci_dev *pdev)
{
	if (!buf)
		return;

	seq_buf_printf(buf, "%s;", pci_name(pdev));
}

/*
 * If we can't find a common upstream bridge take a look at the root
 * complex and compare it to a whitelist of known good hardware.
 */
static bool root_complex_whitelist(struct pci_dev *dev)
{
	struct pci_host_bridge *host = pci_find_host_bridge(dev->bus);
	struct pci_dev *root = pci_get_slot(host->bus, PCI_DEVFN(0, 0));
	unsigned short vendor, device;

	if (iommu_present(dev->dev.bus))
		return false;

	if (!root)
		return false;

	vendor = root->vendor;
	device = root->device;
	pci_dev_put(root);

	/* AMD ZEN host bridges can do peer to peer */
	if (vendor == PCI_VENDOR_ID_AMD && device == 0x1450)
		return true;

	return false;
}

static enum pci_p2pdma_map_type
__upstream_bridge_distance(struct pci_dev *provider, struct pci_dev *client,
		int *dist, bool *acs_redirects, struct seq_buf *acs_list)
{
	struct pci_dev *a = provider, *b = client, *bb;
	int dist_a = 0;
	int dist_b = 0;
	int acs_cnt = 0;

	if (acs_redirects)
		*acs_redirects = false;

	/*
	 * Note, we don't need to take references to devices returned by
	 * pci_upstream_bridge() seeing we hold a reference to a child
	 * device which will already hold a reference to the upstream bridge.
	 */
	while (a) {
		dist_b = 0;

		if (pci_bridge_has_acs_redir(a)) {
			seq_buf_print_bus_devfn(acs_list, a);
			acs_cnt++;
		}

		bb = b;

		while (bb) {
			if (a == bb)
				goto check_b_path_acs;

			bb = pci_upstream_bridge(bb);
			dist_b++;
		}

		a = pci_upstream_bridge(a);
		dist_a++;
	}

	*dist = dist_a + dist_b;

	return PCI_P2PDMA_MAP_THRU_HOST_BRIDGE;

check_b_path_acs:
	bb = b;

	while (bb) {
		if (a == bb)
			break;

		if (pci_bridge_has_acs_redir(bb)) {
			seq_buf_print_bus_devfn(acs_list, bb);
			acs_cnt++;
		}

		bb = pci_upstream_bridge(bb);
	}

	*dist = dist_a + dist_b;

	if (acs_cnt) {
		if (acs_redirects)
			*acs_redirects = true;

		return PCI_P2PDMA_MAP_THRU_HOST_BRIDGE;
	}

	return PCI_P2PDMA_MAP_BUS_ADDR;
}

/*
 * Find the distance through the nearest common upstream bridge between
 * two PCI devices.
 *
 * If the two devices are the same device then 0 will be returned.
 *
 * If there are two virtual functions of the same device behind the same
 * bridge port then 2 will be returned (one step down to the PCIe switch,
 * then one step back to the same device).
 *
 * In the case where two devices are connected to the same PCIe switch, the
 * value 4 will be returned. This corresponds to the following PCI tree:
 *
 *     -+  Root Port
 *      \+ Switch Upstream Port
 *       +-+ Switch Downstream Port
 *       + \- Device A
 *       \-+ Switch Downstream Port
 *         \- Device B
 *
 * The distance is 4 because we traverse from Device A through the downstream
 * port of the switch, to the common upstream port, back up to the second
 * downstream port and then to Device B.
 *
 * Any two devices that cannot communicate using p2pdma will return
 * PCI_P2PDMA_MAP_NOT_SUPPORTED.
 *
 * Any two devices that have a data path that goes through the host bridge
 * will consult a whitelist. If the host bridges are on the whitelist,
 * this function will return PCI_P2PDMA_MAP_THRU_HOST_BRIDGE.
 *
 * If either host bridge is not on the whitelist this function returns
 * PCI_P2PDMA_MAP_NOT_SUPPORTED.
 *
 * If a bridge which has any ACS redirection bits set is in the path,
 * acs_redirects will be set to true. In this case, a list of all infringing
 * bridge addresses will be populated in acs_list (assuming it's non-null)
 * for printk purposes.
 */
static enum pci_p2pdma_map_type
upstream_bridge_distance(struct pci_dev *provider, struct pci_dev *client,
		int *dist, bool *acs_redirects, struct seq_buf *acs_list)
{
	enum pci_p2pdma_map_type map_type;

	map_type = __upstream_bridge_distance(provider, client, dist,
					      acs_redirects, acs_list);

	if (map_type == PCI_P2PDMA_MAP_THRU_HOST_BRIDGE) {
		if (!root_complex_whitelist(provider) ||
		    !root_complex_whitelist(client))
			return PCI_P2PDMA_MAP_NOT_SUPPORTED;
	}

	return map_type;
}

static enum pci_p2pdma_map_type
upstream_bridge_distance_warn(struct pci_dev *provider, struct pci_dev *client,
			      int *dist)
{
	struct seq_buf acs_list;
	bool acs_redirects;
	int ret;

	seq_buf_init(&acs_list, kmalloc(PAGE_SIZE, GFP_KERNEL), PAGE_SIZE);
	if (!acs_list.buffer)
		return -ENOMEM;

	ret = upstream_bridge_distance(provider, client, dist, &acs_redirects,
				       &acs_list);
	if (acs_redirects) {
		pci_warn(client, "ACS redirect is set between the client and provider (%s)\n",
			 pci_name(provider));
		/* Drop final semicolon */
		acs_list.buffer[acs_list.len-1] = 0;
		pci_warn(client, "to disable ACS redirect for this path, add the kernel parameter: pci=disable_acs_redir=%s\n",
			 acs_list.buffer);
	}

	if (ret == PCI_P2PDMA_MAP_NOT_SUPPORTED) {
		pci_warn(client, "cannot be used for peer-to-peer DMA as the client and provider (%s) do not share an upstream bridge or whitelisted host bridge\n",
			 pci_name(provider));
	}

	kfree(acs_list.buffer);

	return ret;
}

/**
 * pci_p2pdma_distance_many - Determine the cumulative distance between
 *	a p2pdma provider and the clients in use.
 * @provider: p2pdma provider to check against the client list
 * @clients: array of devices to check (NULL-terminated)
 * @num_clients: number of clients in the array
 * @verbose: if true, print warnings for devices when we return -1
 *
 * Returns -1 if any of the clients are not compatible (i.e., not behind the
 * same root port as the provider), otherwise returns a positive number where
 * a lower number is the preferable choice. (If there's one client that's the
 * same as the provider it will return 0, which is the best choice.)
 *
 * For now, "compatible" means the provider and the clients are all behind
 * the same PCI root port. This cuts out cases that may work but is safest
 * for the user. Future work can expand this to whitelist root complexes that
 * can safely forward between their ports.
 */
int pci_p2pdma_distance_many(struct pci_dev *provider, struct device **clients,
			     int num_clients, bool verbose)
{
	bool not_supported = false;
	struct pci_dev *pci_client;
	int total_dist = 0;
	int distance;
	int i, ret;

	if (num_clients == 0)
		return -1;

	for (i = 0; i < num_clients; i++) {
		if (IS_ENABLED(CONFIG_DMA_VIRT_OPS) &&
		    clients[i]->dma_ops == &dma_virt_ops) {
			if (verbose)
				dev_warn(clients[i],
					 "cannot be used for peer-to-peer DMA because the driver makes use of dma_virt_ops\n");
			return -1;
		}

		pci_client = find_parent_pci_dev(clients[i]);
		if (!pci_client) {
			if (verbose)
				dev_warn(clients[i],
					 "cannot be used for peer-to-peer DMA as it is not a PCI device\n");
			return -1;
		}

		if (verbose)
			ret = upstream_bridge_distance_warn(provider,
					pci_client, &distance);
		else
			ret = upstream_bridge_distance(provider, pci_client,
						       &distance, NULL, NULL);

		pci_dev_put(pci_client);

		if (ret == PCI_P2PDMA_MAP_NOT_SUPPORTED)
			not_supported = true;

		if (not_supported && !verbose)
			break;

		total_dist += distance;
	}

	if (not_supported)
		return -1;

	return total_dist;
}
EXPORT_SYMBOL_GPL(pci_p2pdma_distance_many);

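/*
 * Example usage (illustrative sketch, not part of this file): reject a
 * provider that cannot reach every client; "clients" and "num_clients" are
 * assumed to be supplied by the caller.
 *
 *	if (pci_p2pdma_distance_many(provider, clients, num_clients, true) < 0)
 *		return -EINVAL;
 */
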
/**
 * pci_has_p2pmem - check if a given PCI device has published any p2pmem
 * @pdev: PCI device to check
 */
bool pci_has_p2pmem(struct pci_dev *pdev)
{
	return pdev->p2pdma && pdev->p2pdma->p2pmem_published;
}
EXPORT_SYMBOL_GPL(pci_has_p2pmem);

/**
 * pci_p2pmem_find_many - find a peer-to-peer DMA memory device compatible with
 *	the specified list of clients and shortest distance (as determined
 *	by pci_p2pdma_distance_many())
 * @clients: array of devices to check (NULL-terminated)
 * @num_clients: number of client devices in the list
 *
 * If multiple devices are behind the same switch, the one "closest" to the
 * client devices in use will be chosen first. (So if one of the providers is
 * the same as one of the clients, that provider will be used ahead of any
 * other providers that are unrelated.) If multiple providers are an equal
 * distance away, one will be chosen at random.
 *
 * Returns a pointer to the PCI device with a reference taken (use pci_dev_put
 * to return the reference) or NULL if no compatible device is found. The
 * found provider will also be assigned to the client list.
 */
struct pci_dev *pci_p2pmem_find_many(struct device **clients, int num_clients)
{
	struct pci_dev *pdev = NULL;
	int distance;
	int closest_distance = INT_MAX;
	struct pci_dev **closest_pdevs;
	int dev_cnt = 0;
	const int max_devs = PAGE_SIZE / sizeof(*closest_pdevs);
	int i;

	closest_pdevs = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!closest_pdevs)
		return NULL;

	while ((pdev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, pdev))) {
		if (!pci_has_p2pmem(pdev))
			continue;

		distance = pci_p2pdma_distance_many(pdev, clients,
						    num_clients, false);
		if (distance < 0 || distance > closest_distance)
			continue;

		if (distance == closest_distance && dev_cnt >= max_devs)
			continue;

		if (distance < closest_distance) {
			for (i = 0; i < dev_cnt; i++)
				pci_dev_put(closest_pdevs[i]);

			dev_cnt = 0;
			closest_distance = distance;
		}

		closest_pdevs[dev_cnt++] = pci_dev_get(pdev);
	}

	if (dev_cnt)
		pdev = pci_dev_get(closest_pdevs[prandom_u32_max(dev_cnt)]);

	for (i = 0; i < dev_cnt; i++)
		pci_dev_put(closest_pdevs[i]);

	kfree(closest_pdevs);
	return pdev;
}
EXPORT_SYMBOL_GPL(pci_p2pmem_find_many);

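/*
 * Example usage (illustrative sketch, not part of this file): pick the
 * closest published provider for a set of client devices; "clients" and
 * "num_clients" are assumed to come from the caller.
 *
 *	struct pci_dev *p2p_dev;
 *
 *	p2p_dev = pci_p2pmem_find_many(clients, num_clients);
 *	if (!p2p_dev)
 *		return -ENODEV;
 *	...
 *	pci_dev_put(p2p_dev);
 */
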
/**
 * pci_alloc_p2pmem - allocate peer-to-peer DMA memory
 * @pdev: the device to allocate memory from
 * @size: number of bytes to allocate
 *
 * Returns the allocated memory or NULL on error.
 */
void *pci_alloc_p2pmem(struct pci_dev *pdev, size_t size)
{
	void *ret = NULL;
	struct percpu_ref *ref;

	/*
	 * Pairs with synchronize_rcu() in pci_p2pdma_release() to
	 * ensure pdev->p2pdma is non-NULL for the duration of the
	 * read-lock.
	 */
	rcu_read_lock();
	if (unlikely(!pdev->p2pdma))
		goto out;

	ret = (void *)gen_pool_alloc_owner(pdev->p2pdma->pool, size,
			(void **) &ref);
	if (!ret)
		goto out;

	if (unlikely(!percpu_ref_tryget_live(ref))) {
		gen_pool_free(pdev->p2pdma->pool, (unsigned long) ret, size);
		ret = NULL;
		goto out;
	}
out:
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(pci_alloc_p2pmem);

/**
 * pci_free_p2pmem - free peer-to-peer DMA memory
 * @pdev: the device the memory was allocated from
 * @addr: address of the memory that was allocated
 * @size: number of bytes that were allocated
 */
void pci_free_p2pmem(struct pci_dev *pdev, void *addr, size_t size)
{
	struct percpu_ref *ref;

	gen_pool_free_owner(pdev->p2pdma->pool, (uintptr_t)addr, size,
			(void **) &ref);
	percpu_ref_put(ref);
}
EXPORT_SYMBOL_GPL(pci_free_p2pmem);

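/*
 * Example usage (illustrative sketch, not part of this file): allocate a
 * buffer from a provider's pool, use it, then return it. The 4 KB size is
 * an assumption for the example only.
 *
 *	void *buf = pci_alloc_p2pmem(p2p_dev, SZ_4K);
 *
 *	if (!buf)
 *		return -ENOMEM;
 *	...
 *	pci_free_p2pmem(p2p_dev, buf, SZ_4K);
 */
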
/**
 * pci_p2pmem_virt_to_bus - return the PCI bus address for a given virtual
 *	address obtained with pci_alloc_p2pmem()
 * @pdev: the device the memory was allocated from
 * @addr: address of the memory that was allocated
 */
pci_bus_addr_t pci_p2pmem_virt_to_bus(struct pci_dev *pdev, void *addr)
{
	if (!addr)
		return 0;
	if (!pdev->p2pdma)
		return 0;

	/*
	 * Note: when we added the memory to the pool we used the PCI
	 * bus address as the physical address. So gen_pool_virt_to_phys()
	 * actually returns the bus address despite the misleading name.
	 */
	return gen_pool_virt_to_phys(pdev->p2pdma->pool, (unsigned long)addr);
}
EXPORT_SYMBOL_GPL(pci_p2pmem_virt_to_bus);

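/*
 * Example usage (illustrative sketch, not part of this file): a peer device
 * generally needs the bus address, not the kernel virtual address, when it
 * is told where to DMA; "buf" is assumed to come from pci_alloc_p2pmem().
 *
 *	pci_bus_addr_t bus_addr = pci_p2pmem_virt_to_bus(p2p_dev, buf);
 */
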
/**
 * pci_p2pmem_alloc_sgl - allocate peer-to-peer DMA memory in a scatterlist
 * @pdev: the device to allocate memory from
 * @nents: the number of SG entries in the list
 * @length: number of bytes to allocate
 *
 * Return: %NULL on error or &struct scatterlist pointer and @nents on success
 */
struct scatterlist *pci_p2pmem_alloc_sgl(struct pci_dev *pdev,
					 unsigned int *nents, u32 length)
{
	struct scatterlist *sg;
	void *addr;

	sg = kzalloc(sizeof(*sg), GFP_KERNEL);
	if (!sg)
		return NULL;

	sg_init_table(sg, 1);

	addr = pci_alloc_p2pmem(pdev, length);
	if (!addr)
		goto out_free_sg;

	sg_set_buf(sg, addr, length);
	*nents = 1;
	return sg;

out_free_sg:
	kfree(sg);
	return NULL;
}
EXPORT_SYMBOL_GPL(pci_p2pmem_alloc_sgl);

/**
 * pci_p2pmem_free_sgl - free a scatterlist allocated by pci_p2pmem_alloc_sgl()
 * @pdev: the device the memory was allocated from
 * @sgl: the allocated scatterlist
 */
void pci_p2pmem_free_sgl(struct pci_dev *pdev, struct scatterlist *sgl)
{
	struct scatterlist *sg;
	int count;

	for_each_sg(sgl, sg, INT_MAX, count) {
		if (!sg)
			break;

		pci_free_p2pmem(pdev, sg_virt(sg), sg->length);
	}
	kfree(sgl);
}
EXPORT_SYMBOL_GPL(pci_p2pmem_free_sgl);

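/*
 * Example usage (illustrative sketch, not part of this file): allocate a
 * single-entry p2p scatterlist and free it again when the transfer is
 * finished. The 64 KB length is an assumption for the example.
 *
 *	unsigned int nents;
 *	struct scatterlist *sgl;
 *
 *	sgl = pci_p2pmem_alloc_sgl(p2p_dev, &nents, SZ_64K);
 *	if (!sgl)
 *		return -ENOMEM;
 *	...
 *	pci_p2pmem_free_sgl(p2p_dev, sgl);
 */
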
/**
 * pci_p2pmem_publish - publish the peer-to-peer DMA memory for use by
 *	other devices with pci_p2pmem_find()
 * @pdev: the device with peer-to-peer DMA memory to publish
 * @publish: set to true to publish the memory, false to unpublish it
 *
 * Published memory can be used by other PCI device drivers for
 * peer-2-peer DMA operations. Non-published memory is reserved for
 * exclusive use of the device driver that registers the peer-to-peer
 * memory.
 */
void pci_p2pmem_publish(struct pci_dev *pdev, bool publish)
{
	if (pdev->p2pdma)
		pdev->p2pdma->p2pmem_published = publish;
}
EXPORT_SYMBOL_GPL(pci_p2pmem_publish);

/**
 * pci_p2pdma_map_sg - map a PCI peer-to-peer scatterlist for DMA
 * @dev: device doing the DMA request
 * @sg: scatter list to map
 * @nents: elements in the scatterlist
 * @dir: DMA direction
 *
 * Scatterlists mapped with this function should not be unmapped in any way.
 *
 * Returns the number of SG entries mapped or 0 on error.
 */
int pci_p2pdma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
		      enum dma_data_direction dir)
{
	struct pci_p2pdma_pagemap *p2p_pgmap;
	struct scatterlist *s;
	phys_addr_t paddr;
	int i;

	/*
	 * p2pdma mappings are not compatible with devices that use
	 * dma_virt_ops. If the upper layers do the right thing
	 * this should never happen because it will be prevented
	 * by the check in pci_p2pdma_distance_many().
	 */
	if (WARN_ON_ONCE(IS_ENABLED(CONFIG_DMA_VIRT_OPS) &&
			 dev->dma_ops == &dma_virt_ops))
		return 0;

	for_each_sg(sg, s, nents, i) {
		p2p_pgmap = to_p2p_pgmap(sg_page(s)->pgmap);
		paddr = sg_phys(s);

		s->dma_address = paddr - p2p_pgmap->bus_offset;
		sg_dma_len(s) = s->length;
	}

	return nents;
}
EXPORT_SYMBOL_GPL(pci_p2pdma_map_sg);

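/*
 * Example usage (illustrative sketch, not part of this file): map a p2p
 * scatterlist before handing it to a DMA engine; "dma_dev", "sgl" and
 * "nents" are assumptions, with the latter two coming from
 * pci_p2pmem_alloc_sgl() above.
 *
 *	if (!pci_p2pdma_map_sg(dma_dev, sgl, nents, DMA_TO_DEVICE))
 *		return -EIO;
 */
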
/**
 * pci_p2pdma_enable_store - parse a configfs/sysfs attribute store
 *	to enable p2pdma
 * @page: contents of the value to be stored
 * @p2p_dev: returns the PCI device that was selected to be used
 *	(if one was specified in the stored value)
 * @use_p2pdma: returns whether to enable p2pdma or not
 *
 * Parses an attribute value to decide whether to enable p2pdma.
 * The value can select a PCI device (using its full BDF device
 * name) or a boolean (in any format strtobool() accepts). A false
 * value disables p2pdma, a true value expects the caller
 * to automatically find a compatible device, and specifying a PCI device
 * expects the caller to use the specific provider.
 *
 * pci_p2pdma_enable_show() should be used as the show operation for
 * the attribute.
 *
 * Returns 0 on success
 */
int pci_p2pdma_enable_store(const char *page, struct pci_dev **p2p_dev,
			    bool *use_p2pdma)
{
	struct device *dev;

	dev = bus_find_device_by_name(&pci_bus_type, NULL, page);
	if (dev) {
		*use_p2pdma = true;
		*p2p_dev = to_pci_dev(dev);

		if (!pci_has_p2pmem(*p2p_dev)) {
			pci_err(*p2p_dev,
				"PCI device has no peer-to-peer memory: %s\n",
				page);
			pci_dev_put(*p2p_dev);
			return -ENODEV;
		}

		return 0;
	} else if ((page[0] == '0' || page[0] == '1') && !iscntrl(page[1])) {
		/*
		 * If the user enters a PCI device that doesn't exist
		 * like "0000:01:00.1", we don't want strtobool to think
		 * it's a '0' when it's clearly not what the user wanted.
		 * So we require 0's and 1's to be exactly one character.
		 */
	} else if (!strtobool(page, use_p2pdma)) {
		return 0;
	}

	pr_err("No such PCI device: %.*s\n", (int)strcspn(page, "\n"), page);
	return -ENODEV;
}
EXPORT_SYMBOL_GPL(pci_p2pdma_enable_store);

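/*
 * Example usage (illustrative sketch, not part of this file): a configfs
 * store handler along the lines of what a target driver might implement;
 * the example_* names and to_example() are assumptions.
 *
 *	static ssize_t example_p2pmem_store(struct config_item *item,
 *					    const char *page, size_t count)
 *	{
 *		struct example *ex = to_example(item);
 *		int error;
 *
 *		error = pci_p2pdma_enable_store(page, &ex->p2p_dev,
 *						&ex->use_p2pmem);
 *
 *		return error ? error : count;
 *	}
 */
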
/**
 * pci_p2pdma_enable_show - show a configfs/sysfs attribute indicating
 *	whether p2pdma is enabled
 * @page: contents of the stored value
 * @p2p_dev: the selected p2p device (NULL if no device is selected)
 * @use_p2pdma: whether p2pdma has been enabled
 *
 * Attributes that use pci_p2pdma_enable_store() should use this function
 * to show the value of the attribute.
 *
 * Returns the number of characters printed
 */
ssize_t pci_p2pdma_enable_show(char *page, struct pci_dev *p2p_dev,
			       bool use_p2pdma)
{
	if (!use_p2pdma)
		return sprintf(page, "0\n");

	if (!p2p_dev)
		return sprintf(page, "1\n");

	return sprintf(page, "%s\n", pci_name(p2p_dev));
}
EXPORT_SYMBOL_GPL(pci_p2pdma_enable_show);

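/*
 * Example usage (illustrative sketch, not part of this file): the matching
 * configfs show handler for the store example above; the example_* names
 * and to_example() are assumptions.
 *
 *	static ssize_t example_p2pmem_show(struct config_item *item, char *page)
 *	{
 *		struct example *ex = to_example(item);
 *
 *		return pci_p2pdma_enable_show(page, ex->p2p_dev,
 *					      ex->use_p2pmem);
 *	}
 */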