/*
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 *   redistributing this file, you may do so under either license.
 *
 *   GPL LICENSE SUMMARY
 *
 *   Copyright(c) 2012 Intel Corporation. All rights reserved.
 *
 *   This program is free software; you can redistribute it and/or modify
 *   it under the terms of version 2 of the GNU General Public License as
 *   published by the Free Software Foundation.
 *
 *   BSD LICENSE
 *
 *   Copyright(c) 2012 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Intel PCIe NTB Linux driver
 *
 * Contact Information:
 * Jon Mason <jon.mason@intel.com>
 */
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/types.h>
#include "ntb_hw.h"
#define NTB_TRANSPORT_VERSION	3

static unsigned int transport_mtu = 0x401E;
module_param(transport_mtu, uint, 0644);
MODULE_PARM_DESC(transport_mtu, "Maximum size of NTB transport packets");

static unsigned char max_num_clients;
module_param(max_num_clients, byte, 0644);
MODULE_PARM_DESC(max_num_clients, "Maximum number of NTB transport clients");

static unsigned int copy_bytes = 1024;
module_param(copy_bytes, uint, 0644);
MODULE_PARM_DESC(copy_bytes, "Threshold under which NTB will use the CPU to copy instead of DMA");
struct ntb_queue_entry {
	/* ntb_queue list reference */
	struct list_head entry;
	/* pointers to data to be transferred */
	void *cb_data;
	void *buf;
	unsigned int len;
	unsigned int flags;

	struct ntb_transport_qp *qp;
	union {
		struct ntb_payload_header __iomem *tx_hdr;
		struct ntb_payload_header *rx_hdr;
	};
	unsigned int index;
};
struct ntb_rx_info {
	unsigned int entry;
};

struct ntb_transport_qp {
	struct ntb_transport *transport;
	struct ntb_device *ndev;
	void *cb_data;
	struct dma_chan *dma_chan;

	bool client_ready;
	bool qp_link;
	u8 qp_num;	/* Only 64 QPs are allowed.  0-63 */

	struct ntb_rx_info __iomem *rx_info;
	struct ntb_rx_info *remote_rx_info;

	void (*tx_handler)(struct ntb_transport_qp *qp, void *qp_data,
			   void *data, int len);
	struct list_head tx_free_q;
	spinlock_t ntb_tx_free_q_lock;
	void __iomem *tx_mw;
	dma_addr_t tx_mw_phys;
	unsigned int tx_index;
	unsigned int tx_max_entry;
	unsigned int tx_max_frame;

	void (*rx_handler)(struct ntb_transport_qp *qp, void *qp_data,
			   void *data, int len);
	struct list_head rx_pend_q;
	struct list_head rx_free_q;
	spinlock_t ntb_rx_pend_q_lock;
	spinlock_t ntb_rx_free_q_lock;
	void *rx_buff;
	unsigned int rx_index;
	unsigned int rx_max_entry;
	unsigned int rx_max_frame;
	dma_cookie_t last_cookie;

	void (*event_handler)(void *data, int status);
	struct delayed_work link_work;
	struct work_struct link_cleanup;

	struct dentry *debugfs_dir;
	struct dentry *debugfs_stats;

	/* Stats */
	u64 rx_bytes;
	u64 rx_pkts;
	u64 rx_ring_empty;
	u64 rx_err_no_buf;
	u64 rx_err_oflow;
	u64 rx_err_ver;
	u64 rx_memcpy;
	u64 rx_async;
	u64 tx_bytes;
	u64 tx_pkts;
	u64 tx_ring_full;
	u64 tx_err_no_buf;
	u64 tx_memcpy;
	u64 tx_async;
};
struct ntb_transport_mw {
	size_t size;
	void *virt_addr;
	dma_addr_t dma_addr;
};

struct ntb_transport_client_dev {
	struct list_head entry;
	struct device dev;
};

struct ntb_transport {
	struct list_head entry;
	struct list_head client_devs;

	struct ntb_device *ndev;
	struct ntb_transport_mw *mw;
	struct ntb_transport_qp *qps;
	unsigned int max_qps;
	unsigned long qp_bitmap;
	bool transport_link;
	struct delayed_work link_work;
	struct work_struct link_cleanup;
};

enum {
	DESC_DONE_FLAG = 1 << 0,
	LINK_DOWN_FLAG = 1 << 1,
};

struct ntb_payload_header {
	unsigned int ver;
	unsigned int len;
	unsigned int flags;
};

enum {
	VERSION = 0,
	QP_LINKS,
	NUM_QPS,
	NUM_MWS,
	MW0_SZ_HIGH,
	MW0_SZ_LOW,
	MW1_SZ_HIGH,
	MW1_SZ_LOW,
	MAX_SPAD,
};

#define QP_TO_MW(ndev, qp)	((qp) % ntb_max_mw(ndev))
#define NTB_QP_DEF_NUM_ENTRIES	100
#define NTB_LINK_DOWN_TIMEOUT	10
static int ntb_match_bus(struct device *dev, struct device_driver *drv)
{
	return !strncmp(dev_name(dev), drv->name, strlen(drv->name));
}

static int ntb_client_probe(struct device *dev)
{
	const struct ntb_client *drv = container_of(dev->driver,
						    struct ntb_client, driver);
	struct pci_dev *pdev = container_of(dev->parent, struct pci_dev, dev);
	int rc = -EINVAL;

	if (drv && drv->probe)
		rc = drv->probe(pdev);

	return rc;
}

static int ntb_client_remove(struct device *dev)
{
	const struct ntb_client *drv = container_of(dev->driver,
						    struct ntb_client, driver);
	struct pci_dev *pdev = container_of(dev->parent, struct pci_dev, dev);

	if (drv && drv->remove)
		drv->remove(pdev);

	return 0;
}

static struct bus_type ntb_bus_type = {
	.name = "ntb",
	.match = ntb_match_bus,
	.probe = ntb_client_probe,
	.remove = ntb_client_remove,
};

static LIST_HEAD(ntb_transport_list);
static int ntb_bus_init(struct ntb_transport *nt)
{
	if (list_empty(&ntb_transport_list)) {
		int rc = bus_register(&ntb_bus_type);
		if (rc)
			return rc;
	}

	list_add(&nt->entry, &ntb_transport_list);

	return 0;
}

static void ntb_bus_remove(struct ntb_transport *nt)
{
	struct ntb_transport_client_dev *client_dev, *cd;

	list_for_each_entry_safe(client_dev, cd, &nt->client_devs, entry) {
		dev_err(client_dev->dev.parent, "%s still attached to bus, removing\n",
			dev_name(&client_dev->dev));
		list_del(&client_dev->entry);
		device_unregister(&client_dev->dev);
	}

	list_del(&nt->entry);

	if (list_empty(&ntb_transport_list))
		bus_unregister(&ntb_bus_type);
}

static void ntb_client_release(struct device *dev)
{
	struct ntb_transport_client_dev *client_dev;
	client_dev = container_of(dev, struct ntb_transport_client_dev, dev);

	kfree(client_dev);
}
/**
 * ntb_unregister_client_dev - Unregister NTB client device
 * @device_name: Name of NTB client device
 *
 * Unregister an NTB client device with the NTB transport layer
 */
void ntb_unregister_client_dev(char *device_name)
{
	struct ntb_transport_client_dev *client, *cd;
	struct ntb_transport *nt;

	list_for_each_entry(nt, &ntb_transport_list, entry)
		list_for_each_entry_safe(client, cd, &nt->client_devs, entry)
			if (!strncmp(dev_name(&client->dev), device_name,
				     strlen(device_name))) {
				list_del(&client->entry);
				device_unregister(&client->dev);
			}
}
EXPORT_SYMBOL_GPL(ntb_unregister_client_dev);

/**
 * ntb_register_client_dev - Register NTB client device
 * @device_name: Name of NTB client device
 *
 * Register an NTB client device with the NTB transport layer
 */
int ntb_register_client_dev(char *device_name)
{
	struct ntb_transport_client_dev *client_dev;
	struct ntb_transport *nt;
	int rc, i = 0;

	if (list_empty(&ntb_transport_list))
		return -ENODEV;

	list_for_each_entry(nt, &ntb_transport_list, entry) {
		struct device *dev;

		client_dev = kzalloc(sizeof(struct ntb_transport_client_dev),
				     GFP_KERNEL);
		if (!client_dev) {
			rc = -ENOMEM;
			goto err;
		}

		dev = &client_dev->dev;

		/* setup and register client devices */
		dev_set_name(dev, "%s%d", device_name, i);
		dev->bus = &ntb_bus_type;
		dev->release = ntb_client_release;
		dev->parent = &ntb_query_pdev(nt->ndev)->dev;

		rc = device_register(dev);
		if (rc) {
			kfree(client_dev);
			goto err;
		}

		list_add_tail(&client_dev->entry, &nt->client_devs);
		i++;
	}

	return 0;

err:
	ntb_unregister_client_dev(device_name);

	return rc;
}
EXPORT_SYMBOL_GPL(ntb_register_client_dev);
/**
 * ntb_register_client - Register NTB client driver
 * @drv: NTB client driver to be registered
 *
 * Register an NTB client driver with the NTB transport layer
 *
 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
 */
int ntb_register_client(struct ntb_client *drv)
{
	drv->driver.bus = &ntb_bus_type;

	if (list_empty(&ntb_transport_list))
		return -ENODEV;

	return driver_register(&drv->driver);
}
EXPORT_SYMBOL_GPL(ntb_register_client);

/**
 * ntb_unregister_client - Unregister NTB client driver
 * @drv: NTB client driver to be unregistered
 *
 * Unregister an NTB client driver with the NTB transport layer
 */
void ntb_unregister_client(struct ntb_client *drv)
{
	driver_unregister(&drv->driver);
}
EXPORT_SYMBOL_GPL(ntb_unregister_client);
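/*
 * Illustrative usage sketch, not part of the original driver.  It mirrors
 * the way the in-tree ntb_netdev client pairs ntb_register_client_dev()
 * with ntb_register_client(); the "my_*" names are assumptions:
 *
 *	static struct ntb_client my_client = {
 *		.driver.name = KBUILD_MODNAME,
 *		.driver.owner = THIS_MODULE,
 *		.probe = my_probe,
 *		.remove = my_remove,
 *	};
 *
 *	static int __init my_init(void)
 *	{
 *		int rc;
 *
 *		rc = ntb_register_client_dev(KBUILD_MODNAME);
 *		if (rc)
 *			return rc;
 *
 *		rc = ntb_register_client(&my_client);
 *		if (rc)
 *			ntb_unregister_client_dev(KBUILD_MODNAME);
 *
 *		return rc;
 *	}
 */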
static ssize_t debugfs_read(struct file *filp, char __user *ubuf, size_t count,
			    loff_t *offp)
{
	struct ntb_transport_qp *qp;
	char *buf;
	ssize_t ret, out_offset, out_count;

	out_count = 600;

	buf = kmalloc(out_count, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	qp = filp->private_data;
	out_offset = 0;
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "NTB QP stats\n");
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_bytes - \t%llu\n", qp->rx_bytes);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_pkts - \t%llu\n", qp->rx_pkts);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_memcpy - \t%llu\n", qp->rx_memcpy);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_async - \t%llu\n", qp->rx_async);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_ring_empty - %llu\n", qp->rx_ring_empty);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_err_no_buf - %llu\n", qp->rx_err_no_buf);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_err_oflow - \t%llu\n", qp->rx_err_oflow);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_err_ver - \t%llu\n", qp->rx_err_ver);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_buff - \t%p\n", qp->rx_buff);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_index - \t%u\n", qp->rx_index);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_max_entry - \t%u\n", qp->rx_max_entry);

	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "tx_bytes - \t%llu\n", qp->tx_bytes);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "tx_pkts - \t%llu\n", qp->tx_pkts);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "tx_memcpy - \t%llu\n", qp->tx_memcpy);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "tx_async - \t%llu\n", qp->tx_async);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "tx_ring_full - \t%llu\n", qp->tx_ring_full);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "tx_err_no_buf - %llu\n", qp->tx_err_no_buf);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "tx_mw - \t%p\n", qp->tx_mw);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "tx_index - \t%u\n", qp->tx_index);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "tx_max_entry - \t%u\n", qp->tx_max_entry);

	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "\nQP Link %s\n", (qp->qp_link == NTB_LINK_UP) ?
			       "Up" : "Down");
	if (out_offset > out_count)
		out_offset = out_count;

	ret = simple_read_from_buffer(ubuf, count, offp, buf, out_offset);
	kfree(buf);
	return ret;
}

static const struct file_operations ntb_qp_debugfs_stats = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.read = debugfs_read,
};
static void ntb_list_add(spinlock_t *lock, struct list_head *entry,
			 struct list_head *list)
{
	unsigned long flags;

	spin_lock_irqsave(lock, flags);
	list_add_tail(entry, list);
	spin_unlock_irqrestore(lock, flags);
}

static struct ntb_queue_entry *ntb_list_rm(spinlock_t *lock,
					   struct list_head *list)
{
	struct ntb_queue_entry *entry;
	unsigned long flags;

	spin_lock_irqsave(lock, flags);
	if (list_empty(list)) {
		entry = NULL;
		goto out;
	}
	entry = list_first_entry(list, struct ntb_queue_entry, entry);
	list_del(&entry->entry);
out:
	spin_unlock_irqrestore(lock, flags);

	return entry;
}
static void ntb_transport_setup_qp_mw(struct ntb_transport *nt,
				      unsigned int qp_num)
{
	struct ntb_transport_qp *qp = &nt->qps[qp_num];
	unsigned int rx_size, num_qps_mw;
	u8 mw_num, mw_max;
	unsigned int i;

	mw_max = ntb_max_mw(nt->ndev);
	mw_num = QP_TO_MW(nt->ndev, qp_num);

	WARN_ON(nt->mw[mw_num].virt_addr == NULL);

	if (nt->max_qps % mw_max && mw_num < nt->max_qps % mw_max)
		num_qps_mw = nt->max_qps / mw_max + 1;
	else
		num_qps_mw = nt->max_qps / mw_max;

	rx_size = (unsigned int) nt->mw[mw_num].size / num_qps_mw;
	qp->rx_buff = nt->mw[mw_num].virt_addr + qp_num / mw_max * rx_size;
	rx_size -= sizeof(struct ntb_rx_info);

	qp->remote_rx_info = qp->rx_buff + rx_size;

	/* Due to housekeeping, there must be at least 2 buffs */
	qp->rx_max_frame = min(transport_mtu, rx_size / 2);
	qp->rx_max_entry = rx_size / qp->rx_max_frame;
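
	/* Worked example (assumed numbers, not from the code): a 1 MB memory
	 * window shared by two qps gives rx_size = 512 KB per qp; after
	 * subtracting sizeof(struct ntb_rx_info), the default transport_mtu
	 * of 0x401E yields rx_max_frame = 0x401E (16414 bytes) and
	 * rx_max_entry = 524284 / 16414 = 31 receive frames.
	 */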
	qp->remote_rx_info->entry = qp->rx_max_entry - 1;

	/* setup the hdr offsets with 0's */
	for (i = 0; i < qp->rx_max_entry; i++) {
		void *offset = qp->rx_buff + qp->rx_max_frame * (i + 1) -
			       sizeof(struct ntb_payload_header);
		memset(offset, 0, sizeof(struct ntb_payload_header));
	}

	qp->rx_pkts = 0;
	qp->rx_index = 0;
	qp->tx_pkts = 0;
	qp->tx_index = 0;
}
static void ntb_free_mw(struct ntb_transport *nt, int num_mw)
{
	struct ntb_transport_mw *mw = &nt->mw[num_mw];
	struct pci_dev *pdev = ntb_query_pdev(nt->ndev);

	if (!mw->virt_addr)
		return;

	dma_free_coherent(&pdev->dev, mw->size, mw->virt_addr, mw->dma_addr);
	mw->virt_addr = NULL;
}

static int ntb_set_mw(struct ntb_transport *nt, int num_mw, unsigned int size)
{
	struct ntb_transport_mw *mw = &nt->mw[num_mw];
	struct pci_dev *pdev = ntb_query_pdev(nt->ndev);

	/* No need to re-setup */
	if (mw->size == ALIGN(size, 4096))
		return 0;

	if (mw->size != 0)
		ntb_free_mw(nt, num_mw);

	/* Alloc memory for receiving data.  Must be 4k aligned */
	mw->size = ALIGN(size, 4096);

	mw->virt_addr = dma_alloc_coherent(&pdev->dev, mw->size, &mw->dma_addr,
					   GFP_KERNEL);
	if (!mw->virt_addr) {
		dev_err(&pdev->dev, "Unable to allocate MW buffer of size %d\n",
			(int) mw->size);
		mw->size = 0;
		return -ENOMEM;
	}

	/* Notify HW the memory location of the receive buffer */
	ntb_set_mw_addr(nt->ndev, num_mw, mw->dma_addr);

	return 0;
}
static void ntb_qp_link_cleanup(struct ntb_transport_qp *qp)
{
	struct ntb_transport *nt = qp->transport;
	struct pci_dev *pdev = ntb_query_pdev(nt->ndev);

	if (qp->qp_link == NTB_LINK_DOWN) {
		cancel_delayed_work_sync(&qp->link_work);
		return;
	}

	if (qp->event_handler)
		qp->event_handler(qp->cb_data, NTB_LINK_DOWN);

	dev_info(&pdev->dev, "qp %d: Link Down\n", qp->qp_num);
	qp->qp_link = NTB_LINK_DOWN;
}

static void ntb_qp_link_cleanup_work(struct work_struct *work)
{
	struct ntb_transport_qp *qp = container_of(work,
						   struct ntb_transport_qp,
						   link_cleanup);
	struct ntb_transport *nt = qp->transport;

	ntb_qp_link_cleanup(qp);

	if (nt->transport_link == NTB_LINK_UP)
		schedule_delayed_work(&qp->link_work,
				      msecs_to_jiffies(NTB_LINK_DOWN_TIMEOUT));
}

static void ntb_qp_link_down(struct ntb_transport_qp *qp)
{
	schedule_work(&qp->link_cleanup);
}
static void ntb_transport_link_cleanup(struct ntb_transport *nt)
{
	int i;

	/* Pass along the info to any clients */
	for (i = 0; i < nt->max_qps; i++)
		if (!test_bit(i, &nt->qp_bitmap))
			ntb_qp_link_cleanup(&nt->qps[i]);

	if (nt->transport_link == NTB_LINK_DOWN)
		cancel_delayed_work_sync(&nt->link_work);
	else
		nt->transport_link = NTB_LINK_DOWN;

	/* The scratchpad registers keep the values if the remote side
	 * goes down, blast them now to give them a sane value the next
	 * time they are accessed
	 */
	for (i = 0; i < MAX_SPAD; i++)
		ntb_write_local_spad(nt->ndev, i, 0);
}

static void ntb_transport_link_cleanup_work(struct work_struct *work)
{
	struct ntb_transport *nt = container_of(work, struct ntb_transport,
						link_cleanup);

	ntb_transport_link_cleanup(nt);
}
static void ntb_transport_event_callback(void *data, enum ntb_hw_event event)
{
	struct ntb_transport *nt = data;

	switch (event) {
	case NTB_EVENT_HW_LINK_UP:
		schedule_delayed_work(&nt->link_work, 0);
		break;
	case NTB_EVENT_HW_LINK_DOWN:
		schedule_work(&nt->link_cleanup);
		break;
	default:
		BUG();
	}
}
static void ntb_transport_link_work(struct work_struct *work)
{
	struct ntb_transport *nt = container_of(work, struct ntb_transport,
						link_work.work);
	struct ntb_device *ndev = nt->ndev;
	struct pci_dev *pdev = ntb_query_pdev(ndev);
	u32 val;
	int rc, i;

	/* send the local info, in the opposite order of the way we read it */
	for (i = 0; i < ntb_max_mw(ndev); i++) {
		rc = ntb_write_remote_spad(ndev, MW0_SZ_HIGH + (i * 2),
					   ntb_get_mw_size(ndev, i) >> 32);
		if (rc) {
			dev_err(&pdev->dev, "Error writing %u to remote spad %d\n",
				(u32)(ntb_get_mw_size(ndev, i) >> 32),
				MW0_SZ_HIGH + (i * 2));
			goto out;
		}

		rc = ntb_write_remote_spad(ndev, MW0_SZ_LOW + (i * 2),
					   (u32) ntb_get_mw_size(ndev, i));
		if (rc) {
			dev_err(&pdev->dev, "Error writing %u to remote spad %d\n",
				(u32) ntb_get_mw_size(ndev, i),
				MW0_SZ_LOW + (i * 2));
			goto out;
		}
	}

	rc = ntb_write_remote_spad(ndev, NUM_MWS, ntb_max_mw(ndev));
	if (rc) {
		dev_err(&pdev->dev, "Error writing %x to remote spad %d\n",
			ntb_max_mw(ndev), NUM_MWS);
		goto out;
	}

	rc = ntb_write_remote_spad(ndev, NUM_QPS, nt->max_qps);
	if (rc) {
		dev_err(&pdev->dev, "Error writing %x to remote spad %d\n",
			nt->max_qps, NUM_QPS);
		goto out;
	}

	rc = ntb_write_remote_spad(ndev, VERSION, NTB_TRANSPORT_VERSION);
	if (rc) {
		dev_err(&pdev->dev, "Error writing %x to remote spad %d\n",
			NTB_TRANSPORT_VERSION, VERSION);
		goto out;
	}

	/* Query the remote side for its info */
	rc = ntb_read_remote_spad(ndev, VERSION, &val);
	if (rc) {
		dev_err(&pdev->dev, "Error reading remote spad %d\n", VERSION);
		goto out;
	}

	if (val != NTB_TRANSPORT_VERSION)
		goto out;
	dev_dbg(&pdev->dev, "Remote version = %d\n", val);

	rc = ntb_read_remote_spad(ndev, NUM_QPS, &val);
	if (rc) {
		dev_err(&pdev->dev, "Error reading remote spad %d\n", NUM_QPS);
		goto out;
	}

	if (val != nt->max_qps)
		goto out;
	dev_dbg(&pdev->dev, "Remote max number of qps = %d\n", val);

	rc = ntb_read_remote_spad(ndev, NUM_MWS, &val);
	if (rc) {
		dev_err(&pdev->dev, "Error reading remote spad %d\n", NUM_MWS);
		goto out;
	}

	if (val != ntb_max_mw(ndev))
		goto out;
	dev_dbg(&pdev->dev, "Remote number of mws = %d\n", val);

	for (i = 0; i < ntb_max_mw(ndev); i++) {
		u64 val64;

		rc = ntb_read_remote_spad(ndev, MW0_SZ_HIGH + (i * 2), &val);
		if (rc) {
			dev_err(&pdev->dev, "Error reading remote spad %d\n",
				MW0_SZ_HIGH + (i * 2));
			goto out1;
		}

		val64 = (u64) val << 32;

		rc = ntb_read_remote_spad(ndev, MW0_SZ_LOW + (i * 2), &val);
		if (rc) {
			dev_err(&pdev->dev, "Error reading remote spad %d\n",
				MW0_SZ_LOW + (i * 2));
			goto out1;
		}

		val64 |= val;

		dev_dbg(&pdev->dev, "Remote MW%d size = %llu\n", i, val64);

		rc = ntb_set_mw(nt, i, val64);
		if (rc)
			goto out1;
	}

	nt->transport_link = NTB_LINK_UP;

	for (i = 0; i < nt->max_qps; i++) {
		struct ntb_transport_qp *qp = &nt->qps[i];

		ntb_transport_setup_qp_mw(nt, i);

		if (qp->client_ready == NTB_LINK_UP)
			schedule_delayed_work(&qp->link_work, 0);
	}

	return;

out1:
	for (i = 0; i < ntb_max_mw(ndev); i++)
		ntb_free_mw(nt, i);
out:
	if (ntb_hw_link_status(ndev))
		schedule_delayed_work(&nt->link_work,
				      msecs_to_jiffies(NTB_LINK_DOWN_TIMEOUT));
}
static void ntb_qp_link_work(struct work_struct *work)
{
	struct ntb_transport_qp *qp = container_of(work,
						   struct ntb_transport_qp,
						   link_work.work);
	struct pci_dev *pdev = ntb_query_pdev(qp->ndev);
	struct ntb_transport *nt = qp->transport;
	int rc, val;

	WARN_ON(nt->transport_link != NTB_LINK_UP);

	rc = ntb_read_local_spad(nt->ndev, QP_LINKS, &val);
	if (rc) {
		dev_err(&pdev->dev, "Error reading spad %d\n", QP_LINKS);
		return;
	}

	rc = ntb_write_remote_spad(nt->ndev, QP_LINKS, val | 1 << qp->qp_num);
	if (rc)
		dev_err(&pdev->dev, "Error writing %x to remote spad %d\n",
			val | 1 << qp->qp_num, QP_LINKS);

	/* query remote spad for qp ready bits */
	rc = ntb_read_remote_spad(nt->ndev, QP_LINKS, &val);
	if (rc)
		dev_err(&pdev->dev, "Error reading remote spad %d\n", QP_LINKS);

	dev_dbg(&pdev->dev, "Remote QP link status = %x\n", val);

	/* See if the remote side is up */
	if (1 << qp->qp_num & val) {
		qp->qp_link = NTB_LINK_UP;

		dev_info(&pdev->dev, "qp %d: Link Up\n", qp->qp_num);
		if (qp->event_handler)
			qp->event_handler(qp->cb_data, NTB_LINK_UP);
	} else if (nt->transport_link == NTB_LINK_UP)
		schedule_delayed_work(&qp->link_work,
				      msecs_to_jiffies(NTB_LINK_DOWN_TIMEOUT));
}
static int ntb_transport_init_queue(struct ntb_transport *nt,
				    unsigned int qp_num)
{
	struct ntb_transport_qp *qp;
	unsigned int num_qps_mw, tx_size;
	u8 mw_num, mw_max;
	u64 qp_offset;

	mw_max = ntb_max_mw(nt->ndev);
	mw_num = QP_TO_MW(nt->ndev, qp_num);

	qp = &nt->qps[qp_num];
	qp->qp_num = qp_num;
	qp->transport = nt;
	qp->ndev = nt->ndev;
	qp->qp_link = NTB_LINK_DOWN;
	qp->client_ready = NTB_LINK_DOWN;
	qp->event_handler = NULL;

	if (nt->max_qps % mw_max && mw_num < nt->max_qps % mw_max)
		num_qps_mw = nt->max_qps / mw_max + 1;
	else
		num_qps_mw = nt->max_qps / mw_max;

	tx_size = (unsigned int) ntb_get_mw_size(qp->ndev, mw_num) / num_qps_mw;
	qp_offset = qp_num / mw_max * tx_size;
	qp->tx_mw = ntb_get_mw_vbase(nt->ndev, mw_num) + qp_offset;
	if (!qp->tx_mw)
		return -EINVAL;

	qp->tx_mw_phys = ntb_get_mw_base(qp->ndev, mw_num) + qp_offset;
	if (!qp->tx_mw_phys)
		return -EINVAL;

	tx_size -= sizeof(struct ntb_rx_info);
	qp->rx_info = qp->tx_mw + tx_size;

	/* Due to housekeeping, there must be at least 2 buffs */
	qp->tx_max_frame = min(transport_mtu, tx_size / 2);
	qp->tx_max_entry = tx_size / qp->tx_max_frame;

	if (ntb_query_debugfs(nt->ndev)) {
		char debugfs_name[8];

		snprintf(debugfs_name, sizeof(debugfs_name), "qp%d", qp_num);
		qp->debugfs_dir = debugfs_create_dir(debugfs_name,
						     ntb_query_debugfs(nt->ndev));

		qp->debugfs_stats = debugfs_create_file("stats", S_IRUSR,
							qp->debugfs_dir, qp,
							&ntb_qp_debugfs_stats);
	}

	INIT_DELAYED_WORK(&qp->link_work, ntb_qp_link_work);
	INIT_WORK(&qp->link_cleanup, ntb_qp_link_cleanup_work);

	spin_lock_init(&qp->ntb_rx_pend_q_lock);
	spin_lock_init(&qp->ntb_rx_free_q_lock);
	spin_lock_init(&qp->ntb_tx_free_q_lock);

	INIT_LIST_HEAD(&qp->rx_pend_q);
	INIT_LIST_HEAD(&qp->rx_free_q);
	INIT_LIST_HEAD(&qp->tx_free_q);

	return 0;
}
int ntb_transport_init(struct pci_dev *pdev)
{
	struct ntb_transport *nt;
	int rc, i;

	nt = kzalloc(sizeof(struct ntb_transport), GFP_KERNEL);
	if (!nt)
		return -ENOMEM;

	nt->ndev = ntb_register_transport(pdev, nt);
	if (!nt->ndev) {
		rc = -EIO;
		goto err;
	}

	nt->mw = kcalloc(ntb_max_mw(nt->ndev), sizeof(struct ntb_transport_mw),
			 GFP_KERNEL);
	if (!nt->mw) {
		rc = -ENOMEM;
		goto err1;
	}

	if (max_num_clients)
		nt->max_qps = min(ntb_max_cbs(nt->ndev), max_num_clients);
	else
		nt->max_qps = min(ntb_max_cbs(nt->ndev), ntb_max_mw(nt->ndev));

	nt->qps = kcalloc(nt->max_qps, sizeof(struct ntb_transport_qp),
			  GFP_KERNEL);
	if (!nt->qps) {
		rc = -ENOMEM;
		goto err2;
	}

	nt->qp_bitmap = ((u64) 1 << nt->max_qps) - 1;

	for (i = 0; i < nt->max_qps; i++) {
		rc = ntb_transport_init_queue(nt, i);
		if (rc)
			goto err3;
	}

	INIT_DELAYED_WORK(&nt->link_work, ntb_transport_link_work);
	INIT_WORK(&nt->link_cleanup, ntb_transport_link_cleanup_work);

	rc = ntb_register_event_callback(nt->ndev,
					 ntb_transport_event_callback);
	if (rc)
		goto err3;

	INIT_LIST_HEAD(&nt->client_devs);
	rc = ntb_bus_init(nt);
	if (rc)
		goto err4;

	if (ntb_hw_link_status(nt->ndev))
		schedule_delayed_work(&nt->link_work, 0);

	return 0;

err4:
	ntb_unregister_event_callback(nt->ndev);
err3:
	kfree(nt->qps);
err2:
	kfree(nt->mw);
err1:
	ntb_unregister_transport(nt->ndev);
err:
	kfree(nt);
	return rc;
}
void ntb_transport_free(void *transport)
{
	struct ntb_transport *nt = transport;
	struct ntb_device *ndev = nt->ndev;
	int i;

	ntb_transport_link_cleanup(nt);

	/* verify that all the qp's are freed */
	for (i = 0; i < nt->max_qps; i++) {
		if (!test_bit(i, &nt->qp_bitmap))
			ntb_transport_free_queue(&nt->qps[i]);
		debugfs_remove_recursive(nt->qps[i].debugfs_dir);
	}

	ntb_bus_remove(nt);

	cancel_delayed_work_sync(&nt->link_work);

	ntb_unregister_event_callback(ndev);

	for (i = 0; i < ntb_max_mw(ndev); i++)
		ntb_free_mw(nt, i);

	kfree(nt->qps);
	kfree(nt->mw);
	ntb_unregister_transport(ndev);
	kfree(nt);
}
static void ntb_rx_copy_callback(void *data)
{
	struct ntb_queue_entry *entry = data;
	struct ntb_transport_qp *qp = entry->qp;
	void *cb_data = entry->cb_data;
	unsigned int len = entry->len;
	struct ntb_payload_header *hdr = entry->rx_hdr;

	/* Ensure that the data is fully copied out before clearing the flag */
	wmb();
	hdr->flags = 0;

	iowrite32(entry->index, &qp->rx_info->entry);

	ntb_list_add(&qp->ntb_rx_free_q_lock, &entry->entry, &qp->rx_free_q);

	if (qp->rx_handler && qp->client_ready == NTB_LINK_UP)
		qp->rx_handler(qp, qp->cb_data, cb_data, len);
}

static void ntb_memcpy_rx(struct ntb_queue_entry *entry, void *offset)
{
	void *buf = entry->buf;
	size_t len = entry->len;

	memcpy(buf, offset, len);

	ntb_rx_copy_callback(entry);
}
static void ntb_async_rx(struct ntb_queue_entry *entry, void *offset,
			 size_t len)
{
	struct dma_async_tx_descriptor *txd;
	struct ntb_transport_qp *qp = entry->qp;
	struct dma_chan *chan = qp->dma_chan;
	struct dma_device *device;
	size_t pay_off, buff_off;
	struct dmaengine_unmap_data *unmap;
	dma_cookie_t cookie;
	void *buf = entry->buf;

	entry->len = len;

	if (!chan)
		goto err;

	if (len < copy_bytes)
		goto err_wait;

	device = chan->device;
	pay_off = (size_t) offset & ~PAGE_MASK;
	buff_off = (size_t) buf & ~PAGE_MASK;

	if (!is_dma_copy_aligned(device, pay_off, buff_off, len))
		goto err_wait;

	unmap = dmaengine_get_unmap_data(device->dev, 2, GFP_NOWAIT);
	if (!unmap)
		goto err_wait;

	unmap->len = len;
	unmap->addr[0] = dma_map_page(device->dev, virt_to_page(offset),
				      pay_off, len, DMA_TO_DEVICE);
	if (dma_mapping_error(device->dev, unmap->addr[0]))
		goto err_get_unmap;

	unmap->to_cnt = 1;

	unmap->addr[1] = dma_map_page(device->dev, virt_to_page(buf),
				      buff_off, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(device->dev, unmap->addr[1]))
		goto err_get_unmap;

	unmap->from_cnt = 1;

	txd = device->device_prep_dma_memcpy(chan, unmap->addr[1],
					     unmap->addr[0], len,
					     DMA_PREP_INTERRUPT);
	if (!txd)
		goto err_get_unmap;

	txd->callback = ntb_rx_copy_callback;
	txd->callback_param = entry;
	dma_set_unmap(txd, unmap);

	cookie = dmaengine_submit(txd);
	if (dma_submit_error(cookie))
		goto err_set_unmap;

	dmaengine_unmap_put(unmap);

	qp->last_cookie = cookie;

	qp->rx_async++;

	return;

err_set_unmap:
	dmaengine_unmap_put(unmap);
err_get_unmap:
	dmaengine_unmap_put(unmap);
err_wait:
	/* If the callbacks come out of order, the writing of the index to the
	 * last completed will be out of order.  This may result in the
	 * receive stalling forever.
	 */
	dma_sync_wait(chan, qp->last_cookie);
err:
	ntb_memcpy_rx(entry, offset);
	qp->rx_memcpy++;
}
static int ntb_process_rxc(struct ntb_transport_qp *qp)
{
	struct ntb_payload_header *hdr;
	struct ntb_queue_entry *entry;
	void *offset;

	offset = qp->rx_buff + qp->rx_max_frame * qp->rx_index;
	hdr = offset + qp->rx_max_frame - sizeof(struct ntb_payload_header);

	entry = ntb_list_rm(&qp->ntb_rx_pend_q_lock, &qp->rx_pend_q);
	if (!entry) {
		dev_dbg(&ntb_query_pdev(qp->ndev)->dev,
			"no buffer - HDR ver %u, len %d, flags %x\n",
			hdr->ver, hdr->len, hdr->flags);
		qp->rx_err_no_buf++;
		return -ENOMEM;
	}

	if (!(hdr->flags & DESC_DONE_FLAG)) {
		ntb_list_add(&qp->ntb_rx_pend_q_lock, &entry->entry,
			     &qp->rx_pend_q);
		qp->rx_ring_empty++;
		return -EAGAIN;
	}

	if (hdr->ver != (u32) qp->rx_pkts) {
		dev_dbg(&ntb_query_pdev(qp->ndev)->dev,
			"qp %d: version mismatch, expected %llu - got %u\n",
			qp->qp_num, qp->rx_pkts, hdr->ver);
		ntb_list_add(&qp->ntb_rx_pend_q_lock, &entry->entry,
			     &qp->rx_pend_q);
		qp->rx_err_ver++;
		return -EIO;
	}

	if (hdr->flags & LINK_DOWN_FLAG) {
		ntb_qp_link_down(qp);

		goto err;
	}

	dev_dbg(&ntb_query_pdev(qp->ndev)->dev,
		"rx offset %u, ver %u - %d payload received, buf size %d\n",
		qp->rx_index, hdr->ver, hdr->len, entry->len);

	qp->rx_bytes += hdr->len;
	qp->rx_pkts++;

	if (hdr->len > entry->len) {
		qp->rx_err_oflow++;
		dev_dbg(&ntb_query_pdev(qp->ndev)->dev,
			"RX overflow! Wanted %d got %d\n",
			hdr->len, entry->len);

		goto err;
	}

	entry->index = qp->rx_index;
	entry->rx_hdr = hdr;

	ntb_async_rx(entry, offset, hdr->len);

out:
	qp->rx_index++;
	qp->rx_index %= qp->rx_max_entry;

	return 0;

err:
	ntb_list_add(&qp->ntb_rx_pend_q_lock, &entry->entry, &qp->rx_pend_q);
	/* Ensure that the data is fully copied out before clearing the flag */
	wmb();
	hdr->flags = 0;
	iowrite32(qp->rx_index, &qp->rx_info->entry);

	goto out;
}
static int ntb_transport_rxc_db(void *data, int db_num)
{
	struct ntb_transport_qp *qp = data;
	int rc, i;

	dev_dbg(&ntb_query_pdev(qp->ndev)->dev, "%s: doorbell %d received\n",
		__func__, db_num);

	/* Limit the number of packets processed in a single interrupt to
	 * provide fairness to others
	 */
	for (i = 0; i < qp->rx_max_entry; i++) {
		rc = ntb_process_rxc(qp);
		if (rc)
			break;
	}

	if (qp->dma_chan)
		dma_async_issue_pending(qp->dma_chan);

	return i;
}
static void ntb_tx_copy_callback(void *data)
{
	struct ntb_queue_entry *entry = data;
	struct ntb_transport_qp *qp = entry->qp;
	struct ntb_payload_header __iomem *hdr = entry->tx_hdr;

	/* Ensure that the data is fully copied out before setting the flags */
	wmb();
	iowrite32(entry->flags | DESC_DONE_FLAG, &hdr->flags);

	ntb_ring_doorbell(qp->ndev, qp->qp_num);

	/* The entry length can only be zero if the packet is intended to be a
	 * "link down" or similar.  Since no payload is being sent in these
	 * cases, there is nothing to add to the completion queue.
	 */
	if (entry->len > 0) {
		qp->tx_bytes += entry->len;

		if (qp->tx_handler)
			qp->tx_handler(qp, qp->cb_data, entry->cb_data,
				       entry->len);
	}

	ntb_list_add(&qp->ntb_tx_free_q_lock, &entry->entry, &qp->tx_free_q);
}

static void ntb_memcpy_tx(struct ntb_queue_entry *entry, void __iomem *offset)
{
	memcpy_toio(offset, entry->buf, entry->len);

	ntb_tx_copy_callback(entry);
}
static void ntb_async_tx(struct ntb_transport_qp *qp,
			 struct ntb_queue_entry *entry)
{
	struct ntb_payload_header __iomem *hdr;
	struct dma_async_tx_descriptor *txd;
	struct dma_chan *chan = qp->dma_chan;
	struct dma_device *device;
	size_t dest_off, buff_off;
	struct dmaengine_unmap_data *unmap;
	dma_addr_t dest;
	dma_cookie_t cookie;
	void __iomem *offset;
	size_t len = entry->len;
	void *buf = entry->buf;

	offset = qp->tx_mw + qp->tx_max_frame * qp->tx_index;
	hdr = offset + qp->tx_max_frame - sizeof(struct ntb_payload_header);
	entry->tx_hdr = hdr;

	iowrite32(entry->len, &hdr->len);
	iowrite32((u32) qp->tx_pkts, &hdr->ver);

	if (!chan)
		goto err;

	if (len < copy_bytes)
		goto err;

	device = chan->device;
	dest = qp->tx_mw_phys + qp->tx_max_frame * qp->tx_index;
	buff_off = (size_t) buf & ~PAGE_MASK;
	dest_off = (size_t) dest & ~PAGE_MASK;

	if (!is_dma_copy_aligned(device, buff_off, dest_off, len))
		goto err;

	unmap = dmaengine_get_unmap_data(device->dev, 1, GFP_NOWAIT);
	if (!unmap)
		goto err;

	unmap->len = len;
	unmap->addr[0] = dma_map_page(device->dev, virt_to_page(buf),
				      buff_off, len, DMA_TO_DEVICE);
	if (dma_mapping_error(device->dev, unmap->addr[0]))
		goto err_get_unmap;

	unmap->to_cnt = 1;

	txd = device->device_prep_dma_memcpy(chan, dest, unmap->addr[0], len,
					     DMA_PREP_INTERRUPT);
	if (!txd)
		goto err_get_unmap;

	txd->callback = ntb_tx_copy_callback;
	txd->callback_param = entry;
	dma_set_unmap(txd, unmap);

	cookie = dmaengine_submit(txd);
	if (dma_submit_error(cookie))
		goto err_set_unmap;

	dmaengine_unmap_put(unmap);

	dma_async_issue_pending(chan);
	qp->tx_async++;

	return;
err_set_unmap:
	dmaengine_unmap_put(unmap);
err_get_unmap:
	dmaengine_unmap_put(unmap);
err:
	ntb_memcpy_tx(entry, offset);
	qp->tx_memcpy++;
}
static int ntb_process_tx(struct ntb_transport_qp *qp,
			  struct ntb_queue_entry *entry)
{
	dev_dbg(&ntb_query_pdev(qp->ndev)->dev, "%lld - tx %u, entry len %d flags %x buff %p\n",
		qp->tx_pkts, qp->tx_index, entry->len, entry->flags,
		entry->buf);

	if (qp->tx_index == qp->remote_rx_info->entry) {
		qp->tx_ring_full++;
		return -EAGAIN;
	}

	if (entry->len > qp->tx_max_frame - sizeof(struct ntb_payload_header)) {
		if (qp->tx_handler)
			qp->tx_handler(qp, qp->cb_data, NULL, -EIO);

		ntb_list_add(&qp->ntb_tx_free_q_lock, &entry->entry,
			     &qp->tx_free_q);
		return 0;
	}

	ntb_async_tx(qp, entry);

	qp->tx_index++;
	qp->tx_index %= qp->tx_max_entry;

	qp->tx_pkts++;

	return 0;
}
static void ntb_send_link_down(struct ntb_transport_qp *qp)
{
	struct pci_dev *pdev = ntb_query_pdev(qp->ndev);
	struct ntb_queue_entry *entry;
	int i, rc;

	if (qp->qp_link == NTB_LINK_DOWN)
		return;

	qp->qp_link = NTB_LINK_DOWN;
	dev_info(&pdev->dev, "qp %d: Link Down\n", qp->qp_num);

	for (i = 0; i < NTB_LINK_DOWN_TIMEOUT; i++) {
		entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q);
		if (entry)
			break;
		msleep(100);
	}

	if (!entry)
		return;

	entry->cb_data = NULL;
	entry->buf = NULL;
	entry->len = 0;
	entry->flags = LINK_DOWN_FLAG;

	rc = ntb_process_tx(qp, entry);
	if (rc)
		dev_err(&pdev->dev, "ntb: QP%d unable to send linkdown msg\n",
			qp->qp_num);
}
/**
 * ntb_transport_create_queue - Create a new NTB transport layer queue
 * @data: pointer for callback data
 * @pdev: PCI device on which to create the queue
 * @handlers: receive, transmit, and event callback functions
 *
 * Create a new NTB transport layer queue and provide the queue with a callback
 * routine for both transmit and receive.  The receive callback routine will be
 * used to pass up data when the transport has received it on the queue.  The
 * transmit callback routine will be called when the transport has completed the
 * transmission of the data on the queue and the data is ready to be freed.
 *
 * RETURNS: pointer to newly created ntb_queue, NULL on error.
 */
struct ntb_transport_qp *
ntb_transport_create_queue(void *data, struct pci_dev *pdev,
			   const struct ntb_queue_handlers *handlers)
{
	struct ntb_queue_entry *entry;
	struct ntb_transport_qp *qp;
	struct ntb_transport *nt;
	unsigned int free_queue;
	int rc, i;

	nt = ntb_find_transport(pdev);
	if (!nt)
		goto err;

	free_queue = ffs(nt->qp_bitmap);
	if (!free_queue)
		goto err;

	/* decrement free_queue to make it zero based */
	free_queue--;

	clear_bit(free_queue, &nt->qp_bitmap);

	qp = &nt->qps[free_queue];
	qp->cb_data = data;
	qp->rx_handler = handlers->rx_handler;
	qp->tx_handler = handlers->tx_handler;
	qp->event_handler = handlers->event_handler;

	dmaengine_get();
	qp->dma_chan = dma_find_channel(DMA_MEMCPY);
	if (!qp->dma_chan) {
		dmaengine_put();
		dev_info(&pdev->dev, "Unable to allocate DMA channel, using CPU instead\n");
	}

	for (i = 0; i < NTB_QP_DEF_NUM_ENTRIES; i++) {
		entry = kzalloc(sizeof(struct ntb_queue_entry), GFP_ATOMIC);
		if (!entry)
			goto err1;

		entry->qp = qp;
		ntb_list_add(&qp->ntb_rx_free_q_lock, &entry->entry,
			     &qp->rx_free_q);
	}

	for (i = 0; i < NTB_QP_DEF_NUM_ENTRIES; i++) {
		entry = kzalloc(sizeof(struct ntb_queue_entry), GFP_ATOMIC);
		if (!entry)
			goto err2;

		entry->qp = qp;
		ntb_list_add(&qp->ntb_tx_free_q_lock, &entry->entry,
			     &qp->tx_free_q);
	}

	rc = ntb_register_db_callback(qp->ndev, free_queue, qp,
				      ntb_transport_rxc_db);
	if (rc)
		goto err2;

	dev_info(&pdev->dev, "NTB Transport QP %d created\n", qp->qp_num);

	return qp;

err2:
	while ((entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q)))
		kfree(entry);
err1:
	while ((entry = ntb_list_rm(&qp->ntb_rx_free_q_lock, &qp->rx_free_q)))
		kfree(entry);
	if (qp->dma_chan)
		dmaengine_put();
	set_bit(free_queue, &nt->qp_bitmap);
err:
	return NULL;
}
EXPORT_SYMBOL_GPL(ntb_transport_create_queue);
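/*
 * Illustrative usage sketch, not part of the original driver (the handler
 * names are hypothetical).  A client wires its callbacks into a
 * ntb_queue_handlers struct and creates one queue per device in its probe:
 *
 *	static const struct ntb_queue_handlers my_handlers = {
 *		.rx_handler = my_rx_handler,
 *		.tx_handler = my_tx_handler,
 *		.event_handler = my_event_handler,
 *	};
 *
 *	qp = ntb_transport_create_queue(priv, pdev, &my_handlers);
 *	if (!qp)
 *		return -EIO;
 */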
/**
 * ntb_transport_free_queue - Frees NTB transport queue
 * @qp: NTB queue to be freed
 *
 * Frees NTB transport queue
 */
void ntb_transport_free_queue(struct ntb_transport_qp *qp)
{
	struct pci_dev *pdev;
	struct ntb_queue_entry *entry;

	if (!qp)
		return;

	pdev = ntb_query_pdev(qp->ndev);

	if (qp->dma_chan) {
		struct dma_chan *chan = qp->dma_chan;
		/* Putting the dma_chan to NULL will force any new traffic to be
		 * processed by the CPU instead of the DMA engine
		 */
		qp->dma_chan = NULL;

		/* Try to be nice and wait for any queued DMA engine
		 * transactions to process before smashing it with a rock
		 */
		dma_sync_wait(chan, qp->last_cookie);
		dmaengine_terminate_all(chan);
		dmaengine_put();
	}

	ntb_unregister_db_callback(qp->ndev, qp->qp_num);

	cancel_delayed_work_sync(&qp->link_work);

	while ((entry = ntb_list_rm(&qp->ntb_rx_free_q_lock, &qp->rx_free_q)))
		kfree(entry);

	while ((entry = ntb_list_rm(&qp->ntb_rx_pend_q_lock, &qp->rx_pend_q))) {
		dev_warn(&pdev->dev, "Freeing item from a non-empty queue\n");
		kfree(entry);
	}

	while ((entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q)))
		kfree(entry);

	set_bit(qp->qp_num, &qp->transport->qp_bitmap);

	dev_info(&pdev->dev, "NTB Transport QP %d freed\n", qp->qp_num);
}
EXPORT_SYMBOL_GPL(ntb_transport_free_queue);
/**
 * ntb_transport_rx_remove - Dequeues enqueued rx packet
 * @qp: NTB queue from which the buffer is dequeued
 * @len: pointer to variable to write enqueued buffer's length
 *
 * Dequeues unused buffers from receive queue.  Should only be used during
 * shutdown of the qp.
 *
 * RETURNS: NULL on error, or a pointer to the dequeued buffer on success.
 */
void *ntb_transport_rx_remove(struct ntb_transport_qp *qp, unsigned int *len)
{
	struct ntb_queue_entry *entry;
	void *buf;

	if (!qp || qp->client_ready == NTB_LINK_UP)
		return NULL;

	entry = ntb_list_rm(&qp->ntb_rx_pend_q_lock, &qp->rx_pend_q);
	if (!entry)
		return NULL;

	buf = entry->cb_data;
	*len = entry->len;

	ntb_list_add(&qp->ntb_rx_free_q_lock, &entry->entry, &qp->rx_free_q);

	return buf;
}
EXPORT_SYMBOL_GPL(ntb_transport_rx_remove);
/**
 * ntb_transport_rx_enqueue - Enqueue a new NTB queue entry
 * @qp: NTB transport layer queue the entry is to be enqueued on
 * @cb: per buffer pointer for callback function to use
 * @data: pointer to data buffer that incoming packets will be copied into
 * @len: length of the data buffer
 *
 * Enqueue a new receive buffer onto the transport queue into which an NTB
 * payload can be received.
 *
 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
 */
int ntb_transport_rx_enqueue(struct ntb_transport_qp *qp, void *cb, void *data,
			     unsigned int len)
{
	struct ntb_queue_entry *entry;

	if (!qp)
		return -EINVAL;

	entry = ntb_list_rm(&qp->ntb_rx_free_q_lock, &qp->rx_free_q);
	if (!entry)
		return -ENOMEM;

	entry->cb_data = cb;
	entry->buf = data;
	entry->len = len;

	ntb_list_add(&qp->ntb_rx_pend_q_lock, &entry->entry, &qp->rx_pend_q);

	return 0;
}
EXPORT_SYMBOL_GPL(ntb_transport_rx_enqueue);
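/*
 * Illustrative sketch (assumed buffer management, not from this file):
 * receive buffers are typically pre-posted before the client marks itself
 * ready, one per packet expected in flight (NUM_RX_BUFS is an assumption):
 *
 *	len = ntb_transport_max_size(qp);
 *	for (i = 0; i < NUM_RX_BUFS; i++) {
 *		buf = kmalloc(len, GFP_KERNEL);
 *		if (!buf)
 *			break;
 *		rc = ntb_transport_rx_enqueue(qp, buf, buf, len);
 *		if (rc) {
 *			kfree(buf);
 *			break;
 *		}
 *	}
 *	ntb_transport_link_up(qp);
 */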
/**
 * ntb_transport_tx_enqueue - Enqueue a new NTB queue entry
 * @qp: NTB transport layer queue the entry is to be enqueued on
 * @cb: per buffer pointer for callback function to use
 * @data: pointer to data buffer that will be sent
 * @len: length of the data buffer
 *
 * Enqueue a new transmit buffer onto the transport queue from which an NTB
 * payload will be transmitted.  This assumes that a lock is being held to
 * serialize access to the qp.
 *
 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
 */
int ntb_transport_tx_enqueue(struct ntb_transport_qp *qp, void *cb, void *data,
			     unsigned int len)
{
	struct ntb_queue_entry *entry;
	int rc;

	if (!qp || qp->qp_link != NTB_LINK_UP || !len)
		return -EINVAL;

	entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q);
	if (!entry) {
		qp->tx_err_no_buf++;
		return -ENOMEM;
	}

	entry->cb_data = cb;
	entry->buf = data;
	entry->len = len;
	entry->flags = 0;

	rc = ntb_process_tx(qp, entry);
	if (rc)
		ntb_list_add(&qp->ntb_tx_free_q_lock, &entry->entry,
			     &qp->tx_free_q);

	return rc;
}
EXPORT_SYMBOL_GPL(ntb_transport_tx_enqueue);
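/*
 * Illustrative transmit path (hypothetical caller, sketched after the way a
 * network client drives this API).  The data buffer must stay valid until
 * the tx_handler callback reports completion:
 *
 *	rc = ntb_transport_tx_enqueue(qp, skb, skb->data, skb->len);
 *
 * A return of -EAGAIN means the tx ring is full; the caller should stop
 * submitting and retry once tx_handler completions have drained entries.
 * Other errors indicate the link is down, no entries are free, or len is 0.
 */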
/**
 * ntb_transport_link_up - Notify NTB transport of client readiness to use queue
 * @qp: NTB transport layer queue to be enabled
 *
 * Notify NTB transport layer of client readiness to use queue
 */
void ntb_transport_link_up(struct ntb_transport_qp *qp)
{
	if (!qp)
		return;

	qp->client_ready = NTB_LINK_UP;

	if (qp->transport->transport_link == NTB_LINK_UP)
		schedule_delayed_work(&qp->link_work, 0);
}
EXPORT_SYMBOL_GPL(ntb_transport_link_up);

/**
 * ntb_transport_link_down - Notify NTB transport to no longer enqueue data
 * @qp: NTB transport layer queue to be disabled
 *
 * Notify NTB transport layer of client's desire to no longer receive data on
 * the specified transport queue.  It is the client's responsibility to ensure
 * all entries on the queue are purged or otherwise handled appropriately.
 */
void ntb_transport_link_down(struct ntb_transport_qp *qp)
{
	struct pci_dev *pdev;
	int rc, val;

	if (!qp)
		return;

	pdev = ntb_query_pdev(qp->ndev);
	qp->client_ready = NTB_LINK_DOWN;

	rc = ntb_read_local_spad(qp->ndev, QP_LINKS, &val);
	if (rc) {
		dev_err(&pdev->dev, "Error reading spad %d\n", QP_LINKS);
		return;
	}

	rc = ntb_write_remote_spad(qp->ndev, QP_LINKS,
				   val & ~(1 << qp->qp_num));
	if (rc)
		dev_err(&pdev->dev, "Error writing %x to remote spad %d\n",
			val & ~(1 << qp->qp_num), QP_LINKS);

	if (qp->qp_link == NTB_LINK_UP)
		ntb_send_link_down(qp);
	else
		cancel_delayed_work_sync(&qp->link_work);
}
EXPORT_SYMBOL_GPL(ntb_transport_link_down);
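/*
 * Illustrative teardown order (an assumption based on the API contract
 * documented above): take the link down first so the peer stops sending,
 * reclaim any still-posted receive buffers, then free the queue:
 *
 *	ntb_transport_link_down(qp);
 *	while ((buf = ntb_transport_rx_remove(qp, &len)) != NULL)
 *		kfree(buf);
 *	ntb_transport_free_queue(qp);
 */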
/**
 * ntb_transport_link_query - Query transport link state
 * @qp: NTB transport layer queue to be queried
 *
 * Query connectivity to the remote system of the NTB transport queue
 *
 * RETURNS: true for link up or false for link down
 */
bool ntb_transport_link_query(struct ntb_transport_qp *qp)
{
	if (!qp)
		return false;

	return qp->qp_link == NTB_LINK_UP;
}
EXPORT_SYMBOL_GPL(ntb_transport_link_query);

/**
 * ntb_transport_qp_num - Query the qp number
 * @qp: NTB transport layer queue to be queried
 *
 * Query qp number of the NTB transport queue
 *
 * RETURNS: a zero based number specifying the qp number
 */
unsigned char ntb_transport_qp_num(struct ntb_transport_qp *qp)
{
	if (!qp)
		return 0;

	return qp->qp_num;
}
EXPORT_SYMBOL_GPL(ntb_transport_qp_num);
/**
 * ntb_transport_max_size - Query the max payload size of a qp
 * @qp: NTB transport layer queue to be queried
 *
 * Query the maximum payload size permissible on the given qp
 *
 * RETURNS: the max payload size of a qp
 */
unsigned int ntb_transport_max_size(struct ntb_transport_qp *qp)
{
	unsigned int max;

	if (!qp)
		return 0;

	if (!qp->dma_chan)
		return qp->tx_max_frame - sizeof(struct ntb_payload_header);

	/* If DMA engine usage is possible, try to find the max size for that */
	max = qp->tx_max_frame - sizeof(struct ntb_payload_header);
	max -= max % (1 << qp->dma_chan->device->copy_align);

	return max;
}
EXPORT_SYMBOL_GPL(ntb_transport_max_size);
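/*
 * Worked example (assumed numbers): with tx_max_frame = 0x401E, the CPU copy
 * path allows up to 0x401E - sizeof(struct ntb_payload_header) = 16402 byte
 * payloads; a DMA engine advertising copy_align = 3 (8 byte alignment)
 * rounds that down to 16400 bytes.
 */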