/*
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2012 Intel Corporation. All rights reserved.
 * Copyright (C) 2015 EMC Corporation. All Rights Reserved.
 * Copyright (C) 2016 T-Platforms. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * BSD LICENSE
 *
 * Copyright(c) 2012 Intel Corporation. All rights reserved.
 * Copyright (C) 2015 EMC Corporation. All Rights Reserved.
 * Copyright (C) 2016 T-Platforms. All Rights Reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copy
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *   * Neither the name of Intel Corporation nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Intel PCIe NTB Linux driver
 */
50 #include <linux/debugfs.h>
51 #include <linux/delay.h>
52 #include <linux/init.h>
53 #include <linux/interrupt.h>
54 #include <linux/module.h>
55 #include <linux/pci.h>
56 #include <linux/random.h>
57 #include <linux/slab.h>
58 #include <linux/ntb.h>
60 #include "ntb_hw_intel.h"
61 #include "ntb_hw_gen1.h"
62 #include "ntb_hw_gen3.h"
64 #define NTB_NAME "ntb_hw_intel"
65 #define NTB_DESC "Intel(R) PCI-E Non-Transparent Bridge Driver"
68 MODULE_DESCRIPTION(NTB_DESC);
69 MODULE_VERSION(NTB_VER);
70 MODULE_LICENSE("Dual BSD/GPL");
71 MODULE_AUTHOR("Intel Corporation");
73 #define bar0_off(base, bar) ((base) + ((bar) << 2))
74 #define bar2_off(base, bar) bar0_off(base, (bar) - 2)
76 static const struct intel_ntb_reg xeon_reg;
77 static const struct intel_ntb_alt_reg xeon_pri_reg;
78 static const struct intel_ntb_alt_reg xeon_sec_reg;
79 static const struct intel_ntb_alt_reg xeon_b2b_reg;
80 static const struct intel_ntb_xlat_reg xeon_pri_xlat;
81 static const struct intel_ntb_xlat_reg xeon_sec_xlat;
82 static const struct ntb_dev_ops intel_ntb_ops;
84 static const struct file_operations intel_ntb_debugfs_info;
85 static struct dentry *debugfs_dir;
87 static int b2b_mw_idx = -1;
88 module_param(b2b_mw_idx, int, 0644);
89 MODULE_PARM_DESC(b2b_mw_idx, "Use this mw idx to access the peer ntb. A "
90 "value of zero or positive starts from first mw idx, and a "
91 "negative value starts from last mw idx. Both sides MUST "
92 "set the same value here!");
94 static unsigned int b2b_mw_share;
95 module_param(b2b_mw_share, uint, 0644);
96 MODULE_PARM_DESC(b2b_mw_share, "If the b2b mw is large enough, configure the "
97 "ntb so that the peer ntb only occupies the first half of "
98 "the mw, so the second half can still be used as a mw. Both "
99 "sides MUST set the same value here!");
101 module_param_named(xeon_b2b_usd_bar2_addr64,
102 xeon_b2b_usd_addr.bar2_addr64, ullong, 0644);
103 MODULE_PARM_DESC(xeon_b2b_usd_bar2_addr64,
104 "XEON B2B USD BAR 2 64-bit address");
106 module_param_named(xeon_b2b_usd_bar4_addr64,
107 xeon_b2b_usd_addr.bar4_addr64, ullong, 0644);
108 MODULE_PARM_DESC(xeon_b2b_usd_bar4_addr64,
109 "XEON B2B USD BAR 4 64-bit address");
111 module_param_named(xeon_b2b_usd_bar4_addr32,
112 xeon_b2b_usd_addr.bar4_addr32, ullong, 0644);
113 MODULE_PARM_DESC(xeon_b2b_usd_bar4_addr32,
114 "XEON B2B USD split-BAR 4 32-bit address");
116 module_param_named(xeon_b2b_usd_bar5_addr32,
117 xeon_b2b_usd_addr.bar5_addr32, ullong, 0644);
118 MODULE_PARM_DESC(xeon_b2b_usd_bar5_addr32,
119 "XEON B2B USD split-BAR 5 32-bit address");
121 module_param_named(xeon_b2b_dsd_bar2_addr64,
122 xeon_b2b_dsd_addr.bar2_addr64, ullong, 0644);
123 MODULE_PARM_DESC(xeon_b2b_dsd_bar2_addr64,
124 "XEON B2B DSD BAR 2 64-bit address");
126 module_param_named(xeon_b2b_dsd_bar4_addr64,
127 xeon_b2b_dsd_addr.bar4_addr64, ullong, 0644);
128 MODULE_PARM_DESC(xeon_b2b_dsd_bar4_addr64,
129 "XEON B2B DSD BAR 4 64-bit address");
131 module_param_named(xeon_b2b_dsd_bar4_addr32,
132 xeon_b2b_dsd_addr.bar4_addr32, ullong, 0644);
133 MODULE_PARM_DESC(xeon_b2b_dsd_bar4_addr32,
134 "XEON B2B DSD split-BAR 4 32-bit address");
136 module_param_named(xeon_b2b_dsd_bar5_addr32,
137 xeon_b2b_dsd_addr.bar5_addr32, ullong, 0644);
138 MODULE_PARM_DESC(xeon_b2b_dsd_bar5_addr32,
139 "XEON B2B DSD split-BAR 5 32-bit address");
142 static int xeon_init_isr(struct intel_ntb_dev *ndev);
144 static inline void ndev_reset_unsafe_flags(struct intel_ntb_dev *ndev)
146 ndev->unsafe_flags = 0;
147 ndev->unsafe_flags_ignore = 0;
149 /* Only B2B has a workaround to avoid SDOORBELL */
150 if (ndev->hwerr_flags & NTB_HWERR_SDOORBELL_LOCKUP)
151 if (!ntb_topo_is_b2b(ndev->ntb.topo))
152 ndev->unsafe_flags |= NTB_UNSAFE_DB;
154 /* No low level workaround to avoid SB01BASE */
155 if (ndev->hwerr_flags & NTB_HWERR_SB01BASE_LOCKUP) {
156 ndev->unsafe_flags |= NTB_UNSAFE_DB;
157 ndev->unsafe_flags |= NTB_UNSAFE_SPAD;
161 static inline int ndev_is_unsafe(struct intel_ntb_dev *ndev,
164 return !!(flag & ndev->unsafe_flags & ~ndev->unsafe_flags_ignore);
167 static inline int ndev_ignore_unsafe(struct intel_ntb_dev *ndev,
170 flag &= ndev->unsafe_flags;
171 ndev->unsafe_flags_ignore |= flag;
176 int ndev_mw_to_bar(struct intel_ntb_dev *ndev, int idx)
178 if (idx < 0 || idx >= ndev->mw_count)
180 return ndev->reg->mw_bar[idx];
183 static inline int ndev_db_addr(struct intel_ntb_dev *ndev,
184 phys_addr_t *db_addr, resource_size_t *db_size,
185 phys_addr_t reg_addr, unsigned long reg)
187 if (ndev_is_unsafe(ndev, NTB_UNSAFE_DB))
188 pr_warn_once("%s: NTB unsafe doorbell access", __func__);
191 *db_addr = reg_addr + reg;
192 dev_dbg(&ndev->ntb.pdev->dev, "Peer db addr %llx\n", *db_addr);
196 *db_size = ndev->reg->db_size;
197 dev_dbg(&ndev->ntb.pdev->dev, "Peer db size %llx\n", *db_size);
203 u64 ndev_db_read(struct intel_ntb_dev *ndev,
206 if (ndev_is_unsafe(ndev, NTB_UNSAFE_DB))
207 pr_warn_once("%s: NTB unsafe doorbell access", __func__);
209 return ndev->reg->db_ioread(mmio);
212 int ndev_db_write(struct intel_ntb_dev *ndev, u64 db_bits,
215 if (ndev_is_unsafe(ndev, NTB_UNSAFE_DB))
216 pr_warn_once("%s: NTB unsafe doorbell access", __func__);
218 if (db_bits & ~ndev->db_valid_mask)
221 ndev->reg->db_iowrite(db_bits, mmio);
226 static inline int ndev_db_set_mask(struct intel_ntb_dev *ndev, u64 db_bits,
229 unsigned long irqflags;
231 if (ndev_is_unsafe(ndev, NTB_UNSAFE_DB))
232 pr_warn_once("%s: NTB unsafe doorbell access", __func__);
234 if (db_bits & ~ndev->db_valid_mask)
237 spin_lock_irqsave(&ndev->db_mask_lock, irqflags);
239 ndev->db_mask |= db_bits;
240 ndev->reg->db_iowrite(ndev->db_mask, mmio);
242 spin_unlock_irqrestore(&ndev->db_mask_lock, irqflags);
247 static inline int ndev_db_clear_mask(struct intel_ntb_dev *ndev, u64 db_bits,
250 unsigned long irqflags;
252 if (ndev_is_unsafe(ndev, NTB_UNSAFE_DB))
253 pr_warn_once("%s: NTB unsafe doorbell access", __func__);
255 if (db_bits & ~ndev->db_valid_mask)
258 spin_lock_irqsave(&ndev->db_mask_lock, irqflags);
260 ndev->db_mask &= ~db_bits;
261 ndev->reg->db_iowrite(ndev->db_mask, mmio);
263 spin_unlock_irqrestore(&ndev->db_mask_lock, irqflags);
268 static inline int ndev_vec_mask(struct intel_ntb_dev *ndev, int db_vector)
272 shift = ndev->db_vec_shift;
273 mask = BIT_ULL(shift) - 1;
275 return mask << (shift * db_vector);
278 static inline int ndev_spad_addr(struct intel_ntb_dev *ndev, int idx,
279 phys_addr_t *spad_addr, phys_addr_t reg_addr,
282 if (ndev_is_unsafe(ndev, NTB_UNSAFE_SPAD))
283 pr_warn_once("%s: NTB unsafe scratchpad access", __func__);
285 if (idx < 0 || idx >= ndev->spad_count)
289 *spad_addr = reg_addr + reg + (idx << 2);
290 dev_dbg(&ndev->ntb.pdev->dev, "Peer spad addr %llx\n",
297 static inline u32 ndev_spad_read(struct intel_ntb_dev *ndev, int idx,
300 if (ndev_is_unsafe(ndev, NTB_UNSAFE_SPAD))
301 pr_warn_once("%s: NTB unsafe scratchpad access", __func__);
303 if (idx < 0 || idx >= ndev->spad_count)
306 return ioread32(mmio + (idx << 2));
309 static inline int ndev_spad_write(struct intel_ntb_dev *ndev, int idx, u32 val,
312 if (ndev_is_unsafe(ndev, NTB_UNSAFE_SPAD))
313 pr_warn_once("%s: NTB unsafe scratchpad access", __func__);
315 if (idx < 0 || idx >= ndev->spad_count)
318 iowrite32(val, mmio + (idx << 2));
323 static irqreturn_t ndev_interrupt(struct intel_ntb_dev *ndev, int vec)
327 vec_mask = ndev_vec_mask(ndev, vec);
329 if ((ndev->hwerr_flags & NTB_HWERR_MSIX_VECTOR32_BAD) && (vec == 31))
330 vec_mask |= ndev->db_link_mask;
332 dev_dbg(&ndev->ntb.pdev->dev, "vec %d vec_mask %llx\n", vec, vec_mask);
334 ndev->last_ts = jiffies;
336 if (vec_mask & ndev->db_link_mask) {
337 if (ndev->reg->poll_link(ndev))
338 ntb_link_event(&ndev->ntb);
341 if (vec_mask & ndev->db_valid_mask)
342 ntb_db_event(&ndev->ntb, vec);
347 static irqreturn_t ndev_vec_isr(int irq, void *dev)
349 struct intel_ntb_vec *nvec = dev;
351 dev_dbg(&nvec->ndev->ntb.pdev->dev, "irq: %d nvec->num: %d\n",
354 return ndev_interrupt(nvec->ndev, nvec->num);
357 static irqreturn_t ndev_irq_isr(int irq, void *dev)
359 struct intel_ntb_dev *ndev = dev;
361 return ndev_interrupt(ndev, irq - ndev->ntb.pdev->irq);
364 int ndev_init_isr(struct intel_ntb_dev *ndev,
365 int msix_min, int msix_max,
366 int msix_shift, int total_shift)
368 struct pci_dev *pdev;
369 int rc, i, msix_count, node;
371 pdev = ndev->ntb.pdev;
373 node = dev_to_node(&pdev->dev);
375 /* Mask all doorbell interrupts */
376 ndev->db_mask = ndev->db_valid_mask;
377 ndev->reg->db_iowrite(ndev->db_mask,
379 ndev->self_reg->db_mask);
381 /* Try to set up msix irq */
383 ndev->vec = kcalloc_node(msix_max, sizeof(*ndev->vec),
386 goto err_msix_vec_alloc;
388 ndev->msix = kcalloc_node(msix_max, sizeof(*ndev->msix),
393 for (i = 0; i < msix_max; ++i)
394 ndev->msix[i].entry = i;
396 msix_count = pci_enable_msix_range(pdev, ndev->msix,
399 goto err_msix_enable;
401 for (i = 0; i < msix_count; ++i) {
402 ndev->vec[i].ndev = ndev;
403 ndev->vec[i].num = i;
404 rc = request_irq(ndev->msix[i].vector, ndev_vec_isr, 0,
405 "ndev_vec_isr", &ndev->vec[i]);
407 goto err_msix_request;
410 dev_dbg(&pdev->dev, "Using %d msix interrupts\n", msix_count);
411 ndev->db_vec_count = msix_count;
412 ndev->db_vec_shift = msix_shift;
417 free_irq(ndev->msix[i].vector, &ndev->vec[i]);
418 pci_disable_msix(pdev);
427 /* Try to set up msi irq */
429 rc = pci_enable_msi(pdev);
433 rc = request_irq(pdev->irq, ndev_irq_isr, 0,
434 "ndev_irq_isr", ndev);
436 goto err_msi_request;
438 dev_dbg(&pdev->dev, "Using msi interrupts\n");
439 ndev->db_vec_count = 1;
440 ndev->db_vec_shift = total_shift;
444 pci_disable_msi(pdev);
447 /* Try to set up intx irq */
451 rc = request_irq(pdev->irq, ndev_irq_isr, IRQF_SHARED,
452 "ndev_irq_isr", ndev);
454 goto err_intx_request;
456 dev_dbg(&pdev->dev, "Using intx interrupts\n");
457 ndev->db_vec_count = 1;
458 ndev->db_vec_shift = total_shift;
465 static void ndev_deinit_isr(struct intel_ntb_dev *ndev)
467 struct pci_dev *pdev;
470 pdev = ndev->ntb.pdev;
472 /* Mask all doorbell interrupts */
473 ndev->db_mask = ndev->db_valid_mask;
474 ndev->reg->db_iowrite(ndev->db_mask,
476 ndev->self_reg->db_mask);
479 i = ndev->db_vec_count;
481 free_irq(ndev->msix[i].vector, &ndev->vec[i]);
482 pci_disable_msix(pdev);
486 free_irq(pdev->irq, ndev);
487 if (pci_dev_msi_enabled(pdev))
488 pci_disable_msi(pdev);
492 static ssize_t ndev_ntb_debugfs_read(struct file *filp, char __user *ubuf,
493 size_t count, loff_t *offp)
495 struct intel_ntb_dev *ndev;
496 struct pci_dev *pdev;
501 union { u64 v64; u32 v32; u16 v16; u8 v8; } u;
503 ndev = filp->private_data;
504 pdev = ndev->ntb.pdev;
505 mmio = ndev->self_mmio;
507 buf_size = min(count, 0x800ul);
509 buf = kmalloc(buf_size, GFP_KERNEL);
515 off += scnprintf(buf + off, buf_size - off,
516 "NTB Device Information:\n");
518 off += scnprintf(buf + off, buf_size - off,
519 "Connection Topology -\t%s\n",
520 ntb_topo_string(ndev->ntb.topo));
522 if (ndev->b2b_idx != UINT_MAX) {
523 off += scnprintf(buf + off, buf_size - off,
524 "B2B MW Idx -\t\t%u\n", ndev->b2b_idx);
525 off += scnprintf(buf + off, buf_size - off,
526 "B2B Offset -\t\t%#lx\n", ndev->b2b_off);
529 off += scnprintf(buf + off, buf_size - off,
530 "BAR4 Split -\t\t%s\n",
531 ndev->bar4_split ? "yes" : "no");
533 off += scnprintf(buf + off, buf_size - off,
534 "NTB CTL -\t\t%#06x\n", ndev->ntb_ctl);
535 off += scnprintf(buf + off, buf_size - off,
536 "LNK STA -\t\t%#06x\n", ndev->lnk_sta);
538 if (!ndev->reg->link_is_up(ndev)) {
539 off += scnprintf(buf + off, buf_size - off,
540 "Link Status -\t\tDown\n");
542 off += scnprintf(buf + off, buf_size - off,
543 "Link Status -\t\tUp\n");
544 off += scnprintf(buf + off, buf_size - off,
545 "Link Speed -\t\tPCI-E Gen %u\n",
546 NTB_LNK_STA_SPEED(ndev->lnk_sta));
547 off += scnprintf(buf + off, buf_size - off,
548 "Link Width -\t\tx%u\n",
549 NTB_LNK_STA_WIDTH(ndev->lnk_sta));
552 off += scnprintf(buf + off, buf_size - off,
553 "Memory Window Count -\t%u\n", ndev->mw_count);
554 off += scnprintf(buf + off, buf_size - off,
555 "Scratchpad Count -\t%u\n", ndev->spad_count);
556 off += scnprintf(buf + off, buf_size - off,
557 "Doorbell Count -\t%u\n", ndev->db_count);
558 off += scnprintf(buf + off, buf_size - off,
559 "Doorbell Vector Count -\t%u\n", ndev->db_vec_count);
560 off += scnprintf(buf + off, buf_size - off,
561 "Doorbell Vector Shift -\t%u\n", ndev->db_vec_shift);
563 off += scnprintf(buf + off, buf_size - off,
564 "Doorbell Valid Mask -\t%#llx\n", ndev->db_valid_mask);
565 off += scnprintf(buf + off, buf_size - off,
566 "Doorbell Link Mask -\t%#llx\n", ndev->db_link_mask);
567 off += scnprintf(buf + off, buf_size - off,
568 "Doorbell Mask Cached -\t%#llx\n", ndev->db_mask);
570 u.v64 = ndev_db_read(ndev, mmio + ndev->self_reg->db_mask);
571 off += scnprintf(buf + off, buf_size - off,
572 "Doorbell Mask -\t\t%#llx\n", u.v64);
574 u.v64 = ndev_db_read(ndev, mmio + ndev->self_reg->db_bell);
575 off += scnprintf(buf + off, buf_size - off,
576 "Doorbell Bell -\t\t%#llx\n", u.v64);
578 off += scnprintf(buf + off, buf_size - off,
579 "\nNTB Window Size:\n");
581 pci_read_config_byte(pdev, XEON_PBAR23SZ_OFFSET, &u.v8);
582 off += scnprintf(buf + off, buf_size - off,
583 "PBAR23SZ %hhu\n", u.v8);
584 if (!ndev->bar4_split) {
585 pci_read_config_byte(pdev, XEON_PBAR45SZ_OFFSET, &u.v8);
586 off += scnprintf(buf + off, buf_size - off,
587 "PBAR45SZ %hhu\n", u.v8);
589 pci_read_config_byte(pdev, XEON_PBAR4SZ_OFFSET, &u.v8);
590 off += scnprintf(buf + off, buf_size - off,
591 "PBAR4SZ %hhu\n", u.v8);
592 pci_read_config_byte(pdev, XEON_PBAR5SZ_OFFSET, &u.v8);
593 off += scnprintf(buf + off, buf_size - off,
594 "PBAR5SZ %hhu\n", u.v8);
597 pci_read_config_byte(pdev, XEON_SBAR23SZ_OFFSET, &u.v8);
598 off += scnprintf(buf + off, buf_size - off,
599 "SBAR23SZ %hhu\n", u.v8);
600 if (!ndev->bar4_split) {
601 pci_read_config_byte(pdev, XEON_SBAR45SZ_OFFSET, &u.v8);
602 off += scnprintf(buf + off, buf_size - off,
603 "SBAR45SZ %hhu\n", u.v8);
605 pci_read_config_byte(pdev, XEON_SBAR4SZ_OFFSET, &u.v8);
606 off += scnprintf(buf + off, buf_size - off,
607 "SBAR4SZ %hhu\n", u.v8);
608 pci_read_config_byte(pdev, XEON_SBAR5SZ_OFFSET, &u.v8);
609 off += scnprintf(buf + off, buf_size - off,
610 "SBAR5SZ %hhu\n", u.v8);
613 off += scnprintf(buf + off, buf_size - off,
614 "\nNTB Incoming XLAT:\n");
616 u.v64 = ioread64(mmio + bar2_off(ndev->xlat_reg->bar2_xlat, 2));
617 off += scnprintf(buf + off, buf_size - off,
618 "XLAT23 -\t\t%#018llx\n", u.v64);
620 if (ndev->bar4_split) {
621 u.v32 = ioread32(mmio + bar2_off(ndev->xlat_reg->bar2_xlat, 4));
622 off += scnprintf(buf + off, buf_size - off,
623 "XLAT4 -\t\t\t%#06x\n", u.v32);
625 u.v32 = ioread32(mmio + bar2_off(ndev->xlat_reg->bar2_xlat, 5));
626 off += scnprintf(buf + off, buf_size - off,
627 "XLAT5 -\t\t\t%#06x\n", u.v32);
629 u.v64 = ioread64(mmio + bar2_off(ndev->xlat_reg->bar2_xlat, 4));
630 off += scnprintf(buf + off, buf_size - off,
631 "XLAT45 -\t\t%#018llx\n", u.v64);
634 u.v64 = ioread64(mmio + bar2_off(ndev->xlat_reg->bar2_limit, 2));
635 off += scnprintf(buf + off, buf_size - off,
636 "LMT23 -\t\t\t%#018llx\n", u.v64);
638 if (ndev->bar4_split) {
639 u.v32 = ioread32(mmio + bar2_off(ndev->xlat_reg->bar2_limit, 4));
640 off += scnprintf(buf + off, buf_size - off,
641 "LMT4 -\t\t\t%#06x\n", u.v32);
642 u.v32 = ioread32(mmio + bar2_off(ndev->xlat_reg->bar2_limit, 5));
643 off += scnprintf(buf + off, buf_size - off,
644 "LMT5 -\t\t\t%#06x\n", u.v32);
646 u.v64 = ioread64(mmio + bar2_off(ndev->xlat_reg->bar2_limit, 4));
647 off += scnprintf(buf + off, buf_size - off,
648 "LMT45 -\t\t\t%#018llx\n", u.v64);
651 if (pdev_is_gen1(pdev)) {
652 if (ntb_topo_is_b2b(ndev->ntb.topo)) {
653 off += scnprintf(buf + off, buf_size - off,
654 "\nNTB Outgoing B2B XLAT:\n");
656 u.v64 = ioread64(mmio + XEON_PBAR23XLAT_OFFSET);
657 off += scnprintf(buf + off, buf_size - off,
658 "B2B XLAT23 -\t\t%#018llx\n", u.v64);
660 if (ndev->bar4_split) {
661 u.v32 = ioread32(mmio + XEON_PBAR4XLAT_OFFSET);
662 off += scnprintf(buf + off, buf_size - off,
663 "B2B XLAT4 -\t\t%#06x\n",
665 u.v32 = ioread32(mmio + XEON_PBAR5XLAT_OFFSET);
666 off += scnprintf(buf + off, buf_size - off,
667 "B2B XLAT5 -\t\t%#06x\n",
670 u.v64 = ioread64(mmio + XEON_PBAR45XLAT_OFFSET);
671 off += scnprintf(buf + off, buf_size - off,
672 "B2B XLAT45 -\t\t%#018llx\n",
676 u.v64 = ioread64(mmio + XEON_PBAR23LMT_OFFSET);
677 off += scnprintf(buf + off, buf_size - off,
678 "B2B LMT23 -\t\t%#018llx\n", u.v64);
680 if (ndev->bar4_split) {
681 u.v32 = ioread32(mmio + XEON_PBAR4LMT_OFFSET);
682 off += scnprintf(buf + off, buf_size - off,
683 "B2B LMT4 -\t\t%#06x\n",
685 u.v32 = ioread32(mmio + XEON_PBAR5LMT_OFFSET);
686 off += scnprintf(buf + off, buf_size - off,
687 "B2B LMT5 -\t\t%#06x\n",
690 u.v64 = ioread64(mmio + XEON_PBAR45LMT_OFFSET);
691 off += scnprintf(buf + off, buf_size - off,
692 "B2B LMT45 -\t\t%#018llx\n",
696 off += scnprintf(buf + off, buf_size - off,
697 "\nNTB Secondary BAR:\n");
699 u.v64 = ioread64(mmio + XEON_SBAR0BASE_OFFSET);
700 off += scnprintf(buf + off, buf_size - off,
701 "SBAR01 -\t\t%#018llx\n", u.v64);
703 u.v64 = ioread64(mmio + XEON_SBAR23BASE_OFFSET);
704 off += scnprintf(buf + off, buf_size - off,
705 "SBAR23 -\t\t%#018llx\n", u.v64);
707 if (ndev->bar4_split) {
708 u.v32 = ioread32(mmio + XEON_SBAR4BASE_OFFSET);
709 off += scnprintf(buf + off, buf_size - off,
710 "SBAR4 -\t\t\t%#06x\n", u.v32);
711 u.v32 = ioread32(mmio + XEON_SBAR5BASE_OFFSET);
712 off += scnprintf(buf + off, buf_size - off,
713 "SBAR5 -\t\t\t%#06x\n", u.v32);
715 u.v64 = ioread64(mmio + XEON_SBAR45BASE_OFFSET);
716 off += scnprintf(buf + off, buf_size - off,
717 "SBAR45 -\t\t%#018llx\n",
722 off += scnprintf(buf + off, buf_size - off,
723 "\nXEON NTB Statistics:\n");
725 u.v16 = ioread16(mmio + XEON_USMEMMISS_OFFSET);
726 off += scnprintf(buf + off, buf_size - off,
727 "Upstream Memory Miss -\t%u\n", u.v16);
729 off += scnprintf(buf + off, buf_size - off,
730 "\nXEON NTB Hardware Errors:\n");
732 if (!pci_read_config_word(pdev,
733 XEON_DEVSTS_OFFSET, &u.v16))
734 off += scnprintf(buf + off, buf_size - off,
735 "DEVSTS -\t\t%#06x\n", u.v16);
737 if (!pci_read_config_word(pdev,
738 XEON_LINK_STATUS_OFFSET, &u.v16))
739 off += scnprintf(buf + off, buf_size - off,
740 "LNKSTS -\t\t%#06x\n", u.v16);
742 if (!pci_read_config_dword(pdev,
743 XEON_UNCERRSTS_OFFSET, &u.v32))
744 off += scnprintf(buf + off, buf_size - off,
745 "UNCERRSTS -\t\t%#06x\n", u.v32);
747 if (!pci_read_config_dword(pdev,
748 XEON_CORERRSTS_OFFSET, &u.v32))
749 off += scnprintf(buf + off, buf_size - off,
750 "CORERRSTS -\t\t%#06x\n", u.v32);
753 ret = simple_read_from_buffer(ubuf, count, offp, buf, off);
758 static ssize_t ndev_debugfs_read(struct file *filp, char __user *ubuf,
759 size_t count, loff_t *offp)
761 struct intel_ntb_dev *ndev = filp->private_data;
763 if (pdev_is_gen1(ndev->ntb.pdev))
764 return ndev_ntb_debugfs_read(filp, ubuf, count, offp);
765 else if (pdev_is_gen3(ndev->ntb.pdev))
766 return ndev_ntb3_debugfs_read(filp, ubuf, count, offp);
771 static void ndev_init_debugfs(struct intel_ntb_dev *ndev)
774 ndev->debugfs_dir = NULL;
775 ndev->debugfs_info = NULL;
778 debugfs_create_dir(pci_name(ndev->ntb.pdev),
780 if (!ndev->debugfs_dir)
781 ndev->debugfs_info = NULL;
784 debugfs_create_file("info", S_IRUSR,
785 ndev->debugfs_dir, ndev,
786 &intel_ntb_debugfs_info);
790 static void ndev_deinit_debugfs(struct intel_ntb_dev *ndev)
792 debugfs_remove_recursive(ndev->debugfs_dir);
795 int intel_ntb_mw_count(struct ntb_dev *ntb, int pidx)
797 if (pidx != NTB_DEF_PEER_IDX)
800 return ntb_ndev(ntb)->mw_count;
803 int intel_ntb_mw_get_align(struct ntb_dev *ntb, int pidx, int idx,
804 resource_size_t *addr_align,
805 resource_size_t *size_align,
806 resource_size_t *size_max)
808 struct intel_ntb_dev *ndev = ntb_ndev(ntb);
809 resource_size_t bar_size, mw_size;
812 if (pidx != NTB_DEF_PEER_IDX)
815 if (idx >= ndev->b2b_idx && !ndev->b2b_off)
818 bar = ndev_mw_to_bar(ndev, idx);
822 bar_size = pci_resource_len(ndev->ntb.pdev, bar);
824 if (idx == ndev->b2b_idx)
825 mw_size = bar_size - ndev->b2b_off;
830 *addr_align = pci_resource_len(ndev->ntb.pdev, bar);
841 static int intel_ntb_mw_set_trans(struct ntb_dev *ntb, int pidx, int idx,
842 dma_addr_t addr, resource_size_t size)
844 struct intel_ntb_dev *ndev = ntb_ndev(ntb);
845 unsigned long base_reg, xlat_reg, limit_reg;
846 resource_size_t bar_size, mw_size;
848 u64 base, limit, reg_val;
851 if (pidx != NTB_DEF_PEER_IDX)
854 if (idx >= ndev->b2b_idx && !ndev->b2b_off)
857 bar = ndev_mw_to_bar(ndev, idx);
861 bar_size = pci_resource_len(ndev->ntb.pdev, bar);
863 if (idx == ndev->b2b_idx)
864 mw_size = bar_size - ndev->b2b_off;
868 /* hardware requires that addr is aligned to bar size */
869 if (addr & (bar_size - 1))
872 /* make sure the range fits in the usable mw size */
876 mmio = ndev->self_mmio;
877 base_reg = bar0_off(ndev->xlat_reg->bar0_base, bar);
878 xlat_reg = bar2_off(ndev->xlat_reg->bar2_xlat, bar);
879 limit_reg = bar2_off(ndev->xlat_reg->bar2_limit, bar);
881 if (bar < 4 || !ndev->bar4_split) {
882 base = ioread64(mmio + base_reg) & NTB_BAR_MASK_64;
884 /* Set the limit if supported, if size is not mw_size */
885 if (limit_reg && size != mw_size)
890 /* set and verify setting the translation address */
891 iowrite64(addr, mmio + xlat_reg);
892 reg_val = ioread64(mmio + xlat_reg);
893 if (reg_val != addr) {
894 iowrite64(0, mmio + xlat_reg);
898 /* set and verify setting the limit */
899 iowrite64(limit, mmio + limit_reg);
900 reg_val = ioread64(mmio + limit_reg);
901 if (reg_val != limit) {
902 iowrite64(base, mmio + limit_reg);
903 iowrite64(0, mmio + xlat_reg);
907 /* split bar addr range must all be 32 bit */
908 if (addr & (~0ull << 32))
910 if ((addr + size) & (~0ull << 32))
913 base = ioread32(mmio + base_reg) & NTB_BAR_MASK_32;
915 /* Set the limit if supported, if size is not mw_size */
916 if (limit_reg && size != mw_size)
921 /* set and verify setting the translation address */
922 iowrite32(addr, mmio + xlat_reg);
923 reg_val = ioread32(mmio + xlat_reg);
924 if (reg_val != addr) {
925 iowrite32(0, mmio + xlat_reg);
929 /* set and verify setting the limit */
930 iowrite32(limit, mmio + limit_reg);
931 reg_val = ioread32(mmio + limit_reg);
932 if (reg_val != limit) {
933 iowrite32(base, mmio + limit_reg);
934 iowrite32(0, mmio + xlat_reg);
942 u64 intel_ntb_link_is_up(struct ntb_dev *ntb, enum ntb_speed *speed,
943 enum ntb_width *width)
945 struct intel_ntb_dev *ndev = ntb_ndev(ntb);
947 if (ndev->reg->link_is_up(ndev)) {
949 *speed = NTB_LNK_STA_SPEED(ndev->lnk_sta);
951 *width = NTB_LNK_STA_WIDTH(ndev->lnk_sta);
954 /* TODO MAYBE: is it possible to observe the link speed and
955 * width while link is training? */
957 *speed = NTB_SPEED_NONE;
959 *width = NTB_WIDTH_NONE;
964 static int intel_ntb_link_enable(struct ntb_dev *ntb,
965 enum ntb_speed max_speed,
966 enum ntb_width max_width)
968 struct intel_ntb_dev *ndev;
971 ndev = container_of(ntb, struct intel_ntb_dev, ntb);
973 if (ndev->ntb.topo == NTB_TOPO_SEC)
976 dev_dbg(&ntb->pdev->dev,
977 "Enabling link with max_speed %d max_width %d\n",
978 max_speed, max_width);
979 if (max_speed != NTB_SPEED_AUTO)
980 dev_dbg(&ntb->pdev->dev, "ignoring max_speed %d\n", max_speed);
981 if (max_width != NTB_WIDTH_AUTO)
982 dev_dbg(&ntb->pdev->dev, "ignoring max_width %d\n", max_width);
984 ntb_ctl = ioread32(ndev->self_mmio + ndev->reg->ntb_ctl);
985 ntb_ctl &= ~(NTB_CTL_DISABLE | NTB_CTL_CFG_LOCK);
986 ntb_ctl |= NTB_CTL_P2S_BAR2_SNOOP | NTB_CTL_S2P_BAR2_SNOOP;
987 ntb_ctl |= NTB_CTL_P2S_BAR4_SNOOP | NTB_CTL_S2P_BAR4_SNOOP;
988 if (ndev->bar4_split)
989 ntb_ctl |= NTB_CTL_P2S_BAR5_SNOOP | NTB_CTL_S2P_BAR5_SNOOP;
990 iowrite32(ntb_ctl, ndev->self_mmio + ndev->reg->ntb_ctl);
995 int intel_ntb_link_disable(struct ntb_dev *ntb)
997 struct intel_ntb_dev *ndev;
1000 ndev = container_of(ntb, struct intel_ntb_dev, ntb);
1002 if (ndev->ntb.topo == NTB_TOPO_SEC)
1005 dev_dbg(&ntb->pdev->dev, "Disabling link\n");
1007 /* Bring NTB link down */
1008 ntb_cntl = ioread32(ndev->self_mmio + ndev->reg->ntb_ctl);
1009 ntb_cntl &= ~(NTB_CTL_P2S_BAR2_SNOOP | NTB_CTL_S2P_BAR2_SNOOP);
1010 ntb_cntl &= ~(NTB_CTL_P2S_BAR4_SNOOP | NTB_CTL_S2P_BAR4_SNOOP);
1011 if (ndev->bar4_split)
1012 ntb_cntl &= ~(NTB_CTL_P2S_BAR5_SNOOP | NTB_CTL_S2P_BAR5_SNOOP);
1013 ntb_cntl |= NTB_CTL_DISABLE | NTB_CTL_CFG_LOCK;
1014 iowrite32(ntb_cntl, ndev->self_mmio + ndev->reg->ntb_ctl);
1019 int intel_ntb_peer_mw_count(struct ntb_dev *ntb)
1021 /* Numbers of inbound and outbound memory windows match */
1022 return ntb_ndev(ntb)->mw_count;
1025 int intel_ntb_peer_mw_get_addr(struct ntb_dev *ntb, int idx,
1026 phys_addr_t *base, resource_size_t *size)
1028 struct intel_ntb_dev *ndev = ntb_ndev(ntb);
1031 if (idx >= ndev->b2b_idx && !ndev->b2b_off)
1034 bar = ndev_mw_to_bar(ndev, idx);
1039 *base = pci_resource_start(ndev->ntb.pdev, bar) +
1040 (idx == ndev->b2b_idx ? ndev->b2b_off : 0);
1043 *size = pci_resource_len(ndev->ntb.pdev, bar) -
1044 (idx == ndev->b2b_idx ? ndev->b2b_off : 0);
1049 static int intel_ntb_db_is_unsafe(struct ntb_dev *ntb)
1051 return ndev_ignore_unsafe(ntb_ndev(ntb), NTB_UNSAFE_DB);
1054 u64 intel_ntb_db_valid_mask(struct ntb_dev *ntb)
1056 return ntb_ndev(ntb)->db_valid_mask;
1059 int intel_ntb_db_vector_count(struct ntb_dev *ntb)
1061 struct intel_ntb_dev *ndev;
1063 ndev = container_of(ntb, struct intel_ntb_dev, ntb);
1065 return ndev->db_vec_count;
1068 u64 intel_ntb_db_vector_mask(struct ntb_dev *ntb, int db_vector)
1070 struct intel_ntb_dev *ndev = ntb_ndev(ntb);
1072 if (db_vector < 0 || db_vector > ndev->db_vec_count)
1075 return ndev->db_valid_mask & ndev_vec_mask(ndev, db_vector);
1078 static u64 intel_ntb_db_read(struct ntb_dev *ntb)
1080 struct intel_ntb_dev *ndev = ntb_ndev(ntb);
1082 return ndev_db_read(ndev,
1084 ndev->self_reg->db_bell);
1087 static int intel_ntb_db_clear(struct ntb_dev *ntb, u64 db_bits)
1089 struct intel_ntb_dev *ndev = ntb_ndev(ntb);
1091 return ndev_db_write(ndev, db_bits,
1093 ndev->self_reg->db_bell);
1096 int intel_ntb_db_set_mask(struct ntb_dev *ntb, u64 db_bits)
1098 struct intel_ntb_dev *ndev = ntb_ndev(ntb);
1100 return ndev_db_set_mask(ndev, db_bits,
1102 ndev->self_reg->db_mask);
1105 int intel_ntb_db_clear_mask(struct ntb_dev *ntb, u64 db_bits)
1107 struct intel_ntb_dev *ndev = ntb_ndev(ntb);
1109 return ndev_db_clear_mask(ndev, db_bits,
1111 ndev->self_reg->db_mask);
1114 int intel_ntb_peer_db_addr(struct ntb_dev *ntb, phys_addr_t *db_addr,
1115 resource_size_t *db_size)
1117 struct intel_ntb_dev *ndev = ntb_ndev(ntb);
1119 return ndev_db_addr(ndev, db_addr, db_size, ndev->peer_addr,
1120 ndev->peer_reg->db_bell);
1123 static int intel_ntb_peer_db_set(struct ntb_dev *ntb, u64 db_bits)
1125 struct intel_ntb_dev *ndev = ntb_ndev(ntb);
1127 return ndev_db_write(ndev, db_bits,
1129 ndev->peer_reg->db_bell);
1132 int intel_ntb_spad_is_unsafe(struct ntb_dev *ntb)
1134 return ndev_ignore_unsafe(ntb_ndev(ntb), NTB_UNSAFE_SPAD);
1137 int intel_ntb_spad_count(struct ntb_dev *ntb)
1139 struct intel_ntb_dev *ndev;
1141 ndev = container_of(ntb, struct intel_ntb_dev, ntb);
1143 return ndev->spad_count;
1146 u32 intel_ntb_spad_read(struct ntb_dev *ntb, int idx)
1148 struct intel_ntb_dev *ndev = ntb_ndev(ntb);
1150 return ndev_spad_read(ndev, idx,
1152 ndev->self_reg->spad);
1155 int intel_ntb_spad_write(struct ntb_dev *ntb, int idx, u32 val)
1157 struct intel_ntb_dev *ndev = ntb_ndev(ntb);
1159 return ndev_spad_write(ndev, idx, val,
1161 ndev->self_reg->spad);
1164 int intel_ntb_peer_spad_addr(struct ntb_dev *ntb, int pidx, int sidx,
1165 phys_addr_t *spad_addr)
1167 struct intel_ntb_dev *ndev = ntb_ndev(ntb);
1169 return ndev_spad_addr(ndev, sidx, spad_addr, ndev->peer_addr,
1170 ndev->peer_reg->spad);
1173 u32 intel_ntb_peer_spad_read(struct ntb_dev *ntb, int pidx, int sidx)
1175 struct intel_ntb_dev *ndev = ntb_ndev(ntb);
1177 return ndev_spad_read(ndev, sidx,
1179 ndev->peer_reg->spad);
1182 int intel_ntb_peer_spad_write(struct ntb_dev *ntb, int pidx, int sidx,
1185 struct intel_ntb_dev *ndev = ntb_ndev(ntb);
1187 return ndev_spad_write(ndev, sidx, val,
1189 ndev->peer_reg->spad);
1192 static u64 xeon_db_ioread(void __iomem *mmio)
1194 return (u64)ioread16(mmio);
1197 static void xeon_db_iowrite(u64 bits, void __iomem *mmio)
1199 iowrite16((u16)bits, mmio);
1202 static int xeon_poll_link(struct intel_ntb_dev *ndev)
1207 ndev->reg->db_iowrite(ndev->db_link_mask,
1209 ndev->self_reg->db_bell);
1211 rc = pci_read_config_word(ndev->ntb.pdev,
1212 XEON_LINK_STATUS_OFFSET, ®_val);
1216 if (reg_val == ndev->lnk_sta)
1219 ndev->lnk_sta = reg_val;
1224 int xeon_link_is_up(struct intel_ntb_dev *ndev)
1226 if (ndev->ntb.topo == NTB_TOPO_SEC)
1229 return NTB_LNK_STA_ACTIVE(ndev->lnk_sta);
1232 enum ntb_topo xeon_ppd_topo(struct intel_ntb_dev *ndev, u8 ppd)
1234 switch (ppd & XEON_PPD_TOPO_MASK) {
1235 case XEON_PPD_TOPO_B2B_USD:
1236 return NTB_TOPO_B2B_USD;
1238 case XEON_PPD_TOPO_B2B_DSD:
1239 return NTB_TOPO_B2B_DSD;
1241 case XEON_PPD_TOPO_PRI_USD:
1242 case XEON_PPD_TOPO_PRI_DSD: /* accept bogus PRI_DSD */
1243 return NTB_TOPO_PRI;
1245 case XEON_PPD_TOPO_SEC_USD:
1246 case XEON_PPD_TOPO_SEC_DSD: /* accept bogus SEC_DSD */
1247 return NTB_TOPO_SEC;
1250 return NTB_TOPO_NONE;
1253 static inline int xeon_ppd_bar4_split(struct intel_ntb_dev *ndev, u8 ppd)
1255 if (ppd & XEON_PPD_SPLIT_BAR_MASK) {
1256 dev_dbg(&ndev->ntb.pdev->dev, "PPD %d split bar\n", ppd);
1262 static int xeon_init_isr(struct intel_ntb_dev *ndev)
1264 return ndev_init_isr(ndev, XEON_DB_MSIX_VECTOR_COUNT,
1265 XEON_DB_MSIX_VECTOR_COUNT,
1266 XEON_DB_MSIX_VECTOR_SHIFT,
1267 XEON_DB_TOTAL_SHIFT);
/* Tear down the Xeon ISRs. */
static void xeon_deinit_isr(struct intel_ntb_dev *ndev)
{
	ndev_deinit_isr(ndev);
}
/*
 * xeon_setup_b2b_mw() - configure the back-to-back (b2b) memory window
 * @ndev:      device context
 * @addr:      local-side BAR addresses programmed into the secondary BARs
 * @peer_addr: peer-side BAR addresses used as outgoing translation targets
 *
 * NOTE(review): this extract elides several original lines (local
 * declarations such as b2b_bar/bar_sz/mmio, braces, else-arms and error
 * returns). Code lines below are kept byte-identical, including the
 * stray leading line-number artifacts; comments describe only what is
 * visible here.
 */
1275 static int xeon_setup_b2b_mw(struct intel_ntb_dev *ndev,
1276 const struct intel_b2b_addr *addr,
1277 const struct intel_b2b_addr *peer_addr)
1279 struct pci_dev *pdev;
1281 resource_size_t bar_size;
1282 phys_addr_t bar_addr;
1286 pdev = ndev->ntb.pdev;
1287 mmio = ndev->self_mmio;
/* b2b_idx == UINT_MAX is the sentinel for "no b2b memory window". */
1289 if (ndev->b2b_idx == UINT_MAX) {
1290 dev_dbg(&pdev->dev, "not using b2b mw\n");
1294 b2b_bar = ndev_mw_to_bar(ndev, ndev->b2b_idx);
1298 dev_dbg(&pdev->dev, "using b2b mw bar %d\n", b2b_bar);
1300 bar_size = pci_resource_len(ndev->ntb.pdev, b2b_bar);
1302 dev_dbg(&pdev->dev, "b2b bar size %#llx\n", bar_size);
/* Prefer sharing: use only the first half of the bar for b2b when the
 * module allows it and the half still meets the minimum size; otherwise
 * dedicate the whole bar (the full-bar path logs, and the last visible
 * branch logs the too-small failure case).
 */
1304 if (b2b_mw_share && XEON_B2B_MIN_SIZE <= bar_size >> 1) {
1305 dev_dbg(&pdev->dev, "b2b using first half of bar\n");
1306 ndev->b2b_off = bar_size >> 1;
1307 } else if (XEON_B2B_MIN_SIZE <= bar_size) {
1308 dev_dbg(&pdev->dev, "b2b using whole bar\n");
1312 dev_dbg(&pdev->dev, "b2b bar size is too small\n");
1317 /* Reset the secondary bar sizes to match the primary bar sizes,
1318 * except disable or halve the size of the b2b secondary bar.
1320 * Note: code for each specific bar size register, because the register
1321 * offsets are not in a consistent order (bar5sz comes after ppd, odd).
 */
/* BAR2/3: mirror primary size into the secondary size register and read
 * it back for the debug log (adjustment of bar_sz for the b2b bar is in
 * elided lines between the read and the write).
 */
1323 pci_read_config_byte(pdev, XEON_PBAR23SZ_OFFSET, &bar_sz);
1324 dev_dbg(&pdev->dev, "PBAR23SZ %#x\n", bar_sz);
1331 pci_write_config_byte(pdev, XEON_SBAR23SZ_OFFSET, bar_sz);
1332 pci_read_config_byte(pdev, XEON_SBAR23SZ_OFFSET, &bar_sz);
1333 dev_dbg(&pdev->dev, "SBAR23SZ %#x\n", bar_sz);
/* Non-split: one 64-bit BAR4/5 size register; split: separate 32-bit
 * BAR4 and BAR5 size registers.
 */
1335 if (!ndev->bar4_split) {
1336 pci_read_config_byte(pdev, XEON_PBAR45SZ_OFFSET, &bar_sz);
1337 dev_dbg(&pdev->dev, "PBAR45SZ %#x\n", bar_sz);
1344 pci_write_config_byte(pdev, XEON_SBAR45SZ_OFFSET, bar_sz);
1345 pci_read_config_byte(pdev, XEON_SBAR45SZ_OFFSET, &bar_sz);
1346 dev_dbg(&pdev->dev, "SBAR45SZ %#x\n", bar_sz);
1348 pci_read_config_byte(pdev, XEON_PBAR4SZ_OFFSET, &bar_sz);
1349 dev_dbg(&pdev->dev, "PBAR4SZ %#x\n", bar_sz);
1356 pci_write_config_byte(pdev, XEON_SBAR4SZ_OFFSET, bar_sz);
1357 pci_read_config_byte(pdev, XEON_SBAR4SZ_OFFSET, &bar_sz);
1358 dev_dbg(&pdev->dev, "SBAR4SZ %#x\n", bar_sz);
1360 pci_read_config_byte(pdev, XEON_PBAR5SZ_OFFSET, &bar_sz);
1361 dev_dbg(&pdev->dev, "PBAR5SZ %#x\n", bar_sz);
1368 pci_write_config_byte(pdev, XEON_SBAR5SZ_OFFSET, bar_sz);
1369 pci_read_config_byte(pdev, XEON_SBAR5SZ_OFFSET, &bar_sz);
1370 dev_dbg(&pdev->dev, "SBAR5SZ %#x\n", bar_sz);
1373 /* SBAR01 hit by first part of the b2b bar */
/* Select which local address backs the b2b register window, depending
 * on which BAR (0/2/4/5, with/without split) hosts it.
 */
1375 bar_addr = addr->bar0_addr;
1376 else if (b2b_bar == 2)
1377 bar_addr = addr->bar2_addr64;
1378 else if (b2b_bar == 4 && !ndev->bar4_split)
1379 bar_addr = addr->bar4_addr64;
1380 else if (b2b_bar == 4)
1381 bar_addr = addr->bar4_addr32;
1382 else if (b2b_bar == 5)
1383 bar_addr = addr->bar5_addr32;
1387 dev_dbg(&pdev->dev, "SBAR01 %#018llx\n", bar_addr);
1388 iowrite64(bar_addr, mmio + XEON_SBAR0BASE_OFFSET);
1390 /* Other SBAR are normally hit by the PBAR xlat, except for b2b bar.
1391 * The b2b bar is either disabled above, or configured half-size, and
1392 * it starts at the PBAR xlat + offset.
 */
/* Program secondary BAR base addresses; each write is read back purely
 * for the debug print.
 */
1395 bar_addr = addr->bar2_addr64 + (b2b_bar == 2 ? ndev->b2b_off : 0);
1396 iowrite64(bar_addr, mmio + XEON_SBAR23BASE_OFFSET);
1397 bar_addr = ioread64(mmio + XEON_SBAR23BASE_OFFSET);
1398 dev_dbg(&pdev->dev, "SBAR23 %#018llx\n", bar_addr);
1400 if (!ndev->bar4_split) {
1401 bar_addr = addr->bar4_addr64 +
1402 (b2b_bar == 4 ? ndev->b2b_off : 0);
1403 iowrite64(bar_addr, mmio + XEON_SBAR45BASE_OFFSET);
1404 bar_addr = ioread64(mmio + XEON_SBAR45BASE_OFFSET);
1405 dev_dbg(&pdev->dev, "SBAR45 %#018llx\n", bar_addr);
1407 bar_addr = addr->bar4_addr32 +
1408 (b2b_bar == 4 ? ndev->b2b_off : 0);
1409 iowrite32(bar_addr, mmio + XEON_SBAR4BASE_OFFSET);
1410 bar_addr = ioread32(mmio + XEON_SBAR4BASE_OFFSET);
1411 dev_dbg(&pdev->dev, "SBAR4 %#010llx\n", bar_addr);
1413 bar_addr = addr->bar5_addr32 +
1414 (b2b_bar == 5 ? ndev->b2b_off : 0);
1415 iowrite32(bar_addr, mmio + XEON_SBAR5BASE_OFFSET);
1416 bar_addr = ioread32(mmio + XEON_SBAR5BASE_OFFSET);
1417 dev_dbg(&pdev->dev, "SBAR5 %#010llx\n", bar_addr);
1420 /* setup incoming bar limits == base addrs (zero length windows) */
1422 bar_addr = addr->bar2_addr64 + (b2b_bar == 2 ? ndev->b2b_off : 0);
1423 iowrite64(bar_addr, mmio + XEON_SBAR23LMT_OFFSET);
1424 bar_addr = ioread64(mmio + XEON_SBAR23LMT_OFFSET);
1425 dev_dbg(&pdev->dev, "SBAR23LMT %#018llx\n", bar_addr);
1427 if (!ndev->bar4_split) {
1428 bar_addr = addr->bar4_addr64 +
1429 (b2b_bar == 4 ? ndev->b2b_off : 0);
1430 iowrite64(bar_addr, mmio + XEON_SBAR45LMT_OFFSET);
1431 bar_addr = ioread64(mmio + XEON_SBAR45LMT_OFFSET);
1432 dev_dbg(&pdev->dev, "SBAR45LMT %#018llx\n", bar_addr);
1434 bar_addr = addr->bar4_addr32 +
1435 (b2b_bar == 4 ? ndev->b2b_off : 0);
1436 iowrite32(bar_addr, mmio + XEON_SBAR4LMT_OFFSET);
1437 bar_addr = ioread32(mmio + XEON_SBAR4LMT_OFFSET);
1438 dev_dbg(&pdev->dev, "SBAR4LMT %#010llx\n", bar_addr);
1440 bar_addr = addr->bar5_addr32 +
1441 (b2b_bar == 5 ? ndev->b2b_off : 0);
1442 iowrite32(bar_addr, mmio + XEON_SBAR5LMT_OFFSET);
1443 bar_addr = ioread32(mmio + XEON_SBAR5LMT_OFFSET);
1444 dev_dbg(&pdev->dev, "SBAR5LMT %#05llx\n", bar_addr);
1447 /* zero incoming translation addrs */
1448 iowrite64(0, mmio + XEON_SBAR23XLAT_OFFSET);
1450 if (!ndev->bar4_split) {
1451 iowrite64(0, mmio + XEON_SBAR45XLAT_OFFSET);
1453 iowrite32(0, mmio + XEON_SBAR4XLAT_OFFSET);
1454 iowrite32(0, mmio + XEON_SBAR5XLAT_OFFSET);
1457 /* zero outgoing translation limits (whole bar size windows) */
1458 iowrite64(0, mmio + XEON_PBAR23LMT_OFFSET);
1459 if (!ndev->bar4_split) {
1460 iowrite64(0, mmio + XEON_PBAR45LMT_OFFSET);
1462 iowrite32(0, mmio + XEON_PBAR4LMT_OFFSET);
1463 iowrite32(0, mmio + XEON_PBAR5LMT_OFFSET);
1466 /* set outgoing translation offsets */
1467 bar_addr = peer_addr->bar2_addr64;
1468 iowrite64(bar_addr, mmio + XEON_PBAR23XLAT_OFFSET);
1469 bar_addr = ioread64(mmio + XEON_PBAR23XLAT_OFFSET);
1470 dev_dbg(&pdev->dev, "PBAR23XLAT %#018llx\n", bar_addr);
1472 if (!ndev->bar4_split) {
1473 bar_addr = peer_addr->bar4_addr64;
1474 iowrite64(bar_addr, mmio + XEON_PBAR45XLAT_OFFSET);
1475 bar_addr = ioread64(mmio + XEON_PBAR45XLAT_OFFSET);
1476 dev_dbg(&pdev->dev, "PBAR45XLAT %#018llx\n", bar_addr);
1478 bar_addr = peer_addr->bar4_addr32;
1479 iowrite32(bar_addr, mmio + XEON_PBAR4XLAT_OFFSET);
1480 bar_addr = ioread32(mmio + XEON_PBAR4XLAT_OFFSET);
1481 dev_dbg(&pdev->dev, "PBAR4XLAT %#010llx\n", bar_addr);
1483 bar_addr = peer_addr->bar5_addr32;
1484 iowrite32(bar_addr, mmio + XEON_PBAR5XLAT_OFFSET);
1485 bar_addr = ioread32(mmio + XEON_PBAR5XLAT_OFFSET);
1486 dev_dbg(&pdev->dev, "PBAR5XLAT %#010llx\n", bar_addr);
1489 /* set the translation offset for b2b registers */
/* Same BAR-selection ladder as above, but against the peer addresses. */
1491 bar_addr = peer_addr->bar0_addr;
1492 else if (b2b_bar == 2)
1493 bar_addr = peer_addr->bar2_addr64;
1494 else if (b2b_bar == 4 && !ndev->bar4_split)
1495 bar_addr = peer_addr->bar4_addr64;
1496 else if (b2b_bar == 4)
1497 bar_addr = peer_addr->bar4_addr32;
1498 else if (b2b_bar == 5)
1499 bar_addr = peer_addr->bar5_addr32;
1503 /* B2B_XLAT_OFFSET is 64bit, but can only take 32bit writes */
1504 dev_dbg(&pdev->dev, "B2BXLAT %#018llx\n", bar_addr);
1505 iowrite32(bar_addr, mmio + XEON_B2B_XLAT_OFFSETL);
1506 iowrite32(bar_addr >> 32, mmio + XEON_B2B_XLAT_OFFSETU);
1509 /* map peer ntb mmio config space registers */
1510 ndev->peer_mmio = pci_iomap(pdev, b2b_bar,
1512 if (!ndev->peer_mmio)
1515 ndev->peer_addr = pci_resource_start(pdev, b2b_bar);
/*
 * xeon_init_ntb() - topology-dependent NTB setup for gen1 hardware
 * @ndev: device context
 *
 * Chooses the register sets (self/peer/xlat), scratchpad split, b2b
 * window index, and doorbell geometry based on ndev->ntb.topo, then
 * enables the secondary side and unmasks nothing (doorbells start
 * masked).
 *
 * NOTE(review): this extract elides lines (case labels, break/return
 * statements, braces). Code lines are kept byte-identical, including
 * the stray leading line-number artifacts.
 */
1521 static int xeon_init_ntb(struct intel_ntb_dev *ndev)
1523 struct device *dev = &ndev->ntb.pdev->dev;
/* Split-BAR mode exposes one extra, smaller memory window. */
1527 if (ndev->bar4_split)
1528 ndev->mw_count = HSX_SPLIT_BAR_MW_COUNT;
1530 ndev->mw_count = XEON_MW_COUNT;
1532 ndev->spad_count = XEON_SPAD_COUNT;
1533 ndev->db_count = XEON_DB_COUNT;
1534 ndev->db_link_mask = XEON_DB_LINK_BIT;
1536 switch (ndev->ntb.topo) {
/* Primary topology: refuse when the SDOORBELL errata workaround is
 * required, since it needs the b2b register window.
 */
1538 if (ndev->hwerr_flags & NTB_HWERR_SDOORBELL_LOCKUP) {
1539 dev_err(dev, "NTB Primary config disabled\n");
1543 /* enable link to allow secondary side device to appear */
1544 ntb_ctl = ioread32(ndev->self_mmio + ndev->reg->ntb_ctl);
1545 ntb_ctl &= ~NTB_CTL_DISABLE;
1546 iowrite32(ntb_ctl, ndev->self_mmio + ndev->reg->ntb_ctl);
1548 /* use half the spads for the peer */
1549 ndev->spad_count >>= 1;
1550 ndev->self_reg = &xeon_pri_reg;
1551 ndev->peer_reg = &xeon_sec_reg;
1552 ndev->xlat_reg = &xeon_sec_xlat;
/* Secondary topology: mirror image of the primary register choice. */
1556 if (ndev->hwerr_flags & NTB_HWERR_SDOORBELL_LOCKUP) {
1557 dev_err(dev, "NTB Secondary config disabled\n");
1560 /* use half the spads for the peer */
1561 ndev->spad_count >>= 1;
1562 ndev->self_reg = &xeon_sec_reg;
1563 ndev->peer_reg = &xeon_pri_reg;
1564 ndev->xlat_reg = &xeon_pri_xlat;
1567 case NTB_TOPO_B2B_USD:
1568 case NTB_TOPO_B2B_DSD:
1569 ndev->self_reg = &xeon_pri_reg;
1570 ndev->peer_reg = &xeon_b2b_reg;
1571 ndev->xlat_reg = &xeon_sec_xlat;
/* SDOORBELL errata: route peer access through a memory window
 * instead of the sec doorbell/spad registers; b2b_mw_idx may be
 * negative (counted from the top), hence the two branches.
 */
1573 if (ndev->hwerr_flags & NTB_HWERR_SDOORBELL_LOCKUP) {
1574 ndev->peer_reg = &xeon_pri_reg;
1577 ndev->b2b_idx = b2b_mw_idx + ndev->mw_count;
1579 ndev->b2b_idx = b2b_mw_idx;
1581 if (ndev->b2b_idx >= ndev->mw_count) {
1583 "b2b_mw_idx %d invalid for mw_count %u\n",
1584 b2b_mw_idx, ndev->mw_count);
1588 dev_dbg(dev, "setting up b2b mw idx %d means %d\n",
1589 b2b_mw_idx, ndev->b2b_idx);
/* Bit-14 errata: the last doorbell bit is not mirrored to the
 * remote side, so shrink the doorbell count instead.
 */
1591 } else if (ndev->hwerr_flags & NTB_HWERR_B2BDOORBELL_BIT14) {
1592 dev_warn(dev, "Reduce doorbell count by 1\n");
1593 ndev->db_count -= 1;
1596 if (ndev->ntb.topo == NTB_TOPO_B2B_USD) {
1597 rc = xeon_setup_b2b_mw(ndev,
1599 &xeon_b2b_usd_addr);
1601 rc = xeon_setup_b2b_mw(ndev,
1603 &xeon_b2b_dsd_addr);
1608 /* Enable Bus Master and Memory Space on the secondary side */
1609 iowrite16(PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER,
1610 ndev->self_mmio + XEON_SPCICMD_OFFSET);
1618 ndev->db_valid_mask = BIT_ULL(ndev->db_count) - 1;
/* Start with every doorbell masked; users unmask what they need. */
1620 ndev->reg->db_iowrite(ndev->db_valid_mask,
1622 ndev->self_reg->db_mask);
/*
 * xeon_init_dev() - gen1 device bring-up: errata flags, topology, ISR
 * @ndev: device context
 *
 * Sets per-model hardware errata flags from the PCI device ID, decodes
 * the topology and split-BAR configuration from the PPD register, then
 * performs the topology setup and interrupt init.
 *
 * NOTE(review): this extract elides lines (declarations of rc/ppd/mem,
 * break statements, error returns, else keywords). Code lines are kept
 * byte-identical, including the stray leading line-number artifacts.
 */
1627 static int xeon_init_dev(struct intel_ntb_dev *ndev)
1629 struct pci_dev *pdev;
1633 pdev = ndev->ntb.pdev;
1635 switch (pdev->device) {
1636 /* There is a Xeon hardware errata related to writes to SDOORBELL or
1637 * B2BDOORBELL in conjunction with inbound access to NTB MMIO Space,
1638 * which may hang the system. To workaround this use the second memory
1639 * window to access the interrupt and scratch pad registers on the
 */
1642 case PCI_DEVICE_ID_INTEL_NTB_SS_JSF:
1643 case PCI_DEVICE_ID_INTEL_NTB_PS_JSF:
1644 case PCI_DEVICE_ID_INTEL_NTB_B2B_JSF:
1645 case PCI_DEVICE_ID_INTEL_NTB_SS_SNB:
1646 case PCI_DEVICE_ID_INTEL_NTB_PS_SNB:
1647 case PCI_DEVICE_ID_INTEL_NTB_B2B_SNB:
1648 case PCI_DEVICE_ID_INTEL_NTB_SS_IVT:
1649 case PCI_DEVICE_ID_INTEL_NTB_PS_IVT:
1650 case PCI_DEVICE_ID_INTEL_NTB_B2B_IVT:
1651 case PCI_DEVICE_ID_INTEL_NTB_SS_HSX:
1652 case PCI_DEVICE_ID_INTEL_NTB_PS_HSX:
1653 case PCI_DEVICE_ID_INTEL_NTB_B2B_HSX:
1654 case PCI_DEVICE_ID_INTEL_NTB_SS_BDX:
1655 case PCI_DEVICE_ID_INTEL_NTB_PS_BDX:
1656 case PCI_DEVICE_ID_INTEL_NTB_B2B_BDX:
1657 ndev->hwerr_flags |= NTB_HWERR_SDOORBELL_LOCKUP;
1661 switch (pdev->device) {
1662 /* There is a hardware errata related to accessing any register in
1663 * SB01BASE in the presence of bidirectional traffic crossing the NTB.
 */
1665 case PCI_DEVICE_ID_INTEL_NTB_SS_IVT:
1666 case PCI_DEVICE_ID_INTEL_NTB_PS_IVT:
1667 case PCI_DEVICE_ID_INTEL_NTB_B2B_IVT:
1668 case PCI_DEVICE_ID_INTEL_NTB_SS_HSX:
1669 case PCI_DEVICE_ID_INTEL_NTB_PS_HSX:
1670 case PCI_DEVICE_ID_INTEL_NTB_B2B_HSX:
1671 case PCI_DEVICE_ID_INTEL_NTB_SS_BDX:
1672 case PCI_DEVICE_ID_INTEL_NTB_PS_BDX:
1673 case PCI_DEVICE_ID_INTEL_NTB_B2B_BDX:
1674 ndev->hwerr_flags |= NTB_HWERR_SB01BASE_LOCKUP;
1678 switch (pdev->device) {
1679 /* HW Errata on bit 14 of b2bdoorbell register. Writes will not be
1680 * mirrored to the remote system. Shrink the number of bits by one,
1681 * since bit 14 is the last bit.
 */
1683 case PCI_DEVICE_ID_INTEL_NTB_SS_JSF:
1684 case PCI_DEVICE_ID_INTEL_NTB_PS_JSF:
1685 case PCI_DEVICE_ID_INTEL_NTB_B2B_JSF:
1686 case PCI_DEVICE_ID_INTEL_NTB_SS_SNB:
1687 case PCI_DEVICE_ID_INTEL_NTB_PS_SNB:
1688 case PCI_DEVICE_ID_INTEL_NTB_B2B_SNB:
1689 case PCI_DEVICE_ID_INTEL_NTB_SS_IVT:
1690 case PCI_DEVICE_ID_INTEL_NTB_PS_IVT:
1691 case PCI_DEVICE_ID_INTEL_NTB_B2B_IVT:
1692 case PCI_DEVICE_ID_INTEL_NTB_SS_HSX:
1693 case PCI_DEVICE_ID_INTEL_NTB_PS_HSX:
1694 case PCI_DEVICE_ID_INTEL_NTB_B2B_HSX:
1695 case PCI_DEVICE_ID_INTEL_NTB_SS_BDX:
1696 case PCI_DEVICE_ID_INTEL_NTB_PS_BDX:
1697 case PCI_DEVICE_ID_INTEL_NTB_B2B_BDX:
1698 ndev->hwerr_flags |= NTB_HWERR_B2BDOORBELL_BIT14;
1702 ndev->reg = &xeon_reg;
1704 rc = pci_read_config_byte(pdev, XEON_PPD_OFFSET, &ppd);
1708 ndev->ntb.topo = xeon_ppd_topo(ndev, ppd);
1709 dev_dbg(&pdev->dev, "ppd %#x topo %s\n", ppd,
1710 ntb_topo_string(ndev->ntb.topo));
1711 if (ndev->ntb.topo == NTB_TOPO_NONE)
/* The SEC side cannot read its own PPD split-bar bit, so it must
 * infer split-BAR mode from the number of visible memory BARs.
 */
1714 if (ndev->ntb.topo != NTB_TOPO_SEC) {
1715 ndev->bar4_split = xeon_ppd_bar4_split(ndev, ppd);
1716 dev_dbg(&pdev->dev, "ppd %#x bar4_split %d\n",
1717 ppd, ndev->bar4_split);
1719 /* This is a way for transparent BAR to figure out if we are
1720 * doing split BAR or not. There is no way for the hw on the
1721 * transparent side to know and set the PPD.
 */
1723 mem = pci_select_bars(pdev, IORESOURCE_MEM);
1724 ndev->bar4_split = hweight32(mem) ==
1725 HSX_SPLIT_BAR_MW_COUNT + 1;
1726 dev_dbg(&pdev->dev, "mem %#x bar4_split %d\n",
1727 mem, ndev->bar4_split);
1730 rc = xeon_init_ntb(ndev);
1734 return xeon_init_isr(ndev);
/* Undo xeon_init_dev(): currently only the ISR needs tearing down. */
static void xeon_deinit_dev(struct intel_ntb_dev *ndev)
{
	xeon_deinit_isr(ndev);
}
/*
 * intel_ntb_init_pci() - PCI-level bring-up common to gen1 and gen3
 * @ndev: device context
 * @pdev: the PCI function to enable and map
 *
 * Enables the device, claims its regions, configures DMA masks (falling
 * back from 64-bit to 32-bit), and maps BAR0 as the self MMIO window.
 *
 * NOTE(review): this extract elides lines (the rc declaration, the
 * "if (rc)" guards before each goto, error labels such as
 * err_pci_enable/err_pci_regions, and the return statements). Code
 * lines are kept byte-identical, including the stray leading
 * line-number artifacts.
 */
1742 static int intel_ntb_init_pci(struct intel_ntb_dev *ndev, struct pci_dev *pdev)
1746 pci_set_drvdata(pdev, ndev);
1748 rc = pci_enable_device(pdev);
1750 goto err_pci_enable;
1752 rc = pci_request_regions(pdev, NTB_NAME);
1754 goto err_pci_regions;
1756 pci_set_master(pdev);
/* Streaming DMA mask: prefer 64-bit, fall back to 32-bit with a
 * warning when highmem cannot be addressed.
 */
1758 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
1760 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
1763 dev_warn(&pdev->dev, "Cannot DMA highmem\n");
/* Coherent DMA mask: same 64-then-32 fallback. */
1766 rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
1768 rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
1771 dev_warn(&pdev->dev, "Cannot DMA consistent highmem\n");
/* Propagate the resolved mask to the NTB child device. */
1773 rc = dma_coerce_mask_and_coherent(&ndev->ntb.dev,
1774 dma_get_mask(&pdev->dev));
/* Map all of BAR0 (len 0 = whole bar) as the self register window. */
1778 ndev->self_mmio = pci_iomap(pdev, 0, 0);
1779 if (!ndev->self_mmio) {
/* Default peer mapping aliases self; topologies that need a real
 * peer window override this later (see xeon_setup_b2b_mw()).
 */
1783 ndev->peer_mmio = ndev->self_mmio;
1784 ndev->peer_addr = pci_resource_start(pdev, 0);
/* Error unwind ladder (labels elided in this extract). */
1790 pci_clear_master(pdev);
1791 pci_release_regions(pdev);
1793 pci_disable_device(pdev);
1795 pci_set_drvdata(pdev, NULL);
1799 static void intel_ntb_deinit_pci(struct intel_ntb_dev *ndev)
1801 struct pci_dev *pdev = ndev->ntb.pdev;
1803 if (ndev->peer_mmio && ndev->peer_mmio != ndev->self_mmio)
1804 pci_iounmap(pdev, ndev->peer_mmio);
1805 pci_iounmap(pdev, ndev->self_mmio);
1807 pci_clear_master(pdev);
1808 pci_release_regions(pdev);
1809 pci_disable_device(pdev);
1810 pci_set_drvdata(pdev, NULL);
1813 static inline void ndev_init_struct(struct intel_ntb_dev *ndev,
1814 struct pci_dev *pdev)
1816 ndev->ntb.pdev = pdev;
1817 ndev->ntb.topo = NTB_TOPO_NONE;
1818 ndev->ntb.ops = &intel_ntb_ops;
1821 ndev->b2b_idx = UINT_MAX;
1823 ndev->bar4_split = 0;
1826 ndev->spad_count = 0;
1828 ndev->db_vec_count = 0;
1829 ndev->db_vec_shift = 0;
1834 ndev->db_valid_mask = 0;
1835 ndev->db_link_mask = 0;
1838 spin_lock_init(&ndev->db_mask_lock);
/*
 * intel_ntb_pci_probe() - PCI probe entry for gen1 (Xeon) and gen3 parts
 * @pdev: PCI device being probed
 * @id:   matching table entry (unused beyond the match itself)
 *
 * Allocates the context NUMA-local to the device, performs the common
 * PCI init and the generation-specific device init, then registers the
 * NTB device and its debugfs entries.
 *
 * NOTE(review): this extract elides lines (rc/node declarations, the
 * -ENOMEM/-EINVAL branches, "if (rc) goto ..." guards, error labels,
 * return statements, kfree on the error path). Code lines are kept
 * byte-identical, including the stray leading line-number artifacts.
 */
1841 static int intel_ntb_pci_probe(struct pci_dev *pdev,
1842 const struct pci_device_id *id)
1844 struct intel_ntb_dev *ndev;
/* Allocate on the device's NUMA node for MMIO-adjacent data. */
1847 node = dev_to_node(&pdev->dev);
1849 if (pdev_is_gen1(pdev)) {
1850 ndev = kzalloc_node(sizeof(*ndev), GFP_KERNEL, node);
1856 ndev_init_struct(ndev, pdev);
1858 rc = intel_ntb_init_pci(ndev, pdev);
1862 rc = xeon_init_dev(ndev);
1866 } else if (pdev_is_gen3(pdev)) {
1867 ndev = kzalloc_node(sizeof(*ndev), GFP_KERNEL, node);
1873 ndev_init_struct(ndev, pdev);
/* gen3 swaps in its own ops table before PCI init. */
1874 ndev->ntb.ops = &intel_ntb3_ops;
1876 rc = intel_ntb_init_pci(ndev, pdev);
1880 rc = gen3_init_dev(ndev);
1889 ndev_reset_unsafe_flags(ndev);
/* Prime the cached link state before exposing the device. */
1891 ndev->reg->poll_link(ndev);
1893 ndev_init_debugfs(ndev);
1895 rc = ntb_register_device(&ndev->ntb);
1899 dev_info(&pdev->dev, "NTB device registered.\n");
/* Error unwind (labels elided): debugfs, device, PCI, allocation. */
1904 ndev_deinit_debugfs(ndev);
1905 if (pdev_is_gen1(pdev) || pdev_is_gen3(pdev))
1906 xeon_deinit_dev(ndev);
1908 intel_ntb_deinit_pci(ndev);
1915 static void intel_ntb_pci_remove(struct pci_dev *pdev)
1917 struct intel_ntb_dev *ndev = pci_get_drvdata(pdev);
1919 ntb_unregister_device(&ndev->ntb);
1920 ndev_deinit_debugfs(ndev);
1921 if (pdev_is_gen1(pdev) || pdev_is_gen3(pdev))
1922 xeon_deinit_dev(ndev);
1923 intel_ntb_deinit_pci(ndev);
1927 static const struct intel_ntb_reg xeon_reg = {
1928 .poll_link = xeon_poll_link,
1929 .link_is_up = xeon_link_is_up,
1930 .db_ioread = xeon_db_ioread,
1931 .db_iowrite = xeon_db_iowrite,
1932 .db_size = sizeof(u32),
1933 .ntb_ctl = XEON_NTBCNTL_OFFSET,
1934 .mw_bar = {2, 4, 5},
1937 static const struct intel_ntb_alt_reg xeon_pri_reg = {
1938 .db_bell = XEON_PDOORBELL_OFFSET,
1939 .db_mask = XEON_PDBMSK_OFFSET,
1940 .spad = XEON_SPAD_OFFSET,
1943 static const struct intel_ntb_alt_reg xeon_sec_reg = {
1944 .db_bell = XEON_SDOORBELL_OFFSET,
1945 .db_mask = XEON_SDBMSK_OFFSET,
1946 /* second half of the scratchpads */
1947 .spad = XEON_SPAD_OFFSET + (XEON_SPAD_COUNT << 1),
1950 static const struct intel_ntb_alt_reg xeon_b2b_reg = {
1951 .db_bell = XEON_B2B_DOORBELL_OFFSET,
1952 .spad = XEON_B2B_SPAD_OFFSET,
1955 static const struct intel_ntb_xlat_reg xeon_pri_xlat = {
1956 /* Note: no primary .bar0_base visible to the secondary side.
1958 * The secondary side cannot get the base address stored in primary
1959 * bars. The base address is necessary to set the limit register to
1960 * any value other than zero, or unlimited.
1962 * WITHOUT THE BASE ADDRESS, THE SECONDARY SIDE CANNOT DISABLE the
1963 * window by setting the limit equal to base, nor can it limit the size
1964 * of the memory window by setting the limit to base + size.
1966 .bar2_limit = XEON_PBAR23LMT_OFFSET,
1967 .bar2_xlat = XEON_PBAR23XLAT_OFFSET,
1970 static const struct intel_ntb_xlat_reg xeon_sec_xlat = {
1971 .bar0_base = XEON_SBAR0BASE_OFFSET,
1972 .bar2_limit = XEON_SBAR23LMT_OFFSET,
1973 .bar2_xlat = XEON_SBAR23XLAT_OFFSET,
1976 struct intel_b2b_addr xeon_b2b_usd_addr = {
1977 .bar2_addr64 = XEON_B2B_BAR2_ADDR64,
1978 .bar4_addr64 = XEON_B2B_BAR4_ADDR64,
1979 .bar4_addr32 = XEON_B2B_BAR4_ADDR32,
1980 .bar5_addr32 = XEON_B2B_BAR5_ADDR32,
1983 struct intel_b2b_addr xeon_b2b_dsd_addr = {
1984 .bar2_addr64 = XEON_B2B_BAR2_ADDR64,
1985 .bar4_addr64 = XEON_B2B_BAR4_ADDR64,
1986 .bar4_addr32 = XEON_B2B_BAR4_ADDR32,
1987 .bar5_addr32 = XEON_B2B_BAR5_ADDR32,
1990 /* operations for primary side of local ntb */
1991 static const struct ntb_dev_ops intel_ntb_ops = {
1992 .mw_count = intel_ntb_mw_count,
1993 .mw_get_align = intel_ntb_mw_get_align,
1994 .mw_set_trans = intel_ntb_mw_set_trans,
1995 .peer_mw_count = intel_ntb_peer_mw_count,
1996 .peer_mw_get_addr = intel_ntb_peer_mw_get_addr,
1997 .link_is_up = intel_ntb_link_is_up,
1998 .link_enable = intel_ntb_link_enable,
1999 .link_disable = intel_ntb_link_disable,
2000 .db_is_unsafe = intel_ntb_db_is_unsafe,
2001 .db_valid_mask = intel_ntb_db_valid_mask,
2002 .db_vector_count = intel_ntb_db_vector_count,
2003 .db_vector_mask = intel_ntb_db_vector_mask,
2004 .db_read = intel_ntb_db_read,
2005 .db_clear = intel_ntb_db_clear,
2006 .db_set_mask = intel_ntb_db_set_mask,
2007 .db_clear_mask = intel_ntb_db_clear_mask,
2008 .peer_db_addr = intel_ntb_peer_db_addr,
2009 .peer_db_set = intel_ntb_peer_db_set,
2010 .spad_is_unsafe = intel_ntb_spad_is_unsafe,
2011 .spad_count = intel_ntb_spad_count,
2012 .spad_read = intel_ntb_spad_read,
2013 .spad_write = intel_ntb_spad_write,
2014 .peer_spad_addr = intel_ntb_peer_spad_addr,
2015 .peer_spad_read = intel_ntb_peer_spad_read,
2016 .peer_spad_write = intel_ntb_peer_spad_write,
2019 static const struct file_operations intel_ntb_debugfs_info = {
2020 .owner = THIS_MODULE,
2021 .open = simple_open,
2022 .read = ndev_debugfs_read,
2025 static const struct pci_device_id intel_ntb_pci_tbl[] = {
2026 {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_JSF)},
2027 {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_SNB)},
2028 {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_IVT)},
2029 {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_HSX)},
2030 {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_BDX)},
2031 {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_PS_JSF)},
2032 {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_PS_SNB)},
2033 {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_PS_IVT)},
2034 {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_PS_HSX)},
2035 {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_PS_BDX)},
2036 {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_SS_JSF)},
2037 {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_SS_SNB)},
2038 {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_SS_IVT)},
2039 {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_SS_HSX)},
2040 {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_SS_BDX)},
2041 {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_SKX)},
2044 MODULE_DEVICE_TABLE(pci, intel_ntb_pci_tbl);
2046 static struct pci_driver intel_ntb_pci_driver = {
2047 .name = KBUILD_MODNAME,
2048 .id_table = intel_ntb_pci_tbl,
2049 .probe = intel_ntb_pci_probe,
2050 .remove = intel_ntb_pci_remove,
2053 static int __init intel_ntb_pci_driver_init(void)
2055 pr_info("%s %s\n", NTB_DESC, NTB_VER);
2057 if (debugfs_initialized())
2058 debugfs_dir = debugfs_create_dir(KBUILD_MODNAME, NULL);
2060 return pci_register_driver(&intel_ntb_pci_driver);
2062 module_init(intel_ntb_pci_driver_init);
2064 static void __exit intel_ntb_pci_driver_exit(void)
2066 pci_unregister_driver(&intel_ntb_pci_driver);
2068 debugfs_remove_recursive(debugfs_dir);
2070 module_exit(intel_ntb_pci_driver_exit);